import pandas as pd
import os
os.chdir("/home/jana/Documents/PhD/CompBio/TestingGBLUP/")
blupSol = pd.read_csv('renumbered_Solutions', header=None,
sep='\s+', names=['renID', 'ID', 'Solution'])
AlphaPed = pd.read_table("PedigreeAndGeneticValues_cat.txt", sep=" ")
AlphaSelPed = AlphaPed.loc[:, ['Generation', 'Indiv', 'Father', 'Mother','cat', 'gvNormUnres1']]
AlphaSelPed.loc[:, 'EBV'] = blupSol.Solution
AlphaSelPed = AlphaSelPed.loc[AlphaSelPed.Generation.isin([40])]
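# The correlation below compares the estimated breeding values (EBV) with the true
# genetic values (gvNormUnres1) for generation 40, i.e. a measure of prediction accuracy.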
import numpy
print(numpy.corrcoef(AlphaSelPed.EBV, AlphaSelPed.gvNormUnres1))
AlphaSelPed.to_csv('GenPed_EBV.txt', index=None)
|
# -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
A collection of methods to operate on all the different target packages.
"""
# standard library
import importlib
def create_targets(data):
"""
Takes a dictionary of data and creates target objects from it. Each
key in the dictionary must be the importable name of a target package,
and its value is a list of dictionaries whose keys map to the attributes
of the target object being created.
"""
target_objects = {}
for key, value in data.items():
try:
target = importlib.import_module(key)
except ImportError as error:
raise error
else:
target_objects[key] = create_target_object(target, value)
return target_objects
def create_target_object(object_package, data_list):
"""
Creates a list of target objects given the target object package
and a list of dictionaries whose keys map to the objects' attributes.
"""
targets = []
for data in data_list:
serializer = object_package.serializers.ObjectSerializer(data)
if serializer.is_valid():
targets.append(serializer.save())
else:
return
return targets
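# Illustrative usage (a sketch; 'observables' is a hypothetical package name that
# would need to expose a serializers.ObjectSerializer as described above):
#
#     targets = create_targets({
#         'observables': [{'name': 'ip_address', 'value': '10.0.0.1'}],
#     })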
|
"""
Script to upload water quality data into influxdb.
# Data quality checks:
=====================================================
## Data quality issues handled by the ingestion script:
1. `TIME` values set to `:` are replaced by empty string.
2. rows without `DATE` are dropped.
3. cell values "NA" and "ND" changed to empty
## manual changes I have made to the dataset before uploading
1. Time value '11:545' changed to '11:45'
2. semicolons in `TIME` column replaced with colons
3. 02/29/2019 (invalid date) replaced with 02/28/2019
4. replaced 0..07 with 0.07 in column "TURB-S"
=====================================================
Note that for some dates (< 1997) in the WS file there is no time value.
Without the time value the database will have duplicate points on those days.
This will mess up the display of the data; I think only one of the values on each day will be retained
"""
import pytz
import dateutil
import logging
import numpy as np
import os
import pandas as pd
import subprocess
def handle_wq_data_ws(filepath, form_args):
"""
Pushes data from the water quality WS Excel file into InfluxDB
"""
assert ".xlsx" in filepath
INFLUXDB_SERVER = os.environ["INFLUXDB_HOSTNAME"]
DBNAME = "fwc_coral_disease" # TODO: this doesn't work?!? need to update the grafana db provissioning file
SHEET_NAME = "Data in ppm"
MEASUREMENT = "Walton_Smith"
TAGS = [  # metadata items attached to each measurement
"SURV",
"BASIN",
"SEGMENT",
"ZONE",
"STATION",
"SITE",
"LATDEC",
"LONDEC",
"DEPTH",
]
FIELDS = [
"NOX-S", "NOX-B",
"NO3_S", "NO3_B",
"NO2-S", "NO2-B",
"NH4-S", "NH4-B",
"TN-S", "TN-B",
"DIN-S", "DIN-B",
"TON-S", "TON-B",
"TP-S", "TP-B",
"SRP-S", "SRP-B",
"APA-S", "APA-B",
"CHLA-S", "CHLA-B",
"TOC-S", "TOC-B",
"SiO2-S", "SiO2-B",
"TURB-S", "TURB-B",
"SAL-S", "SAL-B",
"TEMP-S", "TEMP-B",
"DO-S", "DO-B",
"Kd",
"pH",
"TN:TP",
"N:P",
"DIN:TP",
"Si:DIN",
"%SAT-S", "%SAT_B",
"%Io",
"DSIGT"
]
DATETIME = [
"DATE",
"TIME"
]
NA_REP = -999.0 # influxdb doesn't handle NA, NaN, null
dataframe = pd.read_excel(
filepath,
sheet_name=SHEET_NAME,
header=0,
parse_dates=DATETIME,
na_filter=False
# date_parser=lambda d0, d1: dateutil.parser.parse(f"{d0} {d1}")
)
# drop rows with no date
dataframe["DATE"].replace('', np.nan, inplace=True)
dataframe.dropna(subset=["DATE"], inplace=True)
# fix the few rows where TIME is empty but still contains a colon
dataframe["TIME"].replace(':', '', inplace=True)
for field_column in FIELDS:
# change the few 'NA' values to empty cells
dataframe[field_column].replace('NA', '', inplace=True)
dataframe[field_column].replace('ND', '', inplace=True)
# set na value on all empty cells
dataframe[field_column].replace('', NA_REP, inplace=True)
# combine date and time rows
dataframe['date_time'] = dataframe.apply(
lambda row: str(row["DATE"]).replace(
"00:00:00",
str(row["TIME"])
) if len(str(row["TIME"])) > 0 else str(row["DATE"]),
axis=1
)
# clean up the datetime & add timezone
timezone = pytz.timezone("US/Eastern")
dataframe["date_time"] = dataframe["date_time"].apply(
lambda d: timezone.localize(
dateutil.parser.parse(d, ignoretz=True)
)
)
# get timestamp from combined date and time
dataframe["timestamp"] = dataframe["date_time"].apply(
lambda d: int(d.timestamp())
)
filepath = f"{filepath}.csv"
dataframe.to_csv(filepath, na_rep=NA_REP)
# === submit to influxdb server
logging.info("loading {}'s fields ({}) with tags={}".format(
MEASUREMENT, FIELDS, TAGS
))
# export_csv_to_influx --csv /tmp/WS_Aug_updated.xlsx.csv --dbname fwc_coral_disease --measurement Walton_Smith --field_columns 'NOX-S,NOX-B,NO3_S,NO3_B,NO2-S,NO2-B,NH4-S,NH4-B,TN-S,TN-B,DIN-S,DIN-B,TON-S,TON-B,TP-S,TP-B,SRP-S,SRP-B,APA-S,APA-B,CHLA-S,CHLA-B,TOC-S,TOC-B,SiO2-S,SiO2-B,TURB-S,TURB-B,SAL-S,SAL-B,TEMP-S,TEMP-B,DO-S,DO-B,Kd,pH,TN:TP,N:P,DIN:TP,Si:DIN,%SAT-S,%SAT_B,%Io,DSIGT' --tag_columns 'SURV,BASIN,SEGMENT,ZONE,STATION,SITE,LATDEC,LONDEC,DEPTH' --force_insert_even_csv_no_update True --server $INFLUXDB_HOSTNAME --time_column timestamp --force_float_columns 'NOX-S,NOX-B,NO3_S,NO3_B,NO2-S,NO2-B,NH4-S,NH4-B,TN-S,TN-B,DIN-S,DIN-B,TON-S,TON-B,TP-S,TP-B,SRP-S,SRP-B,APA-S,APA-B,CHLA-S,CHLA-B,TOC-S,TOC-B,SiO2-S,SiO2-B,TURB-S,TURB-B,SAL-S,SAL-B,TEMP-S,TEMP-B,DO-S,DO-B,Kd,pH,TN:TP,N:P,DIN:TP,Si:DIN,%SAT-S,%SAT_B,%Io,DSIGT'
subprocess.run([
"export_csv_to_influx",
"--csv", filepath,
"--dbname", DBNAME,
"--measurement", MEASUREMENT,
"--field_columns", ",".join(FIELDS),
"--force_float_columns", ",".join(FIELDS),
"--tag_columns", ','.join(TAGS),
"--force_insert_even_csv_no_update", "True",
"--server", INFLUXDB_SERVER,
"--time_column", "timestamp"
], check=True)
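# Example invocation (a sketch; the file path is a placeholder and INFLUXDB_HOSTNAME
# must point at a reachable InfluxDB instance):
#
#     os.environ.setdefault("INFLUXDB_HOSTNAME", "localhost")
#     handle_wq_data_ws("/tmp/WS_Aug_updated.xlsx", form_args=None)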
|
import csv
import os
import pandas
from datetime import datetime
from BlackBoxAuditing.model_factories import SVM, DecisionTree, NeuralNetwork
from BlackBoxAuditing.model_factories.SKLearnModelVisitor import SKLearnModelVisitor
from BlackBoxAuditing.loggers import vprint
from BlackBoxAuditing.GradientFeatureAuditor import GradientFeatureAuditor
from BlackBoxAuditing.audit_reading import graph_audit, graph_audits, rank_audit_files, group_audit_ranks
from BlackBoxAuditing.consistency_graph import graph_prediction_consistency
from BlackBoxAuditing.measurements import get_conf_matrix, accuracy, BCR
from BlackBoxAuditing.find_contexts import context_finder, load
from BlackBoxAuditing.data import load_data, load_from_file, load_testdf_only
class Auditor():
def __init__(self):
self.measurers = [accuracy, BCR]
self.model_options = {}
self.verbose = True
self.REPAIR_STEPS = 10
self.RETRAIN_MODEL_PER_REPAIR = False
self.WRITE_ORIGINAL_PREDICTIONS = True
self.ModelFactory = SVM
self.trained_model = None
self.kdd = False
self._audits_data = {}
def __call__(self, data, output_dir=None, dump_all=False, features_to_audit=None):
start_time = datetime.now()
headers, train_set, test_set, response_header, features_to_ignore, correct_types = data
self._audits_data = {"headers" : headers, "train" : train_set, "test" : test_set,
"response" : response_header, "ignore" : features_to_ignore,
"types" : correct_types,
"full_audit" : True if features_to_audit is None else False
}
if self.trained_model is None:
"""
ModelFactories require a `build` method that accepts some training data
with which to train a brand new model. This `build` method should output
a Model object that has a `test` method -- which, when given test data
in the same format as the training data, yields a confusion table detailing
the correct and incorrect predictions of the model.
"""
all_data = train_set + test_set
model_factory = self.ModelFactory(all_data, headers, response_header,
features_to_ignore=features_to_ignore,
options=self.model_options)
if self.trained_model is not None:
model_or_factory = self.trained_model
elif not self.RETRAIN_MODEL_PER_REPAIR:
vprint("Training initial model.",self.verbose)
model = model_factory.build(train_set)
# Check the quality of the initial model on verbose runs.
if self.verbose:
print("Calculating original model statistics on test data:")
print("\tTraining Set:")
train_pred_tuples = model.test(train_set)
train_conf_matrix = get_conf_matrix(train_pred_tuples)
print("\t\tConf-Matrix:", train_conf_matrix)
for measurer in self.measurers:
print("\t\t{}: {}".format(measurer.__name__, measurer(train_conf_matrix)))
print("\tTesting Set:")
test_pred_tuples = model.test(test_set)
test_conf_matrix = get_conf_matrix(test_pred_tuples)
print("\t\tConf-Matrix", test_conf_matrix)
for measurer in self.measurers:
print("\t\t{}: {}".format(measurer.__name__, measurer(test_conf_matrix)))
model_or_factory = model
else:
model_or_factory = model_factory
# Translate the headers into indexes for the auditor.
audit_indices_to_ignore = [headers.index(f) for f in features_to_ignore]
# Don't audit the response feature.
audit_indices_to_ignore.append(headers.index(response_header))
# Prepare the auditor.
auditor = GradientFeatureAuditor(model_or_factory, headers, train_set, test_set,
repair_steps=self.REPAIR_STEPS, kdd=self.kdd,
features_to_ignore=audit_indices_to_ignore,
features_to_audit=features_to_audit,
output_dir=output_dir,dump_all=dump_all)
# Perform the Gradient Feature Audit and dump the audit results into files.
audit_filenames = auditor.audit(verbose=self.verbose)
# Retrieve repaired data from audit
self._audits_data["rep_test"] = auditor._rep_test
ranked_features = []
for measurer in self.measurers:
vprint("Ranking audit files by {}.".format(measurer.__name__),self.verbose)
#ranked_graph_filename = "{}/{}.png".format(auditor.OUTPUT_DIR, measurer.__name__)
ranks = rank_audit_files(audit_filenames, measurer)
vprint("\t{}".format(ranks), self.verbose)
ranked_features.append( (measurer, ranks) )
end_time = datetime.now()
# Store a summary of this experiment.
model_id = model_factory.factory_name if self.trained_model is None else "Pretrained"
model_name = model_factory.verbose_factory_name if self.trained_model is None else "Pretrained"
summary = [
"Audit Start Time: {}".format(start_time),
"Audit End Time: {}".format(end_time),
"Retrained Per Repair: {}".format(self.RETRAIN_MODEL_PER_REPAIR),
"Model Factory ID: {}".format(model_id),
"Model Type: {}".format(model_name),
"Non-standard Model Options: {}".format(self.model_options),
"Train Size: {}".format(len(train_set)),
"Test Size: {}".format(len(test_set)),
"Non-standard Ignored Features: {}".format(features_to_ignore),
"Features: {}\n".format(headers)]
# Print summary
for line in summary:
print(line)
for ranker, ranks in ranked_features:
print("Ranked Features by {}: {}".format(ranker.__name__, ranks))
groups = group_audit_ranks(audit_filenames, ranker)
print("\tApprox. Trend Groups: {}\n".format(groups))
if ranker.__name__ == "accuracy":
self._audits_data["ranks"] = ranks
# Dump all experiment results if opted
if dump_all:
vprint("Dumping original training data.", self.verbose)
# Dump the train data to the log.
train_dump = "{}/original_train_data".format(auditor.OUTPUT_DIR)
with open(train_dump + ".csv", "w") as f:
writer = csv.writer(f)
writer.writerow(headers)
for row in train_set:
writer.writerow(row)
if self.WRITE_ORIGINAL_PREDICTIONS:
# Dump the predictions on the training data.
with open(train_dump + ".predictions", "w") as f:
writer = csv.writer(f)
file_headers = ["Response", "Prediction"]
writer.writerow(file_headers)
for response, guess in train_pred_tuples:
writer.writerow([response, guess])
vprint("Dumping original testing data.", self.verbose)
# Dump the test data to the log.
test_dump = "{}/original_test_data".format(auditor.OUTPUT_DIR)
with open(test_dump + ".csv", "w") as f:
writer = csv.writer(f)
writer.writerow(headers)
for row in test_set:
writer.writerow(row)
if self.WRITE_ORIGINAL_PREDICTIONS:
# Dump the predictions on the test data.
with open(test_dump + ".predictions", "w") as f:
writer = csv.writer(f)
file_headers = ["Response", "Prediction"]
writer.writerow(file_headers)
for response, guess in test_pred_tuples:
writer.writerow([response, guess])
# Graph the audit files.
vprint("Graphing audit files.",self.verbose)
for audit_filename in audit_filenames:
audit_image_filename = audit_filename + ".png"
graph_audit(audit_filename, self.measurers, audit_image_filename)
# Store a graph of how many predictions change as features are repaired.
vprint("Graphing prediction changes throughout repair.",self.verbose)
output_image = auditor.OUTPUT_DIR + "/similarity_to_original_predictions.png"
graph_prediction_consistency(auditor.OUTPUT_DIR, output_image)
for measurer in self.measurers:
ranked_graph_filename = "{}/{}.png".format(auditor.OUTPUT_DIR, measurer.__name__)
graph_audits(audit_filenames, measurer, ranked_graph_filename)
# Store a summary of this experiment to file.
summary_file = "{}/summary.txt".format(auditor.OUTPUT_DIR)
with open(summary_file, "w") as f:
for line in summary:
f.write(line+'\n')
for ranker, ranks in ranked_features:
f.write("Ranked Features by {}: {}\n".format(ranker.__name__, ranks))
groups = group_audit_ranks(audit_filenames, ranker)
f.write("\tApprox. Trend Groups: {}\n".format(groups))
vprint("Summary file written to: {}\n".format(summary_file), self.verbose)
def find_contexts(self, removed_attr, output_dir, beam_width=10, min_covered_examples=1, max_rule_length=5, by_original=True, epsilon=0.05):
# retrieve data from the audit
audits_data = self._audits_data
full_audit = audits_data["full_audit"]
# Make sure a full audit was completed
if not full_audit:
raise RuntimeError("Only partial audit completed. Must run a full audit to call find_contexts.")
orig_train = audits_data["train"]
orig_test = audits_data["test"]
obscured_test_data = audits_data["rep_test"][removed_attr]
headers = audits_data["headers"]
response_header = audits_data["response"]
features_to_ignore = audits_data["ignore"]
correct_types = audits_data["types"]
obscured_tag = "-no"+removed_attr
# Create directory to dump results
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Extract influence scores
ranks = audits_data["ranks"]
influence_scores = {}
for element in ranks:
influence_scores[element[0]] = float(element[1])
influence_scores[element[0]+obscured_tag] = 0.0
# Get obscured data from file:
obscured_test = []
obscured_test_reader = csv.reader(open(obscured_test_data, 'r'))
for row in obscured_test_reader:
obscured_test.append(row)
# load data from audit to prepare it for context finding process
audit_params = (orig_train, orig_test, obscured_test, headers, response_header, features_to_ignore, correct_types, obscured_tag)
orig_train_tab, orig_test_tab, merged_data = load(audit_params, output_dir)
# run the context_finder
context_finder(orig_train, orig_test, obscured_test, orig_train_tab, orig_test_tab, merged_data, obscured_tag, output_dir, influence_scores, beam_width, min_covered_examples, max_rule_length, by_original, epsilon)
### Tests and examples below
def german_example_audit():
# format data
data = load_data("german")
# set the auditor
auditor = Auditor()
auditor.ModelFactory = SVM
# call the auditor
auditor(data, output_dir="german_audit_output", dump_all=False)
auditor.find_contexts("age_cat",output_dir="german_context_output")
def test():
test_noinfluence()
test_highinfluence()
def test_noinfluence():
auditor = Auditor()
auditor.trained_model = SKLearnModelVisitor(MockModelPredict1(), 1)
df = pandas.DataFrame({"a": [1.0,2.0,3.0,4.0]})
y_df = pandas.DataFrame({"b": [0,0,0,0]})
data = load_testdf_only(df, y_df)
auditor(data)
ranks = auditor._audits_data["ranks"]
print("pretrained model, no influence rank correct? --", ranks[0] == ('a',0.0))
def test_highinfluence():
auditor = Auditor()
auditor.trained_model = SKLearnModelVisitor(MockModelPredict1234(), 1)
df = pandas.DataFrame({"a": [1.0,2.0,3.0,4.0]})
y_df = pandas.DataFrame({"b": [0,0,0,0]})
data = load_testdf_only(df, y_df)
auditor(data)
ranks = auditor._audits_data["ranks"]
print("pretrained model, high influence rank correct? --", ranks[0] == ('a',1.0))
class MockModelPredict1():
def predict(self, X):
return [1 for x in X]
class MockModelPredict1234():
def predict(self, X):
"""
Only predicts [0,0,0,0] if given [1,2,3,4].
"""
if X[0] == [1.0] and X[1] == [2.0] and X[2] == [3.0] and X[3] == [4.0]:
return [0,0,0,0]
else:
return [1,1,1,1]
if __name__ == "__main__":
# german_example_audit()
test()
|
"""Resource module for login resources."""
import json
from aiohttp import web
from user_service.services import (
LoginService,
UnknownUserException,
WrongPasswordException,
)
class LoginView(web.View):
"""Class representing login resource."""
async def post(self) -> web.Response:
"""Login route function."""
db = self.request.app["db"]
try:
body = await self.request.json()
except json.decoder.JSONDecodeError as e:
raise web.HTTPBadRequest(reason="Invalid data in request body.") from e
username = body.get("username", None)
password = body.get("password", None)
try:
jwt_token = await LoginService.login(db, username, password)
except UnknownUserException as e:
raise web.HTTPUnauthorized(reason=f"Unknown user {username}.") from e
except WrongPasswordException as e:
raise web.HTTPUnauthorized(
reason=f"Wrong password for user {username}."
) from e
return web.json_response({"token": jwt_token})
|
"""
Imports all submodules
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
#__version__ = '1.3.1'
import dcCustom.data
import dcCustom.feat
import dcCustom.hyper
#import deepchem.metalearning
import dcCustom.metrics
import dcCustom.models
#import dcCustom.nn
import dcCustom.splits
import dcCustom.trans
import dcCustom.utils
#import deepchem.dock
import dcCustom.molnet
#import deepchem.rl
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
from selenium import webdriver #to handle news source's dynamic website
import datetime
import time
from google_trans_new import google_translator
from statistics import mean
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import quandl
import sys
import xlwt
import openpyxl
import boto3
class MT_Sentiment_Analyser:
'''Scrapes Hindi news websites to extract information about a particular
index of interest. Scroll time (and hence the number of articles collected)
can be increased, e.g.: m = MT_Sentiment_Analyser(['सेंसेक्स'], scroll=10)'''
def __init__(self,keywords,scroll = 10):
self.home_link = 'https://www.bhaskar.com/business/'
#self.markup = requests.get(self.home_link).text
self.keywords = keywords
self.stime = time.time()
self.scroll = scroll
print("Initialized home link and keywords in --- %s seconds ---\n" % (time.time() - self.stime))
def sentiment_score_calculator(self,hin_eng_df):
print("Begun Sentiment score calculation on --- %s th second ---\n" % (time.time() - self.stime))
t_art_score = []
t_hl_score = []
vader = SentimentIntensityAnalyzer()
for index, row in enumerate (hin_eng_df.values):
t_para_score = []
score = 0
date = hin_eng_df.index[index]
hl , art , t_art,t_hl = row
for para in t_art:
score = vader.polarity_scores(para)['compound']
t_para_score.append(score)
t_hl_score.append(vader.polarity_scores(t_hl)['compound'])
t_art_score.append(mean(t_para_score))
#f = lambda x:vader.polarity_scores(x)['compound']
sys.stdout.write('\rScored Articles: {}/{} ...{} sec\n'.format(index+1,len(hin_eng_df.values),(time.time() - self.stime)))
sys.stdout.flush()
hin_eng_df['para_avg_Senti_Score'] = t_art_score
hin_eng_df['headline_Senti_Score'] = t_hl_score
#data = quandl.get("BSE/SENSEX", authtoken="xxxxxxxxxxxxxxx",start_date = hin_eng_df.index.date[-1],end_date = hin_eng_df.index.date[0])
#data['sensex_open_to_close_price'] = ((data['Close'] - data['Open'])/data['Open'] )*100
hin_eng_df.to_excel('SentimentScoreForSensexV2.xlsx', sheet_name='Sheet1', index=True, encoding=None)
#data.to_excel('Sensex_dataV2.xlsx', sheet_name='Sheet1', index=True, encoding=None)
print("\n1 : xls file is successfully created! named : SentimentScoreForSensexV2.xlsx")
print(hin_eng_df)
def translator_hack(self,data_fr):
'''Divides the original dataframe (data_fr) into smaller chunks with a predefined number of articles per chunk,
passes each chunk to the translator() function, then concatenates the translated chunks into one dataframe '''
print("Begun Translation on --- %s th second ---\n" % (time.time() - self.stime))
art_per_chunk = 10
chunks = [data_fr[i:i+art_per_chunk] for i in range(0,data_fr.shape[0],art_per_chunk)] #Chunker_by_list_comprehension
translated_chunks = []
for i,chunk in enumerate(chunks):
try:
translated_chunks.append(self.translator(chunk))
except Exception as e:
print('Error has occurred: {}. This exception is handled.\nProgram continues...\n'.format(e))
#translated_chunks.append(translator(chunk , proxies={'http':'209.127.191.180:9279'}))
sys.stdout.write('\rChunk Processed: {}/{} ...{} sec'.format(i+1,len(chunks),(time.time() - self.stime)))
sys.stdout.flush()
trans_df = pd.concat(translated_chunks)
hin_eng_df = data_fr.merge(trans_df,how = 'inner',left_index=True,right_index=True)
self.sentiment_score_calculator(hin_eng_df)
def translator(self,df_section):
'''INPUT: Untranslated Dataframe
OUTPUT: Translated Dataframe
issue: has an inbuilt timeout limit; temporary solution: try again in an hour '''
#translate_text = translator.translate('this great world',lang_tgt='bn')
#translator = google_translator(url_suffix=['translate.google.com','translate.google.co.in'],timeout=15,proxies={'http':'209.127.191.180:9279'})
translate = boto3.client(service_name='translate')#aws Translator for bulk process
saved_translated_articles = []
saved_translated_headlines = []
dates = []
for i, row in enumerate(df_section.values):
translated_article = []
date = df_section.index[i]
hl,art = row
#saved_translated_articles.append(translate.translate_text(Text=art,SourceLanguageCode="hi", TargetLanguageCode="en")['TranslatedText'])
for para in art:
translated_article.append(translate.translate_text(Text=para,SourceLanguageCode="hi", TargetLanguageCode="en")['TranslatedText'])
#time.sleep(2)
saved_translated_articles.append(translated_article)
saved_translated_headlines.append(translate.translate_text(Text=hl,SourceLanguageCode="hi", TargetLanguageCode="en")['TranslatedText'])
#saved_translated_articles.append(translated_article)
dates.append(date)
sys.stdout.write('\rTranslated Headline&Articles: {}/{} ...{} sec\n'.format(i+1,len(df_section),(time.time() - self.stime)))
sys.stdout.flush()
dic = {'Translated_Articles': saved_translated_articles,'Translated_Headlines': saved_translated_headlines}
df = pd.DataFrame(dic,index = dates)
df.index.name = 'Published_date_time'
print("\nDone! --- %s seconds ---" % (time.time() - self.stime))
return df
def parse_article(self,links):
'''This function opens each relevant article via the links provided by the parse() function below
and uses the BeautifulSoup library to extract the article content and published dates'''
print("Begun extracting each article from fitered links --- %s seconds ---" % (time.time() - self.stime))
self.saved_articles = []
self.saved_article_dates =[]
art_counter = 0
for link in links:#saved_requestable_links:
try:
article = []
article_content = requests.get(link).content
article_soup = BeautifulSoup(article_content,'html.parser')
paras = article_soup.findAll("p",{'style':"word-break:break-word"})
dateandtime = article_soup.find("meta", {"property": "article:published_time"}).attrs['content']
dateandtime = dateandtime[:-6]
for para in paras:
#article = ''.join(para.get_text())
article.append(para.get_text())
self.saved_articles.append(article)
date_time_obj = datetime.datetime.strptime(dateandtime, '%Y-%m-%dT%H:%M:%S')
self.saved_article_dates.append(date_time_obj)
art_counter = art_counter + 1
sys.stdout.write('\rArticles Parsed : {}/{} ...Time Elapsed:{} sec\n'.format(art_counter,len(links),(time.time() - self.stime)))
sys.stdout.flush()
except Exception as e:
print('Exception handled while parsing article! ', e)
self.saved_articles.append(' ')
dic = {'Headlines':self.saved_links_title,'Articles':self.saved_articles}
hin_df = pd.DataFrame(dic,index = self.saved_article_dates)
hin_df.index.name = 'Published_date_time'
print("Done! --- %s seconds ---" % (time.time() - self.stime))
self.translator_hack(hin_df)
def parse(self):
'''This function opens the website, scrolls down a configurable number of times (self.scroll), then takes the page source
to traverse and extract news headlines and links to relevant articles using the keywords,
then calls the parse_article() function above with the list of requestable links as a parameter'''
print("Begun Parsing and filtering links with keyword --- %s seconds ---" % (time.time() - self.stime))
driver = webdriver.Chrome(r'C:\Program Files\Google\Chrome\Application\chromedriver')
#url = 'https://www.bhaskar.com/business/'
driver.get(self.home_link)
time.sleep(10)
prev_height = driver.execute_script('return document.body.scrollHeight;')
limit = 0
while limit < self.scroll:
driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')
time.sleep(4)
new_height = driver.execute_script('return document.body.scrollHeight;')
#if new_height == prev_height:
# break
prev_height = new_height
limit += 1
markup = driver.page_source
soup = BeautifulSoup(markup,'html.parser')
links = driver.execute_script
links = soup.findAll("li",{"class" : '_24e83f49 e54ee612'})
self.saved_links = []
self.saved_links_title =[]
self.saved_requestable_links = []
for link in links:
for keyword in self.keywords:
if keyword in link.text:
if link not in self.saved_links: #this condition stops duplicate links
self.saved_links.append(link)
self.saved_links_title.append(link.text)
self.saved_requestable_links.append(str(self.home_link) + str(link('a')[0]['href']))
print("Done! --- %s seconds ---" % (time.time() - self.stime))
print('{} articles to be passed for scraping'.format(len(self.saved_requestable_links)))
self.parse_article(self.saved_requestable_links)
m = MT_Sentiment_Analyser(['सेंसेक्स'],scroll = 10)#'निफ्टी','टाटा स्टील','यस बैंक','5G'])#'बैंकिंग','टाटा डिजिटल',
m.parse()
|
# Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Contains WithRelationshipCreatedHandler mixin.
Used to handle relationship created event.
"""
from ggrc.models import relationship
from ggrc.services import signals
class WithRelationshipCreatedHandler(object):
"""Mixin for handle relationship created/deleted event"""
__lazy_init__ = True
@classmethod
def init(cls, model):
"""Initialization method to run after models have been initialized."""
cls.set_handlers(model)
def handle_relationship_created(self, target):
"""Override with custom handling"""
pass
@classmethod
def set_handlers(cls, model):
"""Sets up handlers"""
# pylint: disable=unused-argument,unused-variable
@signals.Restful.collection_posted.connect_via(relationship.Relationship)
def handle_object_mapping(sender, objects=None, **kwargs):
"""Handle relationship collection_posted event"""
for rel in objects:
if rel.source_type == model.__name__:
model.handle_relationship_created(rel.source, rel.destination)
elif rel.destination_type == model.__name__:
model.handle_relationship_created(rel.destination, rel.source)
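# Illustrative use of the mixin (a sketch; ``MyModel`` and its handler body are
# hypothetical):
#
#     class MyModel(WithRelationshipCreatedHandler, db.Model):
#         def handle_relationship_created(self, target):
#             do_something_with(target)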
|
#!/usr/bin/env python3
import rospy
import math
from week2.srv import trajectory,trajectoryResponse
from week2.msg import FloatList
def generate_trajectory(request): #x: float,y: float,theta: float,v: float,w:float):
x = request.x
y = request.y
theta = request.theta
v = request.v
w = request.w
n = 50
dt = 0.05
x_points = FloatList()
y_points = FloatList()
x_points_list = []
y_points_list = []
for iter in range(n):
theta = w*dt + theta
x = v*dt*math.cos(theta) + x
y = v*dt*math.sin(theta) + y
x_points_list.append(x)
y_points_list.append(y)
x_points.data = x_points_list
y_points.data = y_points_list
return x_points, y_points
rospy.init_node('service_server') # init node
service = rospy.Service('trajectory_giver', trajectory, generate_trajectory) #service teller
rospy.spin()
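# Example client call (a sketch; run from another node while this server is up):
#
#     rospy.wait_for_service('trajectory_giver')
#     trajectory_giver = rospy.ServiceProxy('trajectory_giver', trajectory)
#     response = trajectory_giver(x=0.0, y=0.0, theta=0.0, v=1.0, w=0.5)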
|
#!/usr/bin/env python
# coding=utf-8
#
# Copyright 2012 F2E.im
# Do have a faith in what you're doing.
# Make your life a story worth telling.
# cat /etc/mime.types
# application/octet-stream crx
import sys
reload(sys)
sys.setdefaultencoding("utf8")
import os.path
import re
import memcache
import torndb
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import handler.base
import handler.user
import handler.topic
import handler.page
import handler.notification
from tornado.options import define, options
from lib.loader import Loader
from lib.session import Session, SessionManager
from jinja2 import Environment, FileSystemLoader
define("port", default = 80, help = "run on the given port", type = int)
define("mysql_host", default = "mysql_host", help = "community database host")
define("mysql_database", default = "mysql_db_name", help = "community database name")
define("mysql_user", default = "mysql_db_user", help = "community database user")
define("mysql_password", default = "mysql_db_password", help = "community database password")
class Application(tornado.web.Application):
def __init__(self):
settings = dict(
blog_title = u"F2E Community",
template_path = os.path.join(os.path.dirname(__file__), "templates"),
static_path = os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies = True,
cookie_secret = "cookie_secret_code",
login_url = "/login",
autoescape = None,
jinja2 = Environment(loader = FileSystemLoader(os.path.join(os.path.dirname(__file__), "templates")), trim_blocks = True),
reserved = ["user", "topic", "home", "setting", "forgot", "login", "logout", "register", "admin"],
)
handlers = [
(r"/", handler.topic.IndexHandler),
(r"/t/(\d+)", handler.topic.ViewHandler),
(r"/t/create/(.*)", handler.topic.CreateHandler),
(r"/t/edit/(.*)", handler.topic.EditHandler),
(r"/reply/edit/(.*)", handler.topic.ReplyEditHandler),
(r"/node/(.*)", handler.topic.NodeTopicsHandler),
(r"/u/(.*)/topics", handler.topic.UserTopicsHandler),
(r"/u/(.*)/replies", handler.topic.UserRepliesHandler),
(r"/u/(.*)/favorites", handler.topic.UserFavoritesHandler),
(r"/u/(.*)", handler.topic.ProfileHandler),
(r"/vote", handler.topic.VoteHandler),
(r"/favorite", handler.topic.FavoriteHandler),
(r"/unfavorite", handler.topic.CancelFavoriteHandler),
(r"/notifications", handler.notification.ListHandler),
(r"/members", handler.topic.MembersHandler),
(r"/setting", handler.user.SettingHandler),
(r"/setting/avatar", handler.user.SettingAvatarHandler),
(r"/setting/avatar/gravatar", handler.user.SettingAvatarFromGravatarHandler),
(r"/setting/password", handler.user.SettingPasswordHandler),
(r"/forgot", handler.user.ForgotPasswordHandler),
(r"/login", handler.user.LoginHandler),
(r"/logout", handler.user.LogoutHandler),
(r"/register", handler.user.RegisterHandler),
(r"/(favicon\.ico)", tornado.web.StaticFileHandler, dict(path = settings["static_path"])),
(r"/(sitemap.*$)", tornado.web.StaticFileHandler, dict(path = settings["static_path"])),
(r"/(bdsitemap\.txt)", tornado.web.StaticFileHandler, dict(path = settings["static_path"])),
(r"/(.*)", handler.topic.ProfileHandler),
]
tornado.web.Application.__init__(self, handlers, **settings)
# Have one global connection to the blog DB across all handlers
self.db = torndb.Connection(
host = options.mysql_host, database = options.mysql_database,
user = options.mysql_user, password = options.mysql_password
)
# Have one global loader for loading models and handles
self.loader = Loader(self.db)
# Have one global model for db query
self.user_model = self.loader.use("user.model")
self.topic_model = self.loader.use("topic.model")
self.reply_model = self.loader.use("reply.model")
self.plane_model = self.loader.use("plane.model")
self.node_model = self.loader.use("node.model")
self.notification_model = self.loader.use("notification.model")
self.vote_model = self.loader.use("vote.model")
self.favorite_model = self.loader.use("favorite.model")
# Have one global session controller
self.session_manager = SessionManager(settings["cookie_secret"], ["127.0.0.1:11211"], 0)
# Have one global memcache controller
self.mc = memcache.Client(["127.0.0.1:11211"])
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
from unittest.mock import Mock, patch
from django.test import TestCase
from data_refinery_common import utils
class UtilsTestCase(TestCase):
@patch('data_refinery_common.utils.get_env_variable')
@patch('data_refinery_common.utils.requests.get')
def test_get_worker_id_cloud(self, mock_get, mock_get_env_variable):
"""Test that a request is made and the global value is stored"""
# Ensure utils.INSTANCE_ID hasn't been set yet in case the
# order the tests are run in ever changes
utils.INSTANCE_ID = None
mock_get.return_value = Mock(ok=True)
mock_get.return_value.text = "instance_id"
mock_get_env_variable.return_value = "True"
self.assertEqual(utils.get_worker_id(), "instance_id/MainProcess")
# Ensure that the second call uses the now-set global value.
# (By calling it again and checking that two function calls
# resulted in one call to each of the invoked functions.)
utils.get_worker_id()
mock_get.assert_called_once()
mock_get_env_variable.assert_called_once()
@patch('data_refinery_common.utils.get_env_variable')
def test_get_worker_id_local(self, mock_get_env_variable):
"""Test that local is used for instance id."""
# Ensure utils.INSTANCE_ID hasn't been set yet in case the
# order the tests are run in ever changes
utils.INSTANCE_ID = None
mock_get_env_variable.return_value = "False"
self.assertEqual(utils.get_worker_id(), "local/MainProcess")
# Ensure that the second call uses the now-set global value.
# (By calling it again and checking that two function calls
# resulted in one call to get_env_variable)
utils.get_worker_id()
mock_get_env_variable.assert_called_once()
|
from rest_framework.pagination import LimitOffsetPagination
class LimitedOffsetPagination(LimitOffsetPagination):
max_limit = 100
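# Typical wiring (a sketch; the viewset name is hypothetical):
#
#     class ThingViewSet(viewsets.ModelViewSet):
#         pagination_class = LimitedOffsetPagination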
|
from epubconv.epubconv import convertEPUB, config
import asyncio
import websockets
import os
from threading import Timer, Thread
settings = config.load()
async def api(websocket, path):
file_path = f'./temp/{await websocket.recv()}.epub'
result = await convertEPUB(file_path, lambda x:websocket.send(x))
if (result['status']):
Timer(settings['tempTime'], lambda x: os.remove(x) if os.path.isfile(x) else None, [f"./temp/{result['id']}.epub"]).start()
await websocket.send(">>>>> 正在傳輸轉換結果...")
await websocket.send(result['id'])
else:
await websocket.send(f"轉換失敗。\n錯誤: {result['error']}")
async def start_server():
print("///// EPUB 轉換服務已啟動 /////")
await websockets.serve(api, settings["wsHost"], settings["wsPort"])
print(f'ws://{settings["wsHost"]}:{settings["wsPort"]}')
loop = asyncio.get_event_loop()
loop.create_task(start_server())
loop.run_forever()
|
from .core import FaceDetector
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Struct Class
# this is a auto generated file generated by Cheetah
# Namespace: com.sun.star.chart2
# Libre Office Version: 7.3
from ooo.oenv.env_const import UNO_NONE
import typing
class DataPointLabel(object):
"""
Struct Class
**since**
LibreOffice 7.1
See Also:
`API DataPointLabel <https://api.libreoffice.org/docs/idl/ref/structcom_1_1sun_1_1star_1_1chart2_1_1DataPointLabel.html>`_
"""
__ooo_ns__: str = 'com.sun.star.chart2'
__ooo_full_ns__: str = 'com.sun.star.chart2.DataPointLabel'
__ooo_type_name__: str = 'struct'
typeName: str = 'com.sun.star.chart2.DataPointLabel'
"""Literal Constant ``com.sun.star.chart2.DataPointLabel``"""
def __init__(self, ShowNumber: typing.Optional[bool] = False, ShowNumberInPercent: typing.Optional[bool] = False, ShowCategoryName: typing.Optional[bool] = False, ShowLegendSymbol: typing.Optional[bool] = False, ShowCustomLabel: typing.Optional[bool] = None, ShowSeriesName: typing.Optional[bool] = None) -> None:
"""
Constructor
Arguments:
ShowNumber (bool, optional): ShowNumber value.
ShowNumberInPercent (bool, optional): ShowNumberInPercent value.
ShowCategoryName (bool, optional): ShowCategoryName value.
ShowLegendSymbol (bool, optional): ShowLegendSymbol value.
ShowCustomLabel (bool, optional): ShowCustomLabel value.
ShowSeriesName (bool, optional): ShowSeriesName value.
"""
super().__init__()
if isinstance(ShowNumber, DataPointLabel):
oth: DataPointLabel = ShowNumber
self.ShowNumber = oth.ShowNumber
self.ShowNumberInPercent = oth.ShowNumberInPercent
self.ShowCategoryName = oth.ShowCategoryName
self.ShowLegendSymbol = oth.ShowLegendSymbol
self.ShowCustomLabel = oth.ShowCustomLabel
self.ShowSeriesName = oth.ShowSeriesName
return
kargs = {
"ShowNumber": ShowNumber,
"ShowNumberInPercent": ShowNumberInPercent,
"ShowCategoryName": ShowCategoryName,
"ShowLegendSymbol": ShowLegendSymbol,
"ShowCustomLabel": ShowCustomLabel,
"ShowSeriesName": ShowSeriesName,
}
self._init(**kargs)
def _init(self, **kwargs) -> None:
self._show_number = kwargs["ShowNumber"]
self._show_number_in_percent = kwargs["ShowNumberInPercent"]
self._show_category_name = kwargs["ShowCategoryName"]
self._show_legend_symbol = kwargs["ShowLegendSymbol"]
self._show_custom_label = kwargs["ShowCustomLabel"]
self._show_series_name = kwargs["ShowSeriesName"]
@property
def ShowNumber(self) -> bool:
"""
if TRUE, the value that is represented by a data point is displayed next to it.
"""
return self._show_number
@ShowNumber.setter
def ShowNumber(self, value: bool) -> None:
self._show_number = value
@property
def ShowNumberInPercent(self) -> bool:
"""
This is only effective, if ShowNumber is TRUE.
If this member is also TRUE, the numbers are displayed as percentages of a category.
That means, if a data point is the first one of a series, the percentage is calculated by using the first data points of all available series.
"""
return self._show_number_in_percent
@ShowNumberInPercent.setter
def ShowNumberInPercent(self, value: bool) -> None:
self._show_number_in_percent = value
@property
def ShowCategoryName(self) -> bool:
"""
The caption contains the category name of the category to which a data point belongs.
"""
return self._show_category_name
@ShowCategoryName.setter
def ShowCategoryName(self, value: bool) -> None:
self._show_category_name = value
@property
def ShowLegendSymbol(self) -> bool:
"""
The symbol of data series is additionally displayed in the caption.
"""
return self._show_legend_symbol
@ShowLegendSymbol.setter
def ShowLegendSymbol(self, value: bool) -> None:
self._show_legend_symbol = value
@property
def ShowCustomLabel(self) -> bool:
"""
The caption contains a custom label text, which belongs to a data point label.
**since**
LibreOffice 7.1
"""
return self._show_custom_label
@ShowCustomLabel.setter
def ShowCustomLabel(self, value: bool) -> None:
self._show_custom_label = value
@property
def ShowSeriesName(self) -> bool:
"""
The name of the data series is additionally displayed in the caption.
**since**
LibreOffice 7.2
"""
return self._show_series_name
@ShowSeriesName.setter
def ShowSeriesName(self, value: bool) -> None:
self._show_series_name = value
__all__ = ['DataPointLabel']
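# Example (a sketch): a label that shows the value and the category name, relying on
# the constructor defaults for everything else.
#
#     label = DataPointLabel(ShowNumber=True, ShowCategoryName=True)
#     assert label.ShowNumber and not label.ShowLegendSymbol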
|
from psutil import Process  # import the psutil class used to read the memory occupied by a process
from os import getpid  # import the os function that returns the current process id
def memory_func(func):
"""
Decorator that measures the RAM consumed by a wrapped function call.
"""
def wrapper(*args, **kwargs):
proc = Process(getpid())  # get the current process id and create a Process object for it
start_memory = proc.memory_info().rss  # record the resident memory before the call
result = func(*args, **kwargs)  # run the wrapped function with its arguments
end_memory = proc.memory_info().rss  # measure the resident memory after the call
print(f"Physical memory used by function {func.__name__}: {end_memory-start_memory} bytes")  # print the result
return result
return wrapper
@memory_func  # apply the decorator to measure the memory used by the function
def spisok(n):
"""
Function that builds a list of n numbers
"""
result = []  # create the resulting list
for x in range(n):  # for each number from 0 to n-1
result.append(x)  # append the next element to the resulting list
return result
@memory_func
def sp_gen(n):
"""
Generator function that yields numbers from 0 to n-1
"""
for i in range(n):
yield i
spisok(10000000)  # call the decorated functions
sp_gen(10000000)
|
from django.utils.functional import SimpleLazyObject
from django.utils.deprecation import MiddlewareMixin
from .utils import get_jwt_value_from_cookies, check_payload, check_user
def get_user(request):
if not hasattr(request, '_cached_user'):
session_id = get_jwt_value_from_cookies(request.COOKIES)
if not session_id:
return None
request._cached_user = check_user(check_payload(session_id))
return request._cached_user
class UserMiddleware(MiddlewareMixin):
def process_request(self, request):
request.user = SimpleLazyObject(lambda: get_user(request))
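# To activate the middleware (a sketch; the dotted path depends on where this module
# lives in the project):
#
#     MIDDLEWARE = [
#         ...,
#         'yourapp.middleware.UserMiddleware',
#     ]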
|
import struct
import redis
from Jumpscale import j
from redis import ResponseError
from ..ZDBClientBase import ZDBClientBase
from ..ZDBAdminClientBase import ZDBAdminClientBase
MODE = "seq"
class ZDBClientSeqMode(ZDBClientBase):
def _key_encode(self, key):
if key is None:
key = ""
else:
key = struct.pack("<I", key)
return key
def _key_decode(self, key):
return struct.unpack("<I", key)[0]
def set(self, data, key=None):
key1 = self._key_encode(key)
res = self.redis.execute_command("SET", key1, data)
if not res: # data already present and the same, 0-db did nothing.
return res
key = self._key_decode(res)
return key
def delete(self, key):
key1 = self._key_encode(key)
try:
self.redis.execute_command("DEL", key1)
except ResponseError as e:
if str(e).find("Key not found") != -1:
return
else:
raise e
def get(self, key):
key = self._key_encode(key)
return self.redis.execute_command("GET", key)
def exists(self, key):
key = self._key_encode(key)
return self.redis.execute_command("EXISTS", key) == 1
class ZDBClientSeqModeAdmin(ZDBClientSeqMode, ZDBAdminClientBase):
pass
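# Usage sketch (assuming ``client`` is a ZDBClientSeqMode connected to a 0-db
# namespace running in sequential mode):
#
#     key = client.set(b"hello world")   # 0-db assigns the sequential key
#     assert client.get(key) == b"hello world"
#     client.delete(key)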
|
from mock import MagicMock, patch
from tests.app.app_context_test_case import AppContextTestCase
from app.templating.summary.question import Question
from app.data_model.answer_store import AnswerStore, Answer
from app.utilities.schema import load_schema_from_params
class TestQuestion(AppContextTestCase): # pylint: disable=too-many-public-methods
def setUp(self):
super().setUp()
self.answer_schema = MagicMock()
self.answer_store = AnswerStore()
self.schema = MagicMock()
self.metadata = {}
def test_create_question(self):
# Given
question_title = 'question_title'
question_schema = {'id': 'question_id', 'title': question_title, 'type': 'GENERAL', 'answers': [self.answer_schema]}
# When
with patch('app.templating.summary.question.get_question_title', return_value=question_title):
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 0)
# Then
self.assertEqual(question.id, 'question_id-0')
self.assertEqual(question.title, question_title)
self.assertEqual(len(question.answers), 1)
def test_create_question_with_no_answers(self):
# Given
question_title = 'question_title'
question_schema = {'id': 'question_id', 'title': question_title, 'type': 'GENERAL', 'answers': []}
# When
with patch('app.templating.summary.question.get_question_title', return_value=question_title):
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 0)
# Then
self.assertEqual(question.id, 'question_id-0')
self.assertEqual(question.title, question_title)
self.assertEqual(len(question.answers), 0)
def test_create_question_with_conditional_title(self):
# Given
self.answer_store.add_or_update(Answer(
answer_id='answer_1',
value='Han',
))
title_when = [{
'id': 'answer_1',
'condition': 'equals',
'value': 'Han'
}]
question_schema = {'id': 'question_id', 'titles': [{'value': 'conditional_title', 'when': title_when},
{'value': 'question_title'}], 'type': 'GENERAL', 'answers': [self.answer_schema]}
# When
with patch('app.templating.utils.evaluate_when_rules', side_effect=[True, False]) as evaluate_when_rules:
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 0)
# Then
evaluate_when_rules.assert_called_once_with(title_when, self.schema, self.metadata, self.answer_store, 0, group_instance_id=None)
self.assertEqual(question.id, 'question_id-0')
self.assertEqual(question.title, 'conditional_title')
self.assertEqual(len(question.answers), 1)
def test_create_question_with_answer_label_when_empty_title(self):
# Given
answer_schema = {'type': 'Number', 'id': 'age-answer', 'mandatory': True, 'label': 'Age'}
question_schema = {'id': 'question_id', 'title': '', 'type': 'GENERAL', 'answers': [answer_schema]}
# When
with patch('app.templating.summary.question.get_question_title', return_value=False):
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 0)
# Then
self.assertEqual(question.title, 'Age')
self.assertEqual(len(question.answers), 1)
def test_create_question_with_multiple_answers(self):
# Given
self.answer_store.add_or_update(Answer(
answer_id='answer_1',
value='Han',
))
self.answer_store.add_or_update(Answer(
answer_id='answer_2',
value='Solo',
))
first_answer_schema = {
'id': 'answer_1',
'label': 'First name',
'type': 'text'
}
second_answer_schema = {
'id': 'answer_2',
'label': 'Surname',
'type': 'text'
}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL',
'answers': [first_answer_schema, second_answer_schema]}
# When
with patch('app.templating.summary.question.get_question_title', return_value=False):
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 0)
# Then
self.assertEqual(len(question.answers), 2)
self.assertEqual(question.answers[0]['value'], 'Han')
self.assertEqual(question.answers[1]['value'], 'Solo')
def test_create_question_with_relationship_answers(self):
with self.app_request_context():
schema = load_schema_from_params('test', 'routing_on_answer_from_driving_repeating_group')
answers = [
{'group_instance': 0, 'group_instance_id': 'aaa', 'answer_id': 'primary-name', 'answer_instance': 0, 'value': 'Aaa'},
{'group_instance': 0, 'group_instance_id': 'bbb', 'answer_id': 'repeating-name', 'answer_instance': 0, 'value': 'Bbb'},
{'group_instance': 1, 'group_instance_id': 'ccc', 'answer_id': 'repeating-name', 'answer_instance': 0, 'value': 'Ccc'},
{'group_instance': 0, 'group_instance_id': 'aaa', 'answer_id': 'who-is-related', 'answer_instance': 0, 'value': 'Husband or wife'},
{'group_instance': 0, 'group_instance_id': 'aaa', 'answer_id': 'who-is-related', 'answer_instance': 1, 'value': 'Mother or father'},
{'group_instance': 1, 'group_instance_id': 'bbb', 'answer_id': 'who-is-related', 'answer_instance': 0, 'value': 'Relation - other'},
]
answer_store = AnswerStore(answers)
question_schema = {
'answers': [{
'label': '%(current_person)s is the .... of %(other_person)s',
'id': 'who-is-related',
'options': [
{'label': 'Husband or wife', 'value': 'Husband or wife'},
{'label': 'Mother or father', 'value': 'Mother or father'},
{'label': 'Relation - other', 'value': 'Relation - other'},
],
'type': 'Relationship',
'parent_id': 'relationship-question'
}],
'id': 'relationship-question',
'title': 'Describe how this person is related to the others',
'description': 'If members are not related, select the ‘unrelated’ option, including foster parents and foster children.',
'member_label': "answers['primary-name'] | default(answers['repeating-name'])",
'type': 'Relationship',
'parent_id': 'relationships'
}
question = Question(question_schema, answer_store, self.metadata, schema, 0)
self.assertEqual(len(question.answers), 2)
self.assertEqual(question.answers[0]['value'], 'Husband or wife')
self.assertEqual(question.answers[1]['value'], 'Mother or father')
question = Question(question_schema, answer_store, self.metadata, schema, 1)
self.assertEqual(len(question.answers), 1)
self.assertEqual(question.answers[0]['value'], 'Relation - other')
def test_merge_date_range_answers(self):
# Given
self.answer_store.add_or_update(Answer(
answer_id='answer_1',
value='13/02/2016',
))
self.answer_store.add_or_update(Answer(
answer_id='answer_2',
value='13/09/2016',
))
first_date_answer_schema = {'id': 'answer_1', 'label': 'From', 'type': 'date'}
second_date_answer_schema = {'id': 'answer_2', 'label': 'To', 'type': 'date'}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'DateRange',
'answers': [first_date_answer_schema, second_date_answer_schema]}
# When
with patch('app.templating.summary.question.get_question_title', return_value=False):
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 0)
# Then
self.assertEqual(len(question.answers), 1)
self.assertEqual(question.answers[0]['value']['from'], '13/02/2016')
self.assertEqual(question.answers[0]['value']['to'], '13/09/2016')
def test_merge_multiple_date_range_answers(self):
# Given
self.answer_store.add_or_update(Answer(
answer_id='answer_1',
value='13/02/2016',
))
self.answer_store.add_or_update(Answer(
answer_id='answer_2',
value='13/09/2016',
))
self.answer_store.add_or_update(Answer(
answer_id='answer_3',
value='13/03/2016',
))
self.answer_store.add_or_update(Answer(
answer_id='answer_4',
value='13/10/2016',
))
first_date_answer_schema = {'id': 'answer_1', 'label': 'From', 'type': 'date'}
second_date_answer_schema = {'id': 'answer_2', 'label': 'To', 'type': 'date'}
third_date_answer_schema = {'id': 'answer_3', 'label': 'First period', 'type': 'date'}
fourth_date_answer_schema = {'id': 'answer_4', 'label': 'Second period', 'type': 'date'}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'DateRange', 'answers':
[first_date_answer_schema, second_date_answer_schema, third_date_answer_schema, fourth_date_answer_schema]}
# When
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 0)
# Then
self.assertEqual(len(question.answers), 2)
self.assertEqual(question.answers[0]['value']['from'], '13/02/2016')
self.assertEqual(question.answers[0]['value']['to'], '13/09/2016')
self.assertEqual(question.answers[1]['value']['from'], '13/03/2016')
self.assertEqual(question.answers[1]['value']['to'], '13/10/2016')
def test_checkbox_button_options(self):
# Given
self.answer_store.add_or_update(Answer(
answer_id='answer_1',
value=['Light Side', 'Dark Side'],
))
options = [{
'label': 'Light Side label',
'value': 'Light Side',
}, {
'label': 'Dark Side label',
'value': 'Dark Side',
}]
answer_schema = {'id': 'answer_1', 'label': 'Which side?', 'type': 'Checkbox', 'options': options}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [answer_schema]}
# When
with patch('app.templating.summary.question.get_question_title', return_value=False):
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 0)
# Then
self.assertEqual(len(question.answers[0]['value']), 2)
self.assertEqual(question.answers[0]['value'][0].label, 'Light Side label')
self.assertEqual(question.answers[0]['value'][1].label, 'Dark Side label')
def test_checkbox_button_detail_answer_empty(self):
# Given
self.answer_store.add_or_update(Answer(
answer_id='answer_1',
value=['other', ''],
))
options = [{
'label': 'Light Side',
'value': 'Light Side',
}, {
'label': 'Other option label',
'value': 'other',
'other': {
'label': 'Please specify other'
}
}]
answer_schema = {'id': 'answer_1', 'label': 'Which side?', 'type': 'Checkbox', 'options': options}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [answer_schema]}
# When
with patch('app.templating.summary.question.get_question_title', return_value=False):
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 0)
# Then
self.assertEqual(len(question.answers[0]['value']), 1)
self.assertEqual(question.answers[0]['value'][0].label, 'Other option label')
self.assertEqual(question.answers[0]['value'][0].detail_answer_value, None)
def test_checkbox_answer_with_detail_answer_returns_the_value(self):
# Given
self.answer_store.add_or_update(Answer(
answer_id='answer_1',
value=['Light Side', 'Other'],
))
self.answer_store.add_or_update(Answer(
answer_id='child_answer',
value='Test',
))
options = [{
'label': 'Light Side',
'value': 'Light Side',
}, {
'label': 'Other',
'value': 'Other',
'detail_answer': {
'id': 'child_answer',
'type': 'TextField'
}
}]
answer_schema = [{
'id': 'answer_1',
'label': 'Which side?',
'type': 'Checkbox',
'options': options
}]
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL',
'answers': answer_schema}
# When
with patch('app.templating.summary.question.get_question_title', return_value=False):
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 0)
# Then
self.assertEqual(len(question.answers[0]['value']), 2)
self.assertEqual(question.answers[0]['value'][1].detail_answer_value, 'Test')
def test_checkbox_button_other_option_text(self):
# Given
self.answer_store.add_or_update(Answer(
answer_id='answer_1',
value=['Light Side', 'other'],
))
self.answer_store.add_or_update(Answer(
answer_id='child_answer',
value='Neither',
))
options = [{
'label': 'Light Side',
'value': 'Light Side',
}, {
'label': 'other',
'value': 'other',
'detail_answer': {'id': 'child_answer'}
}]
answer_schema = {'id': 'answer_1', 'label': 'Which side?', 'type': 'Checkbox', 'options': options}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [answer_schema]}
# When
with patch('app.templating.summary.question.get_question_title', return_value=False):
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 0)
# Then
self.assertEqual(len(question.answers[0]['value']), 2)
self.assertEqual(question.answers[0]['value'][0].label, 'Light Side')
self.assertEqual(question.answers[0]['value'][1].detail_answer_value, 'Neither')
def test_checkbox_button_none_selected_should_be_none(self):
# Given
self.answer_store.add_or_update(Answer(
answer_id='answer_1',
value=[],
))
options = [{
'label': 'Light Side',
'value': 'Light Side',
}]
answer_schema = {'id': 'answer_1', 'label': 'Which side?', 'type': 'Checkbox', 'options': options}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [answer_schema]}
# When
with patch('app.templating.summary.question.get_question_title', return_value=False):
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 0)
# Then
self.assertEqual(question.answers[0]['value'], None)
def test_radio_button_none_selected_should_be_none(self):
# Given
options = [{
'label': 'Light Side',
'value': 'Light Side',
}]
answer_schema = {'id': 'answer_1', 'label': 'Which side?', 'type': 'Radio', 'options': options, 'group_instance': 0}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [answer_schema]}
# When
with patch('app.templating.summary.question.get_question_title', return_value=False):
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 0)
# Then
self.assertEqual(question.answers[0]['value'], None)
def test_radio_answer_with_detail_answer_returns_the_value(self):
# Given
self.answer_store.add_or_update(Answer(
answer_id='answer_1',
value='Other',
))
self.answer_store.add_or_update(Answer(
answer_id='child_answer',
value='Test',
))
options = [{
'label': 'Other',
'value': 'Other',
'detail_answer': {
'id': 'child_answer',
'type': 'TextField'
}
}]
answer_schema = [{
'id': 'answer_1',
'label': 'Which side?',
'type': 'Radio',
'options': options
}]
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL',
'answers': answer_schema}
# When
with patch('app.templating.summary.question.get_question_title', return_value=False):
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 0)
# Then
self.assertEqual(question.answers[0]['value']['detail_answer_value'], 'Test')
def test_build_answers_repeating_answers(self):
# Given
self.answer_store.add_or_update(Answer(
answer_id='answer',
value='Value',
))
self.answer_store.add_or_update(Answer(
answer_id='answer',
value='Value 2',
group_instance=1,
group_instance_id='group-1',
))
self.answer_store.add_or_update(Answer(
answer_id='answer',
value='Value 3',
group_instance=2,
group_instance_id='group-2',
))
answer_schema = {'id': 'answer', 'title': '', 'type': '', 'label': ''}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'RepeatingAnswer',
'answers': [answer_schema]}
# When
with patch('app.templating.summary.question.get_question_title', return_value=False):
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 0)
# Then
self.assertEqual(len(question.answers), 1)
self.assertEqual(question.answers[0]['value'], 'Value')
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 1)
# Then
self.assertEqual(len(question.answers), 1)
self.assertEqual(question.answers[0]['value'], 'Value 2')
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 2)
# Then
self.assertEqual(len(question.answers), 1)
self.assertEqual(question.answers[0]['value'], 'Value 3')
def test_dropdown_none_selected_should_be_none(self):
# Given
options = [{
'label': 'Light Side',
'value': 'Light Side',
}]
answer_schema = {'id': 'answer_1', 'label': 'Which side?', 'type': 'Dropdown', 'options': options}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [answer_schema]}
# When
with patch('app.templating.summary.question.get_question_title', return_value=False):
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 0)
# Then
self.assertEqual(question.answers[0]['value'], None)
def test_dropdown_selected_option_label(self):
# Given
options = [{
'label': 'Light Side label',
'value': 'Light Side',
}, {
'label': 'Dark Side label',
'value': 'Dark Side',
}]
answer_schema = {'id': 'answer_1', 'label': 'Which side?', 'type': 'Dropdown', 'options': options}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [answer_schema]}
self.answer_store.add_or_update(Answer(
answer_id='answer_1',
value='Dark Side',
))
# When
with patch('app.templating.summary.question.get_question_title', return_value=False):
question = Question(question_schema, self.answer_store, self.metadata, self.schema, 0)
# Then
self.assertEqual(question.answers[0]['value'], 'Dark Side label')
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from lxml import etree
except ImportError:
etree = None
from keystoneauth1 import exceptions
from keystoneauth1.identity import v3
class _Saml2TokenAuthMethod(v3.AuthMethod):
_method_parameters = []
def get_auth_data(self, session, auth, headers, **kwargs):
raise exceptions.MethodNotImplemented('This method should never '
'be called')
class BaseSAMLPlugin(v3.FederationBaseAuth):
HTTP_MOVED_TEMPORARILY = 302
HTTP_SEE_OTHER = 303
_auth_method_class = _Saml2TokenAuthMethod
def __init__(self, auth_url,
identity_provider, identity_provider_url,
username, password, protocol,
**kwargs):
"""Class constructor accepting following parameters.
:param auth_url: URL of the Identity Service
:type auth_url: string
:param identity_provider: Name of the Identity Provider the client
will authenticate against. This parameter
will be used to build a dynamic URL used to
obtain unscoped OpenStack token.
:type identity_provider: string
:param identity_provider_url: An Identity Provider URL, where the
SAML2 authentication request will be
sent.
:type identity_provider_url: string
:param username: User's login
:type username: string
:param password: User's password
:type password: string
:param protocol: Protocol to be used for the authentication.
The name must be equal to one configured at the
keystone sp side. This value is used for building
dynamic authentication URL.
Typical value would be: saml2
:type protocol: string
"""
super(BaseSAMLPlugin, self).__init__(
auth_url=auth_url, identity_provider=identity_provider,
protocol=protocol,
**kwargs)
self.identity_provider_url = identity_provider_url
self.username = username
self.password = password
@staticmethod
def _first(_list):
if len(_list) != 1:
raise IndexError('Only single element list is acceptable')
return _list[0]
@staticmethod
def str_to_xml(content, msg=None, include_exc=True):
try:
return etree.XML(content)
except etree.XMLSyntaxError as e:
if not msg:
msg = str(e)
else:
msg = msg % e if include_exc else msg
raise exceptions.AuthorizationFailure(msg)
@staticmethod
def xml_to_str(content, **kwargs):
return etree.tostring(content, **kwargs)
|
#!/usr/bin/env python
import unittest
import numpy as np
import pandas as pd
from __init__ import *
# testing __init__ functions
class LambTest(unittest.TestCase):
"""Test lambdata_joshdsolis functions"""
# def test_checknulls(self):
# df = pd.DataFrame(np.ones(100))
# self.assertEqual(check_nulls(df).tolist(), df.isna().sum()).tolist()
# Testing more_rows functions in init
def test_morerows(self):
df = pd.DataFrame(np.ones(100))
more_rows(df, 100)
self.assertEqual(df.shape, (200, 1))
if __name__ == '__main__':
unittest.main()
|
"""1359. Count All Valid Pickup and Delivery Options
https://leetcode.com/problems/count-all-valid-pickup-and-delivery-options/
"""
class Solution:
def countOrders(self, n: int) -> int:
if n == 1:
return 1
mod = int(1e9 + 7)
ans = 1
# inserting the i-th pickup/delivery pair into a valid sequence of i - 1
# pairs gives i * (2 * i - 1) = 2 * i * i - i choices, hence the recurrence
for i in range(2, n + 1):
ans = ans * (2 * i * i - i) % mod
return ans
|
from .main import main
from .text_formatter import skim
|
"""
.. currentmodule:: neet.boolean
.. testsetup:: sensitivity
from neet.boolean.examples import c_elegans, s_pombe
"""
import copy
import numpy as np
import numpy.linalg as linalg
import math
import itertools as itt
class SensitivityMixin(object):
"""
SensitivityMixin provides methods for sensitivity analysis. That is,
methods to quantify the degree to which perturbations of a network's state
propagate and spread. As part of this, we also provide methods for
identifying "canalizing edges": edges for which a state of the source node
uniquely determines the state of the target regardless of other sources.
.. autosummary::
:nosignatures:
sensitivity
average_sensitivity
lambdaQ
difference_matrix
average_difference_matrix
is_canalizing
canalizing_edges
canalizing_nodes
The :class:`neet.boolean.BooleanNetwork` class derives from
SensitivityMixin to provide sensitivity analysis to all of Neet's Boolean
network models.
"""
def sensitivity(self, state, transitions=None):
"""
Compute the Boolean sensitivity at a given network state.
The sensitivity of a Boolean function :math:`f` on state vector
:math:`x` is the number of Hamming neighbors of :math:`x` on which the
function value is different than on :math:`x`, as defined in
[Shmulevich2004]_.
This method calculates the average sensitivity over all :math:`N`
boolean functions, where :math:`N` is the number of nodes in the
network.
.. rubric:: Examples
.. doctest:: sensitivity
>>> s_pombe.sensitivity([0, 0, 0, 0, 0, 1, 1, 0, 0])
1.0
>>> s_pombe.sensitivity([0, 1, 1, 0, 1, 0, 0, 1, 0])
0.4444444444444444
>>> c_elegans.sensitivity([0, 0, 0, 0, 0, 0, 0, 0])
1.75
>>> c_elegans.sensitivity([1, 1, 1, 1, 1, 1, 1, 1])
1.25
Optionally, the user can provide a pre-computed array of state
transitions to improve performance when this function is repeatedly
called.
.. doctest:: sensitivity
>>> trans = list(map(s_pombe.decode, s_pombe.transitions))
>>> s_pombe.sensitivity([0, 0, 0, 0, 0, 1, 1, 0, 0], transitions=trans)
1.0
>>> s_pombe.sensitivity([0, 1, 1, 0, 1, 0, 0, 1, 0], transitions=trans)
0.4444444444444444
:param state: a single network state
:type state: list, numpy.ndarray
:param transitions: precomputed state transitions (*optional*)
:type transitions: list, numpy.ndarray, None
:return: the sensitivity at the provided state
.. seealso:: :func:`average_sensitivity`
"""
encoder = self._unsafe_encode
distance = self.distance
neighbors = self.hamming_neighbors(state)
nextState = self.update(state)
# count sum of differences found in neighbors of the original
s = 0.
for neighbor in neighbors:
if transitions is not None:
newState = transitions[encoder(neighbor)]
else:
newState = self._unsafe_update(neighbor)
s += distance(newState, nextState)
return s / self.size
def difference_matrix(self, state, transitions=None):
"""
Compute the difference matrix at a given state.
For a network with :math:`N` nodes, with Boolean functions :math:`f_i`,
the difference matrix is a :math:`N \\times N` matrix
.. math::
A_{ij} = f_i(x) \\oplus f_i(x \\oplus e_j)
where :math:`e_j` is the network state with the :math:`j`-th node in
the :math:`1` state while all others are :math:`0`. In other words, the
element :math:`A_{ij}` signifies whether or not flipping the
:math:`j`-th node's state changes the subsequent state of the
:math:`i`-th node.
.. rubric:: Examples
.. doctest:: sensitivity
>>> s_pombe.difference_matrix([0, 0, 0, 0, 0, 0, 0, 0, 0])
array([[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 1., 1., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 1.],
[0., 0., 0., 1., 0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0., 1., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0., 1., 0., 1.],
[0., 1., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1., 0., 0., 0., 0.]])
>>> c_elegans.difference_matrix([0, 0, 0, 0, 0, 0, 0, 0])
array([[1., 0., 0., 0., 0., 0., 0., 1.],
[0., 0., 1., 1., 0., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 1., 0., 1.],
[0., 0., 0., 0., 0., 1., 1., 0.],
[1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 1.]])
:param state: the starting state
:type state: list, numpy.ndarray
:param transitions: precomputed state transitions (*optional*)
:type transitions: list, numpy.ndarray, None
:return: the difference matrix
.. seealso:: :func:`average_difference_matrix`
"""
# set up empty matrix
N = len(state)
Q = np.empty((N, N))
# list Hamming neighbors (in order!)
encoder = self._unsafe_encode
neighbors = self.hamming_neighbors(state)
nextState = self.update(state)
# count differences found in neighbors of the original
for j, neighbor in enumerate(neighbors):
if transitions is not None:
newState = transitions[encoder(neighbor)]
else:
newState = self._unsafe_update(neighbor)
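# (nextState[i] + newState[i]) % 2 computes the XOR in the definition
# A_ij = f_i(x) \oplus f_i(x \oplus e_j)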
Q[:, j] = [(nextState[i] + newState[i]) % 2 for i in range(N)]
return Q
def average_difference_matrix(self, states=None, weights=None, calc_trans=True):
"""
Compute the difference matrix, averaged over some states.
.. rubric:: Examples
.. doctest:: sensitivity
>>> s_pombe.average_difference_matrix()
array([[0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. ],
[0. , 0. , 0.25 , 0.25 , 0.25 , 0. , 0. , 0. ,
0. ],
[0.25 , 0.25 , 0.25 , 0. , 0. , 0.25 , 0. , 0. ,
0.25 ],
[0.25 , 0.25 , 0. , 0.25 , 0. , 0.25 , 0. , 0. ,
0.25 ],
[0. , 0. , 0. , 0. , 0. , 1. , 0. , 0. ,
0. ],
[0. , 0. , 0.0625, 0.0625, 0.0625, 0. , 0.0625, 0.0625,
0. ],
[0. , 0.5 , 0. , 0. , 0. , 0. , 0.5 , 0. ,
0.5 ],
[0. , 0.5 , 0. , 0. , 0. , 0. , 0. , 0.5 ,
0.5 ],
[0. , 0. , 0. , 0. , 1. , 0. , 0. , 0. ,
0. ]])
>>> c_elegans.average_difference_matrix()
array([[0.25 , 0.25 , 0. , 0. , 0. , 0.25 , 0.25 , 0.25 ],
[0. , 0. , 0.5 , 0.5 , 0. , 0. , 0. , 0. ],
[0.5 , 0. , 0.5 , 0. , 0.5 , 0. , 0. , 0. ],
[0. , 0. , 1. , 0. , 0. , 0. , 0. , 0. ],
[0. , 0.3125, 0.3125, 0.3125, 0.3125, 0.3125, 0. , 0.3125],
[0.5 , 0. , 0. , 0. , 0. , 0.5 , 0.5 , 0. ],
[1. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[0. , 0. , 0. , 0. , 0. , 0. , 0.5 , 0.5 ]])
:param states: the states to average over; all states if ``None``
:type states: list, numpy.ndarray, None
:param weights: weights for a weighted average over ``states``; uniform
weighting if ``None``
:type weights: list, numpy.ndarray, None
:param calc_trans: pre-compute all state transitions; ignored if
``states`` or ``weights`` is ``None``
:type calc_trans: bool
:return: the difference matrix as a :meth:`numpy.ndarray`.
.. seealso:: :func:`difference_matrix`
"""
N = self.size
Q = np.zeros((N, N))
if (states is not None) or (weights is not None):
# explicitly calculate difference matrix for each state
# optionally pre-calculate transitions
if calc_trans:
decoder = self.decode
trans = list(map(decoder, self.transitions))
else:
trans = None
# currently changes state generators to lists.
# is there a way to avoid this?
if states is None:
states = list(self)
else:
states = list(states)
if weights is None:
weights = np.ones(len(states))
else:
weights = list(weights)
if np.shape(weights) != (len(states),):
msg = "Weights must be a 1D array with length same as states"
raise(ValueError(msg))
norm = np.sum(weights)
for i, state in enumerate(states):
Q += weights[i] * self.difference_matrix(state, trans) / norm
else: # make use of sparse connectivity to be more efficient
state0 = np.zeros(N, dtype=int)
subspace = self.subspace
for i in range(N):
nodesInfluencingI = list(self.neighbors_in(i))
for jindex, j in enumerate(nodesInfluencingI):
# for each state of other nodes, does j matter?
otherNodes = copy.copy(nodesInfluencingI)
otherNodes.pop(jindex)
otherNodeStates = list(subspace(otherNodes, state0))
for state in otherNodeStates:
iState = state[i]
state[j] = 0
jOffNext = self._unsafe_update(state, index=i)[i]
state[i] = iState
state[j] = 1
jOnNext = self._unsafe_update(state, index=i)[i]
# are the results different?
Q[i, j] += (jOffNext + jOnNext) % 2
Q[i, j] /= float(len(otherNodeStates))
return Q
def is_canalizing(self, x, y):
"""
Determine whether a given network edge is canalizing.
An edge :math:`(y,x)` is canalyzing if :math:`x`'s value at :math:`t+1`
is fully determined when :math:`y`'s value has a particular value at
:math:`t`, regardless of the values of other nodes.
According to (Stauffer 1987):
::
"A rule [...] is called forcing, or canalizing, if at least one of
its :math:`K` arguments has the property that the result of the
function is already fixed if this argument has one particular
value, regardless of the values for the :math:`K-1` other
arguments."
Note that this is a definition for whether a node's rule is canalizing,
whereas this function calculates whether a specific edge is canalizing.
Under this definition, if a node has any incoming canalizing edges, then
its rule is canalizing.
.. rubric:: Examples
.. doctest:: sensitivity
>>> s_pombe.is_canalizing(1, 2)
True
>>> s_pombe.is_canalizing(2, 1)
False
>>> c_elegans.is_canalizing(7, 7)
True
>>> c_elegans.is_canalizing(1, 3)
True
>>> c_elegans.is_canalizing(4, 3)
False
:param x: target node's index
:type x: int
:param y: source node's index
:type y: int
:return: whether or not the edge ``(y,x)`` is canalizing; ``None`` if
the edge does not exist
.. seealso:: :func:`canalizing_edges`, :func:`canalizing_nodes`
"""
nodesInfluencingI = list(self.neighbors_in(x))
if (y not in nodesInfluencingI) or (x not in range(self.size)):
# can't be canalizing if j has no influence on i
return None # or False?
else:
jindex = nodesInfluencingI.index(y)
subspace = self.subspace
# for every state of other nodes, does j determine i?
otherNodes = list(copy.copy(nodesInfluencingI))
otherNodes.pop(jindex)
otherNodeStates = list(subspace(otherNodes, np.zeros(self.size, dtype=int)))
jOnForced, jOffForced = True, True
jOnForcedValue, jOffForcedValue = None, None
stateindex = 0
while (jOnForced or jOffForced) and stateindex < len(otherNodeStates):
state = otherNodeStates[stateindex]
# first hold j off
if jOffForced:
jOff = copy.copy(state)
jOff[y] = 0
jOffNext = self._unsafe_update(jOff, index=x)[x]
if jOffForcedValue is None:
jOffForcedValue = jOffNext
elif jOffForcedValue != jOffNext:
# then holding j off does not force i
jOffForced = False
# now hold j on
if jOnForced:
jOn = copy.copy(state)
jOn[y] = 1
jOnNext = self._unsafe_update(jOn, index=x)[x]
if jOnForcedValue is None:
jOnForcedValue = jOnNext
elif jOnForcedValue != jOnNext:
# then holding j on does not force i
jOnForced = False
stateindex += 1
# if we have checked all states, then the edge must be forcing
# print "jOnForced,jOffForced",jOnForced,jOffForced
return jOnForced or jOffForced
def canalizing_edges(self):
"""
Get the set of all canalizing edges in the network.
.. rubric:: Examples
.. doctest:: sensitivity
>>> s_pombe.canalizing_edges()
{(1, 2), (5, 4), (0, 0), (1, 3), (4, 5), (5, 6), (5, 7), (1, 4), (8, 4), (5, 2), (5, 3)}
>>> c_elegans.canalizing_edges()
{(1, 2), (3, 2), (1, 3), (7, 6), (6, 0), (7, 7)}
:return: the set of canalizing edges as in the form ``(target, source)``
.. seealso:: :func:`is_canalizing`, :func:`canalizing_nodes`
"""
canalizing_edges = set()
for x in range(self.size):
for y in self.neighbors_in(x):
if self.is_canalizing(x, y):
canalizing_edges.add((x, y))
return canalizing_edges
def canalizing_nodes(self):
"""
Get a set of all nodes with at least one incoming canalizing edge.
.. rubric:: Examples
.. doctest:: sensitivity
>>> s_pombe.canalizing_nodes()
{0, 1, 4, 5, 8}
>>> c_elegans.canalizing_nodes()
{1, 3, 6, 7}
:return: the set indices of nodes with at least one canalizing input edge
.. seealso:: :func:`is_canalizing`, :func:`canalizing_edges`
"""
nodes = [e[0] for e in self.canalizing_edges()]
return set(np.unique(nodes))
def lambdaQ(self, **kwargs):
"""
Compute the sensitivity eigenvalue, :math:`\\lambda_Q`. That is, the
largest eigenvalue of the sensitivity matrix
:func:`average_difference_matrix`.
This is analogous to the eigenvalue calculated in [Pomerance2009]_.
.. rubric:: Examples
.. doctest:: sensitivity
>>> s_pombe.lambdaQ()
0.8265021276831896
>>> c_elegans.lambdaQ()
1.263099227661824
:return: the sensitivity eigenvalue (:math:`\\lambda_Q`) of ``net``
.. seealso:: :func:`average_difference_matrix`
"""
Q = self.average_difference_matrix(**kwargs)
return max(abs(linalg.eigvals(Q)))
def average_sensitivity(self, states=None, weights=None, calc_trans=True):
"""
Calculate average Boolean network sensitivity, as defined in
[Shmulevich2004]_.
The sensitivity of a Boolean function :math:`f` on state vector :math:`x`
is the number of Hamming neighbors of :math:`x` on which the function
value is different than on :math:`x`.
The average sensitivity is an average taken over initial states.
.. rubric:: Examples
.. doctest:: sensitivity
>>> c_elegans.average_sensitivity()
1.265625
>>> c_elegans.average_sensitivity(states=[[0, 0, 0, 0, 0, 0, 0, 0],
... [1, 1, 1, 1, 1, 1, 1, 1]])
...
1.5
>>> c_elegans.average_sensitivity(states=[[0, 0, 0, 0, 0, 0, 0, 0],
... [1, 1, 1, 1, 1, 1, 1, 1]], weights=[0.9, 0.1])
...
1.7
>>> c_elegans.average_sensitivity(states=[[0, 0, 0, 0, 0, 0, 0, 0],
... [1, 1, 1, 1, 1, 1, 1, 1]], weights=[9, 1])
...
1.7
:param states: The states to average over; all states if ``None``
:type states: list, numpy.ndarray, None
:param weights: weights for a weighted average over ``states``; all
:math:`1`s if ``None``.
:type weights: list, numpy.ndarray, None
:param calc_trans: pre-compute all state transitions; ignored if
``states`` or ``weights`` is ``None``.
:return: the average sensitivity of ``net``
.. seealso:: :func:`sensitivity`
"""
Q = self.average_difference_matrix(states=states, weights=weights,
calc_trans=calc_trans)
return np.sum(Q) / self.size
def C_sensitivity_at_x(self, state, transitions=None, c=1):
"""
Compute the c-sensitivity of the network at a given state.
The c-sensitivity of :math:`f(x_1, \\ldots, x_n)` at :math:`x` is defined as
the number of c-Hamming neighbors of :math:`x` (states that differ from
:math:`x` in exactly :math:`c` positions) on which the function value is
different from its value on :math:`x`.
Note: this implementation accumulates the Hamming distance between the
perturbed and unperturbed successor states rather than a 0/1 indicator, and
normalizes by the number of c-Hamming neighbors.
:param state: a single network state
:param transitions: precomputed state transitions (*optional*)
:param c: the number of nodes to perturb simultaneously
:return: the (normalized) c-sensitivity at the provided state
"""
encoder = self._unsafe_encode
distance = self.distance
state_copy = copy.copy(state)
nextState = self.update(state)
# every subset I of {1, ..., n} with |I| = c defines one c-Hamming neighbor,
# obtained by flipping the nodes in I; for c == 0 the only "neighbor" is the
# state itself, which contributes zero
I_comb_iter = itt.combinations(range(self.size), c)
def c_hamming_neighbors():
# return the next c-Hamming neighbor (the state XORed with the indicator
# vector of I), or None once all subsets have been consumed
try:
nxt = next(I_comb_iter)
XORed = copy.copy(state_copy)
for i in nxt:
XORed[i] ^= 1
return XORed
except StopIteration:
return None
s = 0.
neighbor_count = 0
neighbor = c_hamming_neighbors()
while neighbor is not None:
if transitions is not None:
newState = transitions[encoder(neighbor)]
else:
newState = self._unsafe_update(neighbor)
s += distance(newState, nextState)
neighbor_count += 1
neighbor = c_hamming_neighbors()
return s / neighbor_count
def Average_c_sensitivity(self, states=None, calc_trans=True, c=1):
"""
Compute the c-sensitivity averaged over ``states``, or over all
:math:`2^n` network states if ``states`` is ``None``.
"""
if calc_trans:
decoder = self.decode
trans = list(map(decoder, self.transitions))
else:
trans = None
s = 0
if states is not None:
for state in states:
s += self.C_sensitivity_at_x(state, trans, c)
return s / len(states)
# otherwise enumerate every network state by choosing which nodes are on
for n in range(self.size + 1):
for on_nodes in itt.combinations(range(self.size), n):
state_array = [0 for _ in range(self.size)]
for index in on_nodes:
state_array[index] = 1
s += self.C_sensitivity_at_x(state_array, trans, c)
s = s / np.power(2, self.size)
# the average c-sensitivity must lie in the interval [0, (n choose c)],
# where n is the size of the network
upper_bound = math.factorial(self.size) / (math.factorial(c) * math.factorial(self.size - c))
if s > upper_bound or s < 0:
raise ValueError('This value of s should not be possible and the code is therefore wrong')
print("s / upper_bound = normalized average c-sensitivity: ", s / upper_bound)
return s
|
# Generated by Django 2.2.5 on 2019-09-05 00:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('challenge', '0003_challenge_image'),
]
operations = [
migrations.AddField(
model_name='release',
name='version',
field=models.CharField(default='2019_01', max_length=31),
preserve_default=False,
),
]
|
#!/usr/bin/env python3
import retrieve_author_ppn as autppn
import retrieve_references as refs
import zot_helpers as pyzot
from itertools import islice
researchers = autppn.constructOutput('test.csv')
autppn.writeCsv('out.csv', researchers)
for researcher in researchers:
ppn = researcher['ppn']
creator_names = researcher['firstname']+" "+researcher['lastname']
collection_name = researcher['lastname'].lower()+"_"+researcher['ppn']
if ppn != "":
json_loaded = refs.getReferences(ppn)
biblio = refs.getRefsByRole(json_loaded, 'aut', creator_names)
total_items = len(biblio)
print(f"Pushing {total_items} items in Zotero bibliography : {collection_name}")
collection_id = pyzot.create_collection(collection_name)
# print(collection_id)
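# push items in batches of 50, the maximum number of items the Zotero write
# API accepts per request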
for i in range(0, total_items, 50):
start = i
if i+50 <= total_items:
end = i+50
else:
end = total_items
pyzot.create_items(collection_id, list(islice(biblio, start, end)))
|
"""
***************************************************************************
OshLanduse.py
---------------------
Date : Nov 2020
Copyright : (C) 2020 by Ong See Hai
Email : ongseehai at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Ong See Hai'
__date__ = 'Nov 2020'
__copyright__ = '(C) 2020, Ong See Hai'
from qgis.PyQt.QtCore import QCoreApplication, QVariant
from qgis.core import (QgsProcessing,
QgsProcessingAlgorithm,
QgsProcessingContext,
QgsProcessingException,
QgsProcessingParameterMapLayer,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterVectorDestination,
QgsProcessingUtils,
QgsFeatureSink,
QgsFeature, QgsField, QgsFields,
QgsProject,
QgsExpressionContextUtils,
QgsCoordinateReferenceSystem
)
from qgis import processing
class Landuse(QgsProcessingAlgorithm):
INPUT = 'INPUT'
INPUT2 = 'INPUT2'
INPUT3 = 'INPUT3'
INPUT4 = 'INPUT4'
INPUT5 = 'INPUT5'
OUTPUT = 'OUTPUT'
def name(self):
return 'landuse'
def displayName(self):
return 'Landuse'
def createInstance(self):
return Landuse()
def group(self):
return 'IMP Tools'
def groupId(self):
return 'imp'
def shortHelpString(self):
return ( 'Create landuse polygons '
'\n'
'Road polygons are automatically identified and '
'a road landuse code is inserted into the field luc.'
'\n'
'The Landuse line and Site boundary line input map layers can be AutoCAD DXF files. '
'\n'
'After running this algorithm, the user can edit and insert other landuse codes with QGIS tools.'
'\n'
'If the landuse polygons do not form correctly, '
'snap and trim the intersecting lines from the '
'Road casing, Landuse line and Site boundary line map layers.'
)
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterMapLayer(
self.INPUT, 'INPUT: Road casing',
types=[QgsProcessing.TypeVectorLine],defaultValue='Casing'))
self.addParameter(QgsProcessingParameterMapLayer(
self.INPUT2, 'INPUT2: Landuse line',
types=[QgsProcessing.TypeVectorLine],defaultValue='Landuse_line'))
self.addParameter(QgsProcessingParameterMapLayer(
self.INPUT3, 'INPUT3: Site boundary line',
types=[QgsProcessing.TypeVectorLine],defaultValue='Site_boundary'))
self.addParameter(QgsProcessingParameterMapLayer(
self.INPUT4, 'INPUT4: Road node',
types=[QgsProcessing.TypeVectorPoint],defaultValue='Node'))
self.addParameter(QgsProcessingParameterMapLayer(
self.INPUT5, 'INPUT5: Road segment',
types=[QgsProcessing.TypeVectorLine],defaultValue='Segment'))
self.addParameter(QgsProcessingParameterFeatureSink(
self.OUTPUT, 'Landuse' ) )
def processAlgorithm(self, parameters, context, feedback):
caslay = self.parameterAsVectorLayer(parameters,
self.INPUT, context )
plolinlay = self.parameterAsVectorLayer(parameters,
self.INPUT2, context )
sitbdylay = self.parameterAsVectorLayer(parameters,
self.INPUT3, context )
nodlay = self.parameterAsVectorLayer(parameters,
self.INPUT4, context )
seglay = self.parameterAsVectorLayer(parameters,
self.INPUT5, context )
# Project variables
project = QgsProject.instance()
scope = QgsExpressionContextUtils.projectScope(project)
crs = scope.variable('project_crs')
try:
projcrs = QgsCoordinateReferenceSystem( crs )
except:
raise QgsProcessingException ('Project coordinate reference system not set')
# Merge plot and site boundary lines
layer = processing.run('native:mergevectorlayers',
{'CRS': projcrs,
'LAYERS': [ caslay, plolinlay, sitbdylay ],
'OUTPUT': 'TEMPORARY_OUTPUT' } ,
context=context, feedback=feedback, is_child_algorithm=True
) ['OUTPUT']
# Polygonize
layer = processing.run( "native:polygonize",
{'INPUT':layer,'KEEP_FIELDS':False,
'OUTPUT':'TEMPORARY_OUTPUT'} ,
context=context, feedback=feedback, is_child_algorithm=True
) ['OUTPUT']
layer = processing.run('native:refactorfields',
{'FIELDS_MAPPING': [
{'expression': '$id','length': 0,'name': 'plotid','precision': 0,'type': 4},
{'expression': '', 'length': 0,'name': 'luc','precision': 0,'type': 10} ],
'INPUT': layer,
'OUTPUT': 'TEMPORARY_OUTPUT'},
context=context, feedback=feedback, is_child_algorithm=True)['OUTPUT']
processing.run("native:createspatialindex",
{'INPUT': layer },
context=context, feedback=feedback, is_child_algorithm=True)
processing.run("native:selectbylocation",
{'INPUT':layer,'INTERSECT':seglay,
'PREDICATE':[0],'METHOD':0},
context=context, feedback=feedback, is_child_algorithm=True)
lulay = QgsProcessingUtils.mapLayerFromString(layer, context)
sf = lulay.selectedFeatures()
lulay.startEditing()
for f in sf:
f['luc'] = 'rod'
lulay.updateFeature(f)
lulay.commitChanges()
lulay.removeSelection()
totarea = totrdarea = 0
for f in lulay.getFeatures():
totarea = totarea + f.geometry().area()
luc = f['luc']
if luc:
if luc in ('rod',):
totrdarea = totrdarea + f.geometry().area()
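# convert m^2 to hectares (assuming the project CRS is in metres) and round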
totarea = totarea/10000
totarea = round (totarea,1)
totrdarea = totrdarea/10000
totrdarea = round (totrdarea,1)
(sink, self.dest_id) = self.parameterAsSink(
parameters,
self.OUTPUT,
context,
lulay.fields(),
lulay.wkbType(),
lulay.sourceCrs()
)
sink.addFeatures(lulay.getFeatures(),QgsFeatureSink.FastInsert)
feedback.pushInfo( '\n\n ######################################\n')
feedback.pushInfo( '\n\n {} LANDUSE POLYGONS CREATED '.format(lulay.featureCount() ) )
feedback.pushInfo( 'TOTAL AREA: {} HECTARES'.format(totarea) )
feedback.pushInfo( 'TOTAL ROAD AREA: {} HECTARES OR {}%'.format(totrdarea, round((totrdarea/totarea*100),1) ) )
feedback.pushInfo( '\n\nOshLanduse.py v2.1\n'
'######################################\n\n')
return {self.OUTPUT: self.dest_id }
def postProcessAlgorithm(self, context, feedback):
project = QgsProject.instance()
scope = QgsExpressionContextUtils.projectScope(project)
projfold = scope.variable('project_folder')
qml = projfold + '\\qsettings\\Landuse.qml'
layer2 = QgsProcessingUtils.mapLayerFromString(self.dest_id, context)
layer2.loadNamedStyle(qml)
return {self.OUTPUT: self.dest_id}
|
SECRET_KEY = 'abc123'
TEMPLATE_DIRS = (
'jsinclude/templates',
)
TEMPLATE_DEBUG = False
JSINCLUDE_STATIC_PATH = 'static/test/path'
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Power by Zongsheng Yue 2020-09-23 10:23:45
import cv2
import argparse
import numpy as np
from pathlib import Path
from scipy.io import savemat
from math import ceil
parser = argparse.ArgumentParser()
parser.add_argument('--ntu_path', type=str, default='/ssd1t/NTURain/original/',
help="Path of the original NTURain datasets, (default: /ssd1t/NTURain/original/)")
parser.add_argument('--train_path', type=str, default='/ssd1t/NTURain/train/',
help="Path to save the prepared training datasets, (default: /ssd1t/NTURain/train/)")
parser.add_argument('--patch_size', type=int, default=64,
help="Spatial size of the cropped training patches, (default: 64)")
parser.add_argument('--batch_size', type=int, default=12,
help="Number of patches grouped into each saved .mat file, (default: 12)")
args = parser.parse_args()
overlap = int(args.patch_size/4)
step_size = args.patch_size - overlap
for ii in range(1, 9):
print('Scene {:d}'.format(ii))
# ground truth data
gt_floder = Path(args.ntu_path) / Path('t'+str(ii)+'_GT')
gt_im_list = sorted([x for x in gt_floder.glob('*.jpg')])
for jj, im_path in enumerate(gt_im_list):
im = cv2.imread(str(im_path), flags=cv2.IMREAD_COLOR)[:, :, ::-1].transpose([2,0,1])
if jj == 0:
gt_data_temp = im[:, np.newaxis, ] # c x 1 x h x w
else:
gt_data_temp = np.concatenate((gt_data_temp, im[:, np.newaxis,]), axis=1) # c x num_frame x h x w
# crop groundtruth into patch
c, num_frame, h, w = gt_data_temp.shape
inds_h = list(range(0, h-args.patch_size, step_size)) + [h-args.patch_size,]
inds_w = list(range(0, w-args.patch_size, step_size)) + [w-args.patch_size,]
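# e.g. with the default patch_size=64 (overlap=16, step_size=48) and h=128,
# inds_h = [0, 48, 64]: the last patch is anchored flush with the border and
# partially overlaps its neighbour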
num_patch = len(inds_h) * len(inds_w)
gt_data = np.zeros(shape=[num_patch, c, num_frame, args.patch_size, args.patch_size], dtype=np.uint8)
iter_patch = 0
for hh in inds_h:
for ww in inds_w:
gt_data[iter_patch, ] = gt_data_temp[:, :, hh:hh+args.patch_size, ww:ww+args.patch_size]
iter_patch += 1
for kk in range(ceil(num_patch / args.batch_size)):
start = kk * args.batch_size
end = min((kk+1)*args.batch_size, num_patch)
gt_data_batch = gt_data[start:end,]
save_path = Path(args.train_path) / ('t'+str(ii)+'_gt_'+str(kk+1)+'.mat')
if save_path.exists():
save_path.unlink()
savemat(str(save_path), {'gt_data':gt_data_batch})
# rain data
rain_floders = sorted([x for x in Path(args.ntu_path).glob('t'+str(ii)+'_Rain_*')])
for kk, current_floder in enumerate(rain_floders):
print(' Rain type: {:d}'.format(kk+1))
rain_im_list = sorted([x for x in current_floder.glob('*.jpg')])
for jj, im_path in enumerate(rain_im_list):
im = cv2.imread(str(im_path), flags=cv2.IMREAD_COLOR)[:, :, ::-1].transpose([2,0,1])
if jj == 0:
rain_data_temp = im[:, np.newaxis, ] # c x 1 x h x w
else:
rain_data_temp = np.concatenate((rain_data_temp, im[:, np.newaxis,]), axis=1) # c x num_frame x h x w
assert gt_data_temp.shape == rain_data_temp.shape
# crop rain data into patch
rain_data = np.zeros(shape=[num_patch, c, num_frame, args.patch_size, args.patch_size], dtype=np.uint8)
iter_patch = 0
for hh in inds_h:
for ww in inds_w:
rain_data[iter_patch, ] = rain_data_temp[:, :, hh:hh+args.patch_size, ww:ww+args.patch_size]
iter_patch += 1
for ss in range(ceil(num_patch / args.batch_size)):
start = ss * args.batch_size
end = min((ss+1)*args.batch_size, num_patch)
rain_data_batch = rain_data[start:end,]
save_path = Path(args.train_path) / ('t'+str(ii)+'_rain_'+str(kk+1)+'_'+str(ss+1)+'.mat')
if save_path.exists():
save_path.unlink()
savemat(str(save_path), {'rain_data':rain_data_batch})
|
pkgname = "elftoolchain"
_commit = "f7e9afc6f9ad0d84ea73b4659c5d6d13275d2306"
pkgver = "0.7.1_svn20210623"
pkgrel = 0
build_style = "makefile"
makedepends = ["libarchive-devel"]
make_build_args = [
"WITH_ADDITIONAL_DOCUMENTATION=no",
"WITH_TESTS=no", "MANTARGET=man"
]
# work around all sorts of bmake weirdness
make_install_args = make_build_args + [
"LIBOWN=", "BINOWN=", "BINMODE=755", "NONBINMODE=644", "DIRMODE=755",
"MANTARGET=man", "MANDIR=/usr/share/man"
]
make_use_env = True
depends = [f"libelf={pkgver}-r{pkgrel}"]
pkgdesc = "BSD licensed ELF toolchain"
maintainer = "q66 <q66@chimera-linux.org>"
license = "BSD-2-Clause"
url = "https://sourceforge.net/projects/elftoolchain"
sources = [f"https://github.com/{pkgname}/{pkgname}/archive/{_commit}.tar.gz"]
sha256 = ["3d9e0513af4b7cb8ac7944d98057b8d61fcc4ff326b030a7b06006c0abb7922c"]
options = ["bootstrap", "!check"]
if not current.bootstrapping:
hostmakedepends = ["bsdm4", "byacc", "flex"]
def init_build(self):
flags = self.get_cflags(shell = True) + " " + \
self.get_ldflags(shell = True)
self.env["SHLIB_LDADD"] = flags
# abuse this to work around elftoolchain's fucky build system
self.env["LDSTATIC"] = flags
def post_install(self):
self.install_license("LICENSE")
# fix some permissions
for f in (self.destdir / "usr/lib").glob("*.so.*"):
f.chmod(0o755)
# install a musl-compatible elfdefinitions.h
self.install_file(self.files_path / "elfdefinitions.h", "usr/include/sys")
@subpackage("elftoolchain-devel")
def _devel(self):
self.depends = [f"{pkgname}={pkgver}-r{pkgrel}"]
self.pkgdesc = pkgdesc + " - development files"
return [
"usr/include",
"usr/lib/*.a",
"usr/lib/*.so",
"usr/share/man/man3"
]
@subpackage("libelf")
def _libelf(self):
self.pkgdesc = pkgdesc + " - libelf"
return [
"usr/lib/*.so.*"
]
|
import torch
import torch.nn as nn
import numpy as np
from models import ModelBuilder
from torch.autograd import Variable
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from dataset import TrainDataset
import os
import nibabel as nib
import argparse
from utils import AverageMeter
from distutils.version import LooseVersion
import math
def train(train_loader, model, criterion, optimizer, epoch, args):
losses = AverageMeter()
model.train()
for iteration, sample in enumerate(train_loader):
image = sample['images'].float()
target = sample['labels'].long()
image = Variable(image).cuda()
label = Variable(target).cuda()
# The dimension of out should be in the dimension of B,C,W,H,D
# transform the prediction and label
out = model(image)
out = out.permute(0,2,3,4,1).contiguous().view(-1, args.num_classes)
# extract the center part of the labels
start_index = []
end_index = []
for i in range(3):
start = int((args.crop_size[i] - args.center_size[i])/2)
start_index.append(start)
end_index.append(start + args.center_size[i])
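# e.g. with the default crop_size=[75,75,75] and center_size=[47,47,47],
# start_index=[14,14,14] and end_index=[61,61,61], so the loss is computed
# on the central 47^3 voxels of each 75^3 patch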
label = label[:, start_index[0]:end_index[0], start_index[1]: end_index[1], start_index[2]: end_index[2]]
label = label.contiguous().view(-1).cuda()
loss = criterion(out, label)
losses.update(loss.data[0],image.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# adjust learning rate
cur_iter = iteration + (epoch - 1) * args.epoch_iters
adjust_learning_rate(optimizer, cur_iter, args)
print(' * i {} | lr: {:.6f} | Training Loss: {losses.avg:.3f}'.format(iteration, args.running_lr, losses=losses))
print(' * EPOCH {epoch} | Training Loss: {losses.avg:.3f}'.format(epoch=epoch, losses=losses))
def save_checkpoint(state, epoch, args):
filename = args.ckpt + '/' + str(epoch) + '_checkpoint.pth.tar'
print(filename)
torch.save(state, filename)
def adjust_learning_rate(optimizer, cur_iter, args):
scale_running_lr = ((1. - float(cur_iter) / args.max_iters) ** args.lr_pow)
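# e.g. with the default lr=1e-3 and lr_pow=0.9 this decays the rate
# polynomially from 1e-3 at iteration 0 towards 0 at max_iters
# (roughly 5.4e-4 halfway through)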
args.running_lr = args.lr * scale_running_lr
for param_group in optimizer.param_groups:
param_group['lr'] = args.running_lr
def main(args):
# import network architecture
builder = ModelBuilder()
model = builder.build_net(
arch=args.id,
num_input=args.num_input,
num_classes=args.num_classes,
num_branches=args.num_branches,
padding_list=args.padding_list,
dilation_list=args.dilation_list)
model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpus))).cuda()
cudnn.benchmark = True
# collect the number of parameters in the network
print("------------------------------------------")
print("Network Architecture of Model %s:" % (args.id))
num_para = 0
for name, param in model.named_parameters():
num_mul = 1
for x in param.size():
num_mul *= x
num_para += num_mul
print(model)
print("Number of trainable parameters %d in Model %s" % (num_para, args.id))
print("------------------------------------------")
# set the optimizer and loss
optimizer = optim.RMSprop(model.parameters(), args.lr, alpha=args.alpha, eps=args.eps, weight_decay=args.weight_decay, momentum=args.momentum)
criterion = nn.CrossEntropyLoss()
if args.resume:
if os.path.isfile(args.resume):
print("=> Loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['opt_dict'])
print("=> Loaded checkpoint (epoch {})".format(checkpoint['epoch']))
else:
print("=> No checkpoint found at '{}'".format(args.resume))
# loading data
tf = TrainDataset(train_dir, args)
train_loader = DataLoader(tf, batch_size=args.batch_size, shuffle=args.shuffle, num_workers=args.num_workers, pin_memory=True)
print("Start training ...")
for epoch in range(args.start_epoch + 1, args.num_epochs + 1):
train(train_loader, model, criterion, optimizer, epoch, args)
# save models
if epoch > args.particular_epoch:
if epoch % args.save_epochs_steps == 0:
save_checkpoint({'epoch': epoch, 'state_dict': model.state_dict(), 'opt_dict': optimizer.state_dict()}, epoch, args)
print("Training Done")
if __name__ == '__main__':
assert LooseVersion(torch.__version__) >= LooseVersion('0.3.0'), \
'PyTorch>=0.3.0 is required'
parser = argparse.ArgumentParser()
# Model related arguments
parser.add_argument('--id', default='AFN1',
help='a name for identitying the model. Choose from the following options: AFN1-6, Basic, ASPP_c, ASPP_s.')
parser.add_argument('--padding_list', default=[0,4,8,12], nargs='+', type=int,
help='list of the paddings in the parallel convolutions')
parser.add_argument('--dilation_list', default=[2,6,10,14], nargs='+', type=int,
help='list of the dilation rates in the parallel convolutions')
parser.add_argument('--num_branches', default=4, type=int,
help='the number of parallel convolutions in autofocus layer')
# Path related arguments
parser.add_argument('--train_path', default='datalist/train_list.txt',
help='text file of the name of training data')
parser.add_argument('--root_path', default='./',
help='root directory of data')
parser.add_argument('--ckpt', default='./saved_models',
help='folder to output checkpoints')
# Data related arguments
parser.add_argument('--crop_size', default=[75,75,75], nargs='+', type=int,
help='crop size of the input image (int or list)')
parser.add_argument('--center_size', default=[47,47,47], nargs='+', type=int,
help='the corresponding output size of the input image (int or list)')
parser.add_argument('--num_classes', default=5, type=int,
help='number of classes')
parser.add_argument('--num_input', default=5, type=int,
help='number of input image for each patient plus the mask')
parser.add_argument('--num_workers', default=16, type=int,
help='number of data loading workers')
parser.add_argument('--random_flip', default=True, type=bool,
help='if horizontally flip images when training')
parser.add_argument('--normalization', default=True, type=bool,
help='normalizae the data before training')
parser.add_argument('--shuffle', default=True, type=bool,
help='if shuffle the data during training')
parser.add_argument('--mask', default=True, type=bool,
help='if have the mask')
# optimization related arguments
parser.add_argument('--num_gpus', default=4, type=int, help='number of GPUs to use')
parser.add_argument('--batch_size', default=10, type=int,
help='training batch size')
parser.add_argument('--num_epochs', default=400, type=int,
help='epochs for training')
parser.add_argument('--start_epoch', default=0, type=int,
help='epoch to start training. useful if continue from a checkpoint')
parser.add_argument('--lr', default=1e-3, type=float,
help='start learning rate')
parser.add_argument('--lr_pow', default=0.9, type=float,
help='power in poly to drop learning rate')
parser.add_argument('--optim', default='RMSprop', help='optimizer')
parser.add_argument('--alpha', default='0.9', type=float, help='alpha in RMSprop')
parser.add_argument('--eps', default=10**(-4), type=float, help='eps in RMSprop')
parser.add_argument('--weight_decay', default=1e-4, type=float, help='weights regularizer')
parser.add_argument('--momentum', default=0.6, type=float, help='momentum for RMSprop')
parser.add_argument('--save_epochs_steps', default=10, type=int,
help='frequency to save models after a particular number of epochs')
parser.add_argument('--particular_epoch', default=200, type=int,
help='after this number, we will save models more frequently')
parser.add_argument('--resume', default='',
help='the checkpoint that resumes from')
parser.add_argument('--num_round', default=1, type=int)
args = parser.parse_args()
print("Input arguments:")
for key, val in vars(args).items():
print("{:16} {}".format(key, val))
train_file = open(args.train_path, 'r')
train_dir = train_file.readlines()
args.ckpt = os.path.join(args.ckpt, args.id, str(args.num_round))
print('Models are saved at %s' % (args.ckpt))
if not os.path.isdir(args.ckpt):
os.makedirs(args.ckpt)
if args.start_epoch > 1:
args.resume = args.ckpt + '/' + str(args.start_epoch) + '_checkpoint.pth.tar'
args.running_lr = args.lr
args.epoch_iters = math.ceil(int(len(train_dir)/args.num_input)/args.batch_size)
args.max_iters = args.epoch_iters * args.num_epochs
assert len(args.padding_list) == args.num_branches, \
'# parallel convolutions should be the same as the length of padding list'
assert len(args.dilation_list) == args.num_branches, \
'# parallel convolutions should be the same as # dilation rates'
assert isinstance(args.crop_size, (int, list))
if isinstance(args.crop_size, int):
args.crop_size = [args.crop_size, args.crop_size, args.crop_size]
assert isinstance(args.center_size, (int, list))
if isinstance(args.center_size, int):
args.center_size = [args.center_size, args.center_size, args.center_size]
main(args)
|
#!/usr/bin/env python
# coding: utf-8
import os
import csv
import re
def get_plain_content_simple(verdict, date, file_num):
try:
content = ''
title = re.search("^\S、\S*(?:上訴|原告).{0,6}(?:主張|意旨)\S*(?:︰|:)", verdict, re.M).group(0)
number_list = ['一', '二' ,'三', '四', '五']
if any(num in title for num in number_list):
content_line = re.split('\n一、|\n二、|\n三、|\n四、|\n五、' ,verdict)
else:
content_line = re.split('\n壹、|\n貳、|\n參、|\n肆、|\n伍、' ,verdict)
for line in content_line:
search_result = re.search("^\S*(?:上訴|原告).{0,6}(?:主張|意旨)\S*(?:︰|:)", line, re.M)
if (search_result != None):
content = line.replace(' ', '')
content_num = len(content)
break
if content == '':
content = '*'
content_num = -1
except:
content_num = '*'
# save csv file
filepath = 'analysis_' + date + '/plain_content_num_' + date + '.csv'
if not os.path.isfile(filepath):
with open(filepath, 'a', encoding = 'big5', newline='\n') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['案件編號', '原告字數'])
with open(filepath, 'a', encoding = 'big5', newline='\n') as csvfile:
writer = csv.writer(csvfile)
writer.writerow([file_num,content_num])
return content, content_num
|
# reduce applies a function to a sequence [x1, x2, x3, ...]; the function must take two
# arguments, and reduce keeps accumulating its result with the next element of the sequence.
# In effect, reduce(f, [x1, x2, x3, x4]) = f(f(f(x1, x2), x3), x4)
from functools import reduce
def add(x,y):
return x + y
res = reduce(add, [1,3,5,7,9])
print(res)
# Exercise: write a prod() function that accepts a list and uses reduce() to compute its product
print('---------------------------')
def prod(x,y):
return x*y
res = reduce(prod,[1,3,5,7,9])
print(res)
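# A compact variant (added for illustration, not part of the original exercise):
# the same product written with an inline lambda and an explicit initial value of 1
print('---------------------------')
res = reduce(lambda x, y: x * y, [1, 3, 5, 7, 9], 1)
print(res)  # 945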
|
#!/usr/bin/env python
# ========================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hadoop.io.Writable import AbstractValueWritable
class BytesWritable(AbstractValueWritable):
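# Mirrors Hadoop's BytesWritable wire format: an int length prefix written via
# DataOutput, followed by the raw bytes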
def write(self, data_output):
data_output.writeInt(len(self._value))
data_output.write(self._value)
def readFields(self, data_input):
size = data_input.readInt()
self._value = data_input.readFully(size)
def toString(self):
return ''.join(chr(x % 256) for x in self._value)
|
from typing import Tuple, List
import torch
import torch.nn as nn
from torch import Tensor
from lstm_cell_stack.lstm_cell_stack import LSTMCellStack
from lstm_stack.lstm_stack import LSTMStack
class LSTMAutoencoder(nn.Module):
"""
Implementation of the model described in 'Unsupervised Learning of Video
Representations using LSTMs', N. Srivastava, E. Mansimov, R. Salakhutdinov
https://arxiv.org/pdf/1502.04681.pdf
It is composed by an LSTM which acts as an encoder for a video sequence
and one or multiple decoders (LSTM but possibly other models too) that,
given the same input representations, execute various tasks.
"""
def __init__(self, input_size: int, hidden_size: List[int],
batch_first: bool, decoding_steps=-1):
super(LSTMAutoencoder, self).__init__()
self.batch_first = batch_first
self.input_size = input_size
self.decoding_steps = decoding_steps
self.encoder = LSTMStack(
input_size=self.input_size,
hidden_size=hidden_size,
batch_first=False
)
sizes = [self.input_size, *hidden_size]
decoding_sizes = list(reversed(sizes))
self.input_reconstruction = LSTMCellStack(
input_size=self.input_size,
hidden_size=decoding_sizes
)
# self.future_prediction = LSTMCellStack(
# input_size=self.input_size,
# hidden_size=decoding_sizes
# )
def forward(self, input_sequence: Tensor) -> Tensor:
sequence = input_sequence.transpose(0,1) if self.batch_first else input_sequence # always work in sequence-first mode
sequence_len = sequence.size(0)
# encode
_, hidden_state = self.encoder(sequence) # discard output, we are interested only in hidden state to initialize the decoders
# LSTM state has shape (num_layers * num_directions, batch, hidden_size) =
# (1, batch, hidden_size) but LSTMCell expects h and c to have shape
# (batch, hidden_size), so we have to remove the first dimension
h_n, c_n = hidden_state
h_n_last, c_n_last = h_n[-1], c_n[-1] # take the last layer's hidden state ...
representation = (h_n_last.squeeze(dim=0), c_n_last.squeeze(dim=0)) # ... and use it as compressed representation of what the model has seen so far
#last_frame = sequence[-1, :]
steps = self.decoding_steps if self.decoding_steps != -1 else sequence_len
# decode for input reconstruction
output_seq_recon = LSTMAutoencoder._decode(self.input_reconstruction, sequence, # last_frame,
representation, steps)
# decode for future prediction
#output_seq_pred = LSTMAutoencoder._decode(self.future_prediction, last_frame,
# representation, steps)
if self.batch_first: # if input was batch_first restore dimension order
reconstruction = output_seq_recon.transpose(0,1)
# prediction = output_seq_pred .transpose(0,1)
else:
reconstruction = output_seq_recon
# prediction = output_seq_pred
return reconstruction # (reconstruction, prediction)
@staticmethod
def _decode(decoder: LSTMCellStack, input_sequence: Tensor,
representation: Tuple[Tensor, Tensor], steps: int) -> Tensor:
output_seq = []
#output = input
sequence_reversed = input_sequence.flip(0)
h_0, c_0 = decoder.init_hidden(input_sequence.size(1))
# use encoder's last layer hidden state to initalize decoders hidden state
h_0[0], c_0[0] = representation[0], representation[1]
state = (h_0, c_0)
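# decoding here is "conditioned" in the sense of the paper: each step is fed
# the ground-truth frame of the reversed input sequence (teacher forcing)
# rather than the decoder's own previous output (the commented-out variant)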
for t in range(steps):
#output, state = decoder(output, state)
output, state = decoder(sequence_reversed[t,:], state)
output_seq.append(output)
return torch.stack(output_seq, dim=0) # dim 0 because we are working with batch_first=False
class ImgLSTMAutoencoder(nn.Module):
def __init__(self, image_size: Tuple[int, int, int], hidden_size: List[int],
batch_first: bool, decoding_steps=-1):
super(ImgLSTMAutoencoder, self).__init__()
self.image_size = image_size
self.input_size = image_size[0] * image_size[1] * image_size[2]
self.batch_first = batch_first
self.lstm_autoencoder = LSTMAutoencoder(self.input_size, hidden_size, False, decoding_steps)
def forward(self, input: Tensor) -> Tensor:
sequence = input.transpose(0,1) if self.batch_first else input # always work in sequence-first mode
sequence_len = sequence.size(0)
batch_size = sequence.size(1)
# flatten each frame; reshape is used because the transposed view may be non-contiguous
flattened_sequence = sequence.reshape((sequence_len, batch_size, -1))
# reconstruction, prediction = self.lstm_autoencoder(flattened_sequence)
reconstruction = self.lstm_autoencoder(flattened_sequence)
sequence_shape = (sequence_len, batch_size,) + self.image_size
reconstruction_img = reconstruction.view(sequence_shape)
# prediction_img = prediction .view(sequence_shape)
recon_out = reconstruction_img.transpose(0,1) if self.batch_first else reconstruction_img
# pred_out = prediction_img .transpose(0,1) if self.batch_first else prediction_img
return recon_out # (recon_out, pred_out)
|
import datetime
import json
import random
import re
import time
import requests
import schedule
from login import http_build_query, timestamp, login
from setup import download_json
storage = {}
def run():
from requests.exceptions import RequestException
retries = 5
print('-' * 60)
print('[ OK ]Job started at: ', datetime.datetime.now())
while retries > 0:
try:
report()
except RequestException:
print('\n[FAILED]Network Error, Retrying.')
retries -= 1
continue
else:
break
if retries == 0:
print('[FAILED]Maximum Retries time of 5 times exceeded, Exiting.')
else:
print('[ OK ]Job Finished at: ', datetime.datetime.now())
print('-' * 60)
def report():
# Get the login URL
url = login()
# Request the login URL to set the session cookies
session = requests.Session()
session.get(url)
# Check for updates to the runtime files
check_for_update()
# Load runtime data from the local files
runtime_data = load_runtime_data()
# Fetch the user's information
get_user_info(session)
# Fetch the data to be auto-filled into the form
auto_data = get_auto_data(session, runtime_data['queries'])
# Fetch the submission UUID
uuid = get_form_uuid(session, runtime_data['flow_id'])
# Fetch the workflow node ID
node_id = get_node_id(session)
# Assemble the form
form_data = assemble_form(
template=runtime_data['template'],
auto_data=auto_data,
assembly=runtime_data['assembly'],
uuid=uuid,
node_id=node_id
)
# Submit the form
submit_form(session, form_data)
return True
def get_form_uuid(session, flow_id):
window_id = random.randint(0, 10000)
t = random.randint(0, 1000000)
url = 'http://jkrb.xjtu.edu.cn/EIP/cooperative/openCooperative.htm?' + http_build_query({
'flowId': flow_id,
'_t': t,
'_winid': window_id
})
print('[ OK ]Requesting: ', url, end='')
response = session.get(url)
uuid = re.search(r'var uuid = \'(.*)\';', response.text).group(1)
print(' ..... done')
return uuid
def get_user_info(session):
url = 'http://jkrb.xjtu.edu.cn/EIP/api/getUserAttribute.htm'
print('[ OK ]Requesting: ', url, end='')
response = session.get(url)
storage['user_info'] = json.loads(response.text)
print(' ..... done')
return
def get_auto_data(session, queries):
base_url = 'http://jkrb.xjtu.edu.cn/EIP/queryservice/query.htm'
storage['form_data'] = {}
data = {}
for query in queries:
url = base_url + '?' + http_build_query({
'snumber': query['snumber'],
query['id_key']: storage['user_info']['userId'],
'_': timestamp()
})
print('[ OK ]Requesting: ', url, end='')
response = session.get(url)
data[query['name']] = json.loads(response.text)[0]
print(' ..... done')
return data
def get_node_id(session):
url = 'http://jkrb.xjtu.edu.cn/EIP/flowNode/createNodeIdByNum.htm'
print('[ OK ]Requesting: ', url, end='')
response = session.post(url, {'num': 1})
node_id = json.loads(response.text)[0]
print(' ..... done')
return node_id
def load_runtime_data():
files = [
{'name': './runtime/queries.json', 'mode': 'r', 'key': 'queries'},
{'name': './runtime/flow_id.json', 'mode': 'r', 'key': 'flow_id'},
{'name': './runtime/template.json', 'mode': 'r', 'key': 'template'},
{'name': './runtime/assembly.json', 'mode': 'r', 'key': 'assembly'},
]
data = {}
for file in files:
print('[ OK ]Loading Model from: ', file['name'], end='')
with open(file['name'], 'r', encoding='utf-8') as read:
datum = json.load(read)
data[file['key']] = datum
print(' ..... done')
return data
def assemble_form(template, auto_data, assembly, uuid, node_id):
for instruction in assembly:
print('[ OK ]Assembling Form Data: ', instruction['offsets'], end='')
index = instruction['offsets'].split('.')
location = 'template'
for i in index:
location += ('[' + i + ']' if i.isdigit() else '[\'' + i + '\']')
loc = locals()
gol = globals()
exec('value = ' + instruction['value'], gol, loc)
exec(location + ' = value', loc)
print(' ..... done')
return template
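# Hedged illustration (the keys and values below are hypothetical, not taken from the real
# ./runtime/assembly.json): an assembly instruction is assumed to look roughly like
#   {"offsets": "formData.0.fieldValue", "value": "auto_data['health']['temperature']"}
# where 'offsets' is a dotted path into the template and 'value' is a Python expression
# evaluated by the exec() calls above, which would effectively perform
#   template['formData'][0]['fieldValue'] = auto_data['health']['temperature']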
def submit_form(session, form_data):
with open('trail.json', 'w', encoding='utf-8') as trail:
json.dump(form_data, trail)
url = 'http://jkrb.xjtu.edu.cn/EIP/cooperative/sendCooperative.htm'
print('[ OK ]Submitting to: ', url, end='')
form = {}
for (key, value) in form_data.items():
if value is None:
form[key] = value
elif isinstance(value, str):
form[key] = value
else:
form[key] = json.dumps(value)
response = session.post(url, form)
data = json.loads(response.text)
if data['code'] != '200':
print('\n[FAILED]Submit Error: ', data['desc'])
return False
print(' ..... done')
print('[ OK ]Response: ', data['desc'])
    return True
def load_schedules():
import os
    if not os.path.exists('./runtime/schedule.json'):
        print('[FAILED]Schedules File Not Found, Please run setup tool to create')
        exit(1)
with open('./runtime/schedule.json') as file:
schedule_list = json.load(file)
return schedule_list
def check_for_update():
with open('./runtime/mode', 'r') as mode_file:
mode = mode_file.read()
print('[ OK ]Checking for update.', end='')
with open('./runtime/version', 'r') as version_file:
current = version_file.read()
print(' Current Version: ', current.strip('\n'), end='.')
response = requests.get(
'https://secure.eeyes.xyz/reporter/' + ('no-return' if mode == '1' else 'returned') + '/version'
)
latest = response.text
print(' Latest Version: ', latest.strip('\n'), end='.')
if current == latest:
print(' Not Updating ..... done')
else:
print(' Updating ..... done')
download_json(mode)
def main(run_immediately):
if run_immediately:
print('[ OK ]Starting Immediately ..... done')
run()
exit(0)
schedules = load_schedules()
print('[ OK ]Creating Scheduler', end='')
for single_schedule in schedules:
schedule.every().day.at(single_schedule).do(run)
print(' ..... done')
print('[ OK ]Starting Scheduler ..... done')
while True:
try:
schedule.run_pending()
time.sleep(1)
        except KeyboardInterrupt:
exit()
print('[ OK ]Exiting')
|
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base class for models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from texar.hyperparams import HParams
# pylint: disable=too-many-arguments
__all__ = [
"ModelBase"
]
class ModelBase(object):
"""Base class inherited by all model classes.
A model class implements interfaces that are compatible with
:tf_main:`TF Estimator <estimator/Estimator>`. In particular,
:meth:`_build` implements the
:tf_main:`model_fn <estimator/Estimator#__init__>` interface; and
:meth:`get_input_fn` is for the :attr:`input_fn` interface.
.. document private functions
.. automethod:: _build
"""
def __init__(self, hparams=None):
self._hparams = HParams(hparams, self.default_hparams(),
allow_new_hparam=True)
@staticmethod
def default_hparams():
"""Returns a dictionary of hyperparameters with default values.
"""
hparams = {
"name": "model"
}
return hparams
def __call__(self, features, labels, params, mode, config=None):
"""Used for the :tf_main:`model_fn <estimator/Estimator#__init__>`
argument when constructing
:tf_main:`tf.estimator.Estimator <estimator/Estimator>`.
"""
return self._build(features, labels, params, mode, config=config)
def _build(self, features, labels, params, mode, config=None):
"""Used for the :tf_main:`model_fn <estimator/Estimator#__init__>`
argument when constructing
:tf_main:`tf.estimator.Estimator <estimator/Estimator>`.
"""
raise NotImplementedError
def get_input_fn(self, *args, **kwargs):
"""Returns the :attr:`input_fn` function that constructs the input
data, used in :tf_main:`tf.estimator.Estimator <estimator/Estimator>`.
"""
raise NotImplementedError
@property
def hparams(self):
"""A :class:`~texar.HParams` instance. The hyperparameters
of the module.
"""
return self._hparams
|
import json
import datetime
import os
import sys
import boto3
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "./libs")) # Allow for packaged libs to be included
import urllib3
import requests
ENVIRONMENT = os.environ['ENV']
AWS_LAMBDA_FUNCTION_NAME = os.environ['AWS_LAMBDA_FUNCTION_NAME']
RETRY_ATTEMPTS = 3
LOADER_URL = os.environ['LOADER_URL']
API_KEY_HEADER = "X-Gu-Media-Key"
API_KEY_HEADER_VALUE = os.environ['API_KEY']
ORIGIN_HEADER = "Origin"
ORIGIN_HEADER_VALUE = os.environ['ORIGIN_URL']
CONTENT_HEADER = "Content-Type"
CONTENT_HEADER_VALUE = "application/json"
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def lambda_handler(event, context):
results = {"results": []}
for record in event['Records']:
record_body = record["Sns"]
notification = json.loads(record_body["Message"])
print("Got Record Event: \n{}\n".format(notification))
message_id = notification["message_id"]
image_id = notification['key']
scan_result = notification['scan_result']
error_message = notification['error_message']
results['results'].append(send_to_loader(image_id, scan_result, error_message))
return {
"statusCode": 200,
"body": results
}
def send_to_loader(imageId, scanResult, errorMessage):
attempt = 0
uri = "{}/{}".format(LOADER_URL,imageId)
payload = {'status': 'FAILED', 'errorMessage': errorMessage}
while attempt < RETRY_ATTEMPTS:
print("Updating image upload status with imageId: {} ....\n".format(imageId))
        # The Content-Type header declares JSON, so serialise the payload accordingly
        loader_response = requests.post(uri,
                                        headers={CONTENT_HEADER: CONTENT_HEADER_VALUE, ORIGIN_HEADER: ORIGIN_HEADER_VALUE,
                                                 API_KEY_HEADER: API_KEY_HEADER_VALUE},
                                        data=json.dumps(payload),
                                        verify=False)
if loader_response.status_code == 200 or loader_response.status_code == 202:
print("...POST completed successfully in {} seconds\n".format(loader_response.elapsed.total_seconds()))
return {imageId: loader_response.json()}
else:
print("Non 200/202 response received from api POST: {}, Reason: {}".format(loader_response.status_code, loader_response.reason))
attempt += 1
if attempt < RETRY_ATTEMPTS:
print("Retrying: {} (attempt {} of {})...".format(imageId, attempt + 1, RETRY_ATTEMPTS))
else:
print("Aborting: {} after {} retries".format(imageId, RETRY_ATTEMPTS))
raise Exception('Failed to update image upload status with imageId: {} after {} retries. '
'(Non 200/202 response received from api POST: {}, Reason: {})'
.format(imageId, RETRY_ATTEMPTS, loader_response.status_code, loader_response.reason))
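# Hedged example of the event shape this handler expects (field values are invented; only
# the key names are taken from the code above): each SNS record's Message is a JSON string
# with at least "message_id", "key", "scan_result" and "error_message", e.g.
#   {"message_id": "abc-123", "key": "image-id-1", "scan_result": "REJECTED",
#    "error_message": "scan failed"}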
|
from distutils.core import setup
from Cython.Build import cythonize
setup(
    name='func1',
ext_modules=cythonize("func1.pyx")
)
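# Typical build invocation (standard Cython workflow, not specific to this repo):
#   python setup.py build_ext --inplace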
|
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from functools import wraps
from airflow.exceptions import AirflowException
from kubernetes import client, config
def get_pod_port_ip(*pods, namespace):
def get_k8s_pod_port_ip(func):
@wraps(func)
def k8s_pod_port_ip_get(self, pods_ip_port):
"""This function retrieves Kubernetes Pod Port and IP
information. It can be used to retrieve information of
single pod deployment and/or statefulsets. For instance,
it can be used to retrieve the tiller pod IP and port
information for usage in the Armada Operator.
:param pods_ip_port: IP and port information of the pods
Example::
from get_k8s_pod_port_ip import get_pod_port_ip
@get_pod_port_ip('tiller', namespace='kube-system')
def get_pod_info(self, pods_ip_port={}):
tiller_ip = pods_ip_port['tiller']['ip']
tiller_port = pods_ip_port['tiller']['port']
"""
# Initialize variable
k8s_pods = {}
# The function allows us to query information on multiple
# pods
for pod_name in pods:
# Initialize variables
pod_attr = {}
pod_attr[pod_name] = {}
# Initialize/Reset counter
count = 0
# Make use of kubernetes client to retrieve pod IP
# and port information
# Note that we should use 'in_cluster_config'
# Note that we will only search for pods in the namespace
# that was specified in the request
config.load_incluster_config()
v1 = client.CoreV1Api()
ret = v1.list_namespaced_pod(namespace=namespace,
watch=False)
# Loop through items to extract port and IP information
# of the pod
for i in ret.items:
if pod_name in i.metadata.name:
# Get pod IP
logging.info("Retrieving %s IP", pod_name)
pod_attr[pod_name]['ip'] = i.status.pod_ip
logging.info("%s IP is %s", pod_name,
pod_attr[pod_name]['ip'])
# Get pod port
logging.info("Retrieving %s Port", pod_name)
# It is possible for a pod to have an IP with no
# port. For instance maas-rack takes on genesis
# node IP and has no port associated with it. We
# will assign the value 'None' to the port value
# in such cases.
try:
specs_dict = i.spec.containers[0].__dict__
ports_dict = specs_dict['_ports'][0].__dict__
pod_attr[pod_name]['port'] = (
ports_dict['_container_port'])
logging.info("%s Port is %s", pod_name,
pod_attr[pod_name]['port'])
                        except Exception:
                            pod_attr[pod_name]['port'] = 'None'
                            logging.warning("%s Port is None", pod_name)
# Update k8s_pods with new entry
k8s_pods.update(pod_attr)
# It is possible for different pods to have the same
# partial names. This means that we can end up with
# inconsistent results depending on how the pods were
# ordered in the results for 'list_namespaced_pod'.
# Hence an exception should be raised when the function
# returns results for 2 or more pods.
if count > 0:
raise AirflowException(
"Pod search string is not unique!")
# Step counter
count += 1
                # Raise an exception if the pod does not exist in the
                # Kubernetes cluster
                if not pod_attr[pod_name]:
                    raise AirflowException("Unable to locate " + pod_name)
return func(self, pods_ip_port=k8s_pods)
return k8s_pod_port_ip_get
return get_k8s_pod_port_ip
|
"""
Gets all gene item in wikidata, where a gene item is an item with an entrez ID, filtering those with no sitelinks
and no items linking to them
Gets all genes in mygene (from the latest mongo dump)
Gets those wd genes that are no longer in mygene, and the proteins they encode (if exists)
Propose for deletion on: https://www.wikidata.org/wiki/Wikidata:Requests_for_deletions
"""
import argparse
import os
from collections import Counter
import itertools
import requests
from HelperBot import get_all_taxa
from tqdm import tqdm
try:
from scheduled_bots.local import WDUSER, WDPASS
except ImportError:
if "WDUSER" in os.environ and "WDPASS" in os.environ:
WDUSER = os.environ['WDUSER']
WDPASS = os.environ['WDPASS']
else:
raise ValueError("WDUSER and WDPASS must be specified in local.py or as environment variables")
from scheduled_bots.geneprotein.GeneBot import wdi_core
from scheduled_bots.geneprotein.Downloader import MyGeneDownloader
from scheduled_bots.utils import login_to_wikidata
# todo: add microbial (checked may 2017 there were only 9 deprecated microbial)
# from scheduled_bots.geneprotein.MicrobeBotResources import get_ref_microbe_taxids
# df = get_ref_microbe_taxids()
# ref_taxids = list(map(str, df['taxid'].tolist()))
def get_deprecated_genes(taxids=None):
if taxids is None:
taxids = set(get_all_taxa()) | {'36329'}
taxid_str = '{' + " ".join(['"' + x + '"' for x in taxids]) + '}'
    # get all genes that don't have any sitelinks and don't have any items linking to them
s = """SELECT DISTINCT ?entrez ?item ?prot WHERE
{
values ?taxids {taxid}
?taxon wdt:P685 ?taxids .
?item wdt:P351 ?entrez .
?item wdt:P703 ?taxon .
FILTER NOT EXISTS {?article schema:about ?item}
OPTIONAL {?item wdt:P688 ?prot}
FILTER NOT EXISTS {?something ?prop ?item }
}""".replace("{taxid}", taxid_str)
bindings = wdi_core.WDItemEngine.execute_sparql_query(s)['results']['bindings']
entrez_qid = {x['entrez']['value']: x['item']['value'].rsplit("/")[-1] for x in bindings}
gene_protein = {x['item']['value'].rsplit("/")[-1]: x['prot']['value'].rsplit("/")[-1] for x in bindings if
'prot' in x}
print("{} wikidata".format(len(entrez_qid)))
wd = set(entrez_qid.keys())
mgd = MyGeneDownloader(fields="entrezgene")
docs, total = mgd.get_mg_cursor(",".join(taxids))
mygene = set([str(x['entrezgene']) for x in tqdm(docs, total=total) if "entrezgene" in x])
print("{} mygene".format(len(mygene)))
missing = wd - mygene
print("{} deprecated".format(len(missing)))
qids = {entrez_qid[x] for x in missing}
    # don't delete the protein items, because often there is a new gene (that replaced this deprecated gene)
    # that now encodes this protein. We should just check them; there are currently only 9 out of
    # a thousand-something deprecated genes
protein_qids = {gene_protein[x] for x in qids if x in gene_protein}
print("Check these protein items: {}".format(protein_qids))
# qids.update(protein_qids)
return qids
def grouper(n, iterable):
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
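# e.g. grouper(3, range(7)) yields (0, 1, 2), (3, 4, 5), (6,)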
def make_deletion_templates(qids, title, reason):
s = '\n=={}==\n'.format(title)
for group in grouper(90, list(qids)):
del_template = "{{subst:Rfd group | {q} | reason = {reason} }}\n".replace("{q}", '|'.join(group)).replace(
"{reason}", reason)
s += del_template
return s
def create_rfd(s: str):
edit_token, edit_cookie = login_to_wikidata(WDUSER, WDPASS)
data = {'action': 'edit', 'title': 'Wikidata:Requests_for_deletions',
'appendtext': s, 'format': 'json', 'token': edit_token}
r = requests.post("https://www.wikidata.org/w/api.php", data=data, cookies=edit_cookie)
r.raise_for_status()
print(r.json())
def get_count_by_species(missing):
# Get counts of deprecated genes by species
# not required, just for fun
s = """Select ?entrez ?taxid where {
?item wdt:P351 ?entrez .
?item wdt:P703 ?taxon .
?taxon wdt:P685 ?taxid .
}
"""
entrez_taxid = {x['entrez']['value']: x['taxid']['value'] for x in
wdi_core.WDItemEngine.execute_sparql_query(s)['results']['bindings']}
return Counter([entrez_taxid[x] for x in missing])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='run wikidata gene bot')
parser.add_argument('--title', help='deletion request title', type=str, default="Delete deprecated genes")
parser.add_argument('--reason', help='deletion request reason', type=str,
default="These genes are deprecated by NCBI")
parser.add_argument('--force', help='force run if deleting a large number of genes', action='store_true')
args = parser.parse_args()
qids = get_deprecated_genes()
print("|".join(qids))
print(len(qids))
if len(qids) > 200 and not args.force:
raise ValueError("Trying to delete {} genes. If you really want to do this, re run with --force".format(len(qids)))
if len(qids) > 0:
s = make_deletion_templates(qids, args.title, args.reason)
create_rfd(s)
log_path = "deletion_log.txt"
with open(log_path, 'w') as f:
f.write("\n".join(qids))
|
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import logging
import logging.config
import os
from fastapi.responses import JSONResponse
from fastapi import HTTPException
from infrastructure.routes import (
course_router,
course_media_router,
course_user_router,
course_rating_router,
course_category_router,
user_courses_router,
course_module_router,
course_certificate_router,
)
from infrastructure.db.database import Base, engine, DATABASE_URL
from sqlalchemy.exc import SQLAlchemyError
from exceptions.ubademy_error import UbademyException
from exceptions.auth_error import AuthorizationException
logging_conf_path = os.path.join(os.path.dirname(__file__), "logging.ini")
logging.config.fileConfig(logging_conf_path, disable_existing_loggers=False)
if DATABASE_URL is not None:
Base.metadata.create_all(engine)
app = FastAPI(title="Ubademy - Courses service", description="Courses service API")
origins = [
"*",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.exception_handler(HTTPException)
async def http_exception_handler(request, exc):
error = {"message": exc.detail}
logging.error(f"status_code: {exc.status_code} message: {exc.detail}")
return JSONResponse(status_code=exc.status_code, content=error)
@app.exception_handler(UbademyException)
async def ubademy_exception_handler(request, exc):
error = {"message": exc.detail}
logging.error(f"status_code: {exc.status_code} message: {exc.detail}")
return JSONResponse(status_code=exc.status_code, content=error)
@app.exception_handler(AuthorizationException)
async def auth_exception_handler(request, exc):
error = {"message": exc.detail}
logging.error(f"status_code: {exc.status_code} message: {exc.detail}")
return JSONResponse(status_code=exc.status_code, content=error)
@app.exception_handler(SQLAlchemyError)
async def sql_exception_handler(request, exc):
# error = {"message": str(exc.__dict__["orig"])}
error = {"message": str(exc.__dict__)}
# logging.critical(f"status_code: 500 message: {str(exc.__dict__['orig'])}")
logging.critical(f"status_code: 500 message: {str(exc.__dict__)}")
return JSONResponse(status_code=500, content=error)
app.include_router(course_router.router, prefix="/courses", tags=["courses"])
app.include_router(course_media_router.router, prefix="/courses/{course_id}/media", tags=["media"])
app.include_router(course_user_router.router, prefix="/courses/{course_id}/users", tags=["users"])
app.include_router(course_rating_router.router, prefix="/courses/{course_id}/ratings", tags=["ratings"])
app.include_router(course_category_router.router, prefix="/courses/category", tags=["category"])
app.include_router(user_courses_router.router, prefix="/courses/user/{user_id}", tags=["user courses"])
app.include_router(course_module_router.router, prefix="/courses/{course_id}/modules", tags=["modules"])
app.include_router(course_certificate_router.router, prefix="/courses/certificates/{user_id}", tags=["certificates"])
|
import json
import time
from .simple import Base
class Frontend(Base):
def testEcho(self):
ws = self.websock()
self.addCleanup(ws.http.close)
ws.connect_only()
ws.client_send_only("ZEROGW:echo:text1")
ws.client_got("ZEROGW:echo:text1")
def testTime(self):
ws = self.websock()
self.addCleanup(ws.http.close)
ws.connect_only()
ws.client_send_only("ZEROGW:timestamp:text2")
msg = ws.client_read().decode('ascii')
self.assertTrue(msg.startswith('ZEROGW:timestamp:text2:'))
self.assertAlmostEqual(float(msg.rsplit(':')[-1]), time.time(), 2)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
from libs.utils import bbox_transform
from utils.order_points import re_order
class LossRSDet(nn.Module):
def __init__(self, cfgs, device):
super(LossRSDet, self).__init__()
self.cfgs = cfgs
self.device = device
def modulated_rotation_5p_loss(self, targets, preds, anchor_state, ratios, sigma=3.0):
targets = targets.reshape(-1, 5)
sigma_squared = sigma ** 2
indices = torch.where(anchor_state == 1)[0] # .reshape(-1,)
preds = preds[indices]
targets = targets[indices]
ratios = ratios[indices]
normalizer = torch.where(anchor_state == 1)[0].detach()
normalizer = float(normalizer.shape[0])
normalizer = max(1.0, normalizer)
regression_diff = preds - targets
regression_diff = torch.abs(regression_diff)
loss1 = torch.where(regression_diff < 1.0 / sigma_squared, 0.5 * sigma_squared * torch.pow(regression_diff, 2),
regression_diff - 0.5 / sigma_squared)
loss1 = loss1.sum(dim=1)
loss2_1 = preds[:, 0] - targets[:, 0]
loss2_2 = preds[:, 1] - targets[:, 1]
# loss2_3 = preds[:, 2] - targets[:, 3] - tf.log(ratios)
# loss2_4 = preds[:, 3] - targets[:, 2] + tf.log(ratios)
loss2_3 = preds[:, 2] - targets[:, 3] + torch.log(ratios)
loss2_4 = preds[:, 3] - targets[:, 2] - torch.log(ratios)
loss2_5 = torch.min((preds[:, 4] - targets[:, 4] + 1.570796), (targets[:, 4] - preds[:, 4] + 1.570796))
box_diff_2 = torch.stack([loss2_1, loss2_2, loss2_3, loss2_4, loss2_5], 1)
abs_box_diff_2 = torch.abs(box_diff_2)
loss2 = torch.where(abs_box_diff_2 < 1.0 / sigma_squared, 0.5 * sigma_squared * torch.pow(abs_box_diff_2, 2),
abs_box_diff_2 - 0.5 / sigma_squared)
loss2 = loss2.sum(dim=1)
loss = torch.min(loss1, loss2)
loss = loss.sum() / normalizer
return loss
def forward(self, targets, preds, anchor_state, anchors, sigma=3.0):
targets = targets[:, :-1].reshape(-1, 8)
sigma_squared = sigma ** 2
indices = torch.where(anchor_state == 1)[0] # .reshape(-1,)
preds = preds[indices]
targets = targets[indices]
anchors = anchors[indices]
if self.cfgs.METHOD == 'H':
x_c = (anchors[:, 2] + anchors[:, 0]) / 2
y_c = (anchors[:, 3] + anchors[:, 1]) / 2
w = anchors[:, 2] - anchors[:, 0] + 1
h = anchors[:, 3] - anchors[:, 1] + 1
# theta = -90 * tf.ones_like(x_c)
anchors = torch.stack([x_c, y_c, w, h], dim=1)
preds = bbox_transform.qbbox_transform_inv(boxes=anchors, deltas=preds)
targets = re_order(targets.cpu())
targets = torch.as_tensor(targets, device=self.device).reshape(-1, 8)
# prepare for normalization
normalizer = torch.where(anchor_state == 1)[0].detach()
normalizer = float(normalizer.shape[0])
normalizer = max(1.0, normalizer)
# loss1
loss1_1 = (preds[:, 0] - targets[:, 0]) / anchors[:, 2]
loss1_2 = (preds[:, 1] - targets[:, 1]) / anchors[:, 3]
loss1_3 = (preds[:, 2] - targets[:, 2]) / anchors[:, 2]
loss1_4 = (preds[:, 3] - targets[:, 3]) / anchors[:, 3]
loss1_5 = (preds[:, 4] - targets[:, 4]) / anchors[:, 2]
loss1_6 = (preds[:, 5] - targets[:, 5]) / anchors[:, 3]
loss1_7 = (preds[:, 6] - targets[:, 6]) / anchors[:, 2]
loss1_8 = (preds[:, 7] - targets[:, 7]) / anchors[:, 3]
box_diff_1 = torch.stack([loss1_1, loss1_2, loss1_3, loss1_4, loss1_5, loss1_6, loss1_7, loss1_8], dim=1)
box_diff_1 = torch.abs(box_diff_1)
loss_1 = torch.where(box_diff_1 < (1.0 / sigma_squared), 0.5 * sigma_squared * torch.pow(box_diff_1, 2),
box_diff_1 - 0.5 / sigma_squared)
loss_1 = loss_1.sum(dim=1)
# loss2
loss2_1 = (preds[:, 0] - targets[:, 2]) / anchors[:, 2]
loss2_2 = (preds[:, 1] - targets[:, 3]) / anchors[:, 3]
loss2_3 = (preds[:, 2] - targets[:, 4]) / anchors[:, 2]
loss2_4 = (preds[:, 3] - targets[:, 5]) / anchors[:, 3]
loss2_5 = (preds[:, 4] - targets[:, 6]) / anchors[:, 2]
loss2_6 = (preds[:, 5] - targets[:, 7]) / anchors[:, 3]
loss2_7 = (preds[:, 6] - targets[:, 0]) / anchors[:, 2]
loss2_8 = (preds[:, 7] - targets[:, 1]) / anchors[:, 3]
box_diff_2 = torch.stack([loss2_1, loss2_2, loss2_3, loss2_4, loss2_5, loss2_6, loss2_7, loss2_8], 1)
box_diff_2 = torch.abs(box_diff_2)
loss_2 = torch.where(box_diff_2 < 1.0 / sigma_squared, 0.5 * sigma_squared * torch.pow(box_diff_2, 2),
box_diff_2 - 0.5 / sigma_squared)
loss_2 = loss_2.sum(dim=1)
# loss3
loss3_1 = (preds[:, 0] - targets[:, 6]) / anchors[:, 2]
loss3_2 = (preds[:, 1] - targets[:, 7]) / anchors[:, 3]
loss3_3 = (preds[:, 2] - targets[:, 0]) / anchors[:, 2]
loss3_4 = (preds[:, 3] - targets[:, 1]) / anchors[:, 3]
loss3_5 = (preds[:, 4] - targets[:, 2]) / anchors[:, 2]
loss3_6 = (preds[:, 5] - targets[:, 3]) / anchors[:, 3]
loss3_7 = (preds[:, 6] - targets[:, 4]) / anchors[:, 2]
loss3_8 = (preds[:, 7] - targets[:, 5]) / anchors[:, 3]
box_diff_3 = torch.stack([loss3_1, loss3_2, loss3_3, loss3_4, loss3_5, loss3_6, loss3_7, loss3_8], dim=1)
box_diff_3 = torch.abs(box_diff_3)
loss_3 = torch.where(box_diff_3 < 1.0 / sigma_squared, 0.5 * sigma_squared * torch.pow(box_diff_3, 2),
box_diff_3 - 0.5 / sigma_squared)
loss_3 = loss_3.sum(dim=1)
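        # Keep the smallest of the three losses per anchor: loss_1 compares the 8 predicted
        # offsets against the target corners in their given order, while loss_2 and loss_3
        # compare against the corners shifted by one position in either direction, making the
        # penalty insensitive to how the quadrilateral's points happen to be ordered.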
loss = torch.min(torch.min(loss_1, loss_2), loss_3)
loss = torch.sum(loss) / normalizer
return loss
|
# coding=utf-8
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import json
import time
import typing
from typing import Any, List, Optional
import attr
import cattr
import timestring
import yaml
from attr import attrib, attrs
@attrs(auto_attribs=True)
class SparkApplication(object):
state: Optional[str]
name: Optional[str]
submitter: Optional[str]
compute: Optional[str]
sparkPoolName: Optional[str]
sparkApplicationId: Optional[str]
livyId: Optional[str]
timing: List[Any]
jobType: Optional[str]
submitTime: Optional[str]
endTime: Optional[str]
queuedDuration: Optional[str]
runningDuration: Optional[str]
totalDuration: Optional[str]
_queued_duration_seconds: Optional[int] = attrib(default=None)
_running_duration_seconds: Optional[int] = attrib(default=None)
_total_duration_seconds: Optional[int] = attrib(default=None)
@property
def spark_pool_name(self):
return self.sparkPoolName
@property
def spark_application_id(self):
return self.sparkApplicationId
@property
def livy_id(self):
return self.livyId
@property
def job_type(self):
return self.jobType
@property
def submit_time(self):
return self.submitTime
@property
def submit_time_seconds(self):
return int(time.mktime(timestring.Date(self.submitTime).date.timetuple()))
@property
def end_time(self):
return self.endTime
@property
def end_time_seconds(self):
if self.end_time:
return int(time.mktime(timestring.Date(self.endTime).date.timetuple()))
return 0
@property
def queued_duration_seconds(self):
if self._queued_duration_seconds is None:
self._queued_duration_seconds = self._convert_to_seconds(self.queuedDuration)
return self._queued_duration_seconds
@property
def running_duration_seconds(self):
if self._running_duration_seconds is None:
self._running_duration_seconds = self._convert_to_seconds(self.runningDuration)
return self._running_duration_seconds
@property
def total_duration_seconds(self):
if self._total_duration_seconds is None:
self._total_duration_seconds = self._convert_to_seconds(self.totalDuration)
return self._total_duration_seconds
def _convert_to_seconds(self, s):
return sum(map(lambda x: len(timestring.Range(x)), s.split(' ')))
def spark_application_from_dict(d):
obj = cattr.structure(d, SparkApplication)
return obj
@attrs(auto_attribs=True)
class PrometheusStaticConfig(object):
targets: typing.List[str] = attrib()
labels: dict = attrib()
@attrs(auto_attribs=True)
class PrometheusFileSdConfig(object):
refresh_interval: str = attrib(default='10s')
files: typing.List[str] = attrib(factory=list)
@attrs(auto_attribs=True)
class SynapseScrapeConfig(object):
job_name: str = attrib()
bearer_token: str = attrib(default=None)
static_configs: typing.List[PrometheusStaticConfig] = attrib(default=None)
file_sd_configs: typing.List[PrometheusFileSdConfig] = attrib(default=None)
@attrs(auto_attribs=True)
class SynapseScrapeConfigs(object):
configs: typing.List[SynapseScrapeConfig] = attrib(factory=list)
def to_yaml(self):
return to_yaml(self.configs)
def to_dict(self):
return to_dict(self.configs)
def to_yaml(obj):
return yaml.safe_dump(cattr.unstructure(obj))
def to_dict(obj):
return cattr.unstructure(obj)
def to_json(obj):
return json.dumps(to_dict(obj), indent=2)
|
"""
Support for functionality to have conversations with Home Assistant.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/conversation/
"""
import logging
import re
import voluptuous as vol
from homeassistant import core
from homeassistant.components import http
from homeassistant.components.http.data_validator import (
RequestDataValidator)
from homeassistant.components.cover import (INTENT_OPEN_COVER,
INTENT_CLOSE_COVER)
from homeassistant.const import EVENT_COMPONENT_LOADED
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers import intent
from homeassistant.loader import bind_hass
from homeassistant.setup import (ATTR_COMPONENT)
_LOGGER = logging.getLogger(__name__)
ATTR_TEXT = 'text'
DEPENDENCIES = ['http']
DOMAIN = 'conversation'
REGEX_TURN_COMMAND = re.compile(r'turn (?P<name>(?: |\w)+) (?P<command>\w+)')
REGEX_TYPE = type(re.compile(''))
UTTERANCES = {
'cover': {
INTENT_OPEN_COVER: ['Open [the] [a] [an] {name}[s]'],
INTENT_CLOSE_COVER: ['Close [the] [a] [an] {name}[s]']
}
}
SERVICE_PROCESS = 'process'
SERVICE_PROCESS_SCHEMA = vol.Schema({
vol.Required(ATTR_TEXT): cv.string,
})
CONFIG_SCHEMA = vol.Schema({DOMAIN: vol.Schema({
vol.Optional('intents'): vol.Schema({
cv.string: vol.All(cv.ensure_list, [cv.string])
})
})}, extra=vol.ALLOW_EXTRA)
@core.callback
@bind_hass
def async_register(hass, intent_type, utterances):
"""Register utterances and any custom intents.
Registrations don't require conversations to be loaded. They will become
active once the conversation component is loaded.
"""
intents = hass.data.get(DOMAIN)
if intents is None:
intents = hass.data[DOMAIN] = {}
conf = intents.get(intent_type)
if conf is None:
conf = intents[intent_type] = []
for utterance in utterances:
if isinstance(utterance, REGEX_TYPE):
conf.append(utterance)
else:
conf.append(_create_matcher(utterance))
async def async_setup(hass, config):
"""Register the process service."""
config = config.get(DOMAIN, {})
intents = hass.data.get(DOMAIN)
if intents is None:
intents = hass.data[DOMAIN] = {}
for intent_type, utterances in config.get('intents', {}).items():
conf = intents.get(intent_type)
if conf is None:
conf = intents[intent_type] = []
conf.extend(_create_matcher(utterance) for utterance in utterances)
async def process(service):
"""Parse text into commands."""
text = service.data[ATTR_TEXT]
_LOGGER.debug('Processing: <%s>', text)
try:
await _process(hass, text)
except intent.IntentHandleError as err:
_LOGGER.error('Error processing %s: %s', text, err)
hass.services.async_register(
DOMAIN, SERVICE_PROCESS, process, schema=SERVICE_PROCESS_SCHEMA)
hass.http.register_view(ConversationProcessView)
# We strip trailing 's' from name because our state matcher will fail
# if a letter is not there. By removing 's' we can match singular and
# plural names.
async_register(hass, intent.INTENT_TURN_ON, [
'Turn [the] [a] {name}[s] on',
'Turn on [the] [a] [an] {name}[s]',
])
async_register(hass, intent.INTENT_TURN_OFF, [
'Turn [the] [a] [an] {name}[s] off',
'Turn off [the] [a] [an] {name}[s]',
])
async_register(hass, intent.INTENT_TOGGLE, [
'Toggle [the] [a] [an] {name}[s]',
'[the] [a] [an] {name}[s] toggle',
])
@callback
def register_utterances(component):
"""Register utterances for a component."""
if component not in UTTERANCES:
return
for intent_type, sentences in UTTERANCES[component].items():
async_register(hass, intent_type, sentences)
@callback
def component_loaded(event):
"""Handle a new component loaded."""
register_utterances(event.data[ATTR_COMPONENT])
hass.bus.async_listen(EVENT_COMPONENT_LOADED, component_loaded)
# Check already loaded components.
for component in hass.config.components:
register_utterances(component)
return True
def _create_matcher(utterance):
"""Create a regex that matches the utterance."""
# Split utterance into parts that are type: NORMAL, GROUP or OPTIONAL
# Pattern matches (GROUP|OPTIONAL): Change light to [the color] {name}
parts = re.split(r'({\w+}|\[[\w\s]+\] *)', utterance)
# Pattern to extract name from GROUP part. Matches {name}
group_matcher = re.compile(r'{(\w+)}')
# Pattern to extract text from OPTIONAL part. Matches [the color]
optional_matcher = re.compile(r'\[([\w ]+)\] *')
pattern = ['^']
for part in parts:
group_match = group_matcher.match(part)
optional_match = optional_matcher.match(part)
# Normal part
if group_match is None and optional_match is None:
pattern.append(part)
continue
# Group part
if group_match is not None:
pattern.append(
r'(?P<{}>[\w ]+?)\s*'.format(group_match.groups()[0]))
# Optional part
elif optional_match is not None:
pattern.append(r'(?:{} *)?'.format(optional_match.groups()[0]))
pattern.append('$')
return re.compile(''.join(pattern), re.I)
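# For illustration: _create_matcher('Turn on [the] {name}') compiles (case-insensitively) to
# roughly r'^Turn on (?:the *)?(?P<name>[\w ]+?)\s*$', so "Turn on the kitchen light"
# captures name == 'kitchen light'.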
async def _process(hass, text):
"""Process a line of text."""
intents = hass.data.get(DOMAIN, {})
for intent_type, matchers in intents.items():
for matcher in matchers:
match = matcher.match(text)
if not match:
continue
response = await hass.helpers.intent.async_handle(
DOMAIN, intent_type,
{key: {'value': value} for key, value
in match.groupdict().items()}, text)
return response
class ConversationProcessView(http.HomeAssistantView):
"""View to retrieve shopping list content."""
url = '/api/conversation/process'
name = "api:conversation:process"
@RequestDataValidator(vol.Schema({
vol.Required('text'): str,
}))
async def post(self, request, data):
"""Send a request for processing."""
from homeassistant.components import ais_ai_service as ais_ai_service
hass = request.app['hass']
try:
intent_result = await ais_ai_service._process(
hass, data['text'], None)
except intent.IntentHandleError as err:
intent_result = intent.IntentResponse()
intent_result.async_set_speech(str(err))
if intent_result is None:
intent_result = intent.IntentResponse()
intent_result.async_set_speech("Sorry, I didn't understand that")
return self.json(intent_result)
|
#Import packages
import pandas as pd
import numpy as np
from SyntheticControlMethods import Synth, DiffSynth
#Import data
data_dir = "https://raw.githubusercontent.com/OscarEngelbrektson/SyntheticControlMethods/master/examples/datasets/"
df = pd.read_csv(data_dir + "smoking_data" + ".csv")
#Fit Synthetic Control
sc = Synth(df, "cigsale", "state", "year", 1989, "California", n_optim=10, pen="auto")
print(sc.original_data.weight_df)
print(sc.original_data.comparison_df)
print(sc.original_data.pen)
#Visualize
sc.plot(["original", "pointwise", "cumulative"], treated_label="California",
synth_label="Synthetic California", treatment_label="Proposal 99")
#In-time placebo
#Placebo treatment period is 1982, 8 years earlier
sc.in_time_placebo(1982)
#Visualize
sc.plot(['in-time placebo'],
treated_label="California",
synth_label="Synthetic California")
#Compute in-space placebos
sc.in_space_placebo(1)
sc.original_data.rmspe_df.to_csv("rmspe_df.csv")
#Visualize
sc.plot(['rmspe ratio'], treated_label="California",
synth_label="Synthetic California")
sc.plot(['in-space placebo'], in_space_exclusion_multiple=5, treated_label="California",
synth_label="Synthetic California")
|
from itertools import combinations, combinations_with_replacement
T2 = lambda n: (n * (n+1)) / 2
T3 = lambda n: (n * (n+1) * (n+2)) / 6
mappings = {'2i': lambda D, i, j: T2(D-1) - (T2(D-i-1) - (j-i-1)),
'2p': lambda D, i, j: T2(D) - (T2(D-i) - (j-i)),
'3i': lambda D, i, j, k: T3(D-2) - (T3(D-i-3) + T2(D-j-1) - (k-j-1)),
'3p': lambda D, i, j, k: T3(D) - (T3(D-i-1) + T2(D-j) - (k-j))}
def DoIt(D, degree, kind):
assert kind in 'ip'
assert degree in (2, 3)
code = '%s%s' % (degree, kind)
mapping = mappings[code]
inds = range(D)
    iter_funct = combinations if kind == 'i' else combinations_with_replacement
expected = 0
for combo in iter_funct(inds, degree):
output_ind = mapping(D, *combo)
assert expected == output_ind
expected += 1
    print(expected)
def DoAll():
for D in [5, 10, 20, 100]:
DoIt(D, 2, 'i')
DoIt(D, 2, 'p')
DoIt(D, 3, 'i')
DoIt(D, 3, 'p')
if __name__ == '__main__':
DoAll()
|
from django.apps import AppConfig
class PersonsConfig(AppConfig):
name = 'danibraz.persons'
verbose_name = 'Pessoas'
|
from piestats.web.player_names import remove_redundant_player_names
def test_remove_redundant_player_names():
assert remove_redundant_player_names(['foobar']) == ['foobar']
assert remove_redundant_player_names(['foobar', 'foofoo']) == ['foobar', 'foofoo']
assert remove_redundant_player_names(['foobar', 'Major']) == ['foobar', 'Major']
assert remove_redundant_player_names(['Major', 'Major(1)']) == ['Major']
assert remove_redundant_player_names(['Major', 'Major(1)', 'Major(2)']) == ['Major']
assert remove_redundant_player_names(['Major', 'Major(1)', 'Major(2)', 'Major(2)']) == ['Major']
assert remove_redundant_player_names(['Major', 'Major(1)', 'Major(2)', 'Major(3)', 'John(5)']) == ['Major', 'John(5)']
|
from typing import List
class Solution:
def numPairsDivisibleBy60(self, time: List[int]) -> int:
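        # Count remainders mod 60 seen so far; each new duration pairs with every earlier
        # duration whose remainder is the complement (60 - pos) % 60.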
result = 0
count = [0] * 60
n = len(time)
for i in range(n):
pos = time[i] % 60
result += count[(60 - pos) % 60]
count[pos] += 1
return result
s = Solution()
print(s.numPairsDivisibleBy60([30, 20, 150, 100, 40]))
print(s.numPairsDivisibleBy60([60, 60, 60]))
|
import random
import threading
def calculate_average() -> None:
numbers = [random.randint(0, 100) for _ in range(10)]
average = sum(numbers) / len(numbers)
print(f"Wynik mojego działania to: {average}")
def run_example() -> None:
for _ in range(10):
threading.Thread(target=calculate_average).start()
if __name__ == "__main__":
run_example()
|
from imouto.web import RequestHandler, Application
class Redirect_1(RequestHandler):
async def get(self):
self.redirect('/2')
class Redirect_2(RequestHandler):
async def get(self):
self.write('redirect successful')
app = Application([
(r'/1', Redirect_1),
(r'/2', Redirect_2)
])
app.run()
|
#coding:utf-8
from django.contrib import admin
from models import Comment
# Register your models here.
class CommentAdmin(admin.ModelAdmin):
list_display = ("block", "article", "comment", "owner", "status", "create_time", "update_time")
search_fields = ("content",)
list_filter = ("block", )
admin.site.register(Comment, CommentAdmin)
|
class DriverConfigurationError(AttributeError):
pass
class UnsupportedImageProduct(KeyError):
pass
|
import urllib.request as urllib2
from urllib.parse import quote_plus
from bs4 import BeautifulSoup
from models.SplitTextManager import CharSplit
from models.Common import Common
import re
# Unofficial dict.cc client
class Dict(object):
@classmethod
def Check(cls, word, from_language = 'de', to_language = 'en'):
response_body = cls.GetResponse(word, from_language, to_language)
result = cls.ParseResponse(response_body)
return result
@classmethod
def GetResponse(cls, word, from_language, to_language):
subdomain = from_language.lower()+to_language.lower()
url = f"https://{subdomain}.dict.cc/?s={quote_plus(word.encode('utf-8'))}"
req = urllib2.Request(
url,
None,
{'User-agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0'}
)
res = urllib2.urlopen(req).read()
return res.decode("utf-8")
@classmethod
def ParseResponse(cls, response_body):
soup = BeautifulSoup(response_body, "html.parser")
verb = [tds.find_all("a") for tds in soup.find_all("tr", attrs={'title': "infinitive | preterite | pp"})]
noun = [tds.find_all("a") for tds in soup.find_all("tr", attrs={'title': "article sg | article pl"})]
if len(verb) > 0 and len(noun) > 0:
return ['noun', noun]
elif len(verb) > 0:
return ['verb', verb]
elif len(noun) > 0:
return ['noun', noun]
else:
return ['', '']
@classmethod
def CheckPredicates(cls, predicates, isVerbInfiniteForm=False, isNounPlural=False, isAccusative=False):
newPredicates = []
countVerb = 0
countNoun = 0
for pred in predicates:
dictAnswer = cls.Check(pred)
if dictAnswer[0] == 'verb':
dictVerb = dictAnswer[1][0][0].text if isVerbInfiniteForm else pred
newPredicates.append(dictVerb)
countVerb += 1
elif dictAnswer[0] == 'noun':
dictNoun = None
if len(dictAnswer[1]) > 1:
for answer in dictAnswer[1]:
if dictNoun is None:
dictNoun = answer
elif not dictNoun[0].text.startswith('der') and answer[0].text.startswith('der'):
dictNoun = answer
dictNoun = dictNoun[1].text.strip() if isNounPlural else dictNoun[0].text
else:
dictNoun = dictAnswer[1][0][1].text.strip() if isNounPlural else dictAnswer[1][0][0].text
dictNoun = re.sub(r'\[.*\]', '', dictNoun).strip()
dictNoun = dictNoun.replace('der', 'den') if 'der' in dictNoun and isAccusative else dictNoun
newPredicates.append(dictNoun)
countNoun += 1
else:
newPredicates.append(cls.CheckCompoundWords(pred, isNounPlural, isAccusative))
if countVerb == 0:
predVerb = 'haben' if isVerbInfiniteForm else 'hat'
newPredicates.insert(0, predVerb)
return newPredicates
@classmethod
def CheckType(cls, type, isNounPlural=False):
dictAnswer = cls.Check(type)
if dictAnswer[0] == 'noun':
dictNoun = dictAnswer[1][0][1].text.strip() if isNounPlural else dictAnswer[1][0][0].text
dictNoun = re.sub(r'\[.*\]', '', dictNoun).strip()
return dictNoun
else:
return cls.CheckCompoundWords(type, isNounPlural)
@classmethod
def CheckCompoundWords(cls, word, isPlural=False, isAccusative=False):
        if any(x in word for x in Common.germanArticles):
            return word.replace('der', 'den') if 'der' in word and isAccusative else word
stemWords = CharSplit.SplitCompoundWord(word)
if not isinstance(stemWords, list): return word
if len(stemWords) != 2: return word
dictAnswer = cls.Check(stemWords[1])
if dictAnswer[0] != 'noun': return word
dictNoun = dictAnswer[1][0][1].text.strip() if isPlural else dictAnswer[1][0][0].text
dictNoun = re.sub(r'\[.*\]', '', dictNoun).strip()
if not any(x in dictNoun for x in Common.germanArticles): return word
article, noun = dictNoun.split(' ', 1)
article = 'den' if article == 'der' and isAccusative else article
return article + ' ' + stemWords[0].title() + noun.lower()
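# Hedged usage sketch (illustrative only; Check() performs a live HTTP request to dict.cc):
#   kind, rows = Dict.Check('Hund')      # expected: kind == 'noun'
#   kind, rows = Dict.Check('laufen')    # expected: kind == 'verb'
# CheckPredicates / CheckType build on this to normalise German words, e.g. returning the
# plural or accusative form of a noun found in the dict.cc result table.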
|
class PMod:
"""
---
PMod算法
---
对不规整列表进行均匀采样的算法之一,对原表长度a和样本容量b进行取模等操作,
具体采样方法如下:
定义方法
+ M(a, b, flag):
+ + a / b = d ... r1
+ + a / d = a’ ... r2
+ + b’ = a’ - b
+ + if r1 == r2 : # 此时 b’恒等于0
+ + + END
+ + else:
+ + + M(a’, b’, -flag)
"""
def __init__(self, a: int, b: int):
"""Define 'a' and 'b'
Args:
a (int): length of source
b (int): samples
"""
self.a = a
self.b = b
@staticmethod
def _dvd(numerator: int, denominator: int) -> tuple:
"""整数的除法运算
Args:
numerator (int): 被除数
denominator (int): 除数
Returns:
tuple: (商, 余数)
"""
quotient = int(numerator/denominator)
remainder = numerator - quotient*denominator
return quotient, remainder
@staticmethod
def _mod(a: int, b: int) -> int:
"""求模
Args:
a (int): 被取模数
b (int): 取模数
Returns:
int: 模
"""
return a-b*int(a/b)
def _pm(self, a: int, b: int) -> tuple:
"""PMod算法主体
Args:
a (int): 母集长度
b (int): 子集长度
Returns:
tuple: distance, a', b', r1, r2
"""
d, r1 = self._dvd(a, b)
af, r2 = self._dvd(a, d)
bf = af-b
return d, af, bf, r1, r2
def _iter(self, s: list, flag: int, d: int) -> tuple:
"""迭代方法
Args:
s (list): 输入集
flag (int): flag
d (int): 距离
Returns:
tuple: 输出集, 新flag, 距离
"""
sret = []
flag = 1-flag
for i in range(len(s)):
if self._mod(i+1, d) == 0:
sret.append(flag)
else:
sret.append(s[i])
return sret, flag, d
def get_sample_flags(self) -> list:
"""获取样本标记表
Returns:
list: 标记表
"""
a = self.a
b = self.b
d = 1
flag = 0
s = [0]*a
while True:
dn, af, bf, _, _ = self._pm(a, b)
d = d*dn
print("--------")
print(f"a:{a},b:{b},d:{d},dn:{dn},af:{af},bf:{bf},flag:{flag}")
# print(f"s:{s}")
a = af
b = bf
s, flag, d = self._iter(s, flag, d)
# print(f"sn:{s}")
print("--------")
if bf == 0:
break
return s
def get_sample_by_list(self, ll: list) -> list:
"""取样方法
Args:
ll (list): 样本列表
Returns:
list: [description]
"""
return [li for li, g in zip(ll, self.get_sample_flags()) if g]
if __name__ == "__main__":
pm = PMod(10235, 2200)
rst = pm.get_sample_by_list([i for i in range(10235)])
print(rst)
|
# -*- coding: utf-8 -*-
# Quicksort
def quicksort(arr):
    if len(arr) < 2:  # base case
        return arr
    else:  # recursive case
        # pivot value
        pivot = arr[0]
        # elements less than or equal to the pivot
        less = [i for i in arr[1:] if i <= pivot]
        # elements greater than the pivot
        greater = [i for i in arr[1:] if i > pivot]
        # recursively quicksort each sub-array
        return quicksort(less) + [pivot] + quicksort(greater)
def main():
arr = [6, 5, 8, 3, 9, 7, 4]
print(quicksort(arr))
if __name__ == "__main__":
main()
|
# richard -- video index system
# Copyright (C) 2012, 2013 richard contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from nose.tools import eq_
from richard.base.templatetags.batch import batch
from richard.base.templatetags.duration import seconds_to_hms
from richard.base.templatetags.duration import duration
from richard.base.templatetags.duration import duration_iso8601
class BatchFilterTest(unittest.TestCase):
def test_batch(self):
eq_(batch([], '2'), [])
eq_(batch([1, 2, 3, 4, 5], '2'), [[1, 2], [3, 4], [5]])
eq_(batch([1, 2, 3, 4, 5], '3'), [[1, 2, 3], [4, 5]])
def test_batch_edgecases(self):
eq_(batch([1, 2, 3, 4, 5], '0'), [])
eq_(batch([1, 2, 3, 4, 5], '1'), [1, 2, 3, 4, 5])
def test_padwith(self):
eq_(batch([1, 2, 3, 4, 5], '2,FOO'), [[1, 2], [3, 4], [5, 'FOO']])
class Seconds_to_hmsTest(unittest.TestCase):
def test_seconds(self):
eq_((0, 0, 15), seconds_to_hms(15))
eq_((0, 0, 1), seconds_to_hms(1))
def test_minutes(self):
eq_((0, 1, 1), seconds_to_hms(61))
eq_((0, 1, 0), seconds_to_hms(60))
eq_((0, 2, 0), seconds_to_hms(120))
def test_hours(self):
eq_((1, 0, 0), seconds_to_hms(3600))
eq_((1, 0, 2), seconds_to_hms(3602))
eq_((2, 0, 0), seconds_to_hms(7200))
eq_((2, 2, 0), seconds_to_hms(7320))
eq_((2, 2, 1), seconds_to_hms(7321))
eq_((2, 2, 2), seconds_to_hms(7322))
def test_days(self):
# NOTE: Represents times greater than one day as more than 24 hours
eq_((25, 0, 1), seconds_to_hms(90001))
class DurationFilterTest(unittest.TestCase):
def test_seconds(self):
eq_("00:15", duration('15'))
eq_("00:01", duration('1'))
def test_minutes(self):
eq_("01:01", duration('61'))
eq_("01:00", duration('60'))
eq_("02:00", duration('120'))
def test_hours(self):
eq_("01:00:00", duration('3600'))
eq_("01:00:02", duration('3602'))
eq_("02:00:00", duration('7200'))
eq_("02:02:00", duration('7320'))
eq_("02:02:01", duration('7321'))
eq_("02:02:02", duration('7322'))
def test_bad(self):
eq_('', duration(None))
class DurationISO8601FilterTest(unittest.TestCase):
def test_seconds(self):
eq_("PT00H00M00S", duration_iso8601(0))
eq_("PT00H00M01S", duration_iso8601(1))
def test_minutes(self):
eq_("PT00H01M01S", duration_iso8601(61))
eq_("PT00H01M00S", duration_iso8601(60))
eq_("PT00H02M00S", duration_iso8601(120))
def test_hours(self):
eq_("PT01H00M00S", duration_iso8601(3600))
eq_("PT01H00M02S", duration_iso8601(3602))
eq_("PT02H00M00S", duration_iso8601(7200))
eq_("PT02H02M00S", duration_iso8601(7320))
eq_("PT02H02M01S", duration_iso8601(7321))
eq_("PT02H02M02S", duration_iso8601(7322))
def test_bad(self):
eq_("PT00H00M00S", duration_iso8601(None))
|
from cs50 import get_string
from sys import argv
from sys import exit
if len(argv) == 2:
k = int(argv[1])
else:
print("Usage: python caesar.py k")
exit(1)
msg = get_string("plaintext: ")
print("ciphertext: ", end="")
for c in msg:
if c.isalpha():
if c.islower():
print(chr(((ord(c) + k - 97) % 26) + 97), end="")
elif c.isupper():
print(chr(((ord(c) + k - 65) % 26) + 65), end="")
else:
print(c, end="")
print("")
|
import OLink
from inputLink import inputLink
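# A RectDia stores a rectangular (grid) diagram as a list of lattice points; a correct
# non-oriented diagram of size n has exactly two points in every row and every column
# (see isCorrectNonOri). The m_* methods implement the elementary grid moves: cyclic
# permutation, castling (commutation), (de)stabilisation and the 'flipe' move.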
class RectDia:
def __init__(self,tab):
self.points=[]
for x in tab:
self.points.append(self.point(x[0],x[1],0,0))
self.isOriented=0
class point:############ NOT immutable
def __init__(self,x,y,isO,ori):
self.x=x
self.y=y
if isO: self.ori=ori
def castle(self,i,d,n):
if d==0:
if self.x==i: self.x+=1
else:
self.x-=1
if d==1:
if self.y==i: self.y+=1
else:
self.y-=1
self.x%=n
self.y%=n
        def inv(self,b):
            if b==1:
                (self.x,self.y)=(self.y,self.x)
            return self
def copy(self):
r=RectDia([])
for p in self.points:
r.points.append(self.point(p.x,p.y,0,0))
return r
def getSize(self):
return len(self.points)/2
def isCorrectNonOri(self):
n=self.getSize()
if len(self.points)%2==1:return 0
x=[0]*n
y=[0]*n
for p in self.points:
if p.x<0 or p.y<0 : return 0
x[p.x]+=1
y[p.y]+=1
for i in range(n):
if x[i]!=2 or y[i]!=2: return 0
return 1
def orderPoints(self,direction):
f=lambda p,q: cmp(p.x,q.x)
if direction:f=lambda p,q: cmp(p.y,q.y)
self.points.sort(f)
def ____compact(self,l,d):
for p in self.points:
if d==0 and p.x>l:p.x-=1
if d==1 and p.y>l:p.y-=1
def __compact(self):
n=100
x=[0]*n
y=[0]*n
for p in self.points:
x[p.x]+=1
y[p.y]+=1
for i in range(n):
if x[n-1-i]==0: self.____compact(n-1-i,0)
if y[n-1-i]==0: self.____compact(n-1-i,1)
def __has(self,x,y):
for p in self.points:
if p.x==x and p.y==y:
return 1
return 0
def __del(self,x,y):
for p in self.points:
if p.x==x and p.y==y:
self.points.remove(p)
if self.isOriented: return p.ori
############################# The moves #############################3
def __unlinked(self,i,j,k,l):
if i==j or i==k or i==l or j==k or j==l or k==l: return 0
if j<i: (i,j)=(j,i)
if l<k: (k,l)=(l,k)
if (i<k<j and j<l) or (i<l<j and k<i): return 0
return 1
def m_cycle(self,dx,dy):
n=self.getSize()
for p in self.points:
p.x+=dx
p.y+=dy
if p.x>=n: p.x-=n
if p.y>=n: p.y-=n
def is_castling(self,i,direction):
n=self.getSize()
self.orderPoints(direction)
p1=self.points[2*i]
p2=self.points[2*i+1]
if i!=n-1:
q1=self.points[2*i+2]
q2=self.points[2*i+3]
else:
q1=self.points[0]
q2=self.points[1]
d=1-direction
if direction and self.__unlinked(p1.x,p2.x,q1.x,q2.x): return 1
if d and self.__unlinked(p1.y,p2.y,q1.y,q2.y): return 1
return 0
def m_castling(self,i,direction):## if impossible throws exception
n=self.getSize()
self.orderPoints(direction)
p1=self.points[2*i]
p2=self.points[2*i+1]
if i!=n-1:
q1=self.points[2*i+2]
q2=self.points[2*i+3]
else:
q1=self.points[0]
q2=self.points[1]
d=1-direction
        if direction and not self.__unlinked(p1.x,p2.x,q1.x,q2.x): raise ValueError("castling move not allowed")
        if d and not self.__unlinked(p1.y,p2.y,q1.y,q2.y): raise ValueError("castling move not allowed")
p1.castle(i,direction,n)
p2.castle(i,direction,n)
q1.castle(i,direction,n)
q2.castle(i,direction,n)
def is_stabilisation(self):
for p in self.points:
if p.x==p.y and self.getSize()-1==p.x: return 1
return 0
def m_stabilisation(self,kind):
        if self.is_stabilisation()==0: raise ValueError("stabilisation move not allowed")
n=self.getSize()-1
ori=0
if kind==0:
for p in self.points:
if p.x==p.y and p.x==n:
if self.isOriented: ori=p.ori
self.points.remove(p)
break
self.points+=[self.point(n,n+1,self.isOriented,ori),self.point(n+1,n,self.isOriented,ori),self.point(n+1,n+1,self.isOriented,1-ori)]
if kind==1 or kind==2:
i=-1
for p in self.points:
if kind==2 and p.x==n and p.y!=n or kind==1 and p.y==n and p.x!=n:
if kind==2: i=p.y
else: i=p.x
if self.isOriented: ori=p.ori
self.points.remove(p)
break
            self.points+=[self.point(i,n+1,self.isOriented,ori).inv(kind-1)]
            self.points+=[self.point(n+1,n+1,self.isOriented,ori).inv(kind-1)]
            self.points+=[self.point(n+1,n,self.isOriented,1-ori).inv(kind-1)]
if kind==3:
i=-1
j=-1
for p in self.points:
if self.isOriented and p.x==n and p.y!=n:
p.ori=1-p.ori
if p.x==n and p.y!=n:
i=p.y
if self.isOriented: ori=p.ori
self.points.remove(p)
if p.y==n and p.x!=n:
j=p.x
self.points.remove(p)
self.points+=[self.point(i,n+1,self.isOriented,ori)]
self.points+=[self.point(n+1,j,self.isOriented,ori)]
self.points+=[self.point(n,n+1,self.isOriented,1-ori)]
self.points+=[self.point(n+1,n,self.isOriented,1-ori)]
def is_destabilisation(self):
n=self.getSize()-1
nn=self.__has(n,n)
mn=self.__has(n-1,n)
nm=self.__has(n,n-1)
mm=self.__has(n-1,n-1)
if mn and nm and nn and (not mm): return 0
if mm and mn and nn and (not nm): return 1
if mm and nm and nn and (not mn): return 2
if mm and mn and nm and (not nn): return 3
return -1
def m_destabilisation(self,kind):##use following is_desta
n=self.getSize()-1
if kind==0:
self.__del(n,n)
self.__del(n-1,n)
ori=self.__del(n,n-1)
self.points+=[self.point(n-1,n-1,self.isOriented,ori)]
if kind==1:
self.__del(n,n)
self.__del(n-1,n)
if kind==2:
self.__del(n,n)
self.__del(n,n-1)
if kind==3:
self.__del(n,n-1)
self.__del(n-1,n)
if self.isOriented:
for p in self.points:
if p.x==n and p.y==n:
p.ori=1-p.ori
for p in self.points:
if p.x==n:
p.x-=1
if p.y==n:
p.y-=1
################### a "perfect" hash function
def hashInt(self):
n=self.getSize()
res=0
self.orderPoints(1)
self.orderPoints(0)
for i in range(2*n):
res*=n
res+=self.points[i].y
return (res*2+1)*pow(2,n)
#######################building lists of successors by the moves
def succCy(self):
succ=[]
for i in range(self.getSize()):
for j in range(self.getSize()):
tmp=self.copy()
tmp.m_cycle(i,j)
succ.append(tmp)
return succ
def succCa(self):
succ=[]
for d in range(2):
for i in range(self.getSize()):
tmp=self.copy()
if tmp.is_castling(i,d) :
tmp.m_castling(i,d)
succ.append(tmp)
return succ
    def succDe(self):
        succ=[]
        tmpp=self.is_destabilisation()
        if tmpp!=-1:
            tmp=self.copy()
            tmp.m_destabilisation(tmpp)
            succ.append(tmp)
        return succ
def succTrDe(self):
succ=[]
n=self.getSize()
for k in self.points:
tmp=self.copy()
tmp.m_cycle(n-1-k.x,n-1-k.y)
s=tmp.is_destabilisation()
if s!=-1:
tmp.m_destabilisation(s)
succ.append(tmp)
return succ
    def succSt(self):
        succ=[]
        if self.is_stabilisation():
            for d in range(4):
                tmp=self.copy()
                tmp.m_stabilisation(d)
                succ.append(tmp)
        return succ
def successors(self):
return self.succCy()+self.succCa()+self.succDe()+self.succSt()
##Flipe-move part##################################
def __fetch(self,x,y,dx,dy):
l=[]
for p in self.points:
if p.x>=x and p.y>=y and p.x<x+dx and p.y<y+dy:
l.append(p)
return l
def __hasNoneDiag(self,x,y,s):
for p in self.points:
if p.x>=x and p.y>=y and p.x<x+s and p.y<y+s and p.x-x!=p.y-y:
return 0
return 1
def __hasFullDiag(self,x,y,s):
n=self.getSize()
d=[-1]*min(min(n-x,n-y),s)
for p in self.points:
if p.x>=x and p.y>=y and p.x<x+s and p.y<y+s:
if p.x-x==p.y-y: d[p.x-x]=1
else: return 0
if d.count(-1)==0: return 1
else: return 0
## def __count(self,l,x,d)
## tot=0
## for p in l:
## if (p.x==x and d==0) or (p.y==y and d==1):tot+=1
## return tot
def is_flipe(self,a,b):
if self.__hasFullDiag(0,b,a) and self.__hasFullDiag(0,a,b):
if len(self.__fetch(a,b,b,a))==0:
return 1
return 0
def m_flipe(self,a,b):
flipped=self.__fetch(0,0,a,b)
dx=[0]*self.getSize()
dy=[0]*self.getSize()
for p in flipped:
dx[p.x]+=1
dy[p.y]+=1
aList=self.__fetch(0,b,a,a)
bList=self.__fetch(a,0,b,b)
aListX=[p.x for p in aList]
bListY=[p.y for p in bList]
for p in flipped:
if dx[p.x]!=2 and aListX.count(p.x)==0:
self.points.append(self.point(p.x,b+p.x,0,0))
else:
self.__del(p.x,b+p.x)
if dy[p.y]!=2 and bListY.count(p.y)==0:
self.points.append(self.point(a+p.y,p.y,0,0))
else:
self.__del(a+p.y,p.y)
(p.x,p.y)=(a+p.y,b+p.x)
## print [p.x for p in self.points]
## print [p.y for p in self.points]
self.__compact()
def __rotate(self):
n=self.getSize()
for p in self.points:
(p.x,p.y)=(n-1-p.y,p.x)
def succfl(self):
succ=[]
n=self.getSize()
for r in range(4):
for i in range(n):
for j in range(n):
for a in range(1,n+1):###########3too small!!
for b in range(1,n+1):
tmp=self.copy()
tmp.m_cycle(i,j)
if tmp.is_flipe(a,b):
## print (i,j,a,b)
tmp.m_flipe(a,b)
succ.append(tmp)
## tmp.draw()
self.__rotate()
return succ
#######################################################
def draw(self):
import Tkinter
import visu2
root=Tkinter.Tk()
fig=Tkinter.Canvas(root,width=800,height=800)
fig.pack()
visu2.drawRectDia(self,fig)
root.mainloop()
#############from Olink#######################################
def fromOlink(self,link):
tangle=link.word
print tangle
points=[]
section=[-1,1]
forbidden=[-1,1]
def findFree(s,e):
x=(s+e)/2.0
while(1):
x=(s+x)/2.0
if forbidden.count(x)==0:
return x
levelCounter=0
for level in tangle:
if level[0]==3:
points.append((section[level[1]+1],levelCounter))
points.append((section[level[1]+2],levelCounter))
section[level[1]+1:level[1]+3]=[]
if level[0]==2:
tmp1=findFree(section[level[1]],section[level[1]+1])
tmp2=findFree(tmp1,section[level[1]+1])
points.append((tmp1,levelCounter))
points.append((tmp2,levelCounter))
forbidden+=[tmp1,tmp2]
section[level[1]+1:level[1]+1]=[tmp1,tmp2]
if level[0]==0:
tmp1=findFree(section[level[1]+2],section[level[1]+3])
points.append((tmp1,levelCounter))
points.append((section[level[1]+1],levelCounter))
forbidden+=[tmp1]
section[level[1]+3:level[1]+3]=[tmp1]
section[level[1]+1:level[1]+2]=[]
if level[0]==1:
tmp1=findFree(section[level[1]],section[level[1]+1])
points.append((tmp1,levelCounter))
points.append((section[level[1]+2],levelCounter))
forbidden+=[tmp1]
section[level[1]+2:level[1]+3]=[]
section[level[1]+1:level[1]+1]=[tmp1]
levelCounter+=1
forbidden.sort()
print points
self.points=[]
for x in range(len(forbidden)):
for p in points:
if(p[0]==forbidden[x]):
self.points.append(self.point(x-1,p[1],0,0))
def toString(self):
s=''
self.orderPoints(0)
self.orderPoints(1)
for c in range(len(self.points)/2):
s+='.'*self.points[2*c].x+'o'+'-'*(self.points[2*c+1].x-self.points[2*c].x-1)+'o'
s+="""
"""
return s
def toStringNice(self):
s=''
self.orderPoints(0)
self.orderPoints(1)
n=len(self.points)/2
tab=[[" "]*n for i in range(n)]
for i in range(2*n):
tab[self.points[i].y][self.points[i].x]="o"
for i in range(n):
for j in range(self.points[2*i].x+1,self.points[2*i+1].x):
tab[i][j]="-"
self.orderPoints(0)
for i in range(n):
for j in range(self.points[2*i].y+1,self.points[2*i+1].y):
if tab[j][i]=="-": tab[j][i]="+"
else:tab[j][i]="|"
## print tab
for i in range(n):
for j in range(n):
s+=tab[i][j]
s+="""
"""
return s
#############################################################
if __name__ == "__main__":
dd=RectDia([(0,0),(0,3),(1,1),(1,2),(2,2),(2,3),(3,0),(3,1)])
## dd=RectDia([(0,0),(0,1),(1,0),(1,2),(2,2),(2,1)])
## dd=RectDia([(1,0),(0,1),(2,0),(0,2),(1,2),(2,1)])
dd=RectDia([(2,0),(1,1),(0,2),(0,4),(1,3),(2,2),(3,1),(4,0),(4,3),(3,4)])
print dd.toStringNice()
## dd=RectDia([])
##
## f=inputLink()
#### f=OLink.OLink(f,0)
## print 1
## dd.fromOlink(f)
## print 2
## dd.draw()
## print 3
## print dd.toString()
#### t=dd.succfl()
#### for x in t:
#### x.draw()
##
##
|
from django.test import TestCase
from django.http import QueryDict
from maintenance import views
from django.contrib.auth.models import User
from django.test import Client
import user_variables as uv
import requests, json
from dynatrace.requests.request_handler import no_ssl_verification
class ViewsTests(TestCase):
def test_mock_sample2(self):
mock_server_ex = 'https://localhost:1080/mockserver/expectation'
data = [{
"httpRequest" : {
"method" : "POST",
"path" : "/api/config/v1/maintenanceWindows",
"queryStringParameters" : {
"Api-Token" : [ "sample_api_token" ]
},
"body": {
"type": "JSON",
"json": {'name': 'MockServer2', 'description': 'MockServer', 'suppression': 'DETECT_PROBLEMS_AND_ALERT', 'schedule': {'recurrenceType': 'ONCE', 'start': '2019-01-15 23:00', 'end': '2019-01-15 23:04', 'zoneId': 'America/Toronto'}, 'type': 'PLANNED', 'scope': {'entities': [], 'matches': [{'type': 'OS', 'managementZoneId': 'null', 'tags': [{'context': 'CONTEXTLESS', 'key': 'Windows'}]}]}},
"matchType": "ONLY_MATCHIG_FIELDS"
}
},
"httpResponse" : {
"body" : {
"type": "JSON",
"json": {'id': '90916273-5320-410c-b7a0-5548711b52f1', 'name': 'MockServer2', 'description': 'MockServer'}
}
}
}]
headers = {"Content-Type": "application/json"}
with no_ssl_verification():
response = requests.put(mock_server_ex, data=json.dumps(data), verify=False, headers=headers)
print(response)
user = User.objects.create(username='testuser')
user.set_password('12345')
user.save()
c = Client()
c.login(username='testuser', password='12345')
response = c.post('/maintenance/submit_create', {'csrfmiddlewaretoken': ['qtotVIAInv38Qwrc2uwFAL5M4An06P8j1aBjscPwMpSnTUbBmYVMFYihe1lqzjC8'], 'cluster_name': ['Dynatrace_LIVE'], 'tenant_name': ['tenant1'], 'window_name': ['MockServer2'], 'window_planned': ['True'], 'window_description': ['MockServer'], 'window_recurrence': ['ONCE'], 'window_day_of_week': [''], 'window_day_of_month': ['1'], 'window_supression': ['DETECT_PROBLEMS_AND_ALERT'], 'window_start_time': [''], 'window_duration': [''], 'window_maintenance_start': ['2019-01-15 23:00'], 'window_maintenance_end': ['2019-01-15 23:04'], 'form-TOTAL_FORMS': ['1'], 'form-INITIAL_FORMS': ['0'], 'form-MIN_NUM_FORMS': ['0'], 'form-MAX_NUM_FORMS': ['10'], 'form-0-entity_type': ['OS'], 'form-0-tags_or_entities': ['TAGS'], 'form-0-filter_value': ['Windows'], 'form-__prefix__-entity_type': [''], 'form-__prefix__-tags_or_entities': [''], 'form-__prefix__-filter_value': ['']})
        self.assertEqual(response.status_code, 200)
|
import sys
import utils
from parameters import *
from sagan_models import Generator, Discriminator
from tester import Tester
if __name__ == '__main__':
config = get_parameters()
config.command = 'python ' + ' '.join(sys.argv)
print(config)
tester = Tester(config)
tester.test()
|
from datetime import datetime
from api import s3util
from api.utils import get_ext, random_id
from fileupload.models import PODFile
from team.models import ManualBooking, LrNumber
MIMEANY = '*/*'
MIMEJSON = 'application/json'
MIMETEXT = 'text/plain'
def response_mimetype(request):
"""response_mimetype -- Return a proper response mimetype, accordingly to
what the client accepts, as available in the `HTTP_ACCEPT` header.
request -- a HttpRequest instance.
"""
can_json = MIMEJSON in request.META['HTTP_ACCEPT']
can_json |= MIMEANY in request.META['HTTP_ACCEPT']
return MIMEJSON if can_json else MIMETEXT
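# For example (hypothetical header values): a request sent with
# "Accept: application/json" or "Accept: */*" resolves to MIMEJSON,
# while e.g. "Accept: text/html" falls back to MIMETEXT.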
def get_new_serial(model, **kwargs):
retry = 0
while True:
if retry > 8:
raise AssertionError('Max retry reached, something is not right')
serial = random_id(num_digits=8)
exists = model.objects.filter(serial=serial, **kwargs).exists()
if not exists:
return serial
retry += 1
def create_pod_file(lr_number, upload_file, user, booking):
orig_filename = upload_file.name
if isinstance(booking, ManualBooking):
serial = get_new_serial(PODFile, booking=booking)
new_filename = 'POD-%s-%s.%s' % (booking.booking_id, serial, get_ext(orig_filename))
mb = ManualBooking.objects.get(booking_id=booking.booking_id)
mb.pod_status = 'unverified'
mb.pod_date = datetime.now()
mb.save()
elif isinstance(lr_number, LrNumber):
serial = get_new_serial(PODFile, lr_number=lr_number)
new_filename = 'POD-%s-%s.%s' % (lr_number.lr_number, serial, get_ext(orig_filename))
mb = ManualBooking.objects.get(booking_id=lr_number.booking.booking_id)
mb.pod_status = 'unverified'
mb.pod_date = datetime.now()
mb.save()
else:
serial = random_id(num_digits=8)
new_filename = 'POD-%s-%s.%s' % (serial, serial, get_ext(orig_filename))
s3_upload = s3util.save_to_s3_uploads_pod(new_filename, upload_file)
pod_file = PODFile.objects.create(
lr_number=lr_number if isinstance(lr_number, LrNumber) else None,
serial=serial,
s3_upload=s3_upload,
uploaded_by=user,
booking=booking if isinstance(booking, ManualBooking) else None
)
return pod_file
|
import unittest
import grpc
import example_python.app_02_grpc.proto.greeter_pb2 as greeter_pb2
import example_python.app_02_grpc.proto.greeter_pb2_grpc as greeter_pb2_grpc
from .main import GreeterServer
class GreeterTest(unittest.TestCase):
server = None
client = None
@classmethod
def setUpClass(cls):
cls.server = GreeterServer()
cls.server.run(50051)
cls.client = greeter_pb2_grpc.GreeterStub(grpc.insecure_channel('localhost:50051'))
@classmethod
def tearDownClass(cls):
cls.server.stop()
def test_hello(self):
response = self.client.SayHello(greeter_pb2.HelloRequest(name='Dr. Neil'))
self.assertEqual(response.message, 'Hello, Dr. Neil!!')
|
from rest_framework import serializers
from core.models import EventType
class EventTypeSerializer(serializers.ModelSerializer):
"""
Сериализатор Типа события
"""
class Meta:
model = EventType
fields = ('id', 'name', 'description')
class EventTypeChoiceSerializer(serializers.ModelSerializer):
"""
Сериализатор Типа события
"""
class Meta:
model = EventType
fields = ('id', 'name')
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id$
import ctypes
import heapq
import threading
import time
import queue
import atexit
from . import lib_openal as al
from . import lib_alc as alc
from pyglet.media import MediaException, MediaEvent, AbstractAudioPlayer, \
AbstractAudioDriver, AbstractListener, MediaThread
import pyglet
_debug = pyglet.options['debug_media']
_debug_buffers = pyglet.options.get('debug_media_buffers', False)
class OpenALException(MediaException):
pass
# TODO move functions into context/driver?
def _split_nul_strings(s):
# NUL-separated list of strings, double-NUL-terminated.
nul = False
i = 0
while True:
if s[i] == '\0':
if nul:
break
else:
nul = True
else:
nul = False
i += 1
s = s[:i - 1]
return [_f for _f in [ss.strip() for ss in s.split('\0')] if _f]
format_map = {
(1, 8): al.AL_FORMAT_MONO8,
(1, 16): al.AL_FORMAT_MONO16,
(2, 8): al.AL_FORMAT_STEREO8,
(2, 16): al.AL_FORMAT_STEREO16,
}
class OpenALWorker(MediaThread):
# Minimum size to bother refilling (bytes)
_min_write_size = 512
# Time to wait if there are players, but they're all full.
_nap_time = 0.05
# Time to wait if there are no players.
_sleep_time = None
def __init__(self):
super(OpenALWorker, self).__init__()
self.players = set()
def run(self):
while True:
# This is a big lock, but ensures a player is not deleted while
# we're processing it -- this saves on extra checks in the
# player's methods that would otherwise have to check that it's
# still alive.
self.condition.acquire()
if self.stopped:
self.condition.release()
break
sleep_time = -1
# Refill player with least write_size
if self.players:
player = None
write_size = 0
for p in self.players:
s = p.get_write_size()
if s > write_size:
player = p
write_size = s
if write_size > self._min_write_size:
player.refill(write_size)
else:
sleep_time = self._nap_time
else:
sleep_time = self._sleep_time
self.condition.release()
if sleep_time != -1:
self.sleep(sleep_time)
else:
# We MUST sleep, or we will starve pyglet's main loop. It
# also looks like if we don't sleep enough, we'll starve out
# various updates that stop us from properly removing players
# that should be removed.
time.sleep(self._nap_time)
def add(self, player):
self.condition.acquire()
self.players.add(player)
self.condition.notify()
self.condition.release()
def remove(self, player):
self.condition.acquire()
if player in self.players:
self.players.remove(player)
self.condition.notify()
self.condition.release()
class OpenALBufferPool(object):
"""At least Mac OS X doesn't free buffers when a source is deleted; it just
detaches them from the source. So keep our own recycled queue.
"""
def __init__(self):
self._buffers = [] # list of free buffer names
self._sources = {} # { sourceId : [ buffer names used ] }
def getBuffer(self, alSource):
"""Convenience for returning one buffer name"""
return self.getBuffers(alSource, 1)[0]
def getBuffers(self, alSource, i):
"""Returns an array containing i buffer names. The returned list must
not be modified in any way, and may get changed by subsequent calls to
getBuffers.
"""
assert context.lock.locked()
buffs = []
try:
while i > 0:
b = self._buffers.pop()
if not al.alIsBuffer(b):
# Protect against implementations that DO free buffers
# when they delete a source - carry on.
if _debug_buffers:
print("Found a bad buffer")
continue
buffs.append(b)
i -= 1
except IndexError:
while i > 0:
buffer_name = al.ALuint()
al.alGenBuffers(1, buffer_name)
if _debug_buffers:
error = al.alGetError()
if error != 0:
print(("GEN BUFFERS: " + str(error)))
buffs.append(buffer_name)
i -= 1
alSourceVal = alSource.value
if alSourceVal not in self._sources:
self._sources[alSourceVal] = buffs
else:
self._sources[alSourceVal].extend(buffs)
return buffs
def deleteSource(self, alSource):
"""Delete a source pointer (self._al_source) and free its buffers"""
assert context.lock.locked()
if alSource.value in self._sources:
for buffer in self._sources.pop(alSource.value):
self._buffers.append(buffer)
def dequeueBuffer(self, alSource, buffer):
"""A buffer has finished playing, free it."""
assert context.lock.locked()
sourceBuffs = self._sources[alSource.value]
for i, b in enumerate(sourceBuffs):
if buffer == b.value:
self._buffers.append(sourceBuffs.pop(i))
break
else:
# If no such buffer exists, should not happen anyway.
if _debug_buffers:
print(("Bad buffer: " + str(buffer)))
def delete(self):
"""Delete all sources and free all buffers"""
assert context.lock.locked()
for source, buffers in list(self._sources.items()):
al.alDeleteSources(1, ctypes.byref(ctypes.c_uint(source)))
for b in buffers:
if not al.alIsBuffer(b):
# Protect against implementations that DO free buffers
# when they delete a source - carry on.
if _debug_buffers:
print("Found a bad buffer")
continue
al.alDeleteBuffers(1, ctypes.byref(b))
for b in self._buffers:
al.alDeleteBuffers(1, ctypes.byref(b))
self._buffers = []
self._sources = {}
bufferPool = OpenALBufferPool()
class OpenALAudioPlayer(AbstractAudioPlayer):
#: Minimum size of an OpenAL buffer worth bothering with, in bytes
_min_buffer_size = 512
#: Aggregate (desired) buffer size, in bytes
_ideal_buffer_size = 44800
def __init__(self, source_group, player):
super(OpenALAudioPlayer, self).__init__(source_group, player)
audio_format = source_group.audio_format
try:
self._al_format = format_map[(audio_format.channels,
audio_format.sample_size)]
except KeyError:
raise OpenALException('Unsupported audio format.')
self._al_source = al.ALuint()
al.alGenSources(1, self._al_source)
# Lock policy: lock all instance vars (except constants). (AL calls
# are locked on context).
self._lock = threading.RLock()
# Cursor positions, like DSound and Pulse drivers, refer to a
# hypothetical infinite-length buffer. Cursor units are in bytes.
# Cursor position of current (head) AL buffer
self._buffer_cursor = 0
# Estimated playback cursor position (last seen)
self._play_cursor = 0
# Cursor position of end of queued AL buffer.
self._write_cursor = 0
# List of currently queued buffer sizes (in bytes)
self._buffer_sizes = []
# List of currently queued buffer timestamps
self._buffer_timestamps = []
# Timestamp at end of last written buffer (timestamp to return in case
# of underrun)
self._underrun_timestamp = None
# List of (cursor, MediaEvent)
self._events = []
# Desired play state (True even if stopped due to underrun)
self._playing = False
# Has source group EOS been seen (and hence, event added to queue)?
self._eos = False
# OpenAL 1.0 timestamp interpolation: system time of current buffer
# playback (best guess)
if not context.have_1_1:
self._buffer_system_time = time.time()
self.refill(self._ideal_buffer_size)
def __del__(self):
try:
self.delete()
except:
pass
def delete(self):
if _debug:
print('OpenALAudioPlayer.delete()')
if not self._al_source:
return
context.worker.remove(self)
with self._lock:
with context.lock:
al.alDeleteSources(1, self._al_source)
bufferPool.deleteSource(self._al_source)
if _debug_buffers:
error = al.alGetError()
if error != 0:
print(("DELETE ERROR: " + str(error)))
self._al_source = None
def play(self):
if self._playing:
return
if _debug:
print('OpenALAudioPlayer.play()')
self._playing = True
self._al_play()
if not context.have_1_1:
self._buffer_system_time = time.time()
context.worker.add(self)
def _al_play(self):
if _debug:
print('OpenALAudioPlayer._al_play()')
with context.lock:
state = al.ALint()
al.alGetSourcei(self._al_source, al.AL_SOURCE_STATE, state)
if state.value != al.AL_PLAYING:
al.alSourcePlay(self._al_source)
def stop(self):
if not self._playing:
return
if _debug:
print('OpenALAudioPlayer.stop()')
self._pause_timestamp = self.get_time()
with context.lock:
al.alSourcePause(self._al_source)
self._playing = False
context.worker.remove(self)
def clear(self):
if _debug:
print('OpenALAudioPlayer.clear()')
with self._lock:
with context.lock:
al.alSourceStop(self._al_source)
self._playing = False
del self._events[:]
self._underrun_timestamp = None
self._buffer_timestamps = [None for _ in self._buffer_timestamps]
def _update_play_cursor(self):
if not self._al_source:
return
with self._lock:
with context.lock:
# Release spent buffers
processed = al.ALint()
al.alGetSourcei(self._al_source, al.AL_BUFFERS_PROCESSED, processed)
processed = processed.value
if _debug_buffers:
print(("Processed buffer count:", processed))
if processed:
buffers = (al.ALuint * processed)()
al.alSourceUnqueueBuffers(self._al_source, len(buffers), buffers)
error = al.alGetError()
if error != 0:
if _debug_buffers:
print(("Source unqueue error: " + str(error)))
else:
for b in buffers:
bufferPool.dequeueBuffer(self._al_source, b)
if processed:
if (len(self._buffer_timestamps) == processed
and self._buffer_timestamps[-1] is not None):
# Underrun, take note of timestamp.
# We check that the timestamp is not None, because otherwise
# our source could have been cleared.
self._underrun_timestamp = \
self._buffer_timestamps[-1] + \
self._buffer_sizes[-1] / \
float(self.source_group.audio_format.bytes_per_second)
self._buffer_cursor += sum(self._buffer_sizes[:processed])
del self._buffer_sizes[:processed]
del self._buffer_timestamps[:processed]
if not context.have_1_1:
self._buffer_system_time = time.time()
# Update play cursor using buffer cursor + estimate into current
# buffer
if context.have_1_1:
byte_offset = al.ALint()
with context.lock:
al.alGetSourcei(self._al_source, al.AL_BYTE_OFFSET, byte_offset)
if _debug:
print('Current offset in bytes:', byte_offset.value)
self._play_cursor = self._buffer_cursor + byte_offset.value
else:
# Interpolate system time past buffer timestamp
self._play_cursor = \
self._buffer_cursor + int(
(time.time() - self._buffer_system_time) * \
self.source_group.audio_format.bytes_per_second)
# Process events
while self._events and self._events[0][0] < self._play_cursor:
_, event = self._events.pop(0)
event._sync_dispatch_to_player(self.player)
def get_write_size(self):
with self._lock:
self._update_play_cursor()
write_size = self._ideal_buffer_size - \
(self._write_cursor - self._play_cursor)
if self._eos:
write_size = 0
if _debug:
print(("Write size {} bytes".format(write_size)))
return write_size
def refill(self, write_size):
if _debug:
print('refill', write_size)
with self._lock:
while write_size > self._min_buffer_size:
audio_data = self.source_group.get_audio_data(write_size)
if not audio_data:
self._eos = True
self._events.append(
(self._write_cursor, MediaEvent(0, 'on_eos')))
self._events.append(
(self._write_cursor, MediaEvent(0, 'on_source_group_eos')))
break
for event in audio_data.events:
cursor = self._write_cursor + event.timestamp * \
self.source_group.audio_format.bytes_per_second
self._events.append((cursor, event))
with context.lock:
buffer_name = bufferPool.getBuffer(self._al_source)
al.alBufferData(buffer_name,
self._al_format,
audio_data.data,
audio_data.length,
self.source_group.audio_format.sample_rate)
if _debug_buffers:
error = al.alGetError()
if error != 0:
print(("BUFFER DATA ERROR: " + str(error)))
al.alSourceQueueBuffers(self._al_source, 1, ctypes.byref(buffer_name))
if _debug_buffers:
error = al.alGetError()
if error != 0:
print(("QUEUE BUFFER ERROR: " + str(error)))
self._write_cursor += audio_data.length
self._buffer_sizes.append(audio_data.length)
self._buffer_timestamps.append(audio_data.timestamp)
write_size -= audio_data.length
# Check for underrun stopping playback
if self._playing:
state = al.ALint()
with context.lock:
al.alGetSourcei(self._al_source, al.AL_SOURCE_STATE, state)
if state.value != al.AL_PLAYING:
if _debug:
print('underrun')
al.alSourcePlay(self._al_source)
def get_time(self):
try:
buffer_timestamp = self._buffer_timestamps[0]
except IndexError:
return self._underrun_timestamp
if buffer_timestamp is None:
return None
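        # Time = timestamp of the head (oldest queued) buffer plus the number of
        # bytes already played within it, converted to seconds.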
return buffer_timestamp + \
(self._play_cursor - self._buffer_cursor) / \
float(self.source_group.audio_format.bytes_per_second)
def set_volume(self, volume):
volume = float(volume)
with context.lock:
al.alSourcef(self._al_source, al.AL_GAIN, max(0., volume))
def set_position(self, position):
x, y, z = list(map(float, position))
with context.lock:
al.alSource3f(self._al_source, al.AL_POSITION, x, y, z)
def set_min_distance(self, min_distance):
min_distance = float(min_distance)
with context.lock:
al.alSourcef(self._al_source, al.AL_REFERENCE_DISTANCE, min_distance)
def set_max_distance(self, max_distance):
max_distance = float(max_distance)
with context.lock:
al.alSourcef(self._al_source, al.AL_MAX_DISTANCE, max_distance)
def set_pitch(self, pitch):
pitch = float(pitch)
with context.lock:
al.alSourcef(self._al_source, al.AL_PITCH, max(0., pitch))
def set_cone_orientation(self, cone_orientation):
x, y, z = list(map(float, cone_orientation))
with context.lock:
al.alSource3f(self._al_source, al.AL_DIRECTION, x, y, z)
def set_cone_inner_angle(self, cone_inner_angle):
cone_inner_angle = float(cone_inner_angle)
with context.lock:
al.alSourcef(self._al_source, al.AL_CONE_INNER_ANGLE, cone_inner_angle)
def set_cone_outer_angle(self, cone_outer_angle):
cone_outer_angle = float(cone_outer_angle)
with context.lock:
al.alSourcef(self._al_source, al.AL_CONE_OUTER_ANGLE, cone_outer_angle)
def set_cone_outer_gain(self, cone_outer_gain):
cone_outer_gain = float(cone_outer_gain)
with context.lock:
al.alSourcef(self._al_source, al.AL_CONE_OUTER_GAIN, cone_outer_gain)
class OpenALDriver(AbstractAudioDriver):
_forward_orientation = (0, 0, -1)
_up_orientation = (0, 1, 0)
def __init__(self, device_name=None):
super(OpenALDriver, self).__init__()
# TODO devices must be enumerated on Windows, otherwise 1.0 context is
# returned.
self._device = alc.alcOpenDevice(device_name)
if not self._device:
raise Exception('No OpenAL device.')
self._context = alc.alcCreateContext(self._device, None)
alc.alcMakeContextCurrent(self._context)
self.have_1_1 = self.have_version(1, 1) and False
self.lock = threading.Lock()
self._listener = OpenALListener(self)
# Start worker thread
self.worker = OpenALWorker()
self.worker.start()
def create_audio_player(self, source_group, player):
assert self._device is not None, "Device was closed"
return OpenALAudioPlayer(source_group, player)
def delete(self):
self.worker.stop()
with self.lock:
alc.alcMakeContextCurrent(None)
alc.alcDestroyContext(self._context)
alc.alcCloseDevice(self._device)
self._device = None
def have_version(self, major, minor):
return (major, minor) <= self.get_version()
def get_version(self):
major = alc.ALCint()
minor = alc.ALCint()
alc.alcGetIntegerv(self._device, alc.ALC_MAJOR_VERSION,
ctypes.sizeof(major), major)
alc.alcGetIntegerv(self._device, alc.ALC_MINOR_VERSION,
ctypes.sizeof(minor), minor)
return major.value, minor.value
def get_extensions(self):
extensions = alc.alcGetString(self._device, alc.ALC_EXTENSIONS)
if pyglet.compat_platform == 'darwin' or pyglet.compat_platform.startswith('linux'):
return ctypes.cast(extensions, ctypes.c_char_p).value.split(b' ')
else:
return _split_nul_strings(extensions)
def have_extension(self, extension):
return extension in self.get_extensions()
def get_listener(self):
return self._listener
class OpenALListener(AbstractListener):
def __init__(self, driver):
self._driver = driver
def _set_volume(self, volume):
volume = float(volume)
with self._driver.lock:
al.alListenerf(al.AL_GAIN, volume)
self._volume = volume
def _set_position(self, position):
x, y, z = list(map(float, position))
with self._driver.lock:
al.alListener3f(al.AL_POSITION, x, y, z)
self._position = position
def _set_forward_orientation(self, orientation):
val = (al.ALfloat * 6)(*list(map(float, (orientation + self._up_orientation))))
with self._driver.lock:
al.alListenerfv(al.AL_ORIENTATION, val)
self._forward_orientation = orientation
def _set_up_orientation(self, orientation):
val = (al.ALfloat * 6)(*list(map(float, (self._forward_orientation + orientation))))
with self._driver.lock:
al.alListenerfv(al.AL_ORIENTATION, val)
self._up_orientation = orientation
context = None
def create_audio_driver(device_name=None):
global context
context = OpenALDriver(device_name)
if _debug:
print('OpenAL', context.get_version())
return context
def cleanup_audio_driver():
global context
if _debug:
print("Cleaning up audio driver")
if context:
with context.lock:
bufferPool.delete()
context.delete()
context = None
if _debug:
print("Cleaning done")
atexit.register(cleanup_audio_driver)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-19 01:32
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
def copy_form_submission_orgs_to_application(apps, schema_editor):
db_alias = schema_editor.connection.alias
FormSubmission = apps.get_model('intake', 'FormSubmission')
Application = apps.get_model('intake', 'Application')
sub = FormSubmission.objects.using(db_alias).last()
if sub:
sub_orgs = sub.organizations.through.objects.all()
applications = []
for sub_org in sub_orgs:
app = Application(
form_submission=sub_org.formsubmission,
organization=sub_org.organization)
applications.append(app)
Application.objects.bulk_create(applications)
def empty_application_table(apps, schema_editor):
db_alias = schema_editor.connection.alias
Application = apps.get_model('intake', 'Application')
Application.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('user_accounts', '0018_org_accepting_apps_checking_notifications'),
('intake', '0038_create_next_step_and_status_types'),
]
operations = [
migrations.CreateModel(
name='Application',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.AddField(
model_name='application',
name='form_submission',
field=models.ForeignKey(db_column='formsubmission_id', on_delete=django.db.models.deletion.PROTECT, to='intake.FormSubmission'),
),
migrations.AddField(
model_name='application',
name='organization',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='user_accounts.Organization'),
),
migrations.RunPython(copy_form_submission_orgs_to_application, empty_application_table),
migrations.RemoveField(
model_name='formsubmission',
name='organizations'
),
migrations.AddField(
model_name='formsubmission',
name='organizations',
field=models.ManyToManyField(related_name='submissions', through='intake.Application', to='user_accounts.Organization'),
),
migrations.CreateModel(
name='StatusUpdate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('additional_information', models.TextField()),
('application', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='intake.Application')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('next_steps', models.ManyToManyField(to='intake.NextStep')),
('status_type', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='intake.StatusType')),
],
),
]
|
from setuptools import setup, find_packages
print(find_packages())
setup(
name="xenaPython",
version="1.0.14",
packages=find_packages(),
include_package_data=True,
author = '@jingchunzhu, @acthp',
author_email = 'craft@soe.ucsc.com',
description = 'XENA python API',
url = 'https://github.com/ucscXena/xenaPython',
keywords = ['xena', 'ucsc', 'xenaAPI', 'xenaPython'],
license='Apache 2.0',
extras_require={
"convert": ["scanpy", "loompy"],
}
)
|
"""Defines WebotsSimObject Class
----------------------------------------------------------------------------------------------------------
This file is part of Sim-ATAV project and licensed under MIT license.
Copyright (c) 2018 Cumhur Erkan Tuncali, Georgios Fainekos, Danil Prokhorov, Hisahiro Ito, James Kapinski.
For questions please contact:
C. Erkan Tuncali (etuncali [at] asu.edu)
----------------------------------------------------------------------------------------------------------
"""
class WebotsVideoRecording(object):
"""Structure to describe video recording settings in Webots environment"""
def __init__(self):
self.width = 1920
self.height = 1080
self.codec = 1 # ignored
self.quality = 100
self.acceleration = 1
self.is_caption = False
self.caption_name = 'SimATAV'
self.filename = 'SimATAV_video.mp4'
|
import copy
import datetime
import hashlib
class MinimalBlock():
def __init__(self, index, timestamp, data, previous_hash):
self.index = index
self.timestamp = timestamp
self.data = data
self.previous_hash = previous_hash
self.hash = self.hashing()
def hashing(self):
key = hashlib.sha256()
key.update(str(self.index).encode('utf-8'))
key.update(str(self.timestamp).encode('utf-8'))
key.update(str(self.data).encode('utf-8'))
key.update(str(self.previous_hash).encode('utf-8'))
return key.hexdigest()
class ItemBlock:
def __init__(self, previous_block_hash, transaction_list):
self.previous_block_hash = previous_block_hash
self.transaction_list = transaction_list
self.block_hash = hashlib.sha256(self.block_data.encode()).hexdigest()
@property
def block_data(self):
return "-".join(self.transaction_list) + "-" + self.previous_block_hash
@property
def last_block(self):
return self.transaction_list[-1]
class AccessoryBlock(ItemBlock):
def __init__(self, previous_block_hash, transaction_list, name):
super().__init__(previous_block_hash, transaction_list)
self.name = name
class MinimalChain():
def __init__(self):
        self.blocks = [self.get_genesis_block()]
def get_genesis_block(self):
return MinimalBlock(0, datetime.datetime.utcnow(), 'Genesis', 'arbitrary')
    def add_block(self, data):
        self.blocks.append(MinimalBlock(len(self.blocks), datetime.datetime.utcnow(), data, self.blocks[-1].hash))
        return len(self.blocks)-1
    def get_chain_size(self):
        # number of blocks excluding the genesis block
        return len(self.blocks)-1
def verify(self, verbose=True):
flag=True
        for i in range(1,len(self.blocks)):
if self.blocks[i].index != i:
flag = False
if verbose:
print(f'Wrong block.index at block {i}.')
if self.blocks[i-1].hash != self.blocks[i].previous_hash:
flag=False
if verbose:
print(f'Wrong previous hash at block {i}.')
if self.blocks[i].hash != self.blocks[i].hashing():
flag=False
if verbose:
print(f"Wrong hash at block {i}.")
if self.blocks[i-1].timestamp >= self.blocks[i].timestamp:
flag=False
if verbose:
print(f"Backdating at block {i}.")
return flag
def fork(self, head='latest'):
if head in ['latest', 'whole', 'all']:
return copy.deepcopy(self)
else:
c = copy.deepcopy(self)
c.blocks = c.blocks[0:head+1]
return c
def get_root(self, chain_2):
min_chain_size = min(self.get_chain_size(), chain_2.get_chain_size())
for i in range(1,min_chain_size+1):
if self.blocks[i] != chain_2.blocks[i]:
return self.fork(i-1)
return self.fork(min_chain_size)
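# Usage sketch (illustrative only, assuming the fixes above; the transaction
# strings are hypothetical): build a small chain, verify the hash links, then
# take a truncated fork of it.
if __name__ == '__main__':
    chain = MinimalChain()
    chain.add_block('first transaction')
    chain.add_block('second transaction')
    print(chain.verify())        # True when indices, hashes and timestamps are consistent
    forked = chain.fork(head=1)  # deep copy containing blocks 0..1 only
    print(len(forked.blocks))    # 2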
|
from xbmcswift2 import Plugin
from resources.lib import rsa
plugin = Plugin()
@plugin.route('/')
def index():
items = [{
'label': plugin.get_string(30000),
'path': plugin.url_for('rsa_videos', page_no=1)
}, {
'label': plugin.get_string(30001),
'path': plugin.url_for('rsa_animate')
}, {
'label': plugin.get_string(30002),
'path': plugin.url_for('rsa_shorts')
}]
return items
@plugin.route('/rsa_animate')
def rsa_animate():
"""
Get RSA Animate videos from the RSA module and send to XBMC
"""
items = []
video_list = rsa.get_rsa_animate_videos()
for video in video_list:
items.append({
'label': video['title'],
'path': plugin.url_for('play_video', url=video['url']),
'thumbnail': video['thumbnail'],
'is_playable': True
})
if video_list:
items.append({
'label': 'Next Page',
'path': plugin.url_for('rsa_animate')
})
return items
@plugin.route('/rsa_videos/<page_no>')
def rsa_videos(page_no):
"""
Get videos from RSA module and send to XBMC
"""
items = []
page_no = int(page_no)
video_list = rsa.get_videos(page_no)
for video in video_list:
items.append({
'label': video['title'],
'path': plugin.url_for('play_video', url=video['url']),
'thumbnail': video['thumbnail'],
'is_playable': True
})
if video_list:
items.append({
'label': 'Next Page',
'path': plugin.url_for('rsa_videos', page_no=page_no + 1)
})
return items
@plugin.route('/rsa_shorts')
def rsa_shorts():
"""
Get RSA Shorts videos from RSA module and send to XBMC
"""
items = []
video_list = rsa.get_rsa_shorts_videos()
for video in video_list:
items.append({
'label': video['title'],
'path': plugin.url_for('play_video', url=video['url']),
'thumbnail': video['thumbnail'],
'is_playable': True
})
return items
@plugin.route('/play_video/<url>')
def play_video(url):
youtube_id = rsa.get_youtube_id_from_video(url)
return plugin.set_resolved_url(
'plugin://plugin.video.youtube?action=play_video&videoid={0}'.format(
youtube_id)
)
if __name__ == '__main__':
plugin.run()
|
from django.shortcuts import redirect
def redirect_view(request):
response = redirect("https://kdsp-web.herokuapp.com/")
return response
|
import logging
from helper.logger import get_logger
from api import LOG_FORMAT, LOG_NAME, LOG_LEVEL
from api.requestvars import g
logging.getLogger().handlers.clear()
uvi_error = logging.getLogger("uvicorn.error")
uvi_access = logging.getLogger("uvicorn.access")
uvi_error.handlers.clear()
uvi_access.handlers.clear()
uvi_error.propagate = False
uvi_access.propagate = False
logger = get_logger(
name=LOG_NAME,
log_format=LOG_FORMAT,
level=LOG_LEVEL,
callbacks=[
('client_ip', lambda: g().client_ip)
]
)
|
name = input()
age = int(input())
town = input()
salary = float(input())
ageRange = "teen" if age < 18 else "adult" if age < 70 else "elder"
salaryRange = "low" if salary < 500 else "medium" if salary < 2000 else "high"
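# Chained conditionals above: age < 18 -> "teen", 18-69 -> "adult", 70+ -> "elder";
# salary < 500 -> "low", 500 to just under 2000 -> "medium", otherwise "high".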
print(f'Name: {name}')
print(f'Age: {age}')
print(f'Town: {town}')
print(f'Salary: ${salary:.2f}')
print(f'Age range: {ageRange}')
print(f'Salary range: {salaryRange}')
|
from .mDbgHelp import *;
|
# -*- coding: utf-8 -*-
# (C) Copyright IBM Corp. 2021.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integration Tests for SchematicsV1
"""
import json
import os
import pytest
import time
from ibm_cloud_sdk_core import *
from ibm_schematics.schematics_v1 import *
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
# Config file name
config_file = 'schematics_v1.env'
class TestSchematicsV1():
"""
Integration Test Class for SchematicsV1
"""
@classmethod
def setup_class(cls):
if os.path.exists(config_file):
os.environ['IBM_CREDENTIALS_FILE'] = config_file
cls.schematics_service = SchematicsV1.new_instance(
)
assert cls.schematics_service is not None
cls.config = read_external_sources(
SchematicsV1.DEFAULT_SERVICE_NAME)
assert cls.config is not None
auth = IAMAuthenticator(
apikey=cls.config.get('APIKEY'),
url=cls.config.get('AUTH_URL'),
client_id=cls.config.get('CLIENT_ID'),
client_secret=cls.config.get('CLIENT_SECRET'),
disable_ssl_verification=False
)
cls.refresh_token = auth.token_manager.request_token()['refresh_token']
print('Setup complete.')
needscredentials = pytest.mark.skipif(
not os.path.exists(config_file), reason="External configuration not available, skipping..."
)
def createWorkspace(self):
# Construct a dict representation of a TemplateSourceDataRequest model
template_source_data_request_model = {
'env_values': [
{
'KEY1': 'VALUE1'
},
{
'KEY2': 'VALUE2'
}
],
'folder': '.',
'type': 'terraform_v0.12.20',
'variablestore': [
{
'name': 'variable_name1',
'value': 'variable_valuename1'
},
{
'name': 'variable_name2',
'value': 'variable_valuename2'
}
]
}
# Construct a dict representation of a TemplateRepoRequest model
template_repo_request_model = {
'url': 'https://github.ibm.com/gshamann/tf_cloudless_sleepy'
}
response = self.schematics_service.create_workspace(
description='description',
name='workspace_name',
tags=['testString'],
template_data=[template_source_data_request_model],
template_repo=template_repo_request_model,
type=['terraform_v0.12.20'],
)
workspace_response = response.get_result()
print(json.dumps(workspace_response, indent=2))
return workspace_response
def createWorkspaceWithEmptyRepoURL(self):
# Construct a dict representation of a TemplateSourceDataRequest model
template_source_data_request_model = {
'env_values': [
{
'KEY1': 'VALUE1'
},
{
'KEY2': 'VALUE2'
}
],
'folder': '.',
'type': 'terraform_v0.11.14',
'variablestore': [
{
'name': 'variable_name1',
'value': 'variable_valuename1'
},
{
'name': 'variable_name2',
'value': 'variable_valuename2'
}
]
}
response = self.schematics_service.create_workspace(
description='description',
name='workspace_name',
tags=['testString'],
template_data=[template_source_data_request_model],
type=['terraform_v0.11.14'],
)
workspace_response = response.get_result()
print(json.dumps(workspace_response, indent=2))
return workspace_response
def getWorkspaceById(self, wid):
response = self.schematics_service.get_workspace(
w_id=wid
)
workspace_response = response.get_result()
print(json.dumps(workspace_response, indent=2))
return workspace_response
def getWorkspaceActivityById(self, wid, activityid):
response = self.schematics_service.get_workspace_activity(
w_id=wid,
activity_id=activityid,
)
workspace_response = response.get_result()
print(json.dumps(workspace_response, indent=2))
return workspace_response
def waitForWorkspaceStatus(self, wid, status):
workspaceStatus = ""
while workspaceStatus != status:
workspaceStatus = self.getWorkspaceById(wid)['status']
print(workspaceStatus)
time.sleep(2)
def waitForWorkspaceActivityStatus(self, wid, activityid, status):
workspaceActivityStatus = ""
while workspaceActivityStatus != status:
workspaceActivityStatus = self.getWorkspaceActivityById(wid, activityid)['status']
print(workspaceActivityStatus)
time.sleep(2)
def deleteWorkspaceById(self, wid):
success = False
while not success:
try:
response = self.schematics_service.delete_workspace(
w_id=wid,
refresh_token=self.refresh_token
)
workspace_response = response.get_result()
print(json.dumps(workspace_response, indent=2))
success = True
except:
time.sleep(2)
return workspace_response
def uploadTarFile(self):
ws = self.createWorkspaceWithEmptyRepoURL()
self.waitForWorkspaceStatus(ws['id'], "DRAFT")
fileDir = os.getcwd()
fileName = "tf_cloudless_sleepy_git_archive.tar"
filePath = os.path.join(fileDir, "tarfiles", fileName)
fileReader = open(filePath, "rb")
response = self.schematics_service.template_repo_upload(
w_id=ws['id'],
t_id=ws['template_data'][0]['id'],
file_content_type="multipart/form-data",
file=fileReader
)
workspace_response = response.get_result()
print(json.dumps(workspace_response, indent=2))
return ws
def planWorkspaceAction(self):
ws = self.uploadTarFile()
self.waitForWorkspaceStatus(ws['id'], "INACTIVE")
response = self.schematics_service.plan_workspace_command(
w_id=ws['id'],
refresh_token = self.refresh_token
)
activity = response.get_result()
self.waitForWorkspaceActivityStatus(ws['id'], activity['activityid'], "COMPLETED")
return ws['id'], activity['activityid']
def applyWorkspaceAction(self):
ws = self.uploadTarFile()
self.waitForWorkspaceStatus(ws['id'], "INACTIVE")
response = self.schematics_service.apply_workspace_command(
w_id=ws['id'],
refresh_token=self.refresh_token
)
activity = response.get_result()
self.waitForWorkspaceActivityStatus(ws['id'], activity['activityid'], "COMPLETED")
return ws, activity['activityid']
def applyWorkspaceActionByIdWithoutWait(self,id):
response = self.schematics_service.apply_workspace_command(
w_id=id,
refresh_token=self.refresh_token
)
activity = response.get_result()
return id, activity['activityid']
def destroyWorkspaceAction(self):
ws = self.uploadTarFile()
self.waitForWorkspaceStatus(ws['id'], "INACTIVE")
response = self.schematics_service.destroy_workspace_command(
w_id=ws['id'],
refresh_token=self.refresh_token
)
activity = response.get_result()
self.waitForWorkspaceActivityStatus(ws['id'], activity['activityid'], "COMPLETED")
return ws['id'], activity['activityid']
def destroyWorkspaceActionById(self,id):
response = self.schematics_service.destroy_workspace_command(
w_id=id,
refresh_token=self.refresh_token
)
activity = response.get_result()
self.waitForWorkspaceActivityStatus(id, activity['activityid'], "COMPLETED")
return id, activity['activityid']
def refreshWorkspaceAction(self):
ws = self.uploadTarFile()
self.waitForWorkspaceStatus(ws['id'], "INACTIVE")
response = self.schematics_service.refresh_workspace_command(
w_id=ws['id'],
refresh_token=self.refresh_token
)
activity = response.get_result()
self.waitForWorkspaceActivityStatus(ws['id'], activity['activityid'], "COMPLETED")
return ws['id'], activity['activityid']
def refreshWorkspaceActionById(self,id):
response = self.schematics_service.refresh_workspace_command(
w_id=id,
refresh_token=self.refresh_token
)
activity = response.get_result()
self.waitForWorkspaceActivityStatus(id, activity['activityid'], "COMPLETED")
return id, activity['activityid']
@needscredentials
def test_list_schematics_location(self):
list_schematics_location_response = self.schematics_service.list_schematics_location()
assert list_schematics_location_response.get_status_code() == 200
list_schematics_locations = list_schematics_location_response.get_result()
assert list_schematics_locations is not None
@needscredentials
def test_list_resource_group(self):
list_resource_group_response = self.schematics_service.list_resource_group()
assert list_resource_group_response.get_status_code() == 200
list_resource_group_response = list_resource_group_response.get_result()
assert list_resource_group_response is not None
@needscredentials
def test_get_schematics_version(self):
get_schematics_version_response = self.schematics_service.get_schematics_version()
assert get_schematics_version_response.get_status_code() == 200
version_response = get_schematics_version_response.get_result()
assert version_response is not None
@needscredentials
def test_list_workspaces(self):
list_workspaces_response = self.schematics_service.list_workspaces()
assert list_workspaces_response.get_status_code() == 200
workspace_response_list = list_workspaces_response.get_result()
assert workspace_response_list is not None
@needscredentials
def test_create_workspace(self):
# Construct a dict representation of a TemplateSourceDataRequest model
template_source_data_request_model = {
'env_values': [
{
'KEY1': 'VALUE1'
},
{
'KEY2': 'VALUE2'
}
],
'folder': '.',
'type': 'terraform_v0.12.20',
'variablestore': [
{
'name': 'variable_name1',
'value': 'variable_valuename1'
},
{
'name': 'variable_name2',
'value': 'variable_valuename2'
}
]
}
# Construct a dict representation of a TemplateRepoRequest model
template_repo_request_model = {
'url': 'https://github.ibm.com/gshamann/tf_cloudless_sleepy'
}
create_workspace_response = self.schematics_service.create_workspace(
description='description',
name='workspace_name',
tags=['testString'],
template_data=[template_source_data_request_model],
template_repo=template_repo_request_model,
type=['terraform_v0.12.20'],
)
assert create_workspace_response.get_status_code() == 201
workspace_response = create_workspace_response.get_result()
assert workspace_response is not None
self.deleteWorkspaceById(workspace_response['id'])
@needscredentials
def test_get_workspace(self):
ws = self.createWorkspace()
get_workspace_response = self.schematics_service.get_workspace(
w_id=ws['id'],
)
assert get_workspace_response.get_status_code() == 200
workspace_response = get_workspace_response.get_result()
assert workspace_response is not None
self.deleteWorkspaceById(workspace_response['id'])
@needscredentials
def test_replace_workspace(self):
ws = self.createWorkspace()
self.waitForWorkspaceStatus(ws['id'], "INACTIVE")
replace_workspace_response = self.schematics_service.replace_workspace(
w_id=ws['id'],
description="",
name="myworkspace",
type=["terraform_v0.12.20"],
)
assert replace_workspace_response.get_status_code() == 200
workspace_response = replace_workspace_response.get_result()
assert workspace_response is not None
self.deleteWorkspaceById(workspace_response['id'])
@needscredentials
def test_update_workspace(self):
ws = self.createWorkspace()
self.waitForWorkspaceStatus(ws['id'], "INACTIVE")
update_workspace_response = self.schematics_service.update_workspace(
w_id=ws['id'],
description="",
name="myworkspace",
type=["terraform_v0.12.20"],
)
assert update_workspace_response.get_status_code() == 200
workspace_response = update_workspace_response.get_result()
assert workspace_response is not None
self.deleteWorkspaceById(workspace_response['id'])
@needscredentials
def test_upload_template_tar(self):
ws = self.createWorkspaceWithEmptyRepoURL()
self.waitForWorkspaceStatus(ws['id'], "DRAFT")
fileDir = os.getcwd()
fileName = "tf_cloudless_sleepy_git_archive.tar"
filePath = os.path.join(fileDir, "tarfiles", fileName)
fileReader = open(filePath, "rb")
upload_template_tar_response = self.schematics_service.template_repo_upload(
w_id=ws['id'],
t_id=ws['template_data'][0]['id'],
file_content_type="multipart/form-data",
file=fileReader
)
assert upload_template_tar_response.get_status_code() == 200
template_repo_tar_upload_response = upload_template_tar_response.get_result()
assert template_repo_tar_upload_response is not None
self.deleteWorkspaceById(ws['id'])
@needscredentials
def test_get_workspace_readme(self):
(ws, activityid) = self.applyWorkspaceAction()
get_workspace_readme_response = self.schematics_service.get_workspace_readme(
w_id=ws['id']
)
assert get_workspace_readme_response.get_status_code() == 200
template_readme = get_workspace_readme_response.get_result()
assert template_readme is not None
self.deleteWorkspaceById(ws['id'])
@needscredentials
def test_list_workspace_activities(self):
ws = self.uploadTarFile()
self.waitForWorkspaceStatus(ws['id'], "INACTIVE")
self.refreshWorkspaceActionById(ws['id'])
self.destroyWorkspaceActionById(ws['id'])
list_workspace_activities_response = self.schematics_service.list_workspace_activities(
w_id=ws['id']
)
assert list_workspace_activities_response.get_status_code() == 200
workspace_activities = list_workspace_activities_response.get_result()
assert workspace_activities is not None
self.deleteWorkspaceById(ws['id'])
@needscredentials
def test_get_workspace_activity(self):
ws = self.uploadTarFile()
self.waitForWorkspaceStatus(ws['id'], "INACTIVE")
activity = self.refreshWorkspaceActionById(ws['id'])
get_workspace_activity_response = self.schematics_service.get_workspace_activity(
w_id=ws['id'],
activity_id=activity[1]
)
assert get_workspace_activity_response.get_status_code() == 200
workspace_activity = get_workspace_activity_response.get_result()
assert workspace_activity is not None
self.deleteWorkspaceById(ws['id'])
@needscredentials
def test_run_workspace_commands(self):
(ws, activityid) = self.applyWorkspaceAction()
# Construct a dict representation of a TerraformCommand model
terraform_command_model = {
'command': 'state show',
'command_params': 'data.template_file.test',
'command_name': 'Test Command',
'command_desc': 'Check command execution',
'command_onError': 'continue',
'command_dependsOn': ''
}
run_workspace_commands_response = self.schematics_service.run_workspace_commands(
w_id=ws['id'],
refresh_token=self.refresh_token,
commands=[terraform_command_model],
operation_name='testString',
description='testString'
)
self.waitForWorkspaceActivityStatus(ws['id'], activityid, "COMPLETED")
assert run_workspace_commands_response.get_status_code() == 202
workspace_activity_command_result = run_workspace_commands_response.get_result()
assert workspace_activity_command_result is not None
self.deleteWorkspaceById(ws['id'])
@needscredentials
def test_apply_workspace_command(self):
ws = self.uploadTarFile()
self.waitForWorkspaceStatus(ws['id'], "INACTIVE")
apply_workspace_command_response = self.schematics_service.apply_workspace_command(
w_id=ws['id'],
refresh_token=self.refresh_token
)
assert apply_workspace_command_response.get_status_code() == 202
workspace_activity_apply_result = apply_workspace_command_response.get_result()
assert workspace_activity_apply_result is not None
self.waitForWorkspaceActivityStatus(ws['id'], workspace_activity_apply_result['activityid'], "COMPLETED")
self.deleteWorkspaceById(ws['id'])
@needscredentials
def test_destroy_workspace_command(self):
ws = self.uploadTarFile()
self.waitForWorkspaceStatus(ws['id'], "INACTIVE")
destroy_workspace_command_response = self.schematics_service.destroy_workspace_command(
w_id=ws['id'],
refresh_token=self.refresh_token,
)
assert destroy_workspace_command_response.get_status_code() == 202
workspace_activity_destroy_result = destroy_workspace_command_response.get_result()
assert workspace_activity_destroy_result is not None
self.waitForWorkspaceActivityStatus(ws['id'], workspace_activity_destroy_result['activityid'], "COMPLETED")
self.deleteWorkspaceById(ws['id'])
@needscredentials
def test_plan_workspace_command(self):
ws = self.uploadTarFile()
self.waitForWorkspaceStatus(ws['id'], "INACTIVE")
plan_workspace_command_response = self.schematics_service.plan_workspace_command(
w_id=ws['id'],
refresh_token = self.refresh_token
)
assert plan_workspace_command_response.get_status_code() == 202
workspace_activity_plan_result = plan_workspace_command_response.get_result()
assert workspace_activity_plan_result is not None
self.waitForWorkspaceActivityStatus(ws['id'], workspace_activity_plan_result['activityid'], "COMPLETED")
self.deleteWorkspaceById(ws['id'])
@needscredentials
def test_refresh_workspace_command(self):
ws = self.uploadTarFile()
self.waitForWorkspaceStatus(ws['id'], "INACTIVE")
refresh_workspace_command_response = self.schematics_service.refresh_workspace_command(
w_id=ws['id'],
refresh_token=self.refresh_token
)
assert refresh_workspace_command_response.get_status_code() == 202
workspace_activity_refresh_result = refresh_workspace_command_response.get_result()
assert workspace_activity_refresh_result is not None
self.waitForWorkspaceActivityStatus(ws['id'], workspace_activity_refresh_result['activityid'], "COMPLETED")
self.deleteWorkspaceById(ws['id'])
@needscredentials
def test_get_workspace_inputs(self):
(ws, activityid) = self.applyWorkspaceAction()
get_workspace_inputs_response = self.schematics_service.get_workspace_inputs(
w_id=ws['id'],
t_id=ws['template_data'][0]['id']
)
assert get_workspace_inputs_response.get_status_code() == 200
template_values = get_workspace_inputs_response.get_result()
assert template_values is not None
@needscredentials
def test_replace_workspace_inputs(self):
(ws, activityid) = self.applyWorkspaceAction()
replace_workspace_inputs_response = self.schematics_service.replace_workspace_inputs(
w_id=ws['id'],
t_id=ws['template_data'][0]['id'],
variablestore=[{
'name': 'updated_var',
'value': 'test'
}]
)
assert replace_workspace_inputs_response.get_status_code() == 200
user_values = replace_workspace_inputs_response.get_result()
assert user_values is not None
@needscredentials
def test_get_all_workspace_inputs(self):
(ws, activityid) = self.applyWorkspaceAction()
get_all_workspace_inputs_response = self.schematics_service.get_all_workspace_inputs(
w_id=ws['id']
)
assert get_all_workspace_inputs_response.get_status_code() == 200
workspace_template_values_response = get_all_workspace_inputs_response.get_result()
assert workspace_template_values_response is not None
@needscredentials
def test_get_workspace_input_metadata(self):
(ws, activityid) = self.applyWorkspaceAction()
get_workspace_input_metadata_response = self.schematics_service.get_workspace_input_metadata(
w_id=ws['id'],
t_id=ws['template_data'][0]['id']
)
assert get_workspace_input_metadata_response.get_status_code() == 200
result = get_workspace_input_metadata_response.get_result()
assert result is not None
@needscredentials
def test_get_workspace_outputs(self):
(ws, activityid) = self.applyWorkspaceAction()
get_workspace_outputs_response = self.schematics_service.get_workspace_outputs(
w_id=ws['id']
)
assert get_workspace_outputs_response.get_status_code() == 200
list_output_values_item = get_workspace_outputs_response.get_result()
assert list_output_values_item is not None
@needscredentials
def test_get_workspace_resources(self):
(ws, activityid) = self.applyWorkspaceAction()
get_workspace_resources_response = self.schematics_service.get_workspace_resources(
w_id=ws['id']
)
assert get_workspace_resources_response.get_status_code() == 200
list_template_resources = get_workspace_resources_response.get_result()
assert list_template_resources is not None
@needscredentials
def test_get_workspace_state(self):
(ws, activityid) = self.applyWorkspaceAction()
get_workspace_state_response = self.schematics_service.get_workspace_state(
w_id=ws['id'],
)
assert get_workspace_state_response.get_status_code() == 200
state_store_response_list = get_workspace_state_response.get_result()
assert state_store_response_list is not None
@needscredentials
def test_get_workspace_template_state(self):
(ws, activityid) = self.applyWorkspaceAction()
get_workspace_template_state_response = self.schematics_service.get_workspace_template_state(
w_id=ws['id'],
t_id=ws['template_data'][0]['id']
)
assert get_workspace_template_state_response.get_status_code() == 200
template_state_store = get_workspace_template_state_response.get_result()
assert template_state_store is not None
@needscredentials
def test_get_workspace_activity_logs(self):
(ws, activityid) = self.applyWorkspaceAction()
get_workspace_activity_logs_response = self.schematics_service.get_workspace_activity_logs(
w_id=ws['id'],
activity_id=activityid
)
assert get_workspace_activity_logs_response.get_status_code() == 200
workspace_activity_logs = get_workspace_activity_logs_response.get_result()
assert workspace_activity_logs is not None
@needscredentials
def test_get_workspace_log_urls(self):
(ws, activityid) = self.applyWorkspaceAction()
get_workspace_log_urls_response = self.schematics_service.get_workspace_log_urls(
w_id=ws['id'],
)
assert get_workspace_log_urls_response.get_status_code() == 200
log_store_response_list = get_workspace_log_urls_response.get_result()
assert log_store_response_list is not None
@needscredentials
def test_get_template_logs(self):
(ws, activityid) = self.applyWorkspaceAction()
get_template_logs_response = self.schematics_service.get_template_logs(
w_id=ws['id'],
t_id=ws['template_data'][0]['id'],
log_tf_cmd=True,
log_tf_prefix=True,
log_tf_null_resource=True,
log_tf_ansible=True
)
assert get_template_logs_response.get_status_code() == 200
result = get_template_logs_response.get_result()
assert result is not None
@needscredentials
def test_get_template_activity_log(self):
(ws, activityid) = self.applyWorkspaceAction()
get_template_activity_log_response = self.schematics_service.get_template_activity_log(
w_id=ws['id'],
t_id=ws['template_data'][0]['id'],
activity_id=activityid,
log_tf_cmd=True,
log_tf_prefix=True,
log_tf_null_resource=True,
log_tf_ansible=True
)
assert get_template_activity_log_response.get_status_code() == 200
result = get_template_activity_log_response.get_result()
assert result is not None
|
from julia import Dojo as dojo
import numpy as np
import torch
class TorchStep(torch.autograd.Function):
@staticmethod
def forward(ctx, env, state, input):
if type(state) is np.ndarray:
state = torch.tensor(state)
if type(input) is np.ndarray:
input = torch.tensor(input)
# step
dojo.step(env, state.numpy(), input.numpy(), gradients=True)
# next state
next_state = env.state
if type(next_state) is np.ndarray:
next_state = torch.tensor(next_state)
# Jacobians
jacobian_state = torch.tensor(env.dynamics_jacobian_state)
jacobian_input = torch.tensor(env.dynamics_jacobian_input)
# cache
ctx.save_for_backward(state, input, next_state, jacobian_state, jacobian_input)
# output
return next_state
@staticmethod
def backward(ctx, grad_output):
        # saved in forward: state, input, next_state, and the two dynamics Jacobians
        _state, _input, _next_state, jacobian_state, jacobian_input = ctx.saved_tensors
        # gradients w.r.t. forward's (env, state, input); env is not differentiable
        return None, jacobian_state.T @ grad_output, jacobian_input.T @ grad_output
torch_step = TorchStep.apply
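# A minimal usage sketch (added, not from the original file). It assumes `env` is an
# already-constructed Dojo environment; the attribute names in the commented lines are
# placeholders rather than verified Dojo API, so adapt them to your setup.
if __name__ == "__main__":
    # env = ...                                   # build a Dojo environment via your julia/Dojo setup
    # state = torch.tensor(env.state)             # assumed attribute holding the current state
    # input = torch.zeros(len(env.input))         # assumed attribute giving the input dimension
    # next_state = torch_step(env, state, input)  # differentiable step through Dojo
    # next_state.sum().backward()                 # gradients flow via the cached Jacobians
    pass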
|
T = int(input())
for _ in range(T):
n = int(input())
    triangle = []
    for _ in range(n):
        triangle.append(list(map(int, input().split())))
    # Bottom-up DP: fold each row into the row above, keeping the larger of the
    # two reachable children, so the apex ends up holding the maximum path sum.
    for i in reversed(range(0, n-1)):
        for j in range(0, i+1):
            triangle[i][j] += max(triangle[i+1][j], triangle[i+1][j+1])
    print(triangle[0][0])
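# Worked example (added; the classic triangle instance): for the input
#   1
#   5
#   7
#   3 8
#   8 1 0
#   2 7 4 4
#   4 5 2 6 5
# the script prints 30, the sum along the path 7 -> 3 -> 8 -> 7 -> 5.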
|
from abc import ABC, abstractmethod
import OpenGL.GL as gl
from color import Color
from interfaces import IColorable, IMovable, IAnimation
class Entity:
""" Simple entity that knows its position only """
def __init__(self, x: int, y: int):
self.x = x
self.y = y
def __repr__(self):
return f"<Entity x={self.x}, y={self.y}>"
class MovableMixin(IMovable):
""" Mixin that provides the ability to move to a position or by given dx/dy amount """
def move_to(self, dest_x: int, dest_y: int):
self.x = dest_x
self.y = dest_y
def move_by(self, dx: int, dy: int):
self.x += dx
self.y += dy
def get_speed(self):
return self.speed_x, self.speed_y
def set_speed(self, speed_x: int, speed_y: int):
self.speed_x = speed_x
self.speed_y = speed_y
def get_coords(self):
return self.x, self.y
def set_coords(self, x: int, y: int):
self.x = x
self.y = y
def move(self):
self.x += self.speed_x
self.y += self.speed_y
class DrawableMixin(ABC):
""" Mixin that provides the ability for the entity to draw itself (abstract) """
@abstractmethod
def draw(self):
pass
class ColorableMixin(IColorable):
""" Mixin that provides the ability to have color """
DEFAULT_COLOR = Color(255, 255, 255)
def set_color(self, color: Color):
self.color = color
def get_color(self):
return self.color
class AnimatedMixin:
""" Mixin to provide the ability to have a number of animations applied to the object """
def __init__(self):
self.clear_animations()
self.target = None
def clear_animations(self):
print(f"AnimatedMixin: Clearing all animations on {self}")
self.animations = []
def add_animation(self, animation: IAnimation):
animation.set_target(self)
self.animations.append(animation)
def delete_animation(self, animation: IAnimation):
if animation in self.animations:
self.animations.remove(animation)
def animate(self, dt: int):
for anim in list(self.animations):
anim.update(dt)
if anim.is_finished():
print(f"AnimatedMixin: {anim} is finished and being removed")
self.delete_animation(anim)
class Rectangle(Entity, DrawableMixin, ColorableMixin):
""" Simple drawable, colorable rectangle """
def __init__(self, x: int, y: int, width: int, height: int, color=None):
super().__init__(x, y)
self.width = width
self.height = height
self.set_color(color if color is not None else ColorableMixin.DEFAULT_COLOR)
def draw(self):
gl.glBegin(gl.GL_QUADS)
gl.glColor3ub(self.color.r, self.color.g, self.color.b)
gl.glVertex2f(self.x, self.y)
gl.glVertex2f(self.x + self.width, self.y)
gl.glVertex2f(self.x + self.width, self.y + self.height)
gl.glVertex2f(self.x, self.y + self.height)
gl.glEnd()
def __repr__(self):
return f"<Rectangle x={self.x}, y={self.y}>"
class Ball(Rectangle, MovableMixin, AnimatedMixin):
""" Implementation of the ball in game """
def __init__(self, *args):
super().__init__(*args)
AnimatedMixin.__init__(self)
self.set_speed(0, 0)
    # When bouncing off the left/right edge, optionally adjust the vertical
    # speed as well, e.g. when the pad was moving at the moment of impact
def bounce_x(self, adjust_x: int = 0):
self.speed_x = -self.speed_x
if adjust_x:
print("Ball: Increasing vertical speed")
self.speed_y += adjust_x
def bounce_y(self):
self.speed_y *= -1
def update(self, dt: int):
self.animate(dt)
self.move()
def increase_speed(self):
self.speed_x += 1
self.speed_y += 1
def __repr__(self):
return f"<Ball x={self.x}, y={self.y}>"
class Pad(Rectangle, MovableMixin, AnimatedMixin):
""" Implementation of the pad in game """
def __init__(self, *args):
super().__init__(*args)
AnimatedMixin.__init__(self)
def update(self, dt: int):
self.animate(dt)
def __repr__(self):
return f"<Pad x={self.x}, y={self.y}>"
|
# Generated by Django 2.0.1 on 2018-11-06 13:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("invitations", "0004_invitation_sent_at"),
]
operations = [
migrations.AlterField(
model_name="invitation",
name="case_role",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
to="security.CaseRole",
),
),
]
|
SERVER_NAME = "localhost:5005"
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
HOST_LIST = "host_list"
NAME = "name"
PORT_LIST_ID = "port_list_id"
class Output:
MESSAGE = "message"
SUCCESS = "success"
TARGET_ID = "target_id"
class CreateTargetInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"host_list": {
"type": "array",
"title": "Target IP List",
"description": "Target IP List, in the form of a JSON array for each host or list of hosts. CIDR notation can be used. For example, the following would be a valid list: ['192.168.0.101', '192.168.1.101,192.168.1.103,192.168.1.105','192.168.1.2/24','192.168.3.105-112']",
"items": {
"type": "string"
},
"order": 2
},
"name": {
"type": "string",
"title": "Target Name",
"description": "Target Name",
"order": 1
},
"port_list_id": {
"type": "string",
"title": "Port List ID",
"description": "ID of the Port List to use for scanning, if you want to scan a custom list of ports",
"order": 3
}
},
"required": [
"name",
"host_list"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class CreateTargetOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"message": {
"type": "string",
"title": "Message",
"description": "Message",
"order": 3
},
"success": {
"type": "boolean",
"title": "Success",
"description": "Success",
"order": 2
},
"target_id": {
"type": "string",
"title": "Target ID",
"description": "Target ID",
"order": 1
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
|
# theory MPD client
# Copyright (C) 2008 Ryan Roemmich <ralfonso@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import urllib2
import xml.dom.minidom
import os
import logging
import re
import hashlib
import hmac
import base64
import datetime
from pylons import app_globals as g
class NoArtError(Exception):
pass
class NoArtOnDisk(Exception):
pass
class AlbumArt:
amazonurl = None
imgurl = None
logger = None
def __init__(self):
self.logger = logging.getLogger(__name__)
self.www_root = './img/art/'
self.disk_root = 'theory/public/img/art/'
def album_fetch(self,artist,album):
"""
attempt to load an album's cover art from disk.
if it doesn't exist, make a request using Amazon's
Web Services
"""
self.artist = artist
self.album = album
# some ID3 tags split a two-disc volume into two, attempt to remove that part of the tag for the search
        disc_num_found = re.search(r'(\(disc.+\)|\(CD.+\))', self.album, re.IGNORECASE)
if disc_num_found:
self.album = self.album[:disc_num_found.start()-1]
self.set_file_paths()
try:
self.check_disk()
except NoArtOnDisk:
self.amazon_fetch()
def artist_art(self,artist):
""" return all of the album covers for a particular artist """
images = []
# get a list of all of the filenames associated with the selected artist
filenames = [filename for filename in os.listdir(self.disk_root) if filename.startswith("%s -" % artist)]
for i in filenames:
album_name = i.split(' - ')[1][:-4]
# we include the name of the album in the list we're returning so
# we can auto-link the img on the albums list page
images.append({
'album' :album_name,
'imgurl' :"%s/%s" % (self.www_root,i)
})
return images
def log(self,msg):
self.logger.info(msg)
def amazon_fetch(self):
"""
attempts to fetch album cover art from Amazon Web Services and
calls save_to_disk() to save the largest available image permanently
to avoid subsequent lookups. first tries to fetch the artist + album
but falls back to artist search only if the album art isn't found
"""
if g.tc.awskey == '':
raise NoArtError
artist_safe = urllib2.quote(self.artist)
album_safe = urllib2.quote(self.album)
urls = []
date = datetime.datetime.utcnow()
date = date.replace(microsecond=0)
timestamp = date.isoformat()
query_string = {'Service': 'AWSECommerceService',
'AWSAccessKeyId': g.tc.awskey,
'Operation': 'ItemSearch',
'SearchIndex': 'Music',
'Version': '2009-10-01',
'ResponseGroup': 'Images',
'Artist': artist_safe,
'Title': album_safe,
'Timestamp': timestamp + 'Z'}
query_string_sorted = '&'.join(['='.join((k,query_string[k])) for k in sorted(query_string.iterkeys())])
urls.append({'verb': 'GET',
'protocol': 'http://',
'host': 'ecs.amazonaws.com',
'request_uri': '/onca/xml',
'query_string': query_string_sorted.replace(':','%3A')})
del query_string['Title']
query_string_sorted = '&'.join(['='.join((k,query_string[k])) for k in sorted(query_string.iterkeys())])
urls.append({'verb': 'GET',
'protocol': 'http://',
'host': 'ecs.amazonaws.com',
'request_uri': '/onca/xml',
'query_string': query_string_sorted.replace(':','%3A')})
for url in urls:
encode_string = '\n'.join((url['verb'],url['host'],url['request_uri'],url['query_string']))
h = hmac.new(str(g.tc.aws_secret), str(encode_string), hashlib.sha256)
hmac_string = h.digest()
signature = base64.b64encode(hmac_string).replace('+','%2B').replace('=','%3D')
real_url = url['protocol'] + url['host'] + url['request_uri'] + '?' + url['query_string'] + '&Signature=%s' % signature
try:
self.log('Fetching Amazon album image: %s' % real_url)
urlfile = urllib2.urlopen(real_url)
except urllib2.URLError:
# there are probably other exceptions that need to be caught here..
self.log('Error fetching Amazon XML')
raise NoArtError
doc = xml.dom.minidom.parse(urlfile)
urlfile.close()
imgnodes = doc.getElementsByTagName('LargeImage')
if len(imgnodes) > 0:
node = imgnodes[0]
self.amazonurl = node.firstChild.firstChild.nodeValue
self.log('Found album art: %s' % self.amazonurl)
break
if not self.amazonurl:
raise NoArtError
self.save_to_disk()
def set_file_paths(self):
""" set up the local paths images on both disk and web root """
artist_pathsafe = self.artist.replace(os.sep,' ')
album_pathsafe = self.album.replace(os.sep,' ')
filename = "%s - %s.jpg" % (artist_pathsafe,album_pathsafe)
self.www_path = os.path.join(self.www_root,filename)
self.disk_path = os.path.join(self.disk_root,filename)
def check_disk(self):
""" check if cover art exists locally """
if os.path.exists(self.disk_path):
self.imgurl = self.www_path
else:
raise NoArtOnDisk
def save_to_disk(self):
""" save the fetched cover image to disk permanently """
try:
urlfile = urllib2.urlopen(self.amazonurl)
except urllib2.URLError:
raise NoArtError
f = open(self.disk_path,'wb')
f.write(urlfile.read())
f.close()
self.imgurl = self.www_path
def dir_size(self):
""" return the sum of the cover art disk usage """
dir_size = 0
for (path,dirs,files) in os.walk(self.disk_root):
for file in files:
filename = os.path.join(path,file)
dir_size += os.path.getsize(filename)
return dir_size
|
"""
Login page
"""
import requests
from bs4 import BeautifulSoup
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
URL = 'https://login.yahoo.com'
DESKTOP_USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1)\
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36'
def authenticated_session(username, password):
"""
Given username and password, return an authenticated Yahoo `requests`
session that can be used for further scraping requests.
    Raise an exception if authentication fails.
"""
session = requests.Session()
session.headers.update(headers())
response = session.get(url())
login_path = path(response.text)
login_url = urljoin(response.url, login_path)
login_post_data = post_data(response.text, username, password)
response = session.post(login_url, data=login_post_data)
if response.headers['connection'] == 'close':
        raise Exception('Authentication failed')
return session
def url():
"""
Return the URL for the login page
"""
return URL
def headers():
"""
Return the headers necessary to get expected version of the login page
"""
return {
'user-agent': DESKTOP_USER_AGENT
}
def path(page):
"""
Return the required path for login
"""
    soup = BeautifulSoup(page, 'html.parser')
    try:
        return soup.find(id='mbr-login-form')['action']
    except (TypeError, KeyError):
        return None
def post_data(page, username, password):
"""
Given username and password, return the post data necessary for login
"""
    soup = BeautifulSoup(page, 'html.parser')
    try:
        inputs = soup.find(id='hiddens').findAll('input')
        post_data = {field['name']: field['value'] for field in inputs}
        post_data['username'] = username
        post_data['passwd'] = password
        return post_data
    except (AttributeError, KeyError):
        return None
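# A minimal usage sketch (added). The credentials below are placeholders; Yahoo may
# also change its login flow at any time, which would break this kind of scraping.
if __name__ == '__main__':
    session = authenticated_session('example_user', 'example_password')
    print(session.get('https://www.yahoo.com').status_code)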
|
# --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Reading file (the variable `path` is assumed to be predefined by the exercise environment
#and to point to the census CSV file)
data = np.genfromtxt(path, delimiter=",", skip_header=1)
print(data.shape)
census=np.concatenate((data,new_record),axis=0)
print(census.shape)
#age
age=census[:,0]
max_age=age.max()
min_age=age.min()
age_mean=age.mean()
age_std=age.std()
print('Max Age=',max_age)
print('Min Age=',min_age)
print('Mean of ages=',age_mean)
print('Standard deviation of ages=',age_std)
#Race
race=census[:,2]
mask_0=race==0
race_0=np.array(race[mask_0])
len_0=len(race_0)
print('No. of Person in Race 0=',len_0)
mask_1=race==1
race_1=np.array(race[mask_1])
len_1=len(race_1)
print('No. of Person in Race 1=',len_1)
mask_2=race==2
race_2=np.array(race[mask_2])
len_2=len(race_2)
print('No. of Person in Race 2=',len_2)
mask_3=race==3
race_3=np.array(race[mask_3])
len_3=len(race_3)
print('No. of Person in Race 3=',len_3)
mask_4=race==4
race_4=np.array(race[mask_4])
len_4=len(race_4)
print('No. of Person in Race 4=',len_4)
len_of_each_race=np.array([len_0,len_1,len_2,len_3,len_4])
print(len_of_each_race)
minority_race=np.argmin(len_of_each_race, axis=0)
print('Minority race=',minority_race)
#senior_Citizens
senior_citizens=census[census[:,0]>60]
senior_citizens_len=len(senior_citizens)
working_hours_sum=sum(senior_citizens[:,6])
print('working hours sum=',working_hours_sum)
avg_working_hours=(working_hours_sum/senior_citizens_len)
print('average working hours',avg_working_hours)
#Income based on education: true or false
high=census[census[:,1]>10]
low=census[census[:,1]<=10]
avg_pay_high=np.mean(high[:,7])
avg_pay_low=np.mean(low[:,7])
print('Avg pay high=',avg_pay_high)
print('Avg pay low=',avg_pay_low)
compare=np.array_equal(avg_pay_high,avg_pay_low)
print(compare)
|
"""
**************
SparseGraph 6
**************
Read graphs in graph6 and sparse6 format.
Format
------
"graph6 and sparse6 are formats for storing undirected graphs in a
compact manner, using only printable ASCII characters. Files in these
formats have text type and contain one line per graph."
http://cs.anu.edu.au/~bdm/data/formats.html
See http://cs.anu.edu.au/~bdm/data/formats.txt for details.
"""
# Original author: D. Eppstein, UC Irvine, August 12, 2003.
# The original code at http://www.ics.uci.edu/~eppstein/PADS/ is public domain.
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2004-2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['read_graph6', 'parse_graph6', 'read_graph6_list',
'read_sparse6', 'parse_sparse6', 'read_sparse6_list']
import networkx as nx
from networkx.exception import NetworkXError
from networkx.utils import open_file
# graph6
def read_graph6(path):
"""Read simple undirected graphs in graph6 format from path.
Returns a single Graph.
"""
return read_graph6_list(path)[0]
def parse_graph6(str):
"""Read a simple undirected graph in graph6 format from string.
Returns a single Graph.
"""
def bits():
"""Return sequence of individual bits from 6-bit-per-value
list of data values."""
for d in data:
for i in [5,4,3,2,1,0]:
yield (d>>i)&1
if str.startswith('>>graph6<<'):
str = str[10:]
data = graph6data(str)
n, data = graph6n(data)
nd = (n*(n-1)//2 + 5) // 6
if len(data) != nd:
raise NetworkXError(\
'Expected %d bits but got %d in graph6' % (n*(n-1)//2, len(data)*6))
G=nx.Graph()
G.add_nodes_from(range(n))
for (i,j),b in zip([(i,j) for j in range(1,n) for i in range(j)], bits()):
if b: G.add_edge(i,j)
return G
@open_file(0,mode='rt')
def read_graph6_list(path):
"""Read simple undirected graphs in graph6 format from path.
Returns a list of Graphs, one for each line in file.
"""
glist=[]
for line in path:
line = line.strip()
if not len(line): continue
glist.append(parse_graph6(line))
return glist
# sparse6
def read_sparse6(path):
"""Read simple undirected graphs in sparse6 format from path.
Returns a single MultiGraph."""
return read_sparse6_list(path)[0]
@open_file(0,mode='rt')
def read_sparse6_list(path):
"""Read undirected graphs in sparse6 format from path.
Returns a list of MultiGraphs, one for each line in file."""
glist=[]
for line in path:
line = line.strip()
if not len(line): continue
glist.append(parse_sparse6(line))
return glist
def parse_sparse6(string):
"""Read undirected graph in sparse6 format from string.
Returns a MultiGraph.
"""
if string.startswith('>>sparse6<<'):
        string = string[11:]  # strip the 11-character '>>sparse6<<' header
if not string.startswith(':'):
raise NetworkXError('Expected colon in sparse6')
n, data = graph6n(graph6data(string[1:]))
k = 1
while 1<<k < n:
k += 1
def parseData():
"""Return stream of pairs b[i], x[i] for sparse6 format."""
chunks = iter(data)
d = None # partial data word
dLen = 0 # how many unparsed bits are left in d
while 1:
if dLen < 1:
d = next(chunks)
dLen = 6
dLen -= 1
b = (d>>dLen) & 1 # grab top remaining bit
x = d & ((1<<dLen)-1) # partially built up value of x
xLen = dLen # how many bits included so far in x
while xLen < k: # now grab full chunks until we have enough
d = next(chunks)
dLen = 6
x = (x<<6) + d
xLen += 6
x = (x >> (xLen - k)) # shift back the extra bits
dLen = xLen - k
yield b,x
v = 0
G=nx.MultiGraph()
G.add_nodes_from(range(n))
for b,x in parseData():
if b: v += 1
if x >= n: break # padding with ones can cause overlarge number here
elif x > v: v = x
else:
G.add_edge(x,v)
return G
# helper functions
def graph6data(str):
"""Convert graph6 character sequence to 6-bit integers."""
v = [ord(c)-63 for c in str]
if min(v) < 0 or max(v) > 63:
return None
return v
def graph6n(data):
"""Read initial one or four-unit value from graph6 sequence.
Return value, rest of seq."""
if data[0] <= 62:
return data[0], data[1:]
return (data[1]<<12) + (data[2]<<6) + data[3], data[4:]
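# A small sanity check (added, hedged): 'A_' is the graph6 encoding of the single-edge
# graph on two nodes, so parsing it should yield exactly one edge between nodes 0 and 1.
if __name__ == '__main__':
    G = parse_graph6('A_')
    print(sorted(G.edges()))  # expected: [(0, 1)]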
|
from odoo import SUPERUSER_ID, api
from odoo.tools.sql import column_exists
def migrate(cr, version=None):
env = api.Environment(cr, SUPERUSER_ID, {})
if column_exists(cr, "product_template", "purchase_request"):
_migrate_purchase_request_to_property(env)
def _migrate_purchase_request_to_property(env):
"""Create properties for all products with the flag set on all companies"""
env.cr.execute("select id, coalesce(purchase_request, False) from product_template")
values = dict(env.cr.fetchall())
for company in env["res.company"].with_context(active_test=False).search([]):
env["ir.property"].with_context(force_company=company.id).set_multi(
"purchase_request", "product.template", values, False,
)
env.cr.execute("alter table product_template drop column purchase_request")
|
# -*- coding: utf-8 -*-
""" Module implementing alignment estimators on ndarrays
"""
import numpy as np
import scipy
from scipy.spatial.distance import cdist
from scipy import linalg
from scipy.sparse import diags
import sklearn
from sklearn.base import BaseEstimator, TransformerMixin
from scipy.optimize import linear_sum_assignment
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.linear_model import RidgeCV
from joblib import Parallel, delayed
import warnings
def scaled_procrustes(X, Y, scaling=False, primal=None):
"""Compute a mixing matrix R and a scaling sc such that Frobenius norm
||sc RX - Y||^2 is minimized and R is an orthogonal matrix.
Parameters
----------
X: (n_samples, n_features) nd array
source data
Y: (n_samples, n_features) nd array
target data
scaling: bool
If scaling is true, computes a floating scaling parameter sc such that:
||sc * RX - Y||^2 is minimized and
- R is an orthogonal matrix
- sc is a scalar
If scaling is false sc is set to 1
primal: bool or None, optional,
Whether the SVD is done on the YX^T (primal) or Y^TX (dual)
if None primal is used iff n_features <= n_timeframes
Returns
----------
R: (n_features, n_features) nd array
transformation matrix
    sc: float
        scaling parameter
"""
X = X.astype(np.float64, copy=False)
Y = Y.astype(np.float64, copy=False)
if np.linalg.norm(X) == 0 or np.linalg.norm(Y) == 0:
return np.eye(X.shape[1]), 1
if primal is None:
primal = X.shape[0] >= X.shape[1]
if primal:
A = Y.T.dot(X)
if A.shape[0] == A.shape[1]:
            A += 1.e-18 * np.eye(A.shape[0])
U, s, V = linalg.svd(A, full_matrices=0)
R = U.dot(V)
else: # "dual" mode
Uy, sy, Vy = linalg.svd(Y, full_matrices=0)
Ux, sx, Vx = linalg.svd(X, full_matrices=0)
A = np.diag(sy).dot(Uy.T).dot(Ux).dot(np.diag(sx))
U, s, V = linalg.svd(A)
R = Vy.T.dot(U).dot(V).dot(Vx)
if scaling:
sc = s.sum() / (np.linalg.norm(X) ** 2)
else:
sc = 1
return R.T, sc
def optimal_permutation(X, Y):
"""Compute the optmal permutation matrix of X toward Y
Parameters
----------
X: (n_samples, n_features) nd array
source data
Y: (n_samples, n_features) nd array
target data
Returns
----------
permutation : (n_features, n_features) nd array
transformation matrix
"""
dist = pairwise_distances(X.T, Y.T)
u = linear_sum_assignment(dist)
u = np.array(list(zip(*u)))
permutation = scipy.sparse.csr_matrix(
(np.ones(X.shape[1]), (u[:, 0], u[:, 1]))).T
return permutation
def _projection(x, y):
"""Compute scalar d minimizing ||dx-y||
Parameters
----------
x: (n_features) nd array
source vector
y: (n_features) nd array
target vector
Returns
--------
    d: float
        scaling factor
"""
if (x == 0).all():
return 0
else:
return np.dot(x, y) / np.linalg.norm(x)**2
def _voxelwise_signal_projection(X, Y, n_jobs=1, parallel_backend='threading'):
"""Compute D, list of scalar d_i minimizing :
||d_i * x_i - y_i|| for every x_i, y_i in X, Y
Parameters
----------
X: (n_samples, n_features) nd array
source data
Y: (n_samples, n_features) nd array
target data
Returns
--------
    D: list of floats
        List of optimal scaling factors
"""
return Parallel(n_jobs, parallel_backend)(delayed(_projection)(
voxel_source, voxel_target)
for voxel_source, voxel_target in zip(X, Y))
class Alignment(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def fit(self, X, Y):
pass
def transform(self, X):
pass
class Identity(Alignment):
"""Compute no alignment, used as baseline for benchmarks : RX = X.
"""
def transform(self, X):
"""returns X"""
return X
class DiagonalAlignment(Alignment):
'''Compute the voxelwise projection factor between X and Y.
Parameters
----------
n_jobs: integer, optional (default = 1)
The number of CPUs to use to do the computation. -1 means
'all CPUs', -2 'all CPUs but one', and so on.
parallel_backend: str, ParallelBackendBase instance, None (default: 'threading')
Specify the parallelization backend implementation. For more
        information, see the joblib.Parallel documentation.
Attributes
-----------
R : scipy.sparse.diags
Scaling matrix containing the optimal shrinking factor for every voxel
'''
def __init__(self, n_jobs=1, parallel_backend='threading'):
self.n_jobs = n_jobs
self.parallel_backend = parallel_backend
def fit(self, X, Y):
'''
Parameters
--------------
X: (n_samples, n_features) nd array
source data
Y: (n_samples, n_features) nd array
target data'''
shrinkage_coefficients = _voxelwise_signal_projection(
X.T, Y.T, self.n_jobs, self.parallel_backend)
self.R = diags(shrinkage_coefficients)
return
def transform(self, X):
"""Transform X using optimal coupling computed during fit.
"""
return self.R.dot(X.T).T
class ScaledOrthogonalAlignment(Alignment):
"""Compute a orthogonal mixing matrix R and a scaling sc such that Frobenius norm \
||sc RX - Y||^2 is minimized.
Parameters
-----------
scaling : boolean, optional
Determines whether a scaling parameter is applied to improve transform.
Attributes
-----------
R : ndarray (n_features, n_features)
Optimal orthogonal transform
"""
def __init__(self, scaling=True):
self.scaling = scaling
self.scale = 1
def fit(self, X, Y):
""" Fit orthogonal R s.t. ||sc XR - Y||^2
Parameters
-----------
X: (n_samples, n_features) nd array
source data
Y: (n_samples, n_features) nd array
target data
"""
R, sc = scaled_procrustes(X, Y, scaling=self.scaling)
self.scale = sc
self.R = sc * R
return self
def transform(self, X):
"""Transform X using optimal transform computed during fit.
"""
return X.dot(self.R)
class RidgeAlignment(Alignment):
""" Compute a scikit-estimator R using a mixing matrix M s.t Frobenius \
norm || XM - Y ||^2 + alpha * ||M||^2 is minimized with cross-validation
Parameters
----------
R : scikit-estimator from sklearn.linear_model.RidgeCV
with methods fit, predict
    alphas : numpy array of shape [n_alphas]
Array of alpha values to try. Regularization strength; \
must be a positive float. Regularization improves the conditioning \
of the problem and reduces the variance of the estimates. \
Larger values specify stronger regularization. Alpha corresponds to \
``C^-1`` in other models such as LogisticRegression or LinearSVC.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.\
Possible inputs for cv are:
        - None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
"""
def __init__(self, alphas=[0.1, 1.0, 10.0, 100, 1000], cv=4):
self.alphas = [alpha for alpha in alphas]
self.cv = cv
def fit(self, X, Y):
""" Fit R s.t. || XR - Y ||^2 + alpha ||R||^2 is minimized with cv
Parameters
-----------
X: (n_samples, n_features) nd array
source data
Y: (n_samples, n_features) nd array
target data
"""
self.R = RidgeCV(alphas=self.alphas, fit_intercept=True,
normalize=False,
scoring=sklearn.metrics.SCORERS['r2'],
cv=self.cv)
self.R.fit(X, Y)
return self
def transform(self, X):
"""Transform X using optimal transform computed during fit.
"""
return self.R.predict(X)
class Hungarian(Alignment):
'''Compute the optimal permutation matrix of X toward Y
Attributes
----------
R : scipy.sparse.csr_matrix
Mixing matrix containing the optimal permutation
'''
def fit(self, X, Y):
'''
Parameters
-----------
X: (n_samples, n_features) nd array
source data
Y: (n_samples, n_features) nd array
target data'''
self.R = optimal_permutation(X, Y).T
return self
def transform(self, X):
"""Transform X using optimal permutation computed during fit.
"""
return X.dot(self.R.toarray())
def _import_ot():
'''Import the optional dependency ot (POT module) if installed or give
back a clear error message to the user if not installed
'''
try:
import ot
except ImportError:
from fmralign.version import REQUIRED_MODULE_METADATA
for module, metadata in REQUIRED_MODULE_METADATA:
if module == 'POT':
POT_min_version = metadata['min_version']
raise ImportError("To use optimal transport solver, POT module(v > {}) \
is necessary but not installed by default with fmralign. To install \
it run 'pip install POT'".format(POT_min_version))
else:
return ot
class POTAlignment(Alignment):
'''Compute the optimal coupling between X and Y with entropic regularization.
Legacy implementation of optimal transport alignment based on POT.
Kept to check compatibility of new implementation
Parameters
----------
solver : str (optional)
solver from POT called to find optimal coupling 'sinkhorn', \
'greenkhorn', 'sinkhorn_stabilized','sinkhorn_epsilon_scaling', 'exact' \
see POT/ot/bregman on Github for source code of solvers
metric : str(optional)
metric used to create transport cost matrix, \
see full list in scipy.spatial.distance.cdist doc
reg : int (optional)
level of entropic regularization
Attributes
----------
    R : (n_features, n_features) nd array
        Mixing matrix containing the optimal coupling
'''
def __init__(self, solver='sinkhorn_epsilon_scaling',
metric='euclidean', reg=1, max_iter=1000, tol=1e-3):
self.ot = _import_ot()
self.solver = solver
self.metric = metric
self.reg = reg
self.max_iter = max_iter
self.tol = tol
def fit(self, X, Y):
'''Parameters
--------------
X: (n_samples, n_features) nd array
source data
Y: (n_samples, n_features) nd array
target data
'''
n = len(X.T)
if n > 5000:
warnings.warn(
'One parcel is {} voxels. As optimal transport on this region '.format(n) +
'would take too much time, no alignment was performed on it. ' +
'Decrease parcel size to have intended behavior of alignment.')
self.R = np.eye(n)
return self
else:
a = np.ones(n) * 1 / n
b = np.ones(n) * 1 / n
M = cdist(X.T, Y.T, metric=self.metric)
if self.solver == 'exact':
self.R = self.ot.lp.emd(a, b, M) * n
else:
self.R = self.ot.sinkhorn(
a, b, M, self.reg, method=self.solver, numItermax=self.max_iter, stopThr=self.tol) * n
return self
def transform(self, X):
"""Transform X using optimal coupling computed during fit.
"""
return X.dot(self.R)
class OptimalTransportAlignment(Alignment):
'''Compute the optimal coupling between X and Y with entropic regularization
using a OTT as a backend for acceleration.
Parameters
----------
metric : str(optional)
metric used to create transport cost matrix, \
see full list in scipy.spatial.distance.cdist doc
reg : int (optional)
level of entropic regularization
Attributes
----------
    R : (n_features, n_features) nd array
        Mixing matrix containing the optimal coupling
'''
def __init__(self, metric='euclidean', reg=1, max_iter=1000, tol=1e-3):
self.metric = metric
self.reg = reg
self.tol = tol
self.max_iter = max_iter
def fit(self, X, Y):
'''Parameters
--------------
X: (n_samples, n_features) nd array
source data
Y: (n_samples, n_features) nd array
target data
'''
from ott.geometry import geometry
from ott.tools import transport
n = len(X.T)
cost_matrix = cdist(X.T, Y.T, metric=self.metric)
geom = geometry.Geometry(cost_matrix=cost_matrix, epsilon=self.reg)
P = transport.Transport(
geom, max_iterations=self.max_iter, threshold=self.tol)
P.solve()
self.R = np.asarray(P.matrix * n)
return self
def transform(self, X):
"""Transform X using optimal coupling computed during fit.
"""
return X.dot(self.R)
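# A minimal self-check (added, not part of the package): with Y an exact orthogonal
# rotation of X, ScaledOrthogonalAlignment should recover the transform, and with Y a
# column permutation of X, Hungarian should recover the permutation.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.randn(30, 6)
    Q, _ = np.linalg.qr(rng.randn(6, 6))   # a random orthogonal matrix
    ortho = ScaledOrthogonalAlignment(scaling=False).fit(X, X.dot(Q))
    print(np.allclose(ortho.transform(X), X.dot(Q)))   # expected: True
    perm = rng.permutation(6)
    hungarian = Hungarian().fit(X, X[:, perm])
    print(np.allclose(hungarian.transform(X), X[:, perm]))   # expected: True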
|
import os
from pathlib import Path
from nuclear import *
from nuclear.utils.files import script_real_path
from nuclear import shell
from tests.asserts import MockIO
def test_bash_install_twice():
app_name = 'nuclear_test_dupa123'
with MockIO('--install-bash', 'nuclear_test_dupa123') as mockio:
CliBuilder().run()
assert 'Link installed' in mockio.output()
assert 'Autocompleter has been installed' in mockio.output()
assert os.path.islink(f'/usr/bin/{app_name}')
assert os.path.realpath(f'/usr/bin/{app_name}') == script_real_path()
assert os.path.exists(f'/etc/bash_completion.d/nuclear_{app_name}.sh')
with MockIO('--install-bash', 'nuclear_test_dupa123') as mockio:
CliBuilder().run()
assert 'Link installed' in mockio.output()
assert 'Autocompleter has been installed' in mockio.output()
assert os.path.islink(f'/usr/bin/{app_name}')
assert os.path.realpath(f'/usr/bin/{app_name}') == script_real_path()
assert os.path.exists(f'/etc/bash_completion.d/nuclear_{app_name}.sh')
shell(f'sudo rm -f /usr/bin/{app_name}')
shell(f'sudo rm -f /etc/bash_completion.d/nuclear_{app_name}.sh')
assert not os.path.exists(f'/usr/bin/{app_name}')
assert not os.path.exists(f'/etc/bash_completion.d/nuclear_{app_name}.sh')
def test_autocomplete_install_explicit_name():
app_name = 'nuclear_test_dupa123'
with MockIO('--install-autocomplete', 'nuclear_test_dupa123') as mockio:
CliBuilder().run()
assert 'Autocompleter has been installed' in mockio.output()
assert os.path.exists(f'/etc/bash_completion.d/nuclear_{app_name}.sh')
completion_script = Path(f'/etc/bash_completion.d/nuclear_{app_name}.sh').read_text()
assert '''COMPREPLY=($(nuclear_test_dupa123 --autocomplete "${COMP_LINE}" ${COMP_CWORD}))''' \
in completion_script
assert '''complete -o filenames -F _autocomplete_1446250409 nuclear_test_dupa123''' in completion_script
shell(f'sudo rm -f /etc/bash_completion.d/nuclear_{app_name}.sh')
assert not os.path.exists(f'/etc/bash_completion.d/nuclear_{app_name}.sh')
def test_autocomplete_install_implicit_name():
app_name = 'glue'
with MockIO('--install-autocomplete') as mockio:
CliBuilder().run()
assert 'Autocompleter has been installed' in mockio.output()
assert os.path.exists(f'/etc/bash_completion.d/nuclear_{app_name}.sh')
completion_script = Path(f'/etc/bash_completion.d/nuclear_{app_name}.sh').read_text()
assert '''COMPREPLY=($(glue --autocomplete "${COMP_LINE}" ${COMP_CWORD}))''' in completion_script
assert '''complete -o filenames -F _autocomplete_70451630 glue''' in completion_script
shell(f'sudo rm -f /etc/bash_completion.d/nuclear_{app_name}.sh')
assert not os.path.exists(f'/etc/bash_completion.d/nuclear_{app_name}.sh')
|
import argparse
import time
from collections import defaultdict
import cupy as cp
import cudf
import pandas as pd
import rmm
import gpugwas.io as gwasio
import gpugwas.filter as gwasfilter
import gpugwas.algorithms as algos
import gpugwas.dataprep as dp
import gpugwas.runner as runner
from gpugwas.vizb import show_qq_plot, show_manhattan_plot
#import gpugwas.processing as gwasproc
import warnings
warnings.filterwarnings('ignore', 'Expected ')
warnings.simplefilter('ignore')
parser = argparse.ArgumentParser(description='Run GPU GWAS Pipeline')
parser.add_argument('--vcf_path', default = './data/test.vcf')
parser.add_argument('--annotation_path', default = './data/1kg_annotations.txt')
parser.add_argument('--workdir', default = './temp/')
args = parser.parse_args()
# Initialize Memory Pool to 10GB
cudf.set_allocator(pool=True, initial_pool_size=1e10)
cp.cuda.set_allocator(rmm.rmm_cupy_allocator)
# Load data
print("Loading data")
vcf_df, feature_mapping = gwasio.load_vcf(args.vcf_path, info_keys=["AF"], format_keys=["GT", "DP"])
print(vcf_df.head())
print("Loading annotations")
ann_df = gwasio.load_annotations(args.annotation_path)
#print(ann_df)
# Start benchmarking after I/O
t0 = time.time()
# Filter data
print("Filtering samples")
vcf_df = gwasfilter.filter_samples(vcf_df)
print(vcf_df.head())
print("Filtering variants")
vcf_df = gwasfilter.filter_variants(vcf_df)
print(vcf_df.head())
# Generate phenotypes dataframe
phenotypes_df, features = dp.create_phenotype_df(vcf_df, ann_df, ['CaffeineConsumption','isFemale','PurpleHair'], "call_GT",
vcf_sample_col="sample", ann_sample_col="Sample")
# Run PCA on phenotype dataframe
phenotypes_df = algos.PCA_concat(phenotypes_df, 3)
print(phenotypes_df)
# Fit linear regression model for each variant feature
print("Fitting linear regression model")
p_value_df = runner.run_gwas(phenotypes_df, 'CaffeineConsumption', features, algos.cuml_LinearReg, add_cols=['PC0'])
print(p_value_df)
# Pass the save_to='manhattan.svg' argument to save the plot. This requires firefox and geckodriver installed:
# conda install -c conda-forge firefox geckodriver
manhattan_plot = show_manhattan_plot(
p_value_df,
'chrom',
'p_value', 'feature',
title='GWAS Manhattan Plot')
print('Time Elapsed: {}'.format(time.time()- t0))
|
import abc
import sys
from time import sleep
from typing import Optional
import click
from afancontrol.arduino import (
DEFAULT_BAUDRATE,
ArduinoConnection,
ArduinoName,
ArduinoPin,
ArduinoPWMFan,
)
from afancontrol.pwmfan import (
BasePWMFan,
FanInputDevice,
FanValue,
LinuxPWMFan,
PWMDevice,
PWMValue,
)
# Time to wait before measuring fan speed after setting a PWM value.
STEP_INTERVAL_SECONDS = 2
# Time to wait before starting the test right after resetting the fan
# (i.e. setting it to full speed).
FAN_RESET_INTERVAL_SECONDS = 7
EXIT_CODE_CTRL_C = 130 # https://stackoverflow.com/a/1101969
HELP_FAN_TYPE = (
"Linux -- a standard PWM fan connected to a motherboard; "
"Arduino -- a PWM fan connected to an Arduino board."
)
HELP_LINUX_PWM_FILE = (
"PWM file for a Linux PWM fan, e.g. `/sys/class/hwmon/hwmon0/device/pwm2`."
)
HELP_LINUX_FAN_INPUT_FILE = (
"Fan input (tachometer) file for a Linux PWM fan, "
"e.g. `/sys/class/hwmon/hwmon0/device/fan2_input`."
)
HELP_ARDUINO_SERIAL_URL = "URL for the Arduino's Serial port"
HELP_ARDUINO_BAUDRATE = "Arduino Serial connection baudrate"
HELP_ARDUINO_PWM_PIN = (
"Arduino Board pin where the target fan's PWM wire is connected to."
)
HELP_ARDUINO_TACHO_PIN = (
"Arduino Board pin where the target fan's tachometer wire is connected to."
)
HELP_OUTPUT_FORMAT = (
"Output format for the measurements. `csv` data could be used "
"to make a plot using a spreadsheet program like MS Excel."
)
HELP_TEST_DIRECTION = (
"The default test is to stop the fan and then gracefully increase its speed. "
"You might want to reverse it, i.e. run the fan at full speed and then start "
"decreasing the speed. This would allow you to test the fan without fully "
"stopping it, if you abort the test with Ctrl+C when the speed becomes too low."
)
HELP_PWM_STEP_SIZE = (
"A single step size for the PWM value. `accurate` equals to 5 and provides "
"more accurate results, but is a slower option. `fast` equals to 25 and completes "
"faster."
)
@click.command()
@click.option(
"--fan-type",
help="FAN type. %s" % HELP_FAN_TYPE,
default="linux",
type=click.Choice(["linux", "arduino"]),
prompt="\n%s\nFAN type (linux, arduino)" % HELP_FAN_TYPE,
# `show_choices` is supported since click 7.0
show_default=True,
)
@click.option(
"--linux-fan-pwm",
help=HELP_LINUX_PWM_FILE,
type=click.Path(exists=True, dir_okay=False),
)
@click.option(
"--linux-fan-input",
help=HELP_LINUX_FAN_INPUT_FILE,
type=click.Path(exists=True, dir_okay=False),
)
@click.option("--arduino-serial-url", help=HELP_ARDUINO_SERIAL_URL, type=str)
@click.option(
"--arduino-baudrate",
help=HELP_ARDUINO_BAUDRATE,
type=int,
default=DEFAULT_BAUDRATE,
show_default=True,
)
@click.option("--arduino-pwm-pin", help=HELP_ARDUINO_PWM_PIN, type=int)
@click.option("--arduino-tacho-pin", help=HELP_ARDUINO_TACHO_PIN, type=int)
@click.option(
"-f",
"--output-format",
help=HELP_OUTPUT_FORMAT,
default="human",
type=click.Choice(["human", "csv"]),
prompt="\n%s\nOutput format (human, csv)" % HELP_OUTPUT_FORMAT,
show_default=True,
)
@click.option(
"-d",
"--direction",
help=HELP_TEST_DIRECTION,
default="increase",
type=click.Choice(["increase", "decrease"]),
prompt="\n%s\nTest direction (increase decrease)" % HELP_TEST_DIRECTION,
show_default=True,
)
@click.option(
"-s",
"--pwm-step-size",
help=HELP_PWM_STEP_SIZE,
default="accurate",
type=click.Choice(["accurate", "fast"]),
prompt="\n%s\nPWM step size (accurate fast)" % HELP_PWM_STEP_SIZE,
show_default=True,
)
def fantest(
*,
fan_type: str,
linux_fan_pwm: Optional[str],
linux_fan_input: Optional[str],
arduino_serial_url: Optional[str],
arduino_baudrate: int,
arduino_pwm_pin: Optional[int],
arduino_tacho_pin: Optional[int],
output_format: str,
direction: str,
pwm_step_size: str
) -> None:
"""The PWM fan testing program.
This program tests how changing the PWM value of a fan affects its speed.
In the beginning the fan would be stopped (by setting it to a minimum PWM value),
and then the PWM value would be increased in small steps, while also
measuring the speed as reported by the fan.
This data would help you to find the effective range of values
for the `pwm_line_start` and `pwm_line_end` settings where the correlation
    between PWM and fan speed is close to linear. Usually it's
    `pwm_line_start = 100` and `pwm_line_end = 240`, but it is individual
    for each fan. The allowed range for a PWM value is from 0 to 255.
    Note that the fan will be stopped for some time during the test. If you
    feel nervous, press Ctrl+C to stop the test and return the fan to full speed.
Before starting the test ensure that no fan control software is currently
controlling the fan you're going to test.
"""
try:
if fan_type == "linux":
if not linux_fan_pwm:
linux_fan_pwm = click.prompt(
"\n%s\nPWM file" % HELP_LINUX_PWM_FILE,
type=click.Path(exists=True, dir_okay=False),
)
if not linux_fan_input:
linux_fan_input = click.prompt(
"\n%s\nFan input file" % HELP_LINUX_FAN_INPUT_FILE,
type=click.Path(exists=True, dir_okay=False),
)
assert linux_fan_pwm is not None
assert linux_fan_input is not None
fan = LinuxPWMFan(
pwm=PWMDevice(linux_fan_pwm), fan_input=FanInputDevice(linux_fan_input)
) # type: BasePWMFan
elif fan_type == "arduino":
if not arduino_serial_url:
arduino_serial_url = click.prompt(
"\n%s\nArduino Serial url" % HELP_ARDUINO_SERIAL_URL, type=str
)
# typeshed currently specifies `Optional[str]` for `default`,
# see https://github.com/python/typeshed/blob/5acc22d82aa01005ea47ef64f31cad7e16e78450/third_party/2and3/click/termui.pyi#L34 # noqa
# however the click docs say that `default` can be of any type,
# see https://click.palletsprojects.com/en/7.x/prompts/#input-prompts
# Hence the `type: ignore`.
arduino_baudrate = click.prompt( # type: ignore
"\n%s\nBaudrate" % HELP_ARDUINO_BAUDRATE,
type=int,
default=str(arduino_baudrate),
show_default=True,
)
if not arduino_pwm_pin and arduino_pwm_pin != 0:
arduino_pwm_pin = click.prompt(
"\n%s\nArduino PWM pin" % HELP_ARDUINO_PWM_PIN, type=int
)
if not arduino_tacho_pin and arduino_tacho_pin != 0:
arduino_tacho_pin = click.prompt(
"\n%s\nArduino Tachometer pin" % HELP_ARDUINO_TACHO_PIN, type=int
)
assert arduino_serial_url is not None
arduino_connection = ArduinoConnection(
name=ArduinoName("_fantest"),
serial_url=arduino_serial_url,
baudrate=arduino_baudrate,
)
assert arduino_pwm_pin is not None
assert arduino_tacho_pin is not None
fan = ArduinoPWMFan(
arduino_connection,
pwm_pin=ArduinoPin(arduino_pwm_pin),
tacho_pin=ArduinoPin(arduino_tacho_pin),
)
else:
raise AssertionError(
"unreachable if the `fan_type`'s allowed `values` are in sync"
)
output = {"human": HumanMeasurementsOutput(), "csv": CSVMeasurementsOutput()}[
output_format
]
pwm_step_size_value = {"accurate": PWMValue(5), "fast": PWMValue(25)}[
pwm_step_size
]
if direction == "decrease":
pwm_step_size_value = PWMValue(
pwm_step_size_value * -1 # a bad PWM value, to be honest
)
except KeyboardInterrupt:
click.echo("")
sys.exit(EXIT_CODE_CTRL_C)
try:
run_fantest(fan=fan, pwm_step_size=pwm_step_size_value, output=output)
except KeyboardInterrupt:
click.echo("Fan has been returned to full speed")
sys.exit(EXIT_CODE_CTRL_C)
def run_fantest(
fan: BasePWMFan, pwm_step_size: PWMValue, output: "MeasurementsOutput"
) -> None:
with fan:
start = fan.min_pwm
stop = fan.max_pwm
if pwm_step_size > 0:
print("Testing increase with step %s" % pwm_step_size)
print("Waiting %s seconds for fan to stop..." % FAN_RESET_INTERVAL_SECONDS)
else:
start, stop = stop, start
print("Testing decrease with step %s" % pwm_step_size)
print(
"Waiting %s seconds for fan to run in full speed..."
% FAN_RESET_INTERVAL_SECONDS
)
fan.set(start)
sleep(FAN_RESET_INTERVAL_SECONDS)
print(output.header())
prev_rpm = None
for pwm_value in range(start, stop, pwm_step_size):
fan.set(PWMValue(pwm_value))
sleep(STEP_INTERVAL_SECONDS)
rpm = fan.get_speed()
rpm_delta = None # Optional[FanValue]
if prev_rpm is not None:
rpm_delta = rpm - prev_rpm
prev_rpm = rpm
print(
output.data_row(pwm=PWMValue(pwm_value), rpm=rpm, rpm_delta=rpm_delta)
)
print("Test is complete, returning fan to full speed")
class MeasurementsOutput(abc.ABC):
@abc.abstractmethod
def header(self) -> str:
pass
@abc.abstractmethod
def data_row(
self, pwm: PWMValue, rpm: FanValue, rpm_delta: Optional[FanValue]
) -> str:
pass
class HumanMeasurementsOutput(MeasurementsOutput):
def header(self) -> str:
return """PWM -- PWM value;
RPM -- fan speed (as reported by the fan);
DELTA -- RPM increase since the last step."""
def data_row(
self, pwm: PWMValue, rpm: FanValue, rpm_delta: Optional[FanValue]
) -> str:
return "PWM %s RPM %s DELTA %s" % (
str(pwm).rjust(3),
str(rpm).rjust(4),
str(rpm_delta if rpm_delta is not None else "n/a").rjust(4),
)
class CSVMeasurementsOutput(MeasurementsOutput):
def header(self) -> str:
return "pwm;rpm;rpm_delta"
def data_row(
self, pwm: PWMValue, rpm: FanValue, rpm_delta: Optional[FanValue]
) -> str:
return "%s;%s;%s" % (pwm, rpm, rpm_delta if rpm_delta is not None else "")
|
def dp(left, right):
if not cache[left][right] is None:
return cache[left][right]
if left == right:
cache[left][right] = board[left]
return cache[left][right]
elif (right - left) == 1:
if board[left] > board[right]:
diff = board[left] - board[right]
else:
diff = board[right] - board[left]
cache[left][right] = diff
return cache[left][right]
else:
# 4 cases
ret = EMPTY
# Delete left 2
ret = max(ret, -dp(left+2, right))
# Delete right 2
ret = max(ret, -dp(left, right-2))
# Gain left 1
ret = max(ret, board[left] - dp(left+1, right))
# Gain right 1
ret = max(ret, board[right] - dp(left, right-1))
cache[left][right] = ret
return cache[left][right]
def solve():
return dp(0, num_num-1)
if __name__=='__main__':
EMPTY = -987654321
test_case = int(input())
num_list = []
for _ in range(test_case):
num_num = int(input())
board = [int(x) for x in input().split()]
cache = [[None for _ in range(num_num)] for _ in range(num_num)]
print(solve())
|
"""
Constants module.
This module contains any values that are widely used across the framework,
utilities, or tests that will predominantly remain unchanged.
In the event values here have to be changed it should be under careful review
and with consideration of the entire project.
"""
import os
# Directories
TOP_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(TOP_DIR, "templates")
TEMPLATE_DEPLOYMENT_DIR = os.path.join(TEMPLATE_DIR, "ocs-deployment")
TEMPLATE_CSI_DIR = os.path.join(TEMPLATE_DIR, "CSI")
TEMPLATE_CSI_RBD_DIR = os.path.join(TEMPLATE_CSI_DIR, "rbd")
TEMPLATE_CSI_FS_DIR = os.path.join(TEMPLATE_CSI_DIR, "cephfs")
TEMPLATE_PV_PVC_DIR = os.path.join(TEMPLATE_DIR, "pv_pvc")
TEMPLATE_APP_POD_DIR = os.path.join(TEMPLATE_DIR, "app-pods")
# Statuses
STATUS_PENDING = 'Pending'
STATUS_AVAILABLE = 'Available'
STATUS_RUNNING = 'Running'
STATUS_TERMINATING = 'Terminating'
STATUS_BOUND = 'Bound'
# Resources / Kinds
CEPHFILESYSTEM = "CephFileSystem"
CEPHBLOCKPOOL = "CephBlockPool"
STORAGECLASS = "StorageClass"
PV = "PersistentVolume"
PVC = "PersistentVolumeClaim"
POD = "Pod"
# Other
SECRET = "Secret"
NAMESPACE = 'Namespace'
IGNORE_SC = "gp2"
# encoded value of 'admin'
ADMIN_BASE64 = 'YWRtaW4='
GB = 1024 ** 3
|
# -*- coding: utf-8 -*-
from ..campos import Campo, CampoData, CampoFixo, CampoNumerico
from ..registros import Registro
class Registro0000(Registro):
"""
    OPENING OF THE DIGITAL FILE AND IDENTIFICATION OF THE INDIVIDUAL TAXPAYER
"""
campos = [
CampoFixo(1, 'REG', '0000'),
Campo(2, 'NOME_ESC', 'LCDPR'),
Campo(3, 'COD_VER'),
Campo(4, 'CPF'),
Campo(5, 'NOME'),
Campo(6, 'IND_SIT_INI_PER'),
Campo(7, 'SIT_ESPECIAL'),
CampoData(8, 'DT_SIT_ESP'),
CampoData(9, 'DT_INI'),
CampoData(10, 'DT_FIN'),
]
class Registro0010(Registro):
"""
    TAXATION PARAMETERS
"""
campos = [
CampoFixo(1, 'REG', '0010'),
CampoNumerico(2, 'FORMA_APUR', precisao=0),
]
class Registro0030(Registro):
"""
    TAXPAYER REGISTRATION DATA
"""
campos = [
CampoFixo(1, 'REG', '0030'),
Campo(2, 'ENDERECO'),
Campo(3, 'NUM'),
Campo(4, 'COMPL'),
Campo(5, 'BAIRRO'),
Campo(6, 'UF'),
Campo(7, 'COD_MUN'),
Campo(8, 'CEP'),
Campo(9, 'NUM_TEL'),
Campo(10, 'EMAIL'),
]
class Registro0040(Registro):
"""
    RURAL PROPERTY REGISTRATION DATA
"""
campos = [
CampoFixo(1, 'REG', '0040'),
Campo(2, 'COD_IMOVEL'),
Campo(3, 'PAIS'),
Campo(4, 'MOEDA'),
Campo(5, 'CAD_ITR'),
Campo(6, 'CAEPF'),
Campo(7, 'INSCR_ESTADUAL'),
Campo(8, 'NOME_IMOVEL'),
Campo(9, 'ENDERECO'),
Campo(10, 'NUM'),
Campo(11, 'COMPL'),
Campo(12, 'BAIRRO'),
Campo(13, 'UF'),
Campo(14, 'COD_MUN'),
Campo(15, 'CEP'),
Campo(16, 'TIPO_EXPLORACAO'),
Campo(17, 'PARTICIPACAO'),
]
class Registro0045(Registro):
"""
    THIRD-PARTY REGISTRATION
"""
campos = [
CampoFixo(1, 'REG', '0045'),
Campo(2, 'COD_IMOVEL'),
CampoNumerico(3, 'TIPO_CONTRAPARTE', precisao=0),
Campo(4, 'ID_CONTRAPARTE'),
Campo(5, 'NOME_CONTRAPARTE'),
Campo(6, 'PERC_CONTRAPARTE'),
]
class Registro0050(Registro):
"""
    REGISTRATION OF THE RURAL PRODUCER'S BANK ACCOUNTS
"""
campos = [
CampoFixo(1, 'REG', '0050'),
Campo(2, 'COD_CONTA'),
Campo(3, 'PAIS_CTA'),
Campo(4, 'BANCO'),
Campo(5, 'NOME_BANCO'),
Campo(6, 'AGENCIA'),
Campo(7, 'NUM_CONTA'),
]
class RegistroQ100(Registro):
"""
    STATEMENT OF THE RESULT OF THE RURAL ACTIVITY
"""
campos = [
CampoFixo(1, 'REG', 'Q100'),
CampoData(2, 'DATA'),
Campo(3, 'COD_IMOVEL'),
Campo(4, 'COD_CONTA'),
Campo(5, 'NUM_DOC'),
Campo(6, 'TIPO_DOC'),
Campo(7, 'HIST'),
Campo(8, 'ID_PARTIC'),
Campo(9, 'TIPO_LANC'),
Campo(10, 'VL_ENTRADA'),
Campo(11, 'VL_SAIDA'),
Campo(12, 'SLD_FIN'),
Campo(13, 'NAT_SLD_FIN'),
]
class RegistroQ200(Registro):
"""
    MONTHLY SUMMARY OF THE STATEMENT OF THE RESULT OF THE RURAL ACTIVITY
"""
campos = [
CampoFixo(1, 'REG', 'Q200'),
Campo(2, 'MES'),
Campo(3, 'VL_ENTRADA'),
Campo(4, 'VL_SAIDA'),
Campo(5, 'SLD_FIN'),
Campo(6, 'NAT_SLD_FIN'),
]
class Registro9999(Registro):
"""
    IDENTIFICATION OF THE ACCOUNTANT AND CLOSING OF THE DIGITAL FILE
"""
campos = [
CampoFixo(1, 'REG', '9999'),
Campo(2, 'IDENT_NOM'),
Campo(3, 'IDENT_CPF_CNPJ'),
Campo(4, 'IND_CRC'),
Campo(5, 'EMAIL'),
Campo(6, 'FONE'),
CampoNumerico(7, 'QTD_LIN', precisao=0),
]
|
def remove_duplicates(from_list):
"""
The function list() will convert an item to a list.
The function set() will convert an item to a set.
A set is similar to a list, but all values must be unique.
Converting a list to a set removes all duplicate values.
We then convert it back to a list since we're most comfortable working with lists.
"""
converted_from_list = list(set(from_list))
return converted_from_list
my_list = ['AL,Alabama','AL,Alabama','AL,Alabama', 'AK,Alaska','AK,Alaska', 'AZ,Arizona', 'AR,Arkansas', 'CA,California', 'CA,California', 'CA,California', 'CO,Colorado', 'CT,Connecticut']
print "my_list before de-duping:"
print my_list
print '\n\n'
deduped_mylist = remove_duplicates(my_list)
print "my_list after de-duping:"
print deduped_mylist
def greeting(the_name):
'Just some help text, to help you out.'
the_greeting = 'Hello, {0}!'.format(the_name)
return the_greeting
#your_name = raw_input('Enter your name: ').strip()
#your_name = your_name.strip()
#print greeting(your_name)
#print greeting(raw_input('Enter your name: ').strip())
|
import hashlib
import time
import pymongo
from rest_framework.response import Response
from rest_framework.views import APIView
from RestapiManage.restapi.models import Project
from RestapiManage.restapi.serializer import ManyProject
class ProjectView(APIView):
server = '39.99.214.102'
mongo_password = 'Aa12345.'
mongo = f'mongodb://root:{mongo_password}@{server}:27017/'
conn = pymongo.MongoClient(mongo)
db = conn['restapi']
def get(self, request):
if request.u.username == 'ahriknow':
projects = Project.objects.all()
else:
projects = Project.objects.filter(user=request.u)
data = ManyProject(instance=projects, many=True).data
return Response({'code': 200, 'msg': 'Query was successful!', 'data': data})
def post(self, request):
try:
m = hashlib.md5()
m.update(str(time.time()).encode('utf-8'))
auth = m.hexdigest()
data = request.data
project = Project(name=data.get('name'), describe=data.get('describe'), auth=auth, user=request.u)
project.save()
return Response({'code': 200, 'msg': 'Opera Successfully!', 'data': None})
except Exception as ex:
return Response({'code': 500, 'msg': str(ex), 'data': None})
def put(self, request):
return Response({'code': 400, 'msg': 'Data does not exist!', 'data': None})
def delete(self, request, id=None):
if project := Project.objects.filter(pk=id).first():
self.db['url'].delete_many({'auth': project.auth})
project.delete()
return Response({'code': 200, 'msg': 'Delete successful!'})
return Response({'code': 400, 'msg': 'Data does not exist!', 'data': None})
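# A minimal routing sketch (added, hedged): the module path and URL names below are
# assumptions for illustration, not taken from the original project's urls.py.
#
# from django.urls import path
# from RestapiManage.restapi.views import ProjectView
#
# urlpatterns = [
#     path('project/', ProjectView.as_view()),
#     path('project/<int:id>/', ProjectView.as_view()),
# ]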
|
""" Main application and routing logic for TWoff """
from flask import Flask, request, render_template
from .models import DB, User, Tweet
from decouple import config
from .functions import adduser, add_or_update_user
from .predicted import predict_user
def create_app():
""" create + config Flask app obj """
app = Flask(__name__)
    # after creating models.py, run the following
# configure the app object
app.config['SQLALCHEMY_DATABASE_URI'] = config('DATABASE_URL') # get db loc from .env
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
DB.init_app(app)
@app.route('/')
def root():
users = User.query.all()
tweets = Tweet.query.all()
return render_template('base.html', title='Home', users=users, tweets=tweets )
@app.route('/testload')
def testload():
adduser('NBCNews')
users = User.query.all()
tweets = Tweet.query.all()
return render_template('base.html', title='Home', users=users, tweets=tweets )
@app.route('/user', methods=['POST'])
@app.route('/user/<name>', methods=['GET'])
def user(name=None):
message = ''
name = name or request.values['user_name']
try:
if request.method == 'POST':
add_or_update_user(name)
message = 'User {} successfully added!'.format(name)
tweets = User.query.filter(User.name == name).one().tweets
except Exception as e:
message = 'Error adding {}: {}'.format(name, e)
tweets = []
return render_template('user.html', title=name, tweets=tweets,
message=message)
@app.route('/compare', methods=['POST'])
def compare():
user1, user2 = request.values['user1'], request.values['user2']
if user1 == user2:
return 'Cannot compare a user to themselves!'
else:
prediction = predict_user(user1, user2,
request.values['tweet_text'])
return user1 if prediction else user2
@app.route('/reload')
def reload():
DB.drop_all()
DB.create_all()
return render_template('base.html', title='DB has been RESET', users=[], tweets=[])
return app
# to run from terminal : cd to TWEETCOMPARE directory
# set FLASK_APP=TWoff:APP
# + flask run OR flask shell
|
# -*- coding: utf-8 -*-
"""Functional tests using WebTest.
See: http://webtest.readthedocs.org/
"""
from flask import current_app
from flask import url_for
import time
import pytest
from lti import ToolConsumer
from hxlti.consumer.models import Consumer
from hxlti.user.models import User
from .factories import ConsumerFactory
from .factories import UserFactory
class TestLoggingIn:
"""Login."""
def test_can_log_in_returns_200(self, user, testapp):
"""Login successful."""
# Goes to homepage
res = testapp.get('/')
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
assert res.status_code == 200
def test_sees_alert_on_log_out(self, user, testapp):
"""Show alert on logout."""
res = testapp.get('/')
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
res = testapp.get(url_for('public.logout')).follow()
# sees alert
assert 'You are logged out.' in res
def test_sees_error_message_if_password_is_incorrect(self, user, testapp):
"""Show error if password is incorrect."""
# Goes to homepage
res = testapp.get('/')
# Fills out login form, password incorrect
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'wrong'
# Submits
res = form.submit()
# sees error
assert 'Invalid password' in res
def test_sees_error_message_if_username_doesnt_exist(self, user, testapp):
"""Show error if username doesn't exist."""
# Goes to homepage
res = testapp.get('/')
# Fills out login form, password incorrect
form = res.forms['loginForm']
form['username'] = 'unknown'
form['password'] = 'myprecious'
# Submits
res = form.submit()
# sees error
assert 'Unknown user' in res
@pytest.mark.skip('not registering for now')
class TestRegistering:
"""Register a user."""
def test_can_register(self, user, testapp):
"""Register a new user."""
old_count = len(User.query.all())
# Goes to homepage
res = testapp.get('/')
# Clicks Create Account button
res = res.click('Create account')
# Fills out the form
form = res.forms['registerForm']
form['username'] = 'foobar'
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secret'
# Submits
res = form.submit().follow()
assert res.status_code == 200
# A new user was created
assert len(User.query.all()) == old_count + 1
def test_sees_error_message_if_passwords_dont_match(self, user, testapp):
"""Show error if passwords don't match."""
# Goes to registration page
res = testapp.get(url_for('public.register'))
# Fills out form, but passwords don't match
form = res.forms['registerForm']
form['username'] = 'foobar'
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secrets'
# Submits
res = form.submit()
# sees error message
assert 'Passwords must match' in res
def test_sees_error_message_if_user_already_registered(self, user, testapp):
"""Show error if user already registered."""
user = UserFactory(active=True) # A registered user
user.save()
# Goes to registration page
res = testapp.get(url_for('public.register'))
# Fills out form, but username is already registered
form = res.forms['registerForm']
form['username'] = user.username
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secret'
# Submits
res = form.submit()
# sees error
assert 'Username already registered' in res
@pytest.mark.usefixtures('db')
class TestLtiLaunch:
"""lti launch: auth and login."""
def test_can_login(self, testapp, app, db):
app.config['SERVER_NAME'] = 'localhost'
#app.config['SERVER_PORT'] = 5000
app.config['PREFERRED_URL_SCHEME'] = 'http'
consumer = ConsumerFactory()
consumer.save()
params = {
'lti_message_type': 'basic-lti-launch-request',
'lti_version': 'LTI-1p0',
'resource_link_id' : 'same.site.org-9fcae62972a240e488ca1de83dc4a6d9',
}
tool_consumer = ToolConsumer(
consumer_key=consumer.client_key,
consumer_secret=consumer.secret_key,
launch_url=url_for('lti_launch.lti_launch', _external=True),
params=params
)
lti_params = tool_consumer.generate_launch_data()
res = testapp.post(url_for('lti_launch.lti_launch', _external=True), lti_params)
assert res.status_code == 200
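# Hedged sketch (an addition, not from the original tests): the ToolConsumer call
# from test_can_login pulled into a standalone helper, to show how an LTI 1.0
# launch payload is OAuth-signed before being POSTed to the tool provider. The
# resource_link_id value here is a placeholder, not taken from the original file.
def _example_signed_launch_params(client_key, secret_key, launch_url):
    """Return OAuth-signed LTI launch parameters for the given consumer."""
    tool_consumer = ToolConsumer(
        consumer_key=client_key,
        consumer_secret=secret_key,
        launch_url=launch_url,
        params={
            'lti_message_type': 'basic-lti-launch-request',
            'lti_version': 'LTI-1p0',
            'resource_link_id': 'example.site.org-resource-link-id',
        },
    )
    return tool_consumer.generate_launch_data()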
|
from .settings import *
import os
import json
DEBUG = True
# sqlite3
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# postgresql
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('DB_NAME', None),
'USER': os.environ.get('DB_USER', None),
'PASSWORD': os.environ.get('DB_PASSWORD', None),
'HOST': 'localhost',
},
}
# Mail secret keys: on Windows these were previously read from a JSON file
# (a cross-platform variant is sketched below, after the email settings):
# try:
#     with open(BASE_DIR + "\secret.json", "r") as f:
#         secrets = json.loads(f.read())
#         email_host_user = secrets["EMAIL_HOST_USER"]
#         email_host_password = secrets["EMAIL_HOST_PASSWORD"]
# except:  # On Linux/macOS, read the mail secret keys from environment variables instead
email_host_user = os.environ.get("EMAIL_HOST_USER", "")
email_host_password = os.environ.get("EMAIL_USER_PASSWORD", "")
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = "smtp.gmail.com"
EMAIL_HOST_USER = email_host_user
EMAIL_HOST_PASSWORD = email_host_password
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = email_host_user
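# Hedged sketch (an addition, not from the original settings): a cross-platform
# take on the commented-out JSON loader above, using os.path.join instead of a
# hard-coded backslash. It assumes BASE_DIR comes from the base settings module
# and that secret.json uses the same keys as in the comment above; it is left
# commented out so the environment-variable configuration stays authoritative.
# secret_path = os.path.join(BASE_DIR, "secret.json")
# if os.path.exists(secret_path):
#     with open(secret_path, "r") as f:
#         secrets = json.loads(f.read())
#     EMAIL_HOST_USER = secrets.get("EMAIL_HOST_USER", EMAIL_HOST_USER)
#     EMAIL_HOST_PASSWORD = secrets.get("EMAIL_HOST_PASSWORD", EMAIL_HOST_PASSWORD)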
CORS_ORIGIN_WHITELIST = (
'localhost:8000',
'127.0.0.1:8000',
'localhost:3000',
'www.rasbp.site',
'localhost:8080',
)
|