#
# Copyright (C) 2020 IBM. All Rights Reserved.
#
# See LICENSE.txt file in the root directory
# of this source tree for licensing information.
#
import os
import unittest
from clai.server.command_message import State
from clai.server.plugins.howdoi.howdoi import HowDoIAgent
OS_NAME: str = os.uname().sysname.upper()
@unittest.skip("Only for local testing")
class SearchAgentTest(unittest.TestCase):
@classmethod
    def setUpClass(cls):
_agent = HowDoIAgent()
cls.agent = _agent
def print_and_verify(self, question, answer):
state = State(user_name='tester', command_id='0', command=question)
action = self.agent.get_next_action(state=state)
print(f"Input: {state.command}")
print("===========================")
print(f"Response: {action.suggested_command}")
print("===========================")
print(f"Explanation: {action.description}")
self.assertEqual(answer, action.suggested_command)
@unittest.skip("Only for local testing")
def test_get_next_action_pwd_without_question(self):
self.agent.init_agent()
if OS_NAME in ("OS/390", "Z/OS"):
self.print_and_verify("pds", "pds")
else:
self.print_and_verify("pds", None)
@unittest.skip("Only for local testing")
def test_get_next_action_pwd_with_question(self):
self.agent.init_agent()
if OS_NAME in ("OS/390", "Z/OS"):
self.print_and_verify("What is a pds?", "man readlink")
else:
self.print_and_verify("What is pwd?", "man pwd")
@unittest.skip("Only for local testing")
def test_get_next_action_sudo(self):
self.agent.init_agent()
self.print_and_verify("when to use sudo vs su?", "man su")
@unittest.skip("Only for local testing")
def test_get_next_action_disk(self):
self.agent.init_agent()
question: str = "find out disk usage per user?"
if OS_NAME in ("OS/390", "Z/OS"):
self.print_and_verify(question, "man du")
else:
self.print_and_verify(question, "man df")
@unittest.skip("Only for local testing")
def test_get_next_action_zip(self):
self.agent.init_agent()
question: str = "How to process gz files?"
if OS_NAME in ("OS/390", "Z/OS"):
self.print_and_verify(question, "man dnctl")
else:
self.print_and_verify(question, "man gzip")
@unittest.skip("Only for local testing")
def test_get_next_action_pds(self):
self.agent.init_agent()
question: str = "copy a PDS member?"
if OS_NAME in ("OS/390", "Z/OS"):
self.print_and_verify(question, "man tcsh")
else:
self.print_and_verify(question, "man cmp")
|
from app.handlers.auth import blueprint as auth_blueprint
from app.handlers.commands import blueprint as commands_blueprint
from app.handlers.errors import (
not_found,
server_error,
)
from app.handlers.home import blueprint as home_blueprint
from app.handlers.run import blueprint as run_blueprint
from app.handlers.setup import blueprint as setup_blueprint
from app.secrets import APP_SECRET_KEY
from app.utils import (
Flask,
before_request,
request,
timedelta,
)
app = Flask(__name__)
app.config.update(
PERMANENT_SESSION_LIFETIME=timedelta(days=30 * 365),
    SECRET_KEY=APP_SECRET_KEY,
)
app.before_request(before_request)
app.register_blueprint(home_blueprint)
app.register_blueprint(commands_blueprint)
app.register_blueprint(auth_blueprint)
app.register_blueprint(setup_blueprint)
app.register_blueprint(run_blueprint)
app.register_error_handler(404, not_found)
app.register_error_handler(Exception, server_error)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 18 16:00:46 2021
@author: romainloirs
"""
def find_road(current_cluster,pt_depart,matrice_adj_elag,pt_depart_current,pt_arrive_current,aux_time_windows):
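    # Sequence the cluster's stops; whenever the solver reports "Infeasible",
    # relax constraints step by step: first the fixed start/end points, then
    # the time windows, then both together.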
from RO_main import sequence_zone_id
from RO_main import sequence_zone_id_2
from RO_main import sequence_zone_id_3
from time_windows import get_time_windows_stop
sous_sequence,res_RO_2, res_RO_3= sequence_zone_id_3(current_cluster,matrice_adj_elag,pt_depart_current,pt_arrive_current,aux_time_windows)
if pt_depart in current_cluster:
if res_RO_2=="Infeasible":
            # remove the arrival-point constraint
sous_sequence,res_RO_2, res_RO_3= sequence_zone_id(current_cluster,matrice_adj_elag,pt_depart_current,aux_time_windows)
if res_RO_2=="Infeasible":
                # remove the time-window constraints
aux_time_windows=[None]*100
sous_sequence,res_RO_2, res_RO_3= sequence_zone_id_3(current_cluster,matrice_adj_elag,pt_depart_current,pt_arrive_current,aux_time_windows)
if res_RO_2=="Infeasible":
                    # remove both constraints at once
aux_time_windows=[None]*100
sous_sequence,res_RO_2, res_RO_3= sequence_zone_id(current_cluster,matrice_adj_elag,pt_depart_current,aux_time_windows)
else:
if res_RO_2=="Infeasible":
            # remove the departure- and arrival-point constraints
sous_sequence,res_RO_2, res_RO_3= sequence_zone_id_2(current_cluster,matrice_adj_elag,aux_time_windows)
if res_RO_2=="Infeasible":
                # remove the time-window constraints
aux_time_windows=[None]*100
sous_sequence,res_RO_2, res_RO_3= sequence_zone_id_3(current_cluster,matrice_adj_elag,pt_depart_current,pt_arrive_current,aux_time_windows)
if res_RO_2=="Infeasible":
                    # remove both constraints at once
aux_time_windows=[None]*100
sous_sequence,res_RO_2, res_RO_3= sequence_zone_id_2(current_cluster,matrice_adj_elag,aux_time_windows)
return sous_sequence,res_RO_2, res_RO_3
# parameter: the route id
# returns: an ordered sequence of stops
def prediction(road_id,route_data,package_data,stop_data):
    # imports
from clustering import belong_to
from clustering import cluster_conforme
from clustering import time_next_cluster
from clustering import find_pt_arrive_depart
from RO_cluster import ordonne_cluster
from RO_cluster import get_first_zi_to_visit
from RO_cluster import ordonne_cluster_2
from time_windows import get_time_windows_stop
from time_windows import translate_time_windows
from ML import elaguer_coefs_bis
from get_data import get_final_adj_matrix
from get_data import get_road_time_windows
from get_data import get_zone_id
from get_data import get_first_stop
import time
import pickle
    # results of the function
Sequence=[]
time_livraison=0
optimalite=[]
    # imports for the ML model
model_path = "data/model_build_outputs/"
scaler = pickle.load(open(model_path + "scaler.sav", 'rb'))
model = pickle.load(open(model_path + "model.sav", 'rb'))
tmps1=time.time()
    # get the problem data
matrice_adj=get_final_adj_matrix(road_id,package_data,stop_data)
all_time_windows=get_road_time_windows(road_id,route_data,package_data,stop_data)
list_zone_id= get_zone_id(road_id,route_data)
pt_depart=get_first_stop(road_id,route_data)
    # first part: find the ordering of the zone_ids
cluster_depart= belong_to(pt_depart, list_zone_id)
first_zi_to_visit= get_first_zi_to_visit(pt_depart,cluster_depart,matrice_adj,list_zone_id,road_id,route_data,package_data,stop_data)
list_zone_id_ordonne = ordonne_cluster_2(matrice_adj,list_zone_id,cluster_depart,first_zi_to_visit, all_time_windows,15)
#list_zone_id_ordonne=ordonne_cluster(matrice_adj,list_zone_id,cluster_depart, all_time_windows,15)
    # second part: find the stop ordering within each zone_id
list_cluster=cluster_conforme(matrice_adj,list_zone_id_ordonne,pt_depart,15)
pt_depart_current=pt_depart
list_depart=[pt_depart]
res_test= []
for current_cluster in list_cluster:
n=len(current_cluster)
pt_arrive_current,pt_depart_next= find_pt_arrive_depart(pt_depart_current,current_cluster,list_cluster,matrice_adj)
        # if the cluster contains a single stop
if n==1:
Sequence.extend(current_cluster)
        # if the cluster contains more than one stop
else:
ss_adj_matrice=matrice_adj.loc[current_cluster,current_cluster]
matrice_adj_elag,n_elag=elaguer_coefs_bis(road_id,ss_adj_matrice,model,scaler,stop_data,route_data,package_data)
time_windows=get_time_windows_stop(current_cluster,all_time_windows)
aux_time_windows=translate_time_windows(time_windows, time_livraison)
n_coeff= len(current_cluster)*len(current_cluster) - n_elag
tmps3=time.time()
sous_sequence,res_RO_2, res_RO_3= find_road(current_cluster,pt_depart,matrice_adj_elag,pt_depart_current,pt_arrive_current,aux_time_windows)
tmps4=time.time()-tmps3
res_test.append([tmps4,n_coeff])
Sequence.extend(sous_sequence)
optimalite.append(res_RO_2)
time_livraison+=res_RO_3
pt_depart_current=pt_depart_next
list_depart.append(pt_depart_current)
aux= time_next_cluster(current_cluster,list_cluster,matrice_adj)
time_livraison+=aux
tmps2=time.time()-tmps1
is_valid=True
if Sequence[0]!=pt_depart:
is_valid=False
n_stops= sum([len(zone_id) for zone_id in list_zone_id ])
if n_stops!=len(set(Sequence)):
is_valid=False
res={}
res["road_id"]=road_id
res["n_cluster"]=len(list_zone_id)
res["n_stops"]=n_stops
res["pt_depart"]=pt_depart
res["temps de livraison"]=time_livraison
res["temps execution"]=tmps2
res["Sequence"]=Sequence
res["zone_id"]=list_zone_id
res["list_cluster"]=list_cluster
res["test_elagage"]=res_test
return res
|
if __name__ == '__main__':
    try:
        name = input("Enter file:")
        if len(name) < 1:
            name = "mbox-short.txt"
        handle = open(name)
        counts = dict()
        for line in handle:
            if line.startswith('From '):
                mail = line.split()[1]
                counts[mail] = counts.get(mail, 0) + 1
        if counts:
            max_count = max(counts.values())  # compute the maximum once
            for key in counts:
                if counts[key] == max_count:
                    print(key, counts[key])
    except OSError:
        print("Can not open file")
|
'''
================================
extract - Extract UMI from fastq
================================
*Extract UMI barcode from a read and add it to the read name, leaving
any sample barcode in place*
Can deal with paired end reads and UMIs
split across the paired ends. Can also optionally extract cell
barcodes and append these to the read name also. See the section below
for an explanation for how to encode the barcode pattern(s) to
specify the position of the UMI +/- cell barcode.
Usage:
------
For single ended reads, the following reads from stdin and outputs to
stdout::
umi_tools extract --extract-method=string
--bc-pattern=[PATTERN] -L extract.log [OPTIONS]
For paired end reads, the following reads end one from stdin and end
two from FASTQIN and outputs end one to stdout and end two to
FASTQOUT::
umi_tools extract --extract-method=string
--bc-pattern=[PATTERN] --bc-pattern2=[PATTERN]
--read2-in=[FASTQIN] --read2-out=[FASTQOUT] -L extract.log [OPTIONS]
Using regex and filtering against a whitelist of cell barcodes::
umi_tools extract --extract-method=regex
--bc-pattern=[REGEX] --whitelist=[WHITELIST_TSV]
-L extract.log [OPTIONS]
Filtering and correcting cell barcodes
--------------------------------------
umi_tools extract can optionally filter cell barcodes against a user-supplied
whitelist (``--whitelist``). If a whitelist is not available for your data, e.g.
if you have performed droplet-based scRNA-Seq, you can use the
whitelist tool.
Cell barcodes which do not match the whitelist (user-generated or
automatically generated) can also be optionally corrected using the
``--error-correct-cell`` option.
""""""""""""""""""""""""
``--error-correct-cell``
""""""""""""""""""""""""
Error correct cell barcodes to the whitelist (see ``--whitelist``)
"""""""""""""""
``--whitelist``
"""""""""""""""
Whitelist of accepted cell barcodes. The whitelist should be in
the following format (tab-separated)::
AAAAAA AGAAAA
AAAATC
AAACAT
AAACTA AAACTN,GAACTA
AAATAC
AAATCA GAATCA
AAATGT AAAGGT,CAATGT
Where column 1 is the whitelisted cell barcodes and column 2 is
the list (comma-separated) of other cell barcodes which should be
corrected to the barcode in column 1. If the ``--error-correct-cell``
option is not used, this column will be ignored. Any additional columns
in the whitelist input, such as the counts columns from the output of
umi_tools whitelist, will be ignored.
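As a rough illustration (not umi_tools code), a whitelist in this
format could be parsed into a mapping from accepted barcode to its
correctable variants like so::

    corrections = {}
    with open("whitelist.tsv") as inf:
        for line in inf:
            fields = line.strip().split("\t")
            # column 1: accepted barcode; column 2 (optional): comma-separated
            # barcodes to correct to it; any further columns are ignored
            corrections[fields[0]] = (
                fields[1].split(",") if len(fields) > 1 else [])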
"""""""""""""""
``--blacklist``
"""""""""""""""
Blacklist of cell barcodes to discard
""""""""""""""""""""""
``--subset-reads=[N]``
""""""""""""""""""""""
Only parse the first N reads
""""""""""""""""""""""""""""""
``--quality-filter-threshold``
""""""""""""""""""""""""""""""
Remove reads where any UMI base quality score falls below this threshold
"""""""""""""""""""""""""
``--quality-filter-mask``
"""""""""""""""""""""""""
If a UMI base has a quality below this threshold, replace the base with 'N'
""""""""""""""""""""""
``--quality-encoding``
""""""""""""""""""""""
Quality score encoding. Choose from:
- 'phred33' [33-77]
- 'phred64' [64-106]
- 'solexa' [59-106]
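For example, under 'phred33' the ASCII character 'I' encodes a quality
of ord('I') - 33 = 40; 'phred64' would write that same quality as
chr(40 + 64) = 'h'.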
"""""""""""""""""""""
``--reconcile-pairs``
"""""""""""""""""""""
Allow read 2 infile to contain reads not in read 1 infile. This
enables support for upstream protocols where read one contains
cell barcodes, and the read pairs have been filtered and corrected
without regard to the read2s
Experimental options
--------------------
.. note:: These options have not been extensively tested to ensure behaviour is as expected. If you have some suitable input files which we can use for testing, please `contact us <https://github.com/CGATOxford/UMI-tools/issues>`_.
If you have a library preparation method where the UMI may be in
either read, you can use the following options to search for the UMI
in either read::
--either-read --extract-method=regex --bc-pattern=[PATTERN1] --bc-pattern2=[PATTERN2]
Where both patterns match, the default behaviour is to discard both
reads. If you want to select the read with the UMI with highest
sequence quality, provide ``--either-read-resolve=quality``.
'''
from __future__ import absolute_import
import sys
import regex
import collections
import optparse
# python 3 doesn't require izip
try:
# Python 2
from itertools import izip
except ImportError:
# Python 3
izip = zip
import umi_tools.Utilities as U
import umi_tools.Documentation as Documentation
import umi_tools.umi_methods as umi_methods
import umi_tools.extract_methods as extract_methods
import umi_tools.whitelist_methods as whitelist_methods
# add the generic docstring text
__doc__ = __doc__ + Documentation.GENERIC_DOCSTRING_WE
usage = '''
extract - Extract UMI from fastq
Usage:
Single-end:
umi_tools extract [OPTIONS] -p PATTERN [-I IN_FASTQ[.gz]] [-S OUT_FASTQ[.gz]]
Paired end:
umi_tools extract [OPTIONS] -p PATTERN [-I IN_FASTQ[.gz]] [-S OUT_FASTQ[.gz]] --read2-in=IN2_FASTQ[.gz] --read2-out=OUT2_FASTQ[.gz]
note: If -I/-S are omitted, standard in and standard out are used
      for input and output. To generate valid output on
      standard out, please redirect the log with --log=LOGFILE or
      --log2stderr. Input/Output will be (de)compressed if a
      filename provided to -S/-I/--read2-in/--read2-out ends in .gz
'''
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
# setup command line parser
parser = U.OptionParser(version="%prog version: $Id$",
usage=usage,
description=globals()["__doc__"])
group = U.OptionGroup(parser, "extract-specific options")
    # (Experimental option) Retain the UMI in the sequence read
group.add_option("--retain-umi", dest="retain_umi", action="store_true",
help=optparse.SUPPRESS_HELP)
group.add_option("--read2-out", dest="read2_out", type="string",
help="file to output processed paired read to")
group.add_option("--read2-stdout", dest="read2_stdout",
action="store_true",
help="Paired reads, send read2 to stdout, discarding read1")
group.add_option("--quality-filter-threshold",
dest="quality_filter_threshold", type="int",
help=("Remove reads where any UMI base quality score "
"falls below this threshold"))
group.add_option("--quality-filter-mask",
dest="quality_filter_mask", type="int",
help=("If a UMI base has a quality below this threshold, "
"replace the base with 'N'"))
group.add_option("--quality-encoding",
dest="quality_encoding", type="choice",
choices=["phred33", "phred64", "solexa"],
help=("Quality score encoding. Choose from 'phred33'"
"[33-77] 'phred64' [64-106] or 'solexa' [59-106]"))
group.add_option("--filter-cell-barcode",
dest="filter_cell_barcode",
action="store_true",
help=optparse.SUPPRESS_HELP)
group.add_option("--error-correct-cell",
dest="error_correct_cell",
action="store_true",
help=("Correct errors in the cell barcode"))
group.add_option("--whitelist",
dest="whitelist", type="string",
help=("A whitelist of accepted cell barcodes"))
group.add_option("--blacklist",
dest="blacklist", type="string",
help=("A blacklist of rejected cell barcodes"))
group.add_option("--filter-umi",
dest="filter_umi",
action="store_true",
#help="Filter the UMIs"
help=optparse.SUPPRESS_HELP)
group.add_option("--umi-whitelist", dest="umi_whitelist",
type="string", default=None,
#help="A whitelist of accepted UMIs [default=%default]"
help=optparse.SUPPRESS_HELP)
group.add_option("--umi-whitelist-paired", dest="umi_whitelist_paired",
type="string", default=None,
#help="A whitelist of accepted UMIs for read2[default=%default]"
help=optparse.SUPPRESS_HELP)
group.add_option("--correct-umi-threshold", dest="correct_umi_threshold",
type="int", default=0,
#help="Correct errors in UMIs to the whitelist(s) provided"
#"if within threshold [default=%default]"
help=optparse.SUPPRESS_HELP)
group.add_option("--umi-correct-log", dest="umi_correct_log",
type="string", default=None,
#help="File logging UMI error correction",
help=optparse.SUPPRESS_HELP)
group.add_option("--subset-reads", "--reads-subset",
dest="reads_subset", type="int",
help=("Only extract from the first N reads. If N is "
"greater than the number of reads, all reads will "
"be used"))
group.add_option("--reconcile-pairs",
dest="reconcile", action="store_true",
help=("Allow the presences of reads in read2 input that "
"are not present in read1 input. This allows cell "
"barcode filtering of read1s without "
"considering read2s"))
parser.add_option_group(group)
group = U.OptionGroup(parser, "[EXPERIMENTAl] barcode extraction options")
group.add_option("--either-read", dest="either_read", action="store_true",
help="UMI may be on either read (see "
"--either-read-resolve) for options to resolve cases where"
"UMI is on both reads")
group.add_option("--either-read-resolve",
dest="either_read_resolve", type="choice",
choices=["discard", "quality"],
help=("How to resolve instances where both reads "
"contain a UMI but using --either-read."
"Choose from 'discard' or 'quality'"
"(use highest quality). default=dicard"))
parser.add_option_group(group)
parser.set_defaults(extract_method="string",
                        filter_cell_barcode=False,
whitelist=None,
blacklist=None,
error_correct_cell=False,
pattern=None,
pattern2=None,
read2_in=None,
read2_out=False,
read2_stdout=False,
quality_filter_threshold=None,
quality_encoding=None,
reconcile=False,
either_read=False,
either_read_resolve="discard",
ignore_suffix=False)
# add common options (-h/--help, ...) and parse command line
(options, args) = U.Start(parser, argv=argv,
add_extract_options=True,
add_group_dedup_options=False,
add_umi_grouping_options=False,
add_sam_options=False)
if options.filter_cell_barcode:
U.info('Use of --whitelist ensures cell barcodes are filtered. '
'--filter-cell-barcode is no longer required and may be '
'removed in future versions.')
if options.whitelist is not None:
options.filter_cell_barcode = True
if options.retain_umi and not options.extract_method == "regex":
U.error("option --retain-umi only works with --extract-method=regex")
    if (options.filtered_out and not options.extract_method == "regex" and
            options.whitelist is None):
        U.error("Reads will not be filtered unless extract method is "
                "set to regex (--extract-method=regex) or cell "
                "barcodes are filtered (--whitelist)")
if options.quality_filter_threshold or options.quality_filter_mask:
if not options.quality_encoding:
U.error("must provide a quality encoding (--quality-"
"encoding) to filter UMIs by quality (--quality"
"-filter-threshold) or mask low quality bases "
"with (--quality-filter-mask)")
extract_cell, extract_umi = U.validateExtractOptions(options)
if options.either_read:
if extract_cell:
U.error("Option to extract from either read (--either-read) "
"is not currently compatible with cell barcode extraction")
        if not options.extract_method == "regex":
            U.error("Option to extract from either read (--either-read) "
                    "requires --extract-method=regex")
        if not options.pattern or not options.pattern2:
            U.error("Option to extract from either read (--either-read) "
                    "requires --bc-pattern=[PATTERN1] and "
                    "--bc-pattern2=[PATTERN2]")
if options.filter_umi:
if not options.umi_whitelist:
U.error("must provide a UMI whitelist (--umi-whitelist) if using "
"--filter-umi option")
if options.pattern2 and not options.umi_whitelist_paired:
U.error("must provide a UMI whitelist for paired end "
"(--umi-whitelist-paired) if using --filter-umi option"
"with paired end data")
if not extract_umi:
if options.extract_method == "string":
U.error("barcode pattern(s) do not include any umi bases "
"(marked with 'Ns') %s, %s" % (
options.pattern, options.pattern2))
        elif options.extract_method == "regex":
            U.error("barcode regex(es) do not include any umi groups "
                    "(starting with 'umi_') %s, %s" % (
                        options.pattern, options.pattern2))
if options.whitelist:
if not extract_cell:
if options.extract_method == "string":
U.error("barcode pattern(s) do not include any cell bases "
"(marked with 'Cs') %s, %s" % (
options.pattern, options.pattern2))
            elif options.extract_method == "regex":
                U.error("barcode regex(es) do not include any cell groups "
                        "(starting with 'cell_') %s, %s" % (
                            options.pattern, options.pattern2))
read1s = umi_methods.fastqIterate(options.stdin)
# set up read extractor
ReadExtractor = extract_methods.ExtractFilterAndUpdate(
options.extract_method,
options.pattern,
options.pattern2,
options.prime3,
extract_cell,
options.quality_encoding,
options.quality_filter_threshold,
options.quality_filter_mask,
options.filter_umi,
options.filter_cell_barcode,
options.retain_umi,
options.either_read,
options.either_read_resolve)
if options.filter_umi:
umi_whitelist, false_to_true_map = whitelist_methods.getUserDefinedBarcodes(
options.umi_whitelist,
options.umi_whitelist_paired,
deriveErrorCorrection=True,
threshold=options.correct_umi_threshold)
U.info("Length of whitelist: %i" % len(umi_whitelist))
U.info("Length of 'correctable' whitelist: %i" % len(false_to_true_map))
ReadExtractor.umi_whitelist = umi_whitelist
ReadExtractor.umi_false_to_true_map = false_to_true_map
ReadExtractor.umi_whitelist_counts = collections.defaultdict(
lambda: collections.Counter())
if options.whitelist:
cell_whitelist, false_to_true_map = whitelist_methods.getUserDefinedBarcodes(
options.whitelist,
getErrorCorrection=options.error_correct_cell)
ReadExtractor.cell_whitelist = cell_whitelist
ReadExtractor.false_to_true_map = false_to_true_map
if options.blacklist:
blacklist = set()
with U.openFile(options.blacklist, "r") as inf:
for line in inf:
blacklist.add(line.strip().split("\t")[0])
ReadExtractor.cell_blacklist = blacklist
# variables for progress monitor
progCount = 0
displayMax = 100000
U.info("Starting barcode extraction")
if options.filtered_out:
filtered_out = U.openFile(options.filtered_out, "w")
if options.read2_in is None:
for read in read1s:
# incrementing count for monitoring progress
progCount += 1
# Update display in every 100kth iteration
if progCount % displayMax == 0:
U.info("Parsed {} reads".format(progCount))
new_read = ReadExtractor(read)
if options.reads_subset:
if (ReadExtractor.read_counts['Input Reads'] >
options.reads_subset):
break
if not new_read:
if options.filtered_out:
filtered_out.write(str(read) + "\n")
continue
options.stdout.write(str(new_read) + "\n")
else:
if options.filtered_out2:
filtered_out2 = U.openFile(options.filtered_out2, "w")
read2s = umi_methods.fastqIterate(U.openFile(options.read2_in))
if options.read2_out:
read2_out = U.openFile(options.read2_out, "w")
if options.reconcile:
strict = False
else:
strict = True
for read1, read2 in umi_methods.joinedFastqIterate(
read1s, read2s, strict, options.ignore_suffix):
# incrementing count for monitoring progress
progCount += 1
# Update display in every 100kth iteration
if progCount % displayMax == 0:
U.info("Parsed {} reads".format(progCount))
sys.stdout.flush()
reads = ReadExtractor(read1, read2)
if options.reads_subset:
if (ReadExtractor.read_counts['Input Reads'] >
options.reads_subset):
break
if not reads:
if options.filtered_out:
filtered_out.write(str(read1) + "\n")
if options.filtered_out2:
filtered_out2.write(str(read2) + "\n")
continue
else:
new_read1, new_read2 = reads
if options.read2_stdout:
options.stdout.write(str(new_read2) + "\n")
else:
options.stdout.write(str(new_read1) + "\n")
if options.read2_out:
read2_out.write(str(new_read2) + "\n")
if options.read2_out:
read2_out.close()
if options.filtered_out:
filtered_out.close()
if options.filtered_out2:
filtered_out2.close()
for k, v in ReadExtractor.getReadCounts().most_common():
U.info("%s: %s" % (k, v))
if options.umi_correct_log:
with U.openFile(options.umi_correct_log, "w") as outf:
outf.write("umi\tcount_no_errors\tcount_errors\n")
for umi, counts in ReadExtractor.umi_whitelist_counts.items():
outf.write("%s\t%i\t%i\n" % (
umi, counts["no_error"], counts["error"]))
U.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
from math import fabs, log, sqrt
from numpy import zeros
########################################################################
def computeTimeStepPrep(gap, A0, AN):
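    # Build a geometric sequence of step sizes A[i] = A0 * beta**i running
    # from A0 toward AN, and return R, the running (cumulative) sums of
    # those steps across the gap.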
beta = 1 - (A0-AN)/gap
NP = int(log(AN/A0)/log(beta))
A = zeros(NP)
R = zeros(NP)
    A[0] = A0
    running_sum = A0  # renamed from `sum` to avoid shadowing the built-in
    R[0] = running_sum
    for i in range(1, NP):
        A[i] = A[i-1] * beta
        running_sum = running_sum + A[i]
        R[i] = running_sum
return R
########################################################################
def computeTravelDistance(distance, gap, rmin=0.1e-9, rmax=50e-9):
#default: mininum displacement is 0.1 nm
#default: maximum displacement is 50 nm
R = computeTimeStepPrep(gap, rmin, rmax)
NP = R.size
# find the current position in R array
found = False
if distance > R[NP-1]:
Rinit = gap
Rend = R[NP-1]
found = True
elif distance < R[0]:
Rinit = R[0]
Rend = 0.01e-9
found = True
else:
left = 0
right = NP-1
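        # binary search for the bracketing pair R[left] < distance <= R[right]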
        while not found:
mid = int((left+right)*0.5)
if distance > R[mid]:
left = mid
if distance <= R[mid]:
right = mid
if right-left == 1:
Rinit = R[right]
Rend = R[left]
found = True
deltaR = fabs(Rinit-Rend)
return deltaR
########################################################################
def computeTimeStep(dr, vel, acc, rmin=0.1e-9, rmax=50e-9):
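    # Solves dr = vel*dt + 0.5*acc*dt**2 for dt (constant acceleration) via
    # the quadratic formula, returning the smallest positive root; -1 signals
    # that no valid time step exists.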
timeStep = 0
if fabs(acc) > 1e-10:
#if vel<0:
# dr = -dr
discr = vel*vel + 2*acc*dr
if discr > 0:
discr = sqrt(discr)
dt1 = (-vel + discr) / acc
dt2 = (-vel - discr) / acc
if dt1>0 and dt2>0:
timeStep = min(dt1, dt2)
elif dt1 > 0:
timeStep = dt1
elif dt2 > 0:
timeStep = dt2
else:
timeStep = -1
else:
timeStep = fabs(2*vel/acc)
else:
timeStep = -1
return timeStep
########################################################################
|
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for identity_optimizer.py."""
from absl.testing import parameterized
from optimizers_builtin import identity_optimizer
from test_data import requests_bodies
class IdentityOptimizerTest(parameterized.TestCase):
def test_process_does_not_transform_data(self):
original_data = requests_bodies.VALID_SINGLE_PRODUCT
optimizer = identity_optimizer.IdentityOptimizer()
optimized_data, _ = optimizer.process(original_data)
self.assertEqual(original_data, optimized_data)
|
"""
Frequency analysis recap module. For the manual, see the separate modules.
"""
from hidrokit.contrib.taruma import hk172, hk124, hk126, hk127
freq_normal = hk172.freq_normal
freq_lognormal = hk124.freq_lognormal
freq_logpearson3 = hk126.freq_logpearson3
freq_gumbel = hk127.freq_gumbel
normal = hk172
lognormal = hk124
logpearson3 = hk126
gumbel = hk127
|
import json
import sys
import face_recognition
import numpy as np
class KnownFaces:
def __init__(self):
self.known_faces_names = []
self.known_faces_encodings = np.array([])
self.known_faces_encodings.resize((0, 128))
def learn_face(self, name, encoding):
self.known_faces_names.append(name)
self.known_faces_encodings = np.vstack((self.known_faces_encodings, encoding))
def match(self, face_encoding):
# See if the face is a match for the known face(s)
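        # compare_faces returns one boolean per known encoding, based on a
        # Euclidean-distance threshold (face_recognition's default tolerance
        # is 0.6)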
matches = face_recognition.compare_faces(self.known_faces_encodings, face_encoding)
name = 'Unknown'
# If a match was found in known_face_encodings, just use the first one.
if True in matches:
first_match_index = matches.index(True)
name = self.known_faces_names[first_match_index]
return name
def build_known_faces_dict():
known_faces = KnownFaces()
base_path = 'data/office/faces'
faces_map = {
'Pam': base_path + '/Pam.png',
'Dwight': base_path + '/Dwight.png',
'Jim': base_path + '/Jim.png',
'Michael': base_path + '/Michael.png',
'Roy': base_path + '/Roy.png',
'Ryan': base_path + '/Ryan.png',
'Oscar': base_path + '/Oscar.png',
'Angela': base_path + '/Angela.png',
'Phyllis': base_path + '/Phyllis.png',
'Kevin': base_path + '/Kevin.png',
'Jane': base_path + '/Jane.png',
'Creed': base_path + '/Creed.png',
'Stanley': base_path + '/Stanley.png',
'Kelly': base_path + '/Kelly.png',
'Meredith': base_path + '/Meredith.png',
'Toby': base_path + '/Toby.png',
'Darryl': base_path + '/Darryl.png',
'Bob Vance': base_path + '/Bob Vance.png',
'Katy': base_path + '/Katy.png',
}
for name, file_name in faces_map.items():
image = face_recognition.load_image_file(file_name)
encoding = face_recognition.face_encodings(image)[0]
known_faces.learn_face(name, encoding)
return known_faces
def main():
known_faces = build_known_faces_dict()
file_name = sys.argv[1]
with open(file_name) as f:
content = f.readlines()
for line in content:
[frame_number, face_locations, face_encodings] = json.loads(line)
frame_faces = []
for face_location, face_encoding in zip(face_locations, face_encodings):
face_name = known_faces.match(face_encoding)
frame_faces.append(face_name)
print(json.dumps([frame_number, frame_faces]))
if __name__ == '__main__':
    main()
|
from http import HTTPStatus
from django.test import TestCase
from django.test import Client
from users.models import User
class TestUsers(TestCase):
def setUp(self) -> None:
user = User.objects.create(username='testuser')
user.set_password('12345')
user.save()
self.client = Client()
def test_signup(self):
"""
Right signup would redirect to home.
Returns:
"""
response = self.client.post('/signup', {'username': 'testuser1', 'password': '12345', 'confirm_password': '12345'})
self.assertEqual(response.status_code, HTTPStatus.OK)
def test_signin(self):
"""
Right password would redirect to home.
Returns:
"""
        response = self.client.post('/signin', {'username': 'testuser', 'password': '12345'})
self.assertEqual(response.status_code, HTTPStatus.OK)
def test_signin_wrong_password(self):
"""
        Wrong password would render the same page again with 200 status code.
Returns:
"""
response = self.client.post('/signin', {'username': 'testuser', 'password': '1234'})
self.assertEqual(response.status_code, HTTPStatus.OK)
def test_signin_wrong_body(self):
"""
Signin api expects two keys in the body.
email and password. On getting wrong body.
it would render the same page again with 200 status code.
Returns:
"""
response = self.client.post('/signin', {'email': 'testuser', 'password': '12345'})
self.assertEqual(response.status_code, HTTPStatus.OK)
|
import maestro
devices = maestro.getConnectedDevices()
if len(devices) == 0:
print("No Maestro devices connected")
exit()
for device in devices:
print(f"name: {device.getName()} #channels: {device.getNumChannels()}")
for c in range(device.getNumChannels()):
channel=device.getChannelSettings(c)
print(f"#{c} mode: {channel.mode} min: {channel.minimum} max: {channel.maximum} speed: {channel.speed} acceleration: {channel.acceleration}")
# select first Maestro device
device=devices[0]
# change servo 0 position
device.setTarget(0, 5000)
|
# -*- coding: utf-8 -*-
# This script describes how private sector savings affect the fiscal
# multiplier, based on a geometric series interpretation. It is described in the
# accompanying iPython Notebook and at
#
# http://misunderheard.org/monetary_economics/2016/11/20/government_money_and_saving/
#
import matplotlib.pyplot as plt
import numpy as np
G = 100 # government spending
theta = 0.2 # tax rate
alpha = 0.9 # propensity to consume
n_rounds = 30 # number of rounds we'll consider
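# The two formulas evaluated below (equations 1 and 2 of the notebook),
# written out for reference:
#   income in round n:   y_n   = G * (alpha*(1-theta))**n
#   cumulative income:   sum_y = G * (1 - (alpha*(1-theta))**(n+1))
#                                  / (1 - alpha*(1-theta))
# i.e. a geometric series with common ratio alpha*(1-theta).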
# create an array of round numbers 0 to n_rounds-1, one per spending round
r = np.arange(0,n_rounds)
# solve equation 1 for each individual round
y_n = G*(alpha*(1-theta))**r
# solve equation 2 for each individual round
sum_y = G*(1-(alpha*(1-theta))**(r+1))/(1-alpha*(1-theta))
# plot
plt.bar(r,sum_y, color='r',label='cumulative income')
plt.bar(r,y_n, label='spending round income')
plt.grid()
plt.legend(loc='center right')
plt.xlabel('Spending round, n')
plt.ylabel('Income')
plt.tight_layout()
plt.show()
# calculate the final income
Y = G/(1-alpha*(1-theta))
print(Y)
# calculate the fiscal multiplier
print(1/(1-alpha*(1-theta)))
# calculate the total tax revenue
T = theta * Y
print(T)
# calculate the government budget position
print(T - G)
# calculate the total, accumulated savings
S = (1-alpha) * (1-theta) * Y
print(S)
|
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import seaborn as sns
import numpy as np
import tensorflow as tf
# To install the DBN package: pip install git+git://github.com/albertbup/deep-belief-network.git
# Further information: https://github.com/albertbup/deep-belief-network
from dbn import UnsupervisedDBN
from sklearn.manifold import TSNE
from sklearn.utils import shuffle
# Set random seed for reproducibility
np.random.seed(1000)
nb_samples = 400
if __name__ == '__main__':
# Load the dataset
(X_train, Y_train), (_, _) = \
tf.keras.datasets.mnist.load_data()
X_train, Y_train = shuffle(X_train, Y_train,
random_state=1000)
width = X_train.shape[1]
height = X_train.shape[2]
X = X_train[0:nb_samples].reshape(
(nb_samples, width * height)).\
astype(np.float32) / 255.0
Y = Y_train[0:nb_samples]
# Train the unsupervised DBN
unsupervised_dbn = UnsupervisedDBN(
hidden_layers_structure=[512, 256, 64],
learning_rate_rbm=0.05,
n_epochs_rbm=100,
batch_size=64,
activation_function='sigmoid')
X_dbn = unsupervised_dbn.fit_transform(X)
# Perform t-SNE
tsne = TSNE(n_components=2,
perplexity=10,
random_state=1000)
X_tsne = tsne.fit_transform(X_dbn)
    # Show the result
    sns.set()
    fig, ax = plt.subplots(figsize=(18, 12))
    colors = [cm.tab10(i) for i in Y]
    # scatter all the points once, then annotate each one with its digit label
    ax.scatter(X_tsne[:, 0], X_tsne[:, 1], c=colors, marker='o', s=50)
    for i in range(nb_samples):
        ax.annotate('%d' % Y[i], xy=(X_tsne[i, 0] + 1, X_tsne[i, 1] + 1), fontsize=15)
ax.set_xlabel(r'$x_0$', fontsize=20)
ax.set_ylabel(r'$x_1$', fontsize=20)
ax.grid(True)
plt.show()
|
from click.testing import CliRunner
from evalai.add_token import set_token
from evalai.utils.config import LEN_OF_TOKEN
from evalai.utils.common import generate_random_string
class TestSetToken:
def test_set_token(self):
expected = "Success: Authentication token is successfully set."
runner = CliRunner()
result = runner.invoke(set_token, [generate_random_string(LEN_OF_TOKEN)])
response = result.output.rstrip()
assert response == expected
def test_set_token_when_auth_token_is_invalid(self):
expected = "Error: Invalid Length. Enter a valid token of length: {}".format(LEN_OF_TOKEN)
runner = CliRunner()
result = runner.invoke(set_token, [generate_random_string(10)])
response = result.output.rstrip()
assert response == expected
|
from user import Users
from credential import Credentials
def create_user(uname,password):
'''
Function to create a new user
'''
new_user = Users(uname,password)
return new_user
def save_users(user):
'''
    Function to save users
'''
user.save_user()
def generate_password():
'''
Function that generates a password
'''
gen_pass = Credentials.generate_password()
return gen_pass
def create_credential(uname, account, account_username, account_password):
'''
Function to create new credential
'''
new_credential = Credentials(uname, account, account_username, account_password)
return new_credential
def save_credentials(credential):
'''
Function to save credentials
'''
credential.save_credential()
def del_credential(credential):
'''
Function to delete credential
'''
credential.delete_credential()
def display_credential():
'''
returns all the saved credentials
'''
return Credentials.display_credential()
def main():
print("Hello Welcome to your password locker.Please input your name...")
user_name = input()
print (f"Hello {user_name}. What would you like to do?")
print('\n')
while True:
print("Use these short codes : ca -create an account, cc - create credentials, lg - log in to your account, dc - display credentials, ex - exit")
short_code = input().lower()
if short_code == "ca":
print("New Account")
print("-"*100)
print("User name ....")
uname = input()
print("password ....")
password = input()
save_users(create_user(uname, password))
print('\n')
print(f"New account: username is {uname} and the password is {password}")
print('\n')
elif short_code == 'cc':
print("Create new account credentials")
print("-"*100)
print("User name ....")
uname = input()
print("Account name ....")
account = input()
print("Account username .....")
account_username = input()
while True:
print(' ')
print("-"*60)
                print('Please use the short codes to choose an option to set a password: ep - enter a password, gp - generate a password, ex - exit')
psw_choice =input("Enter an option....")
print("-"*60)
if psw_choice == 'ep':
print(" ")
password =input("Enter your password.....")
break
elif psw_choice == 'gp':
password = generate_password()
break
elif psw_choice == 'ex':
break
else:
print("please try again")
save_credentials(create_credential(uname, account, account_username, password)) #create and save new credentials
print('\n')
print(f"New credentials for {uname} ,*{account}* account and the username for the account is *{account_username}* password **{password}**")
print('\n')
elif short_code == 'lg':
print("-" *40)
print('')
print(f"Fill out your details to login to your account")
print("User name ....")
uname = input()
print("password ...")
password = input()
            # NOTE: assumes the Users class exposes `uname` and `password` attributes
            for user in Users.user_list:
                if user.uname == uname and user.password == password:
                    print("You are logged in to your account")
                    break
            else:
                print("No matching account found. Please create an account first")
print('\n')
elif short_code == 'dc':
print('')
if display_credential():
print("Here is all your credentials list ;") .lower().strip()
print(' ')
for credential in display_credential():
print(f"Account name: {credential.account} , Account username: {credential.account_username} ,Password: {credential.password}")
print(' ')
else:
print(' ')
print("There are no credentials saved")
print(' ')
elif short_code == 'ex':
print("Thank you for passing by see you again.......")
break
else:
print("Please use the short codes provided")
if __name__ == '__main__':
main()
|
_base_ = [
'../_base_/models/vit-base-p16.py',
'../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
'../_base_/default_runtime.py'
]
# specific to vit pretrain
paramwise_cfg = dict(custom_keys={
'.cls_token': dict(decay_mult=0.0),
'.pos_embed': dict(decay_mult=0.0)
})
pretrained = 'https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-base-p16_3rdparty_pt-64xb64_in1k-224_20210928-02284250.pth' # noqa
model = dict(
head=dict(
loss=dict(type='CrossEntropyLoss', loss_weight=1.0, _delete_=True), ),
backbone=dict(
img_size=224,
init_cfg=dict(
type='Pretrained',
checkpoint=pretrained,
_delete_=True,
prefix='backbone')))
img_norm_cfg = dict(
mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='RandomResizedCrop', size=224, backend='pillow'),
dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_label']),
dict(type='ToHalf', keys=['img']),
dict(type='Collect', keys=['img', 'gt_label'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', size=(224, -1), backend='pillow'),
dict(type='CenterCrop', crop_size=224),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToHalf', keys=['img']),
dict(type='Collect', keys=['img'])
]
# change batch size
data = dict(
samples_per_gpu=17,
workers_per_gpu=16,
drop_last=True,
train=dict(pipeline=train_pipeline),
train_dataloader=dict(mode='async'),
val=dict(pipeline=test_pipeline, ),
val_dataloader=dict(samples_per_gpu=4, workers_per_gpu=1),
test=dict(pipeline=test_pipeline),
test_dataloader=dict(samples_per_gpu=4, workers_per_gpu=1))
# remove clip-norm
optimizer_config = dict()
# optimizer
optimizer = dict(
type='SGD',
lr=0.08,
weight_decay=1e-5,
momentum=0.9,
paramwise_cfg=paramwise_cfg,
)
# learning policy
lr_config = dict(
policy='CosineAnnealing',
min_lr=0,
warmup='linear',
warmup_iters=800,
warmup_ratio=0.02,
)
# ipu cfg
# model partition config
ipu_model_cfg = dict(
train_split_edges=[
dict(layer_to_call='backbone.patch_embed', ipu_id=0),
dict(layer_to_call='backbone.layers.3', ipu_id=1),
dict(layer_to_call='backbone.layers.6', ipu_id=2),
dict(layer_to_call='backbone.layers.9', ipu_id=3)
],
train_ckpt_nodes=['backbone.layers.{}'.format(i) for i in range(12)])
# device config
options_cfg = dict(
randomSeed=42,
partialsType='half',
train_cfg=dict(
executionStrategy='SameAsIpu',
Training=dict(gradientAccumulation=32),
availableMemoryProportion=[0.3, 0.3, 0.3, 0.3],
),
eval_cfg=dict(deviceIterations=1, ),
)
# add model partition config and device config to runner
runner = dict(
type='IterBasedRunner',
ipu_model_cfg=ipu_model_cfg,
options_cfg=options_cfg,
max_iters=5000)
checkpoint_config = dict(interval=1000)
fp16 = dict(loss_scale=256.0, velocity_accum_type='half', accum_type='half')
|
# Copyright (C) 2018 Hatching B.V.
# This file is licensed under the MIT License, see also LICENSE.
import requests
from arbiter.backends import AnalysisBackend
class Modified(AnalysisBackend):
def configure(self, config):
url = config['url']
if not url.endswith('/'):
url += '/'
self.cuckoo_url = url
self.options = config.get("options")
def submit_artifact(self, av_id, artifact, previous_task=None):
body = {}
if self.options:
body["options"] = self.options
body["custom"] = artifact.url
files = {"file": (artifact.name, artifact.fetch())}
req = requests.post(self.cuckoo_url + "v1/tasks/create/file",
headers={"X-Arbiter": self.name},
data=body, files=files)
req.raise_for_status()
resp = req.json()
if "task_ids" not in resp:
raise ValueError(resp)
return {"task_ids": resp["task_ids"]}
def health_check(self):
req = requests.get(self.cuckoo_url + "v1/cuckoo/status")
req.raise_for_status()
data = req.json()
report = {
"machinestotal": data["machines"]["total"],
"machinesused": data["machines"]["total"] - data["machines"]["available"],
}
return report
|
"""Pack the modules contained in the tests/config directory."""
|
import numpy as np
import cv2
import pickle
# --------------------------------------------------------------------------------
# old /Users/alexander/Code/OpenCV/env/lib/python3.8/site-packages/cv2/cv2.so
# /Users/alexander/Code/OpenCV/env/lib/python3.8/site-packages/cv2/cv2.cpython-38-darwin.so
#
# 1) use pip uninstall opencv-python
# 2) use pip install opencv-contrib-python
#
# pip freeze > t.txt
# cat t.txt (numpy==1.19.2, opencv-contrib-python==4.4.0.44, Pillow==8.0.1)
#
# cp /Users/alexander/Code/OpenCV/env/lib/python3.8/site-packages/cv2/data /Users/alexander/Code/OpenCV/4. Face_Recognition_and_Identification/src/cascades/
# --------------------------------------------------------------------------------
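# Optional sanity check that the contrib build is active (cv2.face only
# ships with opencv-contrib-python):
#   python -c "import cv2; print(hasattr(cv2, 'face'))"   # expect True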
def rescale_frame(frame, percent=75):
width = int(frame.shape[1] * percent / 100)
height = int(frame.shape[0] * percent / 100)
dim = (width, height)
return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt2.xml')
eye_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_eye.xml')
smile_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_smile.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("recognizers/face-trainner.yml")
labels = {"person_name": 1}
with open("pickles/face-labels.pickle", 'rb') as f:
og_labels = pickle.load(f)
labels = {v: k for k, v in og_labels.items()}
# print(labels)
cap = cv2.VideoCapture(0)
while True:
# Capture frame-by-frame
ret, frame = cap.read()
frame = cv2.flip(frame, 1) # reverse (relevant for macOS)
frame = rescale_frame(frame, percent=75) # window size
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
for (x, y, w, h) in faces:
# print(x,y,w,h)
roi_gray = gray[y:y + h, x:x + w] # (ycord_start, ycord_end)
roi_color = frame[y:y + h, x:x + w]
        # recognize the face ROI with the trained LBPH model
id_, conf = recognizer.predict(roi_gray)
if (conf >= 4) and (conf <= 85):
            # print(id_)
# print(labels[id_])
font = cv2.FONT_HERSHEY_SIMPLEX
name = labels[id_]
color = (255, 255, 255)
stroke = 2
cv2.putText(frame, name, (x, y), font, 1, color, stroke, cv2.LINE_AA)
# img_item = "7.png"
# cv2.imwrite(img_item, roi_color)
color = (255, 0, 0) # BGR 0-255
stroke = 2
end_cord_x = x + w
end_cord_y = y + h
cv2.rectangle(frame, (x, y), (end_cord_x, end_cord_y), color, stroke)
# eyes = eye_cascade.detectMultiScale(roi_gray)
# for (ex, ey, ew, eh) in eyes:
# cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
# subitems = smile_cascade.detectMultiScale(roi_gray)
# for (ex, ey, ew, eh) in subitems:
# cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
# Display the resulting frame
# frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('frame', frame)
if cv2.waitKey(20) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
|
from scheme_runner import SchemeTestCase, Query
cases = [
    SchemeTestCase([
        Query(code=['(define a 1)'], expected={}),
        Query(code=['(define b 2)'], expected={}),
        Query(code=['(list a b)'], expected={'out': ['(1 2)\n']}),
        Query(code=["(list 'a 'b)"], expected={'out': ['(a b)\n']}),
        Query(code=["(list 'a b)"], expected={'out': ['(a 2)\n']}),
        Query(code=["(car '(a b c))"], expected={'out': ['a\n']}),
        Query(code=["(cdr '(a b c))"], expected={'out': ['(b c)\n']}),
        Query(code=['(define (memq item x)',
                    '(cond ((null? x) false)',
                    '((eqv? item (car x)) x)',
                    '(else (memq item (cdr x)))))'], expected={}),
        Query(code=["(memq 'apple '(pear banana prune))"], expected={'out': ['#f\n']}),
        Query(code=["(memq 'apple '(x (apple sauce) y apple pear))"], expected={'out': ['(apple pear)\n']}),
        Query(code=['(define (equal? x y)',
                    '(cond ((pair? x) (and (pair? y)',
                    '(equal? (car x) (car y))',
                    '(equal? (cdr x) (cdr y))))',
                    '((null? x) (null? y))',
                    '(else (eqv? x y))))'], expected={}),
        Query(code=["(equal? '(1 2 (three)) '(1 2 (three)))"], expected={'out': ['#t\n']}),
        Query(code=["(equal? '(1 2 (three)) '(1 2 three))"], expected={'out': ['#f\n']}),
        Query(code=["(equal? '(1 2 three) '(1 2 (three)))"], expected={'out': ['#f\n']}),
    ])
]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .datasets import * # noqa
from . import datasets
from .macadam_limits import is_within_macadam_limits
from .mesh import is_within_mesh_volume
from .pointer_gamut import is_within_pointer_gamut
from .spectrum import (generate_pulse_waves, XYZ_outer_surface,
is_within_visible_spectrum)
from .rgb import (RGB_colourspace_limits, RGB_colourspace_volume_MonteCarlo,
RGB_colourspace_volume_coverage_MonteCarlo,
RGB_colourspace_pointer_gamut_coverage_MonteCarlo,
RGB_colourspace_visible_spectrum_coverage_MonteCarlo)
__all__ = []
__all__ += datasets.__all__
__all__ += ['is_within_macadam_limits']
__all__ += ['is_within_mesh_volume']
__all__ += ['is_within_pointer_gamut']
__all__ += [
'generate_pulse_waves', 'XYZ_outer_surface', 'is_within_visible_spectrum'
]
__all__ += [
'RGB_colourspace_limits', 'RGB_colourspace_volume_MonteCarlo',
'RGB_colourspace_volume_coverage_MonteCarlo',
'RGB_colourspace_pointer_gamut_coverage_MonteCarlo',
'RGB_colourspace_visible_spectrum_coverage_MonteCarlo'
]
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='setuptools-wotmod',
version='0.2',
packages=find_packages(),
description='setuptools integration for creating World of Tanks mods',
long_description=open('README.md').read(),
author='jhakonen',
url='https://github.com/jhakonen/setuptools-wotmod/',
license='MIT License',
setup_requires=['pytest-runner'],
tests_require=[
'mock',
'nose',
'pytest<5',
],
entry_points={
"distutils.commands": [
"bdist_wotmod = setuptools_wotmod.bdist_wotmod:bdist_wotmod",
],
},
)
|
#! /usr/bin/env python
import numpy as np
import tf
import rospy
from nav_msgs.msg import Odometry
from std_msgs.msg import Float32
def callback(msg):
# Reference point
xref = 3
yref = 3
tref = np.arctan2(yref,xref)
# Current pose of robot
xcur = msg.pose.pose.position.x
ycur = msg.pose.pose.position.y
(r,p,tcur) = tf.transformations.euler_from_quaternion([msg.pose.pose.orientation.x,msg.pose.pose.orientation.y,msg.pose.pose.orientation.z,msg.pose.pose.orientation.w])
# Find error between ref point and current point
delx = xref - xcur
dely = yref - ycur
delt = tref - tcur
#control
k1 = 4
k2 = 0.1
wc = k1*np.arctan(np.tan(delt))
vc = k2*(np.sqrt((delx**2) + (dely**2)))*np.sign(np.cos(delt))
    rospy.loginfo("Vc = " + str(vc) + ", Wc = " + str(wc))
# Forward kinematic
L = 0.44
r = 0.07
wl = (2*vc - L*wc)/(2*r)
wr = (2*vc + L*wc)/(2*r)
    # publish commands to the robot (topic names were left as "/" placeholders)
    # NOTE: publishers would normally be created once at startup rather than
    # on every callback
    left_pub = rospy.Publisher("/", Float32, queue_size=10)
    right_pub = rospy.Publisher("/", Float32, queue_size=10)
    left = Float32()
    right = Float32()
    left.data = wl
    right.data = wr
    left_pub.publish(left)
    right_pub.publish(right)
def main():
    rospy.init_node("Controller_node")
    rospy.Subscriber("/Odom_encd", Odometry, callback)
    rospy.spin()
if __name__ == '__main__':
main()
|
########################################################################################
## BetterX Data Loader
## Loads files from AWS S3 and processes them
## Jan 14th 2016
## elias@betterX.org
########################################################################################
import boto3, botocore, re, os, MySQLdb, sys
## AWS S3
## Libraries from https://github.com/boto/boto3
## Setup Files in ~/.aws
## CONFIG
S3_BUCKET = ''
S3_BUCKET_ARCHIVE = ''
DOWNLOAD_PATH = ''
UNZIP_PATH = ''
FILE_ERR_PATH = ''
DECRYPT_ALGO = ''
DECRYPT_KEY = ''
DECRYPT_INIT_VECTOR = ''
DB_HOST = ''
DB_USER = ''
DB_PASSWORD = ''
DB_NAME = ''
IMPORTER = 'betterX-db-importer.py'
def checkMySQLConnectionCursor(conn, cursor):
if (conn.open == 0):
conn.reconnect(attempts=100, delay=10)
cursor = conn.cursor()
return conn, cursor
def getMySQLConnectionCursor(conn, cursor, host, user, password, name):
if (conn == None):
conn = MySQLdb.connect(host, user, password, name, charset='utf8')
cursor = conn.cursor()
if (conn.open == 0):
conn.reconnect(attempts=100, delay=10)
cursor = conn.cursor()
return conn, cursor
def parseFilename(filename):
pattern = re.compile("^(.*)_([0-9]{8}).zip.enc$")
result = pattern.match(filename)
return [result.group(1), result.group(2)]
def deleteFilename(filename):
os.remove(filename)
def dbinsert(tblName,fields,fieldTypes,cursor,values,conn):
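    # fieldTypes holds the %s placeholders; passing `values` separately lets
    # MySQLdb escape them (a parameterized query) rather than formatting them
    # into the SQL string.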
sql_command = "insert into " + tblName + " (" + fields + ") values (" + fieldTypes + ")"
cursor.execute(sql_command, values)
conn.commit()
## MAIN
s3 = boto3.resource('s3')
bucket = s3.Bucket(S3_BUCKET)
exists = True
try:
s3.meta.client.head_bucket(Bucket=S3_BUCKET)
except botocore.exceptions.ClientError as e:
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response['Error']['Code'])
if error_code == 404:
exists = False
try:
conn = None
cursor = None
conn, cursor = getMySQLConnectionCursor(conn, cursor, DB_HOST, DB_USER, DB_PASSWORD, DB_NAME)
except Exception as e:
    print(str(e))
sys.exit()
zip_file_counter = 0
for keyN in bucket.objects.all():
zip_file_counter = zip_file_counter + 1
inner_zip_file_counter = 0
for key in bucket.objects.all():
inner_zip_file_counter = inner_zip_file_counter + 1
print (str(key.key) + " " + str(key.last_modified))
s3.Object(S3_BUCKET, str(key.key)).download_file(DOWNLOAD_PATH +str(key.key))
file_parse_results = parseFilename(str(key.key))
tis_userid = file_parse_results[0]
tis_date = file_parse_results[1]
original_filename = str(key.key)
exported_filename = str(key.key).replace('.enc', '')
os.system('openssl enc -d ' + DECRYPT_ALGO + ' -in ' + DOWNLOAD_PATH + original_filename + ' -out ' + DOWNLOAD_PATH + exported_filename + ' -K ' + DECRYPT_KEY + ' -iv ' + DECRYPT_INIT_VECTOR) # decrypt
os.system('unzip -qq ' + DOWNLOAD_PATH + exported_filename + ' -d ' + UNZIP_PATH) # unzip
inner_file_counter = 0
for dir_entry in os.listdir(UNZIP_PATH): # for each file in temp directory
dir_entry_path = os.path.join(UNZIP_PATH, dir_entry)
if os.path.isfile(dir_entry_path):
inner_file_counter = inner_file_counter + 1
filename = dir_entry_path
print " " + filename
cmd = 'python ' + IMPORTER + ' -m ' + DB_HOST + ' -u ' + DB_USER + ' -p ' + "'" + DB_PASSWORD + "'" + ' -d ' + DB_NAME + ' -f ' + filename + ' -i ' + tis_userid + ' -a ' + tis_date
result = os.system(cmd)
tis_status = 'OK'
if (result != 0):
tis_status = result
conn, cursor = checkMySQLConnectionCursor(conn, cursor)
dbinsert('file_log', 'file_zip, file_name, file_uid, file_time, status, zip_file, zip_file_no, zip_file_total', '%s,%s,%s,%s,%s,%s,%s,%s', cursor, [original_filename,filename,tis_userid,tis_date,tis_status,str(inner_file_counter), str(inner_zip_file_counter), str(zip_file_counter)], conn)
if (tis_status == 'OK'):
deleteFilename(filename)
else:
os.system('cp ' + filename + ' ' + FILE_ERR_PATH)
deleteFilename(filename)
deleteFilename(DOWNLOAD_PATH + original_filename)
deleteFilename(DOWNLOAD_PATH + exported_filename)
s3.Object(S3_BUCKET_ARCHIVE,str(key.key)).copy_from(CopySource=S3_BUCKET+'/'+str(key.key))
s3.Object(S3_BUCKET,str(key.key)).delete()
cursor.close()
conn.close()
|
#!/usr/bin/env python3
import subprocess
import sys
from compiler.provides import ProvidesFile
from compiler.requires import require_directory
from find_built_subvol import find_built_subvol
from fs_image.fs_utils import Path
from tests.temp_subvolumes import TempSubvolumes
from ..common import image_source_item
from ..install_file import InstallFileItem
from .common import BaseItemTestCase, DUMMY_LAYER_OPTS, render_subvol
def _install_file_item(**kwargs):
# The dummy object works here because `subvolumes_dir` of `None` runs
# `artifacts_dir` internally, while our "prod" path uses the
# already-computed value.
return image_source_item(
InstallFileItem, exit_stack=None, layer_opts=DUMMY_LAYER_OPTS,
)(**kwargs)
class InstallFileItemTestCase(BaseItemTestCase):
def test_install_file(self):
exe_item = _install_file_item(
from_target='t', source={'source': 'a/b/c'}, dest='d/c',
is_executable_=True,
)
self.assertEqual(0o555, exe_item.mode)
self.assertEqual(b'a/b/c', exe_item.source)
self._check_item(
exe_item,
{ProvidesFile(path='d/c')},
{require_directory('d')},
)
# Checks `image.source(path=...)`, as well as "is_executable_=False"
data_item = _install_file_item(
from_target='t',
source={'source': 'a', 'path': '/b/q'},
dest='d',
is_executable_=False,
)
self.assertEqual(0o444, data_item.mode)
self.assertEqual(b'a/b/q', data_item.source)
self._check_item(
data_item,
{ProvidesFile(path='d')},
{require_directory('/')},
)
# NB: We don't need to get coverage for this check on ALL the items
        # because the presence of the ProvidesDoNotAccess items is the real
# safeguard -- e.g. that's what prevents TarballItem from writing
# to /meta/ or other protected paths.
with self.assertRaisesRegex(AssertionError, 'cannot start with meta/'):
_install_file_item(
from_target='t', source={'source': 'a/b/c'}, dest='/meta/foo',
is_executable_=False,
)
def test_install_file_from_layer(self):
layer = find_built_subvol(
Path(__file__).dirname() / 'test-with-one-local-rpm'
)
path_in_layer = b'usr/share/rpm_test/cheese2.txt'
item = _install_file_item(
from_target='t',
source={'layer': layer, 'path': '/' + path_in_layer.decode()},
dest='cheese2',
is_executable_=False,
)
self.assertEqual(0o444, item.mode)
self.assertEqual(Path(layer.path(path_in_layer)), item.source)
self.assertEqual(layer.path(path_in_layer), item.source)
self._check_item(
item,
{ProvidesFile(path='cheese2')},
{require_directory('/')},
)
def test_install_file_command(self):
with TempSubvolumes(sys.argv[0]) as temp_subvolumes:
subvol = temp_subvolumes.create('tar-sv')
subvol.run_as_root(['mkdir', subvol.path('d')])
_install_file_item(
from_target='t', source={'source': '/dev/null'}, dest='/d/null',
is_executable_=False,
).build(subvol, DUMMY_LAYER_OPTS)
self.assertEqual(
['(Dir)', {'d': ['(Dir)', {'null': ['(File m444)']}]}],
render_subvol(subvol),
)
# Fail to write to a nonexistent dir
with self.assertRaises(subprocess.CalledProcessError):
_install_file_item(
from_target='t', source={'source': '/dev/null'},
dest='/no_dir/null', is_executable_=False,
).build(subvol, DUMMY_LAYER_OPTS)
# Running a second copy to the same destination. This just
# overwrites the previous file, because we have a build-time
# check for this, and a run-time check would add overhead.
_install_file_item(
from_target='t', source={'source': '/dev/null'}, dest='/d/null',
# A non-default mode & owner shows that the file was
# overwritten, and also exercises HasStatOptions.
mode='u+rw', user_group='12:34', is_executable_=False,
).build(subvol, DUMMY_LAYER_OPTS)
self.assertEqual(
['(Dir)', {'d': ['(Dir)', {'null': ['(File m600 o12:34)']}]}],
render_subvol(subvol),
)
|
from importers import CSVImportCommand
import requests
import click
class AddressImportCommand(CSVImportCommand):
def process_row(self, row):
address = {
"uprn": row[0],
"address_line_1": row[2],
"address_line_2": row[3],
"address_line_3": row[4],
"city": row[5],
"county": row[6],
"postcode": row[7],
"country_code": row[9],
"point": {
"type": "Point",
"coordinates": [float(row[10]), float(row[9])]
},
"srid": 4326
}
headers = {'Authorization': 'Token {0}'.format(self.token)}
response = requests.post(
self.api_url,
json=address,
headers=headers)
if response.status_code == 201:
print('{0} imported correctly'.format(row[0]))
else:
print(
'ERROR: could not import {0} because of {1}'.format(
row[0], response.text))
@click.command()
@click.argument('filenames', nargs=-1, type=click.Path())
@click.option(
'--apiurl',
default='http://localhost:8000/api/addresses/', help='API url')
@click.option('--apitoken', help='API authentication token')
def import_addresses(filenames, apiurl, apitoken):
command = AddressImportCommand(filenames, apiurl, apitoken)
command.run()
if __name__ == '__main__':
import_addresses()
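
# Invocation sketch (hypothetical file name and token value):
#   python import_addresses.py addresses.csv --apiurl http://localhost:8000/api/addresses/ --apitoken <token>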
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Alex
# @Date: 2015-11-16 19:22:12
# @Last Modified by: Alex
# @Last Modified time: 2015-12-28 19:32:16
from django.contrib import admin
from .models import CustomerModel
# Register your models here.
admin.site.register(CustomerModel)
|
from typing import List, Optional
from starlette.responses import RedirectResponse
from ..utils import JWTBaseModel, domain_to_storename, get_unique_id
from .tokens import OAuthJWT
def oauth_init_url(
domain: str,
requested_scopes: List[str],
callback_domain: str,
callback_path: str,
is_login: bool,
jwt_key: str,
api_key: str,
) -> str:
"""
Create the URL and the parameters needed to start the oauth process to install an app or to log
a user in.
Parameters
----------
domain: Domain of the shopify store in the format "{storesubdomain}.myshopify.com"
requested_scopes: List of scopes accessible to the app once installed.
See https://shopify.dev/docs/admin-api/access-scopes
callback_domain: Public domain Shopify will connect to during the oauth process
is_login: Specify if the oauth is to install the app or a user logging in
Returns
-------
URL with all needed parameters to trigger the oauth process
"""
scopes = ','.join(requested_scopes)
redirect_uri = f'https://{callback_domain}{callback_path}'
oauthjwt = OAuthJWT(
is_login=is_login, storename=domain_to_storename(domain), nonce=get_unique_id()
)
oauth_token = oauthjwt.encode_token(key=jwt_key)
access_mode = 'per-user' if is_login else ''
return (
f'https://{domain}/admin/oauth/authorize?client_id={api_key}&'
f'scope={scopes}&redirect_uri={redirect_uri}&state={oauth_token}&'
f'grant_options[]={access_mode}'
)
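
# Usage sketch (illustrative values only; the domain and key names below are
# assumptions, not real credentials):
# url = oauth_init_url(
#     domain='example.myshopify.com',
#     requested_scopes=['read_products', 'write_products'],
#     callback_domain='app.example.com',
#     callback_path='/callback',
#     is_login=False,
#     jwt_key='SECRET_JWT_KEY',
#     api_key='SHOPIFY_API_KEY',
# )
# The resulting URL is where the merchant's browser should be redirected.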
def app_redirect(
store_domain: str,
app_domain: str,
jwtoken: Optional[JWTBaseModel],
jwt_key: str,
app_handle: str,
) -> RedirectResponse:
if jwtoken is None:
return RedirectResponse(f'https://{store_domain}/admin/apps/{app_handle}')
jwtarg, signature = jwtoken.encode_hp_s(key=jwt_key)
redir = RedirectResponse(f'https://{store_domain}/admin/apps/{app_handle}?jwt={jwtarg}')
# TODO set 'expires'
redir.set_cookie(
key=jwtoken.cookie_key,
value=signature,
domain=app_domain,
httponly=True,
secure=True,
)
return redir
|
#!/usr/bin/env python
import os.path
import re
import tornado.auth
import tornado.database
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import unicodedata
import tornado.websocket
import json
import hashlib, uuid
from tornado.options import define, options
define("port", default=8080, help="run on the given port", type=int)
define("mysql_host", default="127.0.0.1:3306", help="blog database host")
define("mysql_database", default="qnadb", help="blog database name")
define("mysql_user", default="root", help="blog database user")
define("mysql_password", default="3rdr3d", help="blog database password")
db = tornado.database.Connection(
host=options.mysql_host, database=options.mysql_database,
user=options.mysql_user, password=options.mysql_password)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", RootHandler),
(r"/chat/(\w+)", ChatHandler)
# Add your routes here
]
settings = dict(
app_name=u"Sena Chat",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
)
tornado.web.Application.__init__(self, handlers, debug=True,**settings)
# Note: the single global `db` connection defined above is shared across all handlers
class ChatSession(object):
def __init__(self, username1, username2):
self.username1=username1
self.username2=username2
self.messages=[]
def add_message(self, payload):
self.messages.append(payload)
    def get_messages(self):
        # json is already imported at module level
        return json.dumps(self.messages)
class ChatHandler(tornado.websocket.WebSocketHandler):
waiters=set()
users=dict()
cache=[]
cache_size=200
sessions=dict()
user_sessions=dict()
def allow_draft76(self):
return True
def open(self,username):
self.username=username
for con in ChatHandler.waiters:
con.write_message(json.dumps({"type":"presence","from":self.username, "status":"1","fullname":self.username}))
self.write_message(json.dumps({"type":"presence","from":con.username, "status":"1","fullname":con.username}))
ChatHandler.waiters.add(self)
ChatHandler.users[username]=self
ChatHandler.user_sessions[username]=[]
self.write_message(json.dumps({'message':'welcome','from':'admin'}))
#send presence
print "chat connection received from {0}".format(username)
def on_close(self):
print "chat connection closing"
try:
ChatHandler.waiters.remove(self)
del(ChatHandler.users[self.username])
del(ChatHandler.user_sessions[self.username])
except Exception, e:
print "error closing connection for client: {0}, error: {1}".format(self.username,e)
for con in ChatHandler.waiters:
con.write_message(json.dumps({"type":"presence","from":self.username, "status":"0","fullname":self.username}))
    def error_msg(self, error_code):
        if error_code is not None:
            json_string = json.dumps({"type": "error", "code": error_code})
            print "sending error to client: {0}, error: {1}".format(self.username, json_string)
            self.write_message("{0}".format(json_string))
        else:
            print "Error code not found"
@classmethod
def get_user_sessions(cls, username):
        if username is not None:
            if username in cls.user_sessions:
                return cls.user_sessions[username]
        return []
@classmethod
def start_session(cls,from_user, to_user, payload):
try:
if from_user in cls.users and to_user in cls.users:
print "starting chat session for user: {0} and user {1}".format(from_user, to_user)
session_key="{0}-{1}".format(from_user.lower().strip(),to_user.lower().strip())
if session_key in cls.sessions or session_key[::-1] in cls.sessions:
print "session already exists, sending notification to user"
if not session_key in cls.sessions:
session_key=session_key[::-1]
join_msg={"type":"chatsession","sessionkey":session_key, "from":to_user,"to":from_user,'history':cls.sessions[session_key].messages}
cls.users[from_user].write_message(json.dumps(join_msg))
join_msg['from']=from_user
join_msg["to"]=to_user
cls.users[to_user].write_message(json.dumps(join_msg))
                    # History is already delivered via the 'history' field of
                    # the join message above; no per-message replay is needed.
                    return
session=ChatSession(from_user,to_user)
cls.sessions[session_key]=session
join_msg={"type":"chatsession","sessionkey":session_key, "from":to_user,"to":from_user,'history':cls.sessions[session_key].messages}
cls.users[from_user].write_message(json.dumps(join_msg))
join_msg['from']=from_user
join_msg['type']="chatinvite"
join_msg["to"]=to_user
cls.users[to_user].write_message(json.dumps(join_msg))
else:
json_data={"to":from_user, "from":"admin", "type":"offline", "from":to_user }
cls.users[from_user].write_message(json.dumps(json_data))
except Exception, e:
print "error while starting chat session {0}".format(e);
def on_message(self, message):
print "received message: {0} from {1}".format(message,self.username)
try:
json_data=json.loads(message)
if "type" in json_data:
message_type=json_data['type']
if message_type=='chatmessage':
from_user = json_data['from']
to_user = json_data['to']
sessionkey=json_data['sessionkey']
message=json_data["message"]
if sessionkey in ChatHandler.sessions:
ChatHandler.sessions[sessionkey].messages.append(json_data)
print "chat message received from {0} to {1}".format(from_user,to_user)
#ChatHandler.send_message_to_user(to_user, message)
try:
if to_user in ChatHandler.users :
ChatHandler.users[to_user].write_message(json.dumps({"from":from_user,"to":to_user,"sessionkey":sessionkey, "message":message, "type":"chatmessage"}))
except Exception, e:
print "error while sending message"
elif message_type == "startsession":
if "to" in json_data:
to_user = json_data['to']
print "starting chat session for user: {0} and {1}".format(self.username,json_data['to'])
ChatHandler.start_session(self.username,to_user,json_data)
                elif message_type == 'presence':
                    from_user = json_data.get('from', self.username)
                    print "presence from {0} status {1}".format(from_user, json_data.get('status'))
                    ChatHandler.send_presence(from_user, message)
except Exception, e:
print "Error occurred during message received {0}".format(e)
self.error_msg("100")
@classmethod
def send_presence(cls, from_user, payload):
print "sending presence from user {0} to his/her friends ".format(from_user)
#get friends
        friends = db.query("select u.username, c.userid from chatusercontacts c, chatuser u where c.userid=u.id and c.userid=%s", from_user)
        if friends:
for friend in friends:
try:
con=cls.users[friend['username']]
con.write_message("{0}".format(payload))
except Exception, e:
pass
@classmethod
def send_message_to_user(cls, username, payload):
print "sending chat message to client {0}".format(username)
        if username is not None and payload is not None:
try:
cls.users[username].write_message("{0}".format(json.dumps(payload)))
return True
except Exception, e:
print "error while sending mesage to user {0}, error: {1}".format(username,e)
return False
    @classmethod
    def send_message_to_users(cls, usernames_array, payload):
        print "sending chat message to clients {0}".format(",".join(usernames_array))
        if usernames_array is not None and payload is not None:
for con in cls.waiters:
if con.username in usernames_array:
try:
con.write_message("{0}".format(json.dumps(payload)))
except Exception, e:
pass
return True
return False
    @classmethod
    def create_chat_session(cls, username1, username2):
pass
class BaseHandler(tornado.web.RequestHandler):
@property
def db(self):
return self.application.db
class RootHandler(BaseHandler):
def get(self):
self.render("root.html")
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
import pyArango.collection as COL
import pyArango.validation as VAL
class VirusSequences(COL.Collection):
#_properties = {
# "keyOptions" : {
# "allowUserKeys": False,
# "type": "autoincrement",
# "increment": 1,
# "offset": 0,
# }
#}
_validation = {
'on_save': True,
'on_set': True,
'allow_foreign_fields': True
}
_fields = {
"Index": COL.Field(validators=[VAL.NotNull()]),
'Accession': COL.Field(validators=[VAL.NotNull()]),
'Sequence': COL.Field(validators=[VAL.NotNull()]),
'Version': COL.Field(),
'Sub_accession': COL.Field(),
'Protein_accession': COL.Field(),
'Release_Date': COL.Field(),
'Genus': COL.Field(),
'Family': COL.Field(),
'Length': COL.Field(),
'Nuc_Completeness': COL.Field(),
'Genotype': COL.Field(),
'Genome_Region': COL.Field(),
'Segment': COL.Field(),
'Authors': COL.Field(),
'Publications': COL.Field(),
'Geo_Location': COL.Field(),
'Host': COL.Field(),
'Isolation_Source': COL.Field(),
'Collection_Date': COL.Field(),
'BioSample': COL.Field(),
'GenBank_Title': COL.Field(),
}
_field_types = {
'Index': 'float',
'Accession': "enumeration",
# 'Sequence': "enumeration",
'Version': "enumeration",
'Sub_accession': "enumeration",
'Protein_accession': "enumeration",
'Release_Date': "enumeration",
'Genus': "enumeration",
'Family': "enumeration",
'Length': "float",
'Nuc_Completeness': "enumeration",
'Genotype': "enumeration",
# 'Authors': COL.Field(),
# 'Publications': COL.Field(),
'Geo_Location': "enumeration",
'Host': "enumeration",
'Isolation_Source': "enumeration",
'Collection_Date': "enumeration",
'BioSample': "enumeration",
'GenBank_Title': "enumeration",
}
class Peptides(COL.Collection):
#_properties = {
# "keyOptions" : {
# "allowUserKeys": False,
# "type": "autoincrement",
# "increment": 1,
# "offset": 0,
# }
#}
_validation = {
'on_save': True,
'on_set': True,
'allow_foreign_fields': True
}
_fields = {
"Index": COL.Field(validators=[VAL.NotNull()]),
"Method": COL.Field(validators=[VAL.NotNull()]),
"Context_size": COL.Field(validators=[VAL.NotNull()]),
"Model_run": COL.Field(validators=[VAL.NotNull()]),
"Accession": COL.Field(validators=[VAL.NotNull()]),
"Sub_accession": COL.Field(validators=[VAL.NotNull()]),
#"Position": COL.Field(validators=[VAL.NotNull()]),
"Position": COL.Field(), # temporarily
"Length": COL.Field(validators=[VAL.NotNull()]),
"Sequence": COL.Field(validators=[VAL.NotNull()]),
"Score": COL.Field(validators=[VAL.NotNull()]),
"Name": COL.Field(validators=[VAL.NotNull()])
}
_field_types = {
"Index": "float",
"Method": "enumeration",
"Context_size": "enumeration",
"Model_run": "enumeration",
"Accession": "enumeration",
"Sub_accession": "enumeration",
"Position": "float",
"Length": "enumeration",
"Score": "float",
"Sequence": "enumeration",
"Name": "enumeration"
}
class Contacts(COL.Collection):
_validation = {
'on_save': True,
'on_set': True,
'allow_foreign_fields': False
}
_fields = {
"email": COL.Field(validators=[VAL.NotNull(), VAL.Email()]),
"nbDownloads": COL.Field(validators=[VAL.NotNull()], default=1)
}
__COLLECTIONS = { col.__name__: col for col in [VirusSequences, Peptides, Contacts] }
|
import numpy as np
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt
import seaborn as sns
from os.path import exists
from pathlib import Path
from matplotlib.ticker import FormatStrFormatter
from matplotlib import ticker
def show_values_on_bars(axs, h_v="v", space=1):
def _show_on_single_plot(ax):
if h_v == "v":
for p in ax.patches:
_x = p.get_x() + p.get_width() / 2
_y = p.get_y() + p.get_height() + float(space)
                try:
                    value = int(p.get_height())
                except (TypeError, ValueError):
                    value = 0
ax.text(_x, _y, value, ha="center")
elif h_v == "h":
for p in ax.patches:
_x = p.get_x() + p.get_width() + float(space)
_y = p.get_y() + p.get_height()
                try:
                    value = int(p.get_width())
                except (TypeError, ValueError):
                    value = 0
ax.text(_x, _y, value, ha="left")
if isinstance(axs, np.ndarray):
for idx, ax in np.ndenumerate(axs):
_show_on_single_plot(ax)
else:
_show_on_single_plot(axs)
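
# Typical use (illustrative): draw a bar plot first, then annotate it.
# ax = sns.barplot(x="Number of Reasoner", y="Number of Ontology", data=df)
# show_values_on_bars(ax, h_v="v", space=1)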
def load_analyze_csv(file_name):
data = pd.read_csv(file_name)
#new_data = data.iloc[:, 13:].copy()
return data
def process_data(data, reasoners, suffix):
new_columns = []
for reasoner in reasoners:
column_name = reasoner + "_" + suffix
new_column_name = reasoner + "_" + "Count"
if column_name in data.columns:
data[new_column_name] = np.where(np.isnan(data[column_name]), 0, 1)
new_columns.append(new_column_name)
data["Count"] = data[new_columns].sum(axis=1)
new_data = data[["Ontology", "Count"]].copy()
new_data = new_data.groupby(["Count"]).count().reset_index()
new_data.columns = ["Number of Reasoner", "Number of Ontology"]
return new_data
#new_data.to_csv("test.csv")
def plot_bar_chart(data, ax, task):
sns.set_color_codes("muted")
    g = sns.barplot(ax=ax, data=data, x="Number of Reasoner", y="Number of Ontology", color="b")
g.set(ylabel=None)
g.set(xlabel=None)
g.set_title(task)
g.tick_params(left=False, bottom=False)
g.set(yticklabels=[])
if __name__ == '__main__':
input_folder="./output/"
output_folder="./output"
Path(output_folder).mkdir(parents=True, exist_ok=True)
reasoner_name = ["Factpp", "HermiT", "JFact", "Konclude", "KoncludeCLI", "Pellet", "Openllet"]
task_name = ["Consistency", "Classification", "Realization"]
#task_name = ["Consistency"]
i = 0
fig, axes = plt.subplots(1, 3, sharey=True, figsize=(11,5))
for task in task_name:
file_name = input_folder + task + "_mean.csv"
data = load_analyze_csv(file_name)
new_data = process_data(data, reasoner_name, task)
plot_bar_chart(new_data, axes[i], task)
show_values_on_bars(axes[i], space=20)
i = i + 1
axes[0].set(ylabel="Number of Ontologies")
axes[1].set(xlabel="Number of Reasoners")
#sns.despine(bottom=True, left=True)
#sns.despine()
plt.savefig(output_folder + "/count.pdf", bbox_inches='tight')
plt.savefig(output_folder + "/count.png", bbox_inches='tight')
plt.show()
|
#! /usr/bin/env python
from __future__ import print_function
import math
import sys
from collections import defaultdict
from OpenGL.GL import *
def normalize_vertex_list(verts):
    """ Center 3D vertex data about the origin by subtracting the
        midpoint of its axis-aligned bounding box.
        Original input is modified
    """
    # Set initial min/max values to their inverse extremes so the loop below
    # will change their values no matter what. Note: use -sys.float_info.max;
    # sys.float_info.min is the smallest *positive* float, not the most negative.
    v_max = [-sys.float_info.max] * 3
    v_min = [sys.float_info.max] * 3
    for v in verts:
        for i in range(0, 3):
            if v[i] > v_max[i]:
                v_max[i] = v[i]
            if v[i] < v_min[i]:
                v_min[i] = v[i]
    # Compute the midpoint (bounding-box center), not the statistical mean
    mean = [a + b for a, b in zip(v_max, v_min)]
    mean = [a / 2.0 for a in mean]
# translate vertices
for v in verts:
for i in range(0,3):
v[i] -= mean[i]
    # Shift the recorded extents by the same translation (subtract, not add)
    v_max = [a - b for a, b in zip(v_max, mean)]
    v_min = [a - b for a, b in zip(v_min, mean)]
    rad = math.fabs(max(v_max) - min(v_min))  # overall extent; currently unused
    #g_Translate[2] = -rad
    return verts
def normalize_vertex_array(verts):
    """ Center 3D vertex data about the origin by subtracting the
        midpoint of its axis-aligned bounding box.
        Original input is unmodified
    """
    # Copy each vertex too: list(verts) alone is a shallow copy, so the inner
    # [x, y, z] lists would still be shared (and mutated) otherwise.
    new_verts = [list(v) for v in verts]
    normalize_vertex_list(new_verts)
    return new_verts
def parse_vertex_line(tokens):
""" Parse Wavefront OBJ Vertex line
Input:
tokens - Array of vertex line tokens as strings
Format1: ['v','1','2','3']
Format2: ['v','1','2','3','4','5','6']
"""
size = len(tokens)
# valid vertex strings are either v x y z or v x y z r g b (Meshlab dumps color to vertices)
# TODO: Look in to meshlab's output to find out if their color format is right.
if not (size == 7 or size == 4): return None
if not (tokens[0] in ['v','V']): return None
# x/y/z values will *always* be the first values after the line identifier
xyz = tokens[1:4]
return [float(xyz[0]),float(xyz[1]),float(xyz[2])]
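
# Example (matches Format1 above):
# parse_vertex_line(['v', '0.1', '0.2', '0.3']) == [0.1, 0.2, 0.3]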
def parse_uv_line(tokens):
""" Parse Wavefront OBJ UV line
Input:
tokens - Array of UV line tokens as strings
Format: ['vt','0.4','0.5']
"""
size = len(tokens)
# valid uv strings are 'vt 0.1 0.1'
if not (size == 3): return None
if not (tokens[0] in ['vt','VT']): return None
# x/y/z values will *always* be the first values after the line identifier
uv = tokens[1:3]
return [float(uv[0]),float(uv[1])]
def parse_normal_line(tokens):
""" Parse Wavefront OBJ normal line
Input:
tokens - Array of normal line tokens as strings
Format: ['vn','0.4','0.5','0.6']
"""
size = len(tokens)
# valid uv strings are 'vn 0.1 0.1 0.1'
if not (size == 4): return None
if not (tokens[0] in ['vn','VN']): return None
# x/y/z values will *always* be the first values after the line identifier
normals = tokens[1:4]
return [float(normals[0]),float(normals[1]),float(normals[2])]
def parse_face_line(tokens):
""" Parse Wavefront OBJ face line
Input:
tokens - Array of face line tokens as strings
Format1: ['f','1','2','3']
Format2: ['f','1//1','2//2','3//3']
Format3: ['f','1/1/1','2/2/2','3/3/3']
Output:
        face - Single triangular face.
Format: [corner_a,corner_b,corner_c,uv_a,uv_b,uv_c,normal_a,normal_b,normal_c]
TODO:
* Support faces like F 1/1 2/2 3/3, where I suppose we're assuming the 2nd coord is uv
"""
size = len(tokens)
# Currently this method only supports triangulated mesh data
if not (size == 4): return None
if not (tokens[0] in ['f','F']): return None
# face index values will *always* be the first values after the line identifier
fs = tokens[1:4]
f1 = fs[0].split('/')
f2 = fs[1].split('/')
f3 = fs[2].split('/')
ret = []
if len(f1) == 3:
a1 = int(f1[0]) if f1[0] != '' else None
b1 = int(f1[1]) if f1[1] != '' else None
c1 = int(f1[2]) if f1[2] != '' else None
a2 = int(f2[0]) if f2[0] != '' else None
b2 = int(f2[1]) if f2[1] != '' else None
c2 = int(f2[2]) if f2[2] != '' else None
a3 = int(f3[0]) if f3[0] != '' else None
b3 = int(f3[1]) if f3[1] != '' else None
c3 = int(f3[2]) if f3[2] != '' else None
        return [a1, a2, a3, b1, b2, b3, c1, c2, c3]
if len(f1) == 1:
a1 = int(f1[0]) if f1[0] != '' else None
a2 = int(f2[0]) if f2[0] != '' else None
a3 = int(f3[0]) if f3[0] != '' else None
return [a1,a2,a3]
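
# Example (Format3 above): vertex indices come first, then uv, then normal:
# parse_face_line(['f', '1/4/12', '2/6/8', '3/7/5']) == [1, 2, 3, 4, 6, 7, 12, 8, 5]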
def load(file_name,normalize=False):
""" Load Wavefront OBJ
TODO:
* Support for usemtl and textures
* Support for 'o' groups
* Support for 'vn' normals and their respective per-face indexing
* Current version assumes UV data exists, make function
a bit more bulletproof towards unessential data.
"""
# Build line parse mappings -> {'wavefront line element':token_parser()}
obj_line_parsers = defaultdict(lambda : lambda a: None,{
'v':parse_vertex_line,
'V':parse_vertex_line,
'f':parse_face_line,
'F':parse_face_line,
'vt':parse_uv_line,
'vn':parse_normal_line,
})
obj_parse_assignment = defaultdict(lambda : lambda a: None,{
'v':lambda b:verts.append(b),
'V':lambda b:verts.append(b),
'f':lambda b:faces.append(b),
'F':lambda b:faces.append(b),
'vt':lambda b:uvs.append(b),
'vn':lambda b:norms.append(b)
})
verts = []
colors = None
faces = []
norms = []
uvs = []
with open(file_name,'r') as fr:
for line_index,line in enumerate(fr):
# tokenize each line (ie. Split lines up in to lists of elements)
# e.g. f 1//1 2//2 3//3 => [f,1//1,2//2,3//3]
tokens = line.strip().split(' ')
if tokens == None: continue
if tokens[0] == '': continue
try:
key = tokens[0]
value = obj_line_parsers[key](tokens)
obj_parse_assignment[key](value)
except Exception as err:
print("Ill formed line[%d]: %s"%(line_index,line))
print("Err: ",err)
    if normalize:
        verts = normalize_vertex_list(verts)
return verts,faces,uvs,norms,colors
def cross(a, b):
    # Right-handed cross product; note the middle component is a2*b0 - a0*b2.
    return [a[1]*b[2] - a[2]*b[1], a[2]*b[0] - a[0]*b[2], a[0]*b[1] - a[1]*b[0]]
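# Sanity check (right-handed basis): cross([1, 0, 0], [0, 1, 0]) == [0, 0, 1]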
def vlen(a):
return math.sqrt(a[0]*a[0] + a[1]*a[1] + a[2]*a[2])
def vsub(a,b):
return [a[0]-b[0],a[1]-b[1],a[2]-b[2]]
def process_obj(verts, faces, uvs, normals, colors):
    """ Split 9-component facial data in to individual triangles
        for OpenGL
        Note: Vertex indices are not used directly by OpenGL.
        They are used here to create a single, large, vertex array
        grouped by triangle. This means each vertex will likely
        be used several times.
    """
    # Faces store 9 index values (v/uv/n) or 3 (v only); split them up a bit
out_verts = []
out_uvs = []
out_normals = []
for face in faces:
if len(face) == 9:
out_verts.append(verts[face[0]-1])
out_verts.append(verts[face[1]-1])
out_verts.append(verts[face[2]-1])
if len(uvs) == len(verts):
out_uvs.append(uvs[face[3]-1])
out_uvs.append(uvs[face[4]-1])
out_uvs.append(uvs[face[5]-1])
else:
out_uvs.append(0)
out_uvs.append(0)
out_uvs.append(0)
out_normals.append(normals[face[6]-1])
out_normals.append(normals[face[7]-1])
out_normals.append(normals[face[8]-1])
elif len(face) == 3:
out_verts.append(verts[face[0]-1])
out_verts.append(verts[face[1]-1])
out_verts.append(verts[face[2]-1])
edge1 = vsub(verts[face[2]-1],verts[face[0]-1])
edge2 = vsub(verts[face[1]-1],verts[face[0]-1])
normal = cross(edge1,edge2)
length = vlen(normal)
normal[0] /= length
normal[1] /= length
normal[2] /= length
out_normals.append(normal)
out_normals.append(normal)
out_normals.append(normal)
return out_verts,out_uvs,out_normals
def generate_2d_ctypes(data):
""" Covert 2D Python list of lists to 2D ctype array
Input:
data - 2D array like vertices[36][3]
Format: array[rows][cols] where rows are individual elements
and cols are components of each element.
"""
c = len(data[0])
r = len(data)
# multidimensional ctype arrays require parens
# or the array type below would become float[r*c]
# instead of float[r][c]. Alternative notation:
# array_type = GLfloat*c*r
array_type = r * (c*GLfloat)
ret = array_type()
for i in range(0,len(data)):
for j in range(0,c):
ret[i][j] = data[i][j]
return ret
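
# Illustrative use (hypothetical data): pack two vertices into a GLfloat[2][3]
# array suitable for passing to OpenGL entry points expecting a C array.
# data = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]
# c_verts = generate_2d_ctypes(data)  # type is (GLfloat * 3) * 2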
def main():
print (10 * (3 * GLfloat))
print (GLfloat * 3 * 10)
# Note: Compare return value to vertex index - 1
# e.g. f 1 2 3 => [0,1,2]
# f = parse_face_line(['f','1','2','3'])
# assert(f[0] == 0 and f[1] == 1 and f[2] == 2)
# f = parse_face_line(['f','1//12','2//8','3//5'])
# assert(f[0] == 0 and f[1] == 1 and f[2] == 2)
# f = parse_face_line(['f','1/4/12','2/6/8','3/7/5'])
# assert(f[0] == 0 and f[1] == 1 and f[2] == 2)
# f = parse_face_line(['1//12','2//8','3//5'])
# assert(f==None)
# v = parse_vertex_line(['v','0.1','0.2','0.3'])
# assert(v[0] == 0.1 and v[1] == 0.2 and v[2] == 0.3)
# v = parse_vertex_line(['v','0.1','0.2','0.3','1','2','3'])
# assert(v[0] == 0.1 and v[1] == 0.2 and v[2] == 0.3)
# v = parse_vertex_line(['v','0.1','0.2'])
# assert(v == None)
# c = obj_line_parsers["#"](["asdasd"])
# #obj_parse_assignment["#"](c)
# print(c)
    v, f, uv, n, c = load(".\\content\\suzanne.obj")
v,uv,n = process_obj(v,f,uv,n,c)
print(v[0])
print(uv[0])
print(n[0])
if __name__ == '__main__':
main()
|
sodas = ["Coke", "Pepsi", "Mountain Dew"]
candy = ["Snickers", "Twix", "Recess"]
while True:
choice = input("Would you like a SODA or some CANDY? ").lower()
try:
if choice == "soda":
snack = sodas.pop()
elif choice == "candy":
snack = candy.pop()
else:
print("Sorry I didn't understand that.")
continue
except IndexError:
print("We are all out of {}".format(choice))
else:
print("Heres your {}".format(snack))
|
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from _utils import *
import seaborn as sns
#############################################################################
################################# All patients ##############################
#############################################################################
cs = sns.color_palette()
doseFileLise = os.listdir("../PPO_policy/converge")
for file in doseFileLise.copy():
if "survival" in file or "patient036" in file or "patient078" in file:
doseFileLise.remove(file)
patientLables = []
patientCPA = []
patientLEU = []
patientSurvivalTime = []
for file in doseFileLise:
doseSeq = pd.read_csv("../PPO_policy/converge/" + file, names = ["Month", "CPA", "LEU"], header=0)
patient = file[:10]
patientLables.append(patient)
patientSurvivalTime.append(np.array(doseSeq).shape[0] * 28)
doseSeq["CPA"] = doseSeq["CPA"]/200
doseSeq["LEU"] = doseSeq["LEU"]/7.5
patientCPA.append(np.array(doseSeq["CPA"]))
patientLEU.append(np.array(doseSeq["LEU"]))
dfpatientCPA = pd.DataFrame(patientCPA, index = patientLables)
dfpatientCPA = dfpatientCPA.sort_index()
dfpatientLEU = pd.DataFrame(patientLEU, index = patientLables)
dfpatientLEU = dfpatientLEU.sort_index()
dfpatientTime = pd.DataFrame(patientSurvivalTime, index= patientLables, columns=["rl"])
dfpatientTime = dfpatientTime.sort_index()
plt.style.use("seaborn")
plt.style.use(["science", 'nature'])
fig, ax = plt.subplots(figsize = (20, 10))
for month in range(int(max(patientSurvivalTime)/28)):
# subPatientSurvivalTime = np.ones(len(doseFileLise)) * 28
for patient in dfpatientCPA.index: # range(len(doseFileLise)):
cpaData = dfpatientCPA.loc[patient, month] if ~np.isnan(dfpatientCPA.loc[patient, month]) else 0
leuData = dfpatientLEU.loc[patient, month] if ~np.isnan(dfpatientLEU.loc[patient, month]) else 0
if cpaData != 0:
if leuData != 0:
ax.barh(patient, 28, left = month * 28 , color = onColor, label = "Cpa&Leu-On", alpha = cpaData, hatch = "///", height = 0.8, tick_label = None)
else:
ax.barh(patient, 28, left=month * 28, color=onCpa, label="Cpa-On",
alpha=cpaData, height=0.8, tick_label=None)
#ax.barh(patientLables[patient], 28, left = month * 28, hatch = "/", label = "LEU-ON",alpha = 0, height = 0.5, tick_label = None)
        if cpaData == 0 and leuData != 0:
            # Use the (normalised) Leu dose as alpha, matching the CPA bars;
            # alpha=0 would make this bar invisible.
            ax.barh(patient, 28, left=month * 28, color=onLeu, label="Leu-On", hatch="///",
                    alpha=leuData, height=0.8, tick_label=None)
if ~np.isnan(dfpatientCPA.loc[patient, month]) and cpaData==0 and leuData == 0:
ax.barh(patient, 28, left = month * 28, color=offColor, label='Treat-Off', height=0.8, tick_label = None)
locs, labels = plt.yticks()
plt.yticks(locs, labels, rotation = 20)
plt.ylabel("1 $\longleftarrow$ Patient No. $\longrightarrow$ 108", fontsize = 24)
plt.xticks(fontsize = 22)
plt.xlabel("Time (Day)", fontsize = 24)
plt.xlim(-10, 3900)
plt.legend([ a1, a2, a3, a4], ['C$\&$L-On',"Cpa-On ","Leu-On" ,'Treat-Off'],
handler_map={a1: AnyObjectHandler(), a2:AnyObjectHandler1(), a3:AnyObjectHandler2(), a4: AnyObjectHandler3()}
, fontsize =18)
if not os.path.exists("../PPO_Analysis/"):
os.mkdir("../PPO_Analysis/")
plt.savefig("../PPO_Analysis/RL_Dose_Strategy.png", dpi = 500)
plt.show()
plt.close()
# plt.legend([AnyObject()], ['CPA-LEU treatment On'],
# handler_map={AnyObject: AnyObjectHandler1()})
# plt.legend([AnyObject()], ['LEU treatment On'],
# handler_map={AnyObject: AnyObjectHandler2()})
# plt.legend([AnyObject()], ['Treatment Off'],
# handler_map={AnyObject: AnyObjectHandler3()})
extrapolatedlist = os.listdir("../Experts_policy/extrapolated")
print(len(extrapolatedlist))
doseFileLise.sort()
fig, ax = plt.subplots(figsize = (20, 10))
CliniDosageDict = {}
for file in doseFileLise:
patient = file[:10]
patientData = pd.read_csv("../Data/dataTanaka/Bruchovsky_et_al/" + patient + ".txt", header=None)
ONOFF = np.array(patientData.loc[:, 7])
cpa = np.array(patientData.loc[~np.isnan(patientData.loc[:,2]), 2]).sum().item()
leu = np.array(patientData.loc[~np.isnan(patientData.loc[:,3]), 3]).sum().item()
drugOnDays = 0
drugOffDays = 0
dosage = [cpa, leu ]
Days = np.array(patientData.loc[:, 9]) - np.array(patientData.loc[0, 9])
for ii in range(len(ONOFF) - 1):
if ONOFF[ii] == 1:
drugOnDays+=Days[ii+1]-Days[ii]
ax.barh(patient, Days[ii+1]-Days[ii], left = Days[ii], color = onColor, height = 0.8, tick_label = None, alpha = 0.4)
else:
drugOffDays+=Days[ii + 1] - Days[ii]
ax.barh(patient, Days[ii + 1] - Days[ii], left=Days[ii], color=offColor, height=0.8, tick_label=None, alpha = 0.4)
CliniDosageDict[patient] = dosage + [drugOnDays, drugOffDays]
for file in extrapolatedlist:
patient = file[:10]
if patient in patientLables:
extraDose = pd.read_csv("../Experts_policy/extrapolated/" + file)
patientData = pd.read_csv("../Data/dataTanaka/Bruchovsky_et_al/" + patient + ".txt", header=None)
Days = np.array(patientData.loc[:, 9]) - np.array(patientData.loc[0, 9])
left = Days[-1]
extraDose = np.array(extraDose)[:,-1]
for ii in range(extraDose.shape[0]):
if left > 28*120:
length = 28*121-left
else:
length = 28
if extraDose[ii]:
ax.barh(patient, length, left=left, color=onColor, height=0.8, tick_label=None)
else:
ax.barh(patient, length, left=left, color=offColor, height=0.8, tick_label=None)
left += 28
if left > 28*121:
break
locs, labels = plt.yticks()
plt.yticks(locs, labels, rotation = 20)
plt.ylabel("1 $\longleftarrow$ Patient No. $\longrightarrow$ 108", fontsize = 24)
plt.xticks(fontsize = 22)
plt.xlabel("Time (Day)", fontsize = 24)
plt.xlim(-10, 3900)
plt.legend([ a1, a2, a3, a4], ['Treat-On', 'Treat-Off', "Extra-On", "Extra-Off" ],
handler_map={a1: AnyObjectHandler12(), a2:AnyObjectHandler32(), a3: AnyObjectHandler1(), a4: AnyObjectHandler3()}
, fontsize = 18, loc= 'right')
if not os.path.exists("../Experts_Analysis/"):
os.mkdir("../Experts_Analysis/")
plt.savefig("../Experts_Analysis/Clinician_Dose_Strategy.png", dpi = 500)
plt.show()
plt.close()
CliniDosageDf = pd.DataFrame.from_dict(CliniDosageDict, orient = "index")
RlTotalCPA = (dfpatientCPA * 200).sum(axis = 1)
RLTotalLEU = (dfpatientLEU * 7.5 ).sum(axis = 1)
RlDaysCPA_Free = pd.DataFrame([sum(dfpatientCPA.loc[index] == 0) * 28 for index in dfpatientCPA.index], index = dfpatientCPA.index)
RlDaysLEU_Free = pd.DataFrame([sum(dfpatientLEU.loc[index] == 0) * 28 for index in dfpatientLEU.index], index = dfpatientLEU.index)
RlDays_Free = pd.concat((RlDaysCPA_Free, RlDaysLEU_Free), axis =1)
RLDosageDf = pd.DataFrame()
# Drug administration days percentage
RLDoseDayPercentage = RlDays_Free.min(axis = 1)/dfpatientTime['rl']
CliniDoseDayPercentage = CliniDosageDf.loc[:,2]/CliniDosageDf.loc[:,[2,3]].sum(axis = 1)
RLDoseCPAAva = RlTotalCPA / dfpatientTime['rl']
RLDoseLEUAva = RLTotalLEU / dfpatientTime['rl']
CliniDoseCPAAva = CliniDosageDf.loc[:,0]/(CliniDosageDf.loc[:,2]+ CliniDosageDf.loc[:,3])
CliniDoseLEUAva = CliniDosageDf.loc[:,1]/(CliniDosageDf.loc[:,3] + CliniDosageDf.loc[:,2])
ClinipatientTime = (CliniDosageDf.loc[:,3] + CliniDosageDf.loc[:,2])
CPAReducePercentage =(CliniDoseCPAAva - RLDoseCPAAva)/CliniDoseCPAAva
LEUReducepercentage = (CliniDoseLEUAva - RLDoseLEUAva)/CliniDoseLEUAva
from scipy.stats import ttest_ind
Diff = RLDoseDayPercentage - CliniDoseDayPercentage
ttest_ind(RLDoseDayPercentage, CliniDoseDayPercentage)
ttest_ind(RLDoseCPAAva, CliniDoseCPAAva)
ttest_ind(RLDoseLEUAva, CliniDoseLEUAva)
ttest_ind(dfpatientTime, ClinipatientTime)
time_diff = dfpatientTime['rl'] - ClinipatientTime
plt.figure(figsize=(15, 10))
dfpatientTime["Experts' Policy"] = ClinipatientTime
dfpatientTime.rename(columns={"rl": "RL agent's Policy"}, inplace=True)
# ax = sns.boxplot(data = dfpatientTime, palette = "Paired", orient='v', width = 0.3)
ax = sns.swarmplot(data = dfpatientTime,color = "grey", size = 10)
mean_survival_time = dfpatientTime.mean()
# axhline expects a scalar; draw one reference line per policy's mean survival time.
for m in mean_survival_time:
    plt.axhline(y=m)
# Add jitter with the swarmplot function
# ax = sns.swarmplot(markIndex[0], color = 'red', label = 'patient006')
# ax = sns.swarmplot(markIndex[1], color = 'yellow', label = 'patient011')
# plt.xlabel("Resistance Index $\gamma$", fontsize=35)
plt.xticks(fontsize = 25)
plt.yticks(fontsize = 25)
plt.ylabel("Survival time / (Day)", fontsize = 25)
# adding transparency to colors
for patch in ax.artists:
r, g, b, a = patch.get_facecolor()
patch.set_facecolor((r, g, b, .3))
plt.show()
|
import multiprocessing
import time
from unittest import mock, TestCase
from worker_process import WorkerProcess
class WorkerProcessIntegrationTestCase(TestCase):
    def test_successful_initialization(self):
try:
with mock.patch('config.MACHINE') as MockMachine:
is_initialized = multiprocessing.Value('b', False)
def mock_initialize_method():
is_initialized.value = True
MockMachine.initialize.side_effect = mock_initialize_method
worker_process = WorkerProcess.create_and_start()
worker_process.send_message_initialize()
time.sleep(0.1)
self.assertTrue(is_initialized.value)
self.assertEqual(
worker_process.get_logs(),
[{'level': 'INFO', 'message': 'Machine initialized successfully'}],
)
finally:
worker_process.kill()
def test_failed_initialization(self):
try:
with mock.patch('config.MACHINE') as MockMachine:
MockMachine.initialize.side_effect = Exception("Failed to initialize machine")
worker_process = WorkerProcess.create_and_start()
worker_process.send_message_initialize()
time.sleep(0.1)
self.assertEqual(
worker_process.get_logs(),
[{'level': 'ERROR', 'message': 'Failed to initialize machine'}],
)
finally:
worker_process.kill()
    def test_successful_gcode_sending(self):
try:
with mock.patch('config.MACHINE') as MockMachine:
received_move_coordinates = multiprocessing.Array('c', range(100))
def mock_move_by_method(x, y, z, feed_rate):
received_move_coordinates.value = b'x=%f y=%f z=%f feed_rate=%f' % (
x,
y,
z,
feed_rate,
)
type(MockMachine).rapid_move_feed_rate = mock.PropertyMock(return_value=1000)
MockMachine.move_by.side_effect = mock_move_by_method
worker_process = WorkerProcess.create_and_start()
worker_process.send_message_initialize()
worker_process.send_message_gcode("G0 X3 Y2 Z1")
time.sleep(0.1)
self.assertEqual(
received_move_coordinates.value,
b'x=3.000000 y=2.000000 z=1.000000 feed_rate=1000.000000')
logs = worker_process.get_logs()
self.assertEqual(len(logs), 2)
self.assertEqual(logs[0]['level'], 'INFO')
self.assertEqual(logs[0]['message'], 'Machine initialized successfully')
self.assertEqual(logs[1]['level'], 'INFO')
self.assertTrue(logs[1]['message'].startswith("gcode interpreted successfully, took"))
finally:
worker_process.kill()
def test_failed_gcode_sending(self):
try:
with mock.patch('config.MACHINE'):
worker_process = WorkerProcess.create_and_start()
worker_process.send_message_initialize()
worker_process.send_message_gcode("Invalid")
time.sleep(0.1)
self.assertEqual(
worker_process.get_logs(),
[
{'level': 'INFO', 'message': 'Machine initialized successfully'},
{'level': 'ERROR', 'message': "word 'I' value invalid"},
]
)
finally:
worker_process.kill()
|
import pretty_midi
from unittest import TestCase
from massage.resynth.util import *
import mock
import sys
sys.modules['fluidsynth'] = mock.Mock()
TEST_VOICING_FILE = os.path.join(
os.path.dirname(__file__), '../data/chord_voicings.json')
TEST_MIDI_FILE = os.path.join(
os.path.dirname(__file__), '../data/test_midi.mid')
TEST_PICK_SF_MOCK_Y = os.path.join(
os.path.dirname(__file__), '../data/test_pick_sf_mock_y.npz')
TEST_FPATH = os.path.join(
os.path.dirname(__file__), '../data/acoustic_guitar.wav')
class TestUtil(TestCase):
def test_compute_avg_mfcc_zero(self):
y = np.zeros(1000)
avg_mfcc = compute_avg_mfcc(y=y, sr=44100)
target_mfcc = np.zeros(39)
self.assertTrue(np.allclose(avg_mfcc, target_mfcc))
def test_compute_avg_mfcc_fpath(self):
avg_mfcc = compute_avg_mfcc(TEST_FPATH)
self.assertEqual(avg_mfcc.shape[0], 39)
def test_onset_offset(self):
fs = 44100
        noise = np.random.random(2205) - 0.5  # 50 ms of noise
silence = np.zeros(2205)
y = np.concatenate((silence, noise, silence, noise, silence))
target_on_t = np.array([0.05, 0.15])
target_off_t = np.array([0.10, 0.20])
on_t, off_t, on_s = onset_offset(y=y, sr=fs)
on_close = np.abs(target_on_t - on_t) < 0.03 # 30 ms slack
off_close = np.abs(target_off_t - off_t) < 0.03
self.assertTrue(np.logical_and(on_close, off_close).all())
def test_compute_envelope(self):
        noise = np.random.random(220500) - 0.5  # 5 s of noise
silence = np.zeros(220500)
y = np.concatenate((silence, noise, silence, noise, silence))
env = compute_envelope(y)
print(y.shape)
# import matplotlib.pyplot as plt
# plt.plot(env/ np.max(env))
# plt.plot(y)
# plt.show()
self.assertEqual(len(env), 220500*5)
def test_get_energy_envelope(self):
y = np.zeros((2, 100000))
env = get_energy_envelope(y)
self.assertEqual(np.sum(env), 0)
self.assertEqual(env.shape, y.shape)
def test_get_energy_envelope_mono(self):
y = np.zeros(100000)
env = get_energy_envelope(y)
self.assertEqual(np.sum(env), 0)
self.assertEqual(env.shape, (1,len(y)))
@mock.patch.object(pretty_midi.PrettyMIDI, 'fluidsynth', autospec=True)
def test_pick_sf(self, mock_fluidsynth):
# synthesis something different with the sf, and try to match
mock_y = np.load(TEST_PICK_SF_MOCK_Y)['arr_0']
mock_fluidsynth.return_value = mock_y
midi_data = pretty_midi.PrettyMIDI(TEST_MIDI_FILE)
test_sf_path = os.path.join(
os.path.dirname(__file__), '../data/28MBGM.sf2')
fs = 44100
y = midi_data.fluidsynth(sf2_path=test_sf_path, fs=fs)
# np.savez('test_pick_sf_mock_y', y)
sf_path, program = pick_sf(y, fs, 'acoustic guitar')
sf_base = os.path.basename(sf_path)
self.assertEqual(program, 25)
self.assertIsInstance(sf_base, str)
# the following test should work, but doesn't... right now the sf
# picked out is 'chorium.sf2' as opposed to 28MBGM
# self.assertEqual('sf_base', '28MBGM.sf2')
    def test_pick_sf2(self):
        y = np.zeros(100)
        fs = 44100
        # pick_sf should reject an unrecognised instrument name
        with self.assertRaises(ValueError):
            pick_sf(y, fs, 'not a instrument')
def test_amplitude_to_velocity(self):
energies = [-1, 0, 0.5, 1]
velocities = amplitude_to_velocity(energies)
self.assertListEqual(list(velocities), [60, 90, 105, 120])
def test_midi_to_jams(self):
midi_data = pretty_midi.PrettyMIDI(TEST_MIDI_FILE)
jam = midi_to_jams(midi_data)
jam_len = len(jam.annotations[0].data)
midi_len = len(midi_data.instruments[0].notes)
self.assertEqual(jam_len, midi_len)
def test_voicing_dist(self):
v1 = [1, 3, 5, 7, 9]
v2 = [1, 3, 5, 7, 10]
self.assertEqual(voicing_dist(v1, v2), 0.2)
def test_get_all_voicings(self):
voicing_dict = get_all_voicings(TEST_VOICING_FILE)
self.assertEqual(
voicing_dict['G:maj'][0], [43, 47, 50, 55, 59])
def test_choose_voicing(self):
voicing_dict = get_all_voicings(TEST_VOICING_FILE)
voicing = choose_voicing('A#:maj', voicing_dict, [43, 47, 50, 55, 59])
self.assertIsNotNone(voicing[0])
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.nn.functional as F
from lib.pspnet import PSPNet
from lib.swp import Swp1d
psp_models = {
'resnet18': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet18'),
'resnet34': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet34'),
'resnet50': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet50'),
'resnet101': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet101'),
'resnet152': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet152')
}
class ModifiedResnet(nn.Module):
def __init__(self, usegpu=True):
super(ModifiedResnet, self).__init__()
self.model = psp_models['resnet18'.lower()]()
self.model = nn.DataParallel(self.model)
def forward(self, x):
x = self.model(x)
return x
class SymNetFeat(nn.Module): # point
def __init__(self, num_points):
super(SymNetFeat, self).__init__()
self.conv1 = torch.nn.Conv1d(3, 64, 1) # point feature
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.e_conv1 = torch.nn.Conv1d(32, 64, 1) # image embedding
self.e_conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv5 = torch.nn.Conv1d(256, 512, 1)
self.conv6 = torch.nn.Conv1d(512, 1024, 1)
self.ap1 = torch.nn.AvgPool1d(num_points) # num_points = 1000
self.swp = Swp1d(1, num_points, 1)
self.num_points = num_points
def forward(self, x, emb):
x = F.relu(self.conv1(x)) # 64
emb = F.relu(self.e_conv1(emb)) # 64
pointfeat_1 = torch.cat((x, emb), dim=1) # 64+64 = 128
x = F.relu(self.conv2(x)) # 1*128*1000
emb = F.relu(self.e_conv2(emb)) # 1*128*1000
pointfeat_2 = torch.cat((x, emb), dim=1) # 128+128 = 256
x = F.relu(self.conv5(pointfeat_2)) # 1*512×1000
x = F.relu(self.conv6(x)) # 1*1024*1000
# ap_x = self.ap1(x) # x_dim= 1*1024*1000=>1*1024*1
swp_x = self.swp(x)
swp_x = swp_x.view(-1, 1024, 1).repeat(1, 1, self.num_points) # (1,1024,1000)
return torch.cat([pointfeat_1, pointfeat_2, swp_x], 1) # 128 + 256 + 1024 = 1408
class SymNet(nn.Module):
def __init__(self, num_points):
super(SymNet, self).__init__()
self.num_points = num_points
self.cnn = ModifiedResnet()
self.feat = SymNetFeat(num_points) # point feature
self.conv1_cent = torch.nn.Conv1d(1408, 640, 1)
self.conv1_self1 = torch.nn.Conv1d(1408, 640, 1)
self.conv1_self2 = torch.nn.Conv1d(1408, 640, 1)
self.conv1_self3 = torch.nn.Conv1d(1408, 640, 1)
self.conv1_choose = torch.nn.Conv1d(1408, 640, 1)
self.conv1_mode = torch.nn.Conv1d(1408, 640, 1)
self.conv2_cent = torch.nn.Conv1d(640, 256, 1)
self.conv2_self1 = torch.nn.Conv1d(640, 256, 1)
self.conv2_self2 = torch.nn.Conv1d(640, 256, 1)
self.conv2_self3 = torch.nn.Conv1d(640, 256, 1)
self.conv2_choose = torch.nn.Conv1d(640, 256, 1)
self.conv2_mode = torch.nn.Conv1d(640, 256, 1)
self.conv3_cent = torch.nn.Conv1d(256, 128, 1)
self.conv3_self1 = torch.nn.Conv1d(256, 128, 1)
self.conv3_self2 = torch.nn.Conv1d(256, 128, 1)
self.conv3_self3 = torch.nn.Conv1d(256, 128, 1)
self.conv3_choose = torch.nn.Conv1d(256, 128, 1)
self.conv3_mode = torch.nn.Conv1d(256, 128, 1)
self.conv4_cent = torch.nn.Conv1d(128, 3, 1)
self.conv4_self1 = torch.nn.Conv1d(128, 9, 1)
self.conv4_self2 = torch.nn.Conv1d(128, 9, 1)
self.conv4_self3 = torch.nn.Conv1d(128, 3, 1)
self.conv4_choose = torch.nn.Conv1d(128, 3, 1)
self.conv4_mode = torch.nn.Conv1d(128, 3, 1)
def forward(self, img, x, choose): # choose = [1,1,1000],img.shape= [1,3,160,160]
out_img = self.cnn(img) # out_img size = [1,32,120,160]
bs, di, _, _ = out_img.size() # bs=1 di=32
emb = out_img.view(bs, di, -1) # (1,32,120*160)
choose = choose.repeat(1, di, 1) # (1,32,1000)
emb = torch.gather(emb, 2,
choose).contiguous() # choose is the index to select emb of dim2, now emb=(1,32,1000)
x = x.transpose(2, 1).contiguous()
ap_x = self.feat(x, emb)
cent_x = F.relu(self.conv1_cent(ap_x))
self1_x = F.relu(self.conv1_self1(ap_x))
self2_x = F.relu(self.conv1_self2(ap_x))
self3_x = F.relu(self.conv1_self3(ap_x))
choose_x = F.relu(self.conv1_choose(ap_x))
mode_x = F.relu(self.conv1_mode(ap_x))
cent_x = F.relu(self.conv2_cent(cent_x))
self1_x = F.relu(self.conv2_self1(self1_x))
self2_x = F.relu(self.conv2_self2(self2_x))
self3_x = F.relu(self.conv2_self3(self3_x))
choose_x = F.relu(self.conv2_choose(choose_x))
mode_x = F.relu(self.conv2_mode(mode_x))
cent_x = F.relu(self.conv3_cent(cent_x))
self1_x = F.relu(self.conv3_self1(self1_x))
self2_x = F.relu(self.conv3_self2(self2_x))
self3_x = F.relu(self.conv3_self3(self3_x))
choose_x = F.relu(self.conv3_choose(choose_x))
mode_x = F.relu(self.conv3_mode(mode_x))
cent_x = self.conv4_cent(cent_x).view(bs, 3, self.num_points)
self1_x = self.conv4_self1(self1_x).view(bs, 9, self.num_points)
self2_x = self.conv4_self2(self2_x).view(bs, 9, self.num_points)
self3_x = self.conv4_self3(self3_x).view(bs, 3, self.num_points)
choose_x = torch.sigmoid(self.conv4_choose(choose_x)).view(bs, 3, self.num_points)
mode_x = torch.sigmoid(self.conv4_mode(mode_x)).view(bs, 3, self.num_points)
out_cent = cent_x.contiguous().transpose(2, 1).contiguous()
out_self1 = self1_x.contiguous().transpose(2, 1).contiguous() # 3 possible reflection point
out_self2 = self2_x.contiguous().transpose(2, 1).contiguous() # 3 possible foot point
out_self3 = self3_x.contiguous().transpose(2, 1).contiguous() # foot point, axis, circle point
out_choose = choose_x.contiguous().transpose(2, 1).contiguous()
out_mode = mode_x.contiguous().transpose(2, 1).contiguous()
out_ref = out_self1
out_foot_ref = out_self2
out_rot = out_self3
return out_cent, \
out_ref,\
out_foot_ref,\
out_rot,\
out_choose,\
out_mode,\
emb.detach()
|
"""nornir_sql.plugins.inventory"""
from .sql import SQLInventory
__all__ = ("SQLInventory",)
|
import keras
import json
# Import the module (not just the name) so that updates to LAMDA are visible
# inside nets.loss_functions; rebinding a from-imported name would only change
# this module's copy of it.
import nets.loss_functions as loss_functions


class LambdaUpdateCallBack(keras.callbacks.Callback):
    def on_batch_end(self, batch, logs={}):
        # Anneal the loss-mixing weight a little after every batch, capped at 1.
        if loss_functions.LAMDA < 1:
            loss_functions.LAMDA += 5e-5
        return
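
# Usage sketch (assumed training setup): pass instances to Keras fit(), e.g.
#   model.fit(x, y, validation_data=(xv, yv),
#             callbacks=[LambdaUpdateCallBack(), CustomModelCheckPoint()])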
class CustomModelCheckPoint(keras.callbacks.Callback):
    def __init__(self, **kargs):
        super(CustomModelCheckPoint, self).__init__(**kargs)
        self.last_loss = 1000000000
        self.last_accuracy = 0
        self.current_model_number = 0
        self.epoch_number = 0
# def on_train_begin(self,epoch, logs={}):
# return
# def on_train_end(self, logs={}):
# return
def on_epoch_begin(self,epoch, logs={}):
return
def on_epoch_end(self, epoch, logs={}):
self.epoch_number+=1
current_val_loss = logs.get("val_loss")
current_loss = logs.get("loss")
if (self.last_loss-current_val_loss) > 0.01:
current_weights_name = "weights"+str(self.current_model_number)+".h5"
print(" loss improved from "+str(self.last_loss)+" to "+str(current_val_loss)+", Saving model to "+current_weights_name)
self.model.save_weights("models/"+current_weights_name);
self.model.save_weights("models/last_weight.h5")
self.current_model_number+=1
self.last_loss = current_val_loss
with open("logs/logs.txt","a+") as logfile:
logfile.write("________________________________________________________\n")
logfile.write("EPOCH =")
logfile.write(str(epoch)+"\n")
logfile.write("TRAIN_LOSS =")
logfile.write(str(current_loss)+"\n")
logfile.write("VAL_LOSS =")
logfile.write(str(current_val_loss)+"\n")
logfile.write("---------------------------------------------------------\n")
logfile.write("TRAIN_Age_LOSS =")
logfile.write(str(logs.get("age_estimation_loss"))+"\n")
logfile.write("TRAIN_GENDER_LOSS =")
logfile.write(str(logs.get("gender_probablity_loss"))+"\n")
logfile.write("---------------------------------------------------------\n")
logfile.write("TRAIN_Age_ACC =")
logfile.write(str(logs.get("age_estimation_acc"))+"\n")
logfile.write("TRAIN_GENDER_ACC =")
logfile.write(str(logs.get("gender_probablity_acc"))+"\n")
logfile.write("---------------------------------------------------------\n")
logfile.write("VAL_Age_LOSS =")
logfile.write(str(logs.get("val_age_estimation_loss"))+"\n")
logfile.write("VAL_GENDER_LOSS =")
logfile.write(str(logs.get("val_gender_probablity_loss"))+"\n")
logfile.write("---------------------------------------------------------\n")
logfile.write("VAL_Age_ACC =")
logfile.write(str(logs.get("val_age_estimation_acc"))+"\n")
logfile.write("VAL_GENDER_ACC =")
logfile.write(str(logs.get("val_gender_probablity_acc"))+"\n")
logfile.write("********************************************************\n")
with open("epoch_number.json","w+") as json_file:
data = {"epoch_number":self.epoch_number}
json.dump(data,json_file,indent=4)
|
"""
931. Median of K Sorted Arrays
https://www.lintcode.com/problem/median-of-k-sorted-arrays/description
"""
import sys
class Solution:
"""
@param nums: the given k sorted arrays
@return: the median of the given k sorted arrays
"""
    def findMedian(self, nums):
        total_numbers = self.count_how_many_numbers_total(nums)
        if total_numbers == 0:
            return 0
        if total_numbers % 2 == 1:
            return self.binary_search(nums, total_numbers // 2 + 1) * 1.0
        # Even count: the median is the average of the two middle values.
        return (self.binary_search(nums, total_numbers // 2) +
                self.binary_search(nums, total_numbers // 2 + 1)) / 2.0
def count_how_many_numbers_total(self, nums):
length_list = [len(x) for x in nums]
return sum(length_list)
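    # Strategy: binary search over the value range [min, max] spanned by all
    # arrays. For a candidate value mid, count how many elements across every
    # array are <= mid; the k-th smallest is the smallest value whose count >= k.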
    def binary_search(self, nums, k):
        smallest_elements = [x[0] if len(x) > 0 else sys.maxsize for x in nums]
        biggest_elements = [x[-1] if len(x) > 0 else -sys.maxsize for x in nums]
        start, end = min(smallest_elements), max(biggest_elements)
        while start + 1 < end:
            mid = (start + end) // 2
            if self.count_elements_before_in_all_arrays(nums, mid) < k:
                start = mid
            else:
                end = mid
        if self.count_elements_before_in_all_arrays(nums, start) >= k:
            return start
        if self.count_elements_before_in_all_arrays(nums, end) >= k:
            return end
        return -1
    def count_elements_before_in_all_arrays(self, array_of_lists, target):
        count = 0
        for arr in array_of_lists:
            count += self.count_elements_before_in_an_array(arr, target)
        return count
"""
find how many numbers >= target
"""
def count_elements_before_in_an_array(self, nums, target):
if len(nums) == 0:
return 0
start, end = 0, len(nums) - 1
while start + 1 < end:
mid = (start + end) // 2
if nums[mid] < target:
start = mid
else:
end = mid
if nums[start] > target:
return start
if nums[end] > target:
return end
return end + 1
s = Solution()
# nums = [[1],[],[2],[3],[3]]
nums = [[1,3],[2147483646,2147483647]]
print (s.findMedian(nums))
|
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=no-member
"""
This module defines the classes used to handle result
processing inside Workload Automation. There will be a
:class:`wlauto.core.workload.WorkloadResult` object generated for
every workload iteration executed. This object will have a list of
:class:`wlauto.core.workload.WorkloadMetric` objects. This list will be
populated by the workload itself and may also be updated by instrumentation
(e.g. to add power measurements). Once the result object has been fully
populated, it will be passed into the ``process_iteration_result`` method of
:class:`ResultProcessor`. Once the entire run has completed, a list containing
result objects from all iterations will be passed into the ``process_results``
method of :class:`ResultProcessor`.
Which result processors will be active is defined by the ``result_processors``
list in the ``~/.workload_automation/config.py``. Only the result processors
whose names appear in this list will be used.
A :class:`ResultsManager` keeps track of active results processors.
"""
import logging
import traceback
from copy import copy
from contextlib import contextmanager
from datetime import datetime
from wlauto.core.extension import Extension
from wlauto.exceptions import WAError
from wlauto.utils.types import numeric
from wlauto.utils.misc import enum_metaclass, merge_dicts
class ResultManager(object):
"""
    Keeps track of result processors and passes the results on to the individual processors.
"""
def __init__(self):
self.logger = logging.getLogger('ResultsManager')
self.processors = []
self._bad = []
def install(self, processor):
self.logger.debug('Installing results processor %s', processor.name)
self.processors.append(processor)
def uninstall(self, processor):
if processor in self.processors:
self.logger.debug('Uninstalling results processor %s', processor.name)
self.processors.remove(processor)
else:
self.logger.warning('Attempting to uninstall results processor %s, which is not installed.',
processor.name)
def initialize(self, context):
        # Errors aren't handled at this stage, because this gets executed
        # before workload execution starts and we just want to propagate them
        # and terminate (so that the error can be corrected and WA restarted).
for processor in self.processors:
processor.initialize(context)
def add_result(self, result, context):
with self._manage_processors(context):
for processor in self.processors:
with self._handle_errors(processor):
processor.process_iteration_result(result, context)
for processor in self.processors:
with self._handle_errors(processor):
processor.export_iteration_result(result, context)
def process_run_result(self, result, context):
with self._manage_processors(context):
for processor in self.processors:
with self._handle_errors(processor):
processor.process_run_result(result, context)
for processor in self.processors:
with self._handle_errors(processor):
processor.export_run_result(result, context)
def finalize(self, context):
with self._manage_processors(context):
for processor in self.processors:
with self._handle_errors(processor):
processor.finalize(context)
def validate(self):
for processor in self.processors:
processor.validate()
@contextmanager
def _manage_processors(self, context, finalize_bad=True):
yield
for processor in self._bad:
if finalize_bad:
processor.finalize(context)
self.uninstall(processor)
self._bad = []
@contextmanager
def _handle_errors(self, processor):
try:
yield
        except KeyboardInterrupt:
            raise
except WAError, we:
self.logger.error('"{}" result processor has encountered an error'.format(processor.name))
self.logger.error('{}("{}")'.format(we.__class__.__name__, we.message))
self._bad.append(processor)
except Exception, e: # pylint: disable=W0703
self.logger.error('"{}" result processor has encountered an error'.format(processor.name))
self.logger.error('{}("{}")'.format(e.__class__.__name__, e))
self.logger.error(traceback.format_exc())
self._bad.append(processor)
class ResultProcessor(Extension):
"""
Base class for result processors. Defines an interface that should be implemented
by the subclasses. A result processor can be used to do any kind of post-processing
of the results, from writing them out to a file, to uploading them to a database,
performing calculations, generating plots, etc.
"""
def initialize(self, context):
pass
def process_iteration_result(self, result, context):
pass
def export_iteration_result(self, result, context):
pass
def process_run_result(self, result, context):
pass
def export_run_result(self, result, context):
pass
def finalize(self, context):
pass
class RunResult(object, metaclass=enum_metaclass('values', return_name=True)):
    """
    Contains overall results for a run.
    """
values = [
'OK',
'OKISH',
'PARTIAL',
'FAILED',
'UNKNOWN',
]
@property
def status(self):
if not self.iteration_results or all([s.status == IterationResult.FAILED for s in self.iteration_results]):
return self.FAILED
elif any([s.status == IterationResult.FAILED for s in self.iteration_results]):
return self.PARTIAL
elif any([s.status == IterationResult.ABORTED for s in self.iteration_results]):
return self.PARTIAL
elif (any([s.status == IterationResult.PARTIAL for s in self.iteration_results]) or
self.non_iteration_errors):
return self.OKISH
elif all([s.status == IterationResult.OK for s in self.iteration_results]):
return self.OK
else:
return self.UNKNOWN # should never happen
def __init__(self, run_info, output_directory=None):
self.info = run_info
self.iteration_results = []
self.artifacts = []
self.events = []
self.non_iteration_errors = False
self.output_directory = output_directory
class RunEvent(object):
"""
    An event that occurred during a run.
"""
def __init__(self, message):
self.timestamp = datetime.utcnow()
self.message = message
def to_dict(self):
return copy(self.__dict__)
def __str__(self):
return '{} {}'.format(self.timestamp, self.message)
__repr__ = __str__
class IterationResult(object, metaclass=enum_metaclass('values', return_name=True)):
    """
    Contains the result of running a single iteration of a workload. It is the
    responsibility of a workload to instantiate an IterationResult, populate it,
    and return it from its get_result() method.
    Status explanations:
    :NOT_STARTED: This iteration has not yet started.
    :RUNNING: This iteration is currently running and no errors have been detected.
    :OK: This iteration has completed and no errors have been detected.
    :PARTIAL: One or more instruments have failed (the iteration may still be running).
    :FAILED: The workload itself has failed.
    :ABORTED: The user interrupted the workload.
    :SKIPPED: The iteration was skipped due to a previous failure.
    """
values = [
'NOT_STARTED',
'RUNNING',
'OK',
'NONCRITICAL',
'PARTIAL',
'FAILED',
'ABORTED',
'SKIPPED',
]
def __init__(self, spec):
self.spec = spec
self.id = spec.id
self.workload = spec.workload
self.iteration = None
self.status = self.NOT_STARTED
self.output_directory = None
self.events = []
self.metrics = []
self.artifacts = []
self.classifiers = {}
def add_metric(self, name, value, units=None, lower_is_better=False, classifiers=None):
classifiers = merge_dicts(self.classifiers, classifiers or {},
list_duplicates='last', should_normalize=False)
self.metrics.append(Metric(name, value, units, lower_is_better, classifiers))
def has_metric(self, name):
for metric in self.metrics:
if metric.name == name:
return True
return False
def add_event(self, message):
self.events.append(RunEvent(message))
def to_dict(self):
d = copy(self.__dict__)
d['events'] = [e.to_dict() for e in self.events]
return d
def __iter__(self):
return iter(self.metrics)
def __getitem__(self, name):
for metric in self.metrics:
if metric.name == name:
return metric
raise KeyError('Metric {} not found.'.format(name))
class Metric(object):
"""
This is a single metric collected from executing a workload.
:param name: the name of the metric. Uniquely identifies the metric
within the results.
:param value: The numerical value of the metric for this execution of
a workload. This can be either an int or a float.
:param units: Units for the collected value. Can be None if the value
has no units (e.g. it's a count or a standardised score).
    :param lower_is_better: Boolean flag indicating whether lower values are
                            better than higher ones. Defaults to False.
:param classifiers: A set of key-value pairs to further classify this metric
beyond current iteration (e.g. this can be used to identify
sub-tests).
"""
def __init__(self, name, value, units=None, lower_is_better=False, classifiers=None):
self.name = name
self.value = numeric(value)
self.units = units
self.lower_is_better = lower_is_better
self.classifiers = classifiers or {}
def to_dict(self):
return self.__dict__
def __str__(self):
result = '{}: {}'.format(self.name, self.value)
if self.units:
result += ' ' + self.units
result += ' ({})'.format('-' if self.lower_is_better else '+')
return '<{}>'.format(result)
__repr__ = __str__
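# Illustration (not part of the original module): the lifecycle a ResultManager
# drives its processors through, assuming `context`, `iteration_result` and
# `run_result` are supplied by the surrounding framework. Sketch only; in WA,
# processor construction normally goes through the extension loader.
#
#     class PrintingProcessor(ResultProcessor):
#         name = 'printer'
#         def process_iteration_result(self, result, context):
#             for metric in result.metrics:
#                 print(metric)
#
#     manager = ResultManager()
#     manager.install(PrintingProcessor())
#     manager.validate()
#     manager.initialize(context)
#     manager.add_result(iteration_result, context)    # once per iteration
#     manager.process_run_result(run_result, context)  # once per run
#     manager.finalize(context)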
|
import request
import log
# Set the log output level
log.basicConfig(level=log.INFO)
http_log = log.getLogger("HTTP GET")
url = "http://httpbin.org/get"
response = request.get(url)  # send an HTTP GET request
http_log.info(response.json())  # read the response body as JSON
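# Companion sketch: an HTTP POST against the same service. Assumption: the
# `request` module also exposes `request.post` with the same shape as
# `request.get` above; check the QuecPython firmware docs before relying on it.
post_url = "http://httpbin.org/post"
post_response = request.post(post_url, data='{"key": "value"}')  # send an HTTP POST request
http_log.info(post_response.json())  # read the response body as JSON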
|
from datetime import date, datetime, timedelta
from typing import Optional
import jwt
import markdown
from flask import abort, current_app, flash
from flask_login import UserMixin, current_user
from werkzeug.security import check_password_hash, generate_password_hash
from app import db, login
# token support
def encode_auth_token(id: str) -> bytes:
payload = {
"exp": datetime.utcnow() + timedelta(days=1000000),
"iat": datetime.utcnow(),
"sub": id
}
return jwt.encode(
payload,
current_app.config.get("SECRET_KEY"),
algorithm="HS256"
)
# return None if invalid
def decode_auth_token(auth_token: bytes) -> Optional[str]:
try:
payload = jwt.decode(auth_token, current_app.config.get(
"SECRET_KEY"), algorithms=["HS256"])
return payload["sub"]
except jwt.ExpiredSignatureError:
return None
except jwt.InvalidTokenError:
return None
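# Round-trip sketch (illustration only): encode a user id into a token and
# decode it back. Needs an active application context so current_app can
# resolve the SECRET_KEY; `app` here stands in for the configured Flask instance.
#
#     with app.app_context():
#         token = encode_auth_token("42")
#         assert decode_auth_token(token) == "42"
#         assert decode_auth_token(b"not-a-token") is None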
# relationships:
# user many-to-many article
# article many-to-many user
#
# article one-to-many citation
# citation many-to-one article
# citation many-to-one source
# source one-to-many citation
# -> basically a many-to-many between article and source
# -> using citation as the edge e.g. storing page number of citation
user_article_association = db.Table(
"user_article_association", db.Model.metadata,
db.Column("user_id", db.Integer,
db.ForeignKey("user.id")),
db.Column("article_id", db.Integer,
db.ForeignKey("article.id"))
)
class User(UserMixin, db.Model):
__tablename__ = "user"
id = db.Column(db.Integer, primary_key=True)
# login
username = db.Column(db.String(64), index=True,
unique=True, nullable=False)
email = db.Column(db.String(120), index=True, unique=True, nullable=False)
password_hash = db.Column(db.String(128), nullable=False)
# data
full_name = db.Column(db.String(120), nullable=False)
last_seen = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
    # role flags
    is_admin = db.Column(db.Boolean, default=False, nullable=False)
    is_author = db.Column(db.Boolean, default=False, nullable=False)
# many articles
articles = db.relationship(
"Article",
secondary=user_article_association,
back_populates="authors"
)
def change_admin_status(self, status: bool) -> None:
self.is_admin = status
def change_author_status(self, status: bool) -> None:
# remove all author rights
if not status:
for article in self.articles:
article.rm_author(self)
self.is_author = status
# passwords
def set_password(self, password: str) -> None:
self.password_hash = generate_password_hash(password)
def check_password(self, password: str) -> bool:
return check_password_hash(self.password_hash, password)
def __repr__(self):
return f"<User {self.username}>"
class Article(db.Model):
__tablename__ = "article"
id = db.Column(db.Integer, primary_key=True)
internal_name = db.Column(
db.String(64), index=True, unique=True, nullable=False)
title = db.Column(db.String(64), nullable=False)
subtitle = db.Column(db.String(64))
last_modified = db.Column(
db.DateTime, default=datetime.utcnow, nullable=False)
created_on = db.Column(
db.DateTime, default=datetime.utcnow, nullable=False)
unlisted = db.Column(db.Boolean, default=True, nullable=False)
# data
source = db.Column(db.String(1000000))
html = db.Column(db.String(1000000))
# many users
authors = db.relationship(
"User",
secondary=user_article_association,
back_populates="articles"
)
# many resources
# resources get deleted when not referenced anymore
resources = db.relationship(
"Resource",
back_populates="article",
cascade="all, delete-orphan"
)
# many citations
# citation get deleted when not referenced anymore
citations = db.relationship(
"Citation",
back_populates="article",
cascade="all, delete-orphan"
)
def gen_auth_token(self) -> str:
return encode_auth_token(self.internal_name)
def check_auth_token(self, token: str) -> bool:
# already checks if token has valid format
decoded = decode_auth_token(token)
return decoded is not None and decoded == self.internal_name
def modify(self) -> None:
self.last_modified = datetime.utcnow()
def compile(self) -> None:
self.html = markdown.markdown(self.source, extensions=["extra"])
def get_authors(self) -> str:
if len(self.authors) == 0:
return "Unknown"
authors = ""
for idx, author in enumerate(self.authors):
authors += author.full_name
if idx == len(self.authors) - 2:
authors += " and "
elif idx == len(self.authors) - 1:
pass
else:
authors += ", "
return authors
def allow_access(self) -> bool:
# admins and authors of this article are allowed
return current_user.is_authenticated and (current_user.is_admin or self.is_author(current_user))
# author control
def add_authors(self, *authors: "User") -> None:
for author in authors:
if not self.is_author(author):
self.authors.append(author)
def rm_author(self, author: "User") -> None:
if self.is_author(author):
self.authors.remove(author)
def is_author(self, author: "User") -> bool:
return author in self.authors
# resource control
def add_resources(self, *resources: "Resource") -> None:
for resource in resources:
if not self.is_resource(resource):
self.resources.append(resource)
    def rm_resource(self, resource: "Resource") -> None:
        if self.is_resource(resource):
            self.resources.remove(resource)
def is_resource(self, resource: "Resource") -> bool:
return resource in self.resources
# citation control
def add_citations(self, *citations: "Citation") -> None:
for citation in citations:
if not self.is_cited(citation):
self.citations.append(citation)
def rm_citation(self, citation: "Citation") -> None:
if self.is_cited(citation):
self.citations.remove(citation)
def is_cited(self, citation: "Citation") -> bool:
return citation in self.citations
def __repr__(self):
return f"<Article {self.internal_name}>"
# represent dynamic resource
class Resource(db.Model):
__tablename__ = "resource"
id = db.Column(db.Integer, primary_key=True)
filename = db.Column(db.String(64), nullable=False)
mimetype = db.Column(db.String(64), nullable=False)
data = db.Column(db.LargeBinary(20000000), nullable=False)
# one article this resource belongs to
article_id = db.Column(db.Integer, db.ForeignKey("article.id"))
article = db.relationship("Article", back_populates="resources")
def __repr__(self) -> str:
return f"<Resource '{self.filename}' of {str(self.article)}>"
# represent edge between a single article and source
# includes extra information
class Citation(db.Model):
__tablename__ = "citation"
id = db.Column(db.Integer, primary_key=True)
time_stamp_hour = db.Column(db.Integer)
time_stamp_minute = db.Column(db.Integer)
time_stamp_second = db.Column(db.Integer)
page = db.Column(db.Integer)
line = db.Column(db.Integer)
# one article this citation belongs to
article_id = db.Column(db.Integer, db.ForeignKey("article.id"))
article = db.relationship("Article", back_populates="citations")
# one source this citation uses
# citations must have one source, but if the source gets deleted, the citation gets flagged as corrupted instead of being deleted
source_id = db.Column(db.Integer, db.ForeignKey("source.id"))
source = db.relationship("Source", back_populates="citations")
def set_source(self, source: "Source") -> None:
source.add_citation(self)
    def check_integrity(self) -> bool:
        # corrupted if the source has been deleted
        return self.source is not None
def get_time_stamp(self) -> str:
hour = self.time_stamp_hour if self.time_stamp_hour is not None else 0
minute = self.time_stamp_minute if self.time_stamp_minute is not None else 0
second = self.time_stamp_second if self.time_stamp_second is not None else 0
return f"{hour:02}:{minute:02}:{second:02}"
def __repr__(self):
name = ""
if self.time_stamp_hour is not None or self.time_stamp_minute is not None or self.time_stamp_second is not None:
name += self.get_time_stamp()
else:
if self.page is not None:
name += f"p. {self.page}"
if self.line is not None:
name += f" l. {self.line}"
return f"<Citation {name} from {str(self.article)} with {str(self.source)}>"
# source for citation, like with biblatex
class Source(db.Model):
id = db.Column(db.Integer, primary_key=True)
# like bibtex reference
ref = db.Column(db.String(32), unique=True)
# or "quote" if name unknown
name = db.Column(db.String(128))
author = db.Column(db.String(128))
publisher = db.Column(db.String(128))
year = db.Column(db.Integer)
url = db.Column(db.String(128))
accessed_on = db.Column(db.DateTime, default=datetime.utcnow)
# sources are allowed to not be used by a single citation
citations = db.relationship("Citation", back_populates="source")
def add_citation(self, citation: "Citation") -> None:
if not self.is_cited(citation):
self.citations.append(citation)
def rm_citation(self, citation: "Citation") -> None:
if self.is_cited(citation):
self.citations.remove(citation)
def is_cited(self, citation: "Citation") -> bool:
        # TODO: replace Python's `in` with a database query; this loads every citation
return citation in self.citations
def __repr__(self):
return f"<Source {self.ref}>"
# load user from database to log them in
@login.user_loader
def login_user(id) -> User:
return User.query.get(int(id))
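# Usage sketch (illustration only; run inside an application context with a
# configured database, and note the field values are made up):
#
#     article = Article(internal_name="hello-world", title="Hello", source="**hi**")
#     article.compile()               # renders Markdown into article.html
#     article.add_authors(some_user)  # idempotent: existing authors are skipped
#     print(article.get_authors())    # "Unknown" while no authors are set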
|
'''
###############################################################################
"MajoranaNanowire" Python3 Module
v 1.0 (2020)
Created by Samuel D. Escribano (2018)
###############################################################################
"H_class/Kane/builders" submodule
This sub-package builds 8-band k.p Hamiltonians for infinite nanowires.
###############################################################################
'''
#%%############################################################################
######################## Required Packages ############################
###############################################################################
import numpy as np
import scipy.sparse
import scipy.sparse.linalg
import scipy.linalg
import scipy.constants as cons
from MajoranaNanowires.Functions import diagonal, concatenate
#%%
def Kane_2D_builder(N,dis,mu,B=0,
params={},crystal='zincblende',
mesh=0,
sparse='yes'):
"""
2D 8-band k.p Hamiltonian builder. It obtaines the Hamiltoninan for a 3D
wire which is infinite in one direction, decribed using 8-band k.p theory.
Parameters
----------
N: int or arr
Number of sites.
dis: int or arr
Distance (in nm) between sites.
mu: float or arr
Chemical potential. If it is an array, each element is the on-site
chemical potential.
B: float
Magnetic field along the wire's direction.
params: dic or str
Kane/Luttinger parameters of the k.p Hamiltonian. 'InAs', 'InSb',
'GaAs' and 'GaSb' selects the defult parameters for these materials.
crystal: {'zincblende','wurtzite','minimal'}
Crystal symmetry along the nanowire growth. 'minimal' is a minimal
model in which the intra-valence band coupling are ignored.
mesh: mesh
If the discretization is homogeneous, mesh=0. Otherwise, mesh
provides a mesh with the position of the sites in the mesh.
sparse: {"yes","no"}
Sparsety of the built Hamiltonian. "yes" builds a dok_sparse matrix,
while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
if (params=={} or params=='InAs') and crystal=='minimal':
gamma0, gamma1, gamma2, gamma3 = 1, 0,0,0
P, m_eff = 919.7, 1.0
EF, Ecv, Evv, Ep = 0, -417, -390, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
elif (params=={} or params=='InSb') and crystal=='minimal':
gamma0, gamma1, gamma2, gamma3 = 1, 0,0,0
P, m_eff = 940.2, 1.0
EF, Ecv, Evv, Ep = 0, -235, -810, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
elif (params=={} or params=='InAs') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 20.4, 8.3, 9.1
P, m_eff = 919.7, 1.0
EF, Ecv, Evv, Ep = 0, -417, -390, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='InSb') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 34.8, 15.5, 16.5
P, m_eff = 940.2, 1.0
EF, Ecv, Evv, Ep = 0, -235, -810, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='GaAs') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 6.98, 2.06, 2.93
P, m_eff = 1097.45, 1.0
EF, Ecv, Evv, Ep = 0, -1519, -341, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
Ep=3/(0.063)/(3/np.abs(Ecv)+1/np.abs(Ecv+Evv))
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='GaSb') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 13.4, 4.7, 6.0
P, m_eff = 971.3, 1.0
EF, Ecv, Evv, Ep = 0, -812, -760, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='InAs') and (crystal=='wurtzite'):
m_eff = 1.0
D1,D2,D3,D4=100.3,102.3,104.1,38.8
A1,A2,A3,A4,A5,A6,A7=-1.5726,-1.6521,-2.6301,0.5126,0.1172,1.3103,-49.04
B1,B2,B3=-2.3925,2.3155,-1.7231
e1,e2=-3.2005,0.6363
P1,P2=838.6,689.87
alpha1,alpha2,alpha3=-1.89,-28.92,-51.17
beta1,beta2=-6.95,-21.71
gamma1,Ec, Ev=53.06,0,-664.9
elif crystal=='minimal' or crystal=='zincblende':
gamma0, gamma1, gamma2, gamma3 = params['gamma0'], params['gamma1'], params['gamma2'], params['gamma3']
P, m_eff = params['P'], params['m_eff']
EF, Ecv, Evv = params['EF'], params['Ecv'], params['Evv']
if crystal=='zincblende':
Ep=(cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
## Make sure that the onsite parameters are arrays:
Nx, Ny = N[0], N[1]
if np.ndim(dis)==0:
dis_x, dis_y = dis, dis
else:
dis_x, dis_y = dis[0], dis[1]
if np.isscalar(mesh):
xi_x, xi_y = np.ones(N), np.ones(N)
elif len(mesh)==2:
xi_x, xi_y = dis_x/mesh[0]*np.ones(N), dis_y/mesh[1]*np.ones(N)
else:
xi_x, xi_y = dis_x/mesh[0], dis_y/mesh[1]
if np.isscalar(mu):
mu = mu * np.ones((Nx,Ny))
#Number of bands and sites
m_b = 8 * Nx * Ny
m_s = Nx * Ny
    #Build the hopping amplitudes and on-site kinetic terms:
tx=cons.hbar**2/(2*m_eff*cons.m_e*(dis_x*1e-9)**2)/cons.e*1e3*(xi_x[1::,:]+xi_x[:-1,:])/2
ty=cons.hbar**2/(2*m_eff*cons.m_e*(dis_y*1e-9)**2)/cons.e*1e3*(xi_y[:,1::]+xi_y[:,:-1])/2
txy=cons.hbar**2/(2*m_eff*cons.m_e*(dis_x*1e-9)*(dis_y*1e-9))/cons.e*1e3*np.append(np.zeros((1,Ny)),xi_x[1::,:]+xi_x[:-1,:],axis=0)/2*np.append(np.zeros((Nx,1)),xi_y[:,1::]+xi_y[:,:-1],axis=1)/2
txy=txy[1::,1::]
ax=(xi_x[1::,:]+xi_x[:-1,:])/2/(2*dis_x)
ay=(xi_y[:,1::]+xi_y[:,:-1])/2/(2*dis_y)
e = np.append(2*tx[0,:].reshape(1,Ny),np.append(tx[1::,:]+tx[:-1,:],2*tx[-1,:].reshape(1,Ny),axis=0),axis=0)
em = e - np.append(2*ty[:,0].reshape(Nx,1),np.append(ty[:,1::]+ty[:,:-1],2*ty[:,-1].reshape(Nx,1),axis=1),axis=1)
e += np.append(2*ty[:,0].reshape(Nx,1),np.append(ty[:,1::]+ty[:,:-1],2*ty[:,-1].reshape(Nx,1),axis=1),axis=1)
ty=np.insert(ty,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
ay=np.insert(ay,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
txy=np.insert(txy,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
e, em, mu, tx, ty = e.flatten(), em.flatten(), mu.flatten(), tx.flatten(), ty.flatten()
ax,ay=ax.flatten(),ay.flatten()
if not(B==0):
x, y = np.zeros(N), np.zeros(N)
        if np.isscalar(mesh) and mesh==0:
            # homogeneous discretization: spacing dis_x along x and dis_y along y
            mesh=np.ones((2,Nx,Ny))
            mesh[0]=mesh[0]*dis_x
            mesh[1]=mesh[1]*dis_y
for i in range(Nx):
for j in range(Ny):
x[i,j]=np.sum(mesh[0,0:i+1,j])-(Nx-1)*dis_x/2
y[i,j]=np.sum(mesh[1,i,0:j+1])-(Ny-1)*dis_y/2
for i in range(int((Nx-1)/2)):
x[Nx-i-1,:]=-x[i,:]
x[int((Nx-1)/2),:]=0
x=x/np.abs(x[0,0])*(Nx-1)*dis_x/2
for j in range(int((Ny-1)/2)):
y[:,Ny-j-1]=-y[:,j]
y[:,int((Ny-1)/2)]=0
y=y/np.abs(y[0,0])*(Ny-1)*dis_y/2
fact_B=cons.e/cons.hbar*1e-18
Mx, My = -fact_B*y/2*B, fact_B*x/2*B
Mx_kx, My_ky = (xi_x[1::,:]*Mx[1::,:]+xi_x[:-1,:]*Mx[:-1,:])/2/(2*dis_x), (xi_y[:,1::]*My[:,1::]+xi_y[:,:-1]*My[:,:-1])/2/(2*dis_y)
My_ky=np.insert(My_ky,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
Mm_kx, Mm_ky = (xi_x[1::,:]*(Mx[1::,:]-1j*My[1::,:])+xi_x[:-1,:]*(Mx[:-1,:]-1j*My[:-1,:]))/2/(2*dis_x), -(xi_y[:,1::]*(Mx[:,1::]+1j*My[:,1::])+xi_y[:,:-1]*(Mx[:,:-1]+1j*My[:,:-1]))/2/(2*dis_y)
Mm_ky=np.insert(Mm_ky,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
Mx, My = Mx.flatten(), My.flatten()
Mx_kx, My_ky = Mx_kx.flatten(), My_ky.flatten()
Mm_kx, Mm_ky = Mm_kx.flatten(), Mm_ky.flatten()
    ## Build the Hamiltonian:
if crystal=='zincblende':
T=(concatenate((e,-tx,-tx,-ty,-ty)),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1))))
G1=(concatenate((P/np.sqrt(6)*ay,-P/np.sqrt(6)*ay,-1j*P/np.sqrt(6)*ax,1j*P/np.sqrt(6)*ax)),
concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
O1=(concatenate(((-1/np.sqrt(3)*(gamma2+2*gamma3))*em,-tx*(-1/np.sqrt(3)*(gamma2+2*gamma3)),-tx*(-1/np.sqrt(3)*(gamma2+2*gamma3)),
(ty*(-1/np.sqrt(3)*(gamma2+2*gamma3))),ty*(-1/np.sqrt(3)*(gamma2+2*gamma3)),-1j*txy[0:-1]/2*(-1/np.sqrt(3)*(gamma2+2*gamma3)),
(1j*txy/2*(-1/np.sqrt(3)*(gamma2+2*gamma3))),1j*txy/2*(-1/np.sqrt(3)*(gamma2+2*gamma3)),-1j*txy[0:-1]/2*(-1/np.sqrt(3)*(gamma2+2*gamma3)))),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny+1),diagonal(m_s,k=Ny-1,init=1),diagonal(m_s,k=-Ny+1,init=1),diagonal(m_s,k=-Ny-1))))
if not(B==0):
B_m=((Mx-1j*My),(diagonal(m_s)))
B_s=(((Mx**2+My**2)*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3),(diagonal(m_s)))
B_k=(concatenate((-2*1j*My_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*My_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
-2*1j*Mx_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*Mx_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3)),concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
B_s_m=(((Mx**2-My**2-2*1j*Mx*My)*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3),(diagonal(m_s)))
B_k_m=(concatenate((2*Mm_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
-2*Mm_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
-2*1j*Mm_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*Mm_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3)),concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
### Upper diagonal:
## row 0:
# (0,2)
args=G1[0]
index=(G1[1][0]+0,G1[1][1]+2*m_s)
# (0,4)
args=np.append(args,np.conj(G1[0])*np.sqrt(3))
index=(np.append(index[0],G1[1][1]+0),np.append(index[1],G1[1][0]+4*m_s))
# (0,7)
args=np.append(args,G1[0]*np.sqrt(2))
index=(np.append(index[0],G1[1][0]+0),np.append(index[1],G1[1][1]+7*m_s))
## row 1:
# (1,3)
args=np.append(args,-G1[0]*np.sqrt(3))
index=(np.append(index[0],G1[1][0]+m_s), np.append(index[1],G1[1][1]+3*m_s))
# (1,5)
args=np.append(args,-np.conj(G1[0]))
index=(np.append(index[0],G1[1][1]+m_s),np.append(index[1],G1[1][0]+5*m_s))
# (1,6)
args=np.append(args,np.sqrt(2)*np.conj(G1[0]))
index=(np.append(index[0],G1[1][1]+m_s), np.append(index[1],G1[1][0]+6*m_s))
## row 2:
# (2,4)
args=np.append(args,O1[0])
index=(np.append(index[0],O1[1][0]+2*m_s),np.append(index[1],O1[1][1]+4*m_s))
# (2,7)
args=np.append(args,-np.sqrt(2)*T[0]*gamma3)
index=(np.append(index[0],T[1][0]+2*m_s),np.append(index[1],T[1][1]+7*m_s))
## row 3:
# (3,5)
args=np.append(args,O1[0])
index=(np.append(index[0],O1[1][0]+3*m_s),np.append(index[1],O1[1][1]+5*m_s))
# (3,6)
args=np.append(args,-np.sqrt(2)*np.conj(O1[0]))
index=(np.append(index[0],O1[1][1]+3*m_s),np.append(index[1],O1[1][0]+6*m_s))
## row 4:
# (4,7)
args=np.append(args,np.sqrt(2)*np.conj(O1[0]))
index=(np.append(index[0],O1[1][1]+4*m_s),np.append(index[1],O1[1][0]+7*m_s))
## row 5:
# (5,6)
args=np.append(args,np.sqrt(2)*T[0]*gamma3)
index=(np.append(index[0],T[1][0]+5*m_s),np.append(index[1],T[1][1]+6*m_s))
        ## If there is a magnetic field:
if not(B==0):
## row 0:
# (0,2)
args=np.append(args,P/np.sqrt(6)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+0),np.append(index[1],B_m[1][0]+2*m_s))
# (0,4)
args=np.append(args,P/np.sqrt(2)*B_m[0])
index=(np.append(index[0],B_m[1][0]+0),np.append(index[1],B_m[1][1]+4*m_s))
# (0,7)
args=np.append(args,P/np.sqrt(3)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+0),np.append(index[1],B_m[1][0]+7*m_s))
## row 1:
# (1,3)
args=np.append(args,-P/np.sqrt(2)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+m_s),np.append(index[1],B_m[1][0]+3*m_s))
# (1,5)
args=np.append(args,-P/np.sqrt(6)*B_m[0])
index=(np.append(index[0],B_m[1][0]+m_s),np.append(index[1],B_m[1][1]+5*m_s))
# (1,6)
args=np.append(args,P/np.sqrt(3)*B_m[0])
index=(np.append(index[0],B_m[1][0]+m_s),np.append(index[1],B_m[1][1]+6*m_s))
## row 2:
# (2,7)
args=np.append(args,-np.sqrt(2)*gamma3*B_s[0])
index=(np.append(index[0],B_s[1][0]+2*m_s),np.append(index[1],B_s[1][1]+7*m_s))
args=np.append(args,-np.sqrt(2)*gamma3*B_k[0])
index=(np.append(index[0],B_k[1][0]+2*m_s),np.append(index[1],B_k[1][1]+7*m_s))
# (2,4)
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_s_m[0])
index=(np.append(index[0],B_s_m[1][0]+2*m_s),np.append(index[1],B_s_m[1][1]+4*m_s))
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_k_m[0])
index=(np.append(index[0],B_k_m[1][0]+2*m_s),np.append(index[1],B_k_m[1][1]+4*m_s))
## row 3:
# (3,5)
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_s_m[0])
index=(np.append(index[0],B_s_m[1][0]+3*m_s),np.append(index[1],B_s_m[1][1]+5*m_s))
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_k_m[0])
index=(np.append(index[0],B_k_m[1][0]+3*m_s),np.append(index[1],B_k_m[1][1]+5*m_s))
# (3,6)
args=np.append(args,np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_s_m[0]))
index=(np.append(index[0],B_s_m[1][1]+3*m_s),np.append(index[1],B_s_m[1][0]+6*m_s))
args=np.append(args,np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_k_m[0]))
index=(np.append(index[0],B_k_m[1][1]+3*m_s),np.append(index[1],B_k_m[1][0]+6*m_s))
## row 4:
# (4,7)
args=np.append(args,-np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_s_m[0]))
index=(np.append(index[0],B_s_m[1][1]+4*m_s),np.append(index[1],B_s_m[1][0]+7*m_s))
args=np.append(args,-np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_k_m[0]))
index=(np.append(index[0],B_k_m[1][1]+4*m_s),np.append(index[1],B_k_m[1][0]+7*m_s))
## row 5:
# (5,6)
args=np.append(args,np.sqrt(2)*gamma3*B_s[0])
index=(np.append(index[0],B_s[1][0]+5*m_s),np.append(index[1],B_s[1][1]+6*m_s))
args=np.append(args,np.sqrt(2)*gamma3*B_k[0])
index=(np.append(index[0],B_k[1][0]+5*m_s),np.append(index[1],B_k[1][1]+6*m_s))
### Lower diagonal:
args=np.append(args,np.conj(args))
index=(np.append(index[0],index[1]),np.append(index[1],index[0]))
### Diagonal:
# (0,0)
args=np.append(args,T[0])
index=(np.append(index[0],T[1][0]+0),np.append(index[1],T[1][1]+0))
# (1,1)
args=np.append(args,T[0])
index=(np.append(index[0],T[1][0]+m_s),np.append(index[1],T[1][1]+m_s))
# (2,2)
args=np.append(args,(gamma3-gamma1)*T[0])
index=(np.append(index[0],T[1][0]+2*m_s),np.append(index[1],T[1][1]+2*m_s))
# (3,3)
args=np.append(args,-(gamma3+gamma1)*T[0])
index=(np.append(index[0],T[1][0]+3*m_s),np.append(index[1],T[1][1]+3*m_s))
# (4,4)
args=np.append(args,-(gamma3+gamma1)*T[0])
index=(np.append(index[0],T[1][0]+4*m_s),np.append(index[1],T[1][1]+4*m_s))
# (5,5)
args=np.append(args,(gamma3-gamma1)*T[0])
index=(np.append(index[0],T[1][0]+5*m_s),np.append(index[1],T[1][1]+5*m_s))
# (6,6)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+6*m_s),np.append(index[1],T[1][1]+6*m_s))
# (7,7)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+7*m_s),np.append(index[1],T[1][1]+7*m_s))
if not(B==0):
# (0,0)
args=np.append(args,B_s[0])
index=(np.append(index[0],B_s[1][0]+0),np.append(index[1],B_s[1][1]+0))
args=np.append(args,B_k[0])
index=(np.append(index[0],B_k[1][0]+0),np.append(index[1],B_k[1][1]+0))
# (1,1)
args=np.append(args,B_s[0])
index=(np.append(index[0],B_s[1][0]+m_s),np.append(index[1],B_s[1][1]+m_s))
args=np.append(args,B_k[0])
index=(np.append(index[0],B_k[1][0]+m_s),np.append(index[1],B_k[1][1]+m_s))
# (2,2)
args=np.append(args,(gamma3-gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+2*m_s),np.append(index[1],B_s[1][1]+2*m_s))
args=np.append(args,(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+2*m_s),np.append(index[1],B_k[1][1]+2*m_s))
# (3,3)
args=np.append(args,-(gamma3+gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+3*m_s),np.append(index[1],B_s[1][1]+3*m_s))
args=np.append(args,-(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+3*m_s),np.append(index[1],B_k[1][1]+3*m_s))
# (4,4)
args=np.append(args,-(gamma3+gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+4*m_s),np.append(index[1],B_s[1][1]+4*m_s))
args=np.append(args,-(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+4*m_s),np.append(index[1],B_k[1][1]+4*m_s))
# (5,5)
args=np.append(args,(gamma3-gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+5*m_s),np.append(index[1],B_s[1][1]+5*m_s))
args=np.append(args,(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+5*m_s),np.append(index[1],B_k[1][1]+5*m_s))
# (6,6)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+6*m_s),np.append(index[1],B_s[1][1]+6*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+6*m_s),np.append(index[1],B_k[1][1]+6*m_s))
# (7,7)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+7*m_s),np.append(index[1],B_s[1][1]+7*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+7*m_s),np.append(index[1],B_k[1][1]+7*m_s))
        ### Build the matrix:
H=scipy.sparse.csc_matrix((args,index),shape=(m_b,m_b))
if sparse=='no':
H=H.todense()
### Add potential and band edges:
H[diagonal(m_b)]+=-np.tile(mu,8) + concatenate((EF*np.ones(2*m_s),Ecv*np.ones(4*m_s),(Ecv+Evv)*np.ones(2*m_s)))
elif crystal=='wurtzite':
Kc=(concatenate((e,-tx,-tx,-ty,-ty)),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1))))
Kp=(concatenate((ay,-ay,-1j*ax,1j*ax)),
concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
Kpc=(concatenate((em,-tx,-tx,ty,ty,-1j*txy[0:-1]/2,1j*txy/2,1j*txy/2,-1j*txy[0:-1]/2)),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny+1),diagonal(m_s,k=Ny-1,init=1),diagonal(m_s,k=-Ny+1,init=1),diagonal(m_s,k=-Ny-1))))
### Upper diagonal:
## row 0:
# (0,1)
args=-A5*np.conj(Kpc[0])
index=(Kpc[1][1]+0,Kpc[1][0]+m_s)
# (0,2)
args=np.append(args,1j*(A7-alpha1/np.sqrt(2))*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+0),np.append(index[1],Kp[1][0]+2*m_s))
# (0,4)
args=np.append(args,-1j*alpha2*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+0),np.append(index[1],Kp[1][0]+4*m_s))
# (0,6)
args=np.append(args,-(P2-beta1)/np.sqrt(2)*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+0),np.append(index[1],Kp[1][0]+6*m_s))
## row 1:
# (1,2)
args=np.append(args,-1j*(A7+alpha1/np.sqrt(2))*Kp[0])
index=(np.append(index[0],Kp[1][0]+m_s),np.append(index[1],Kp[1][1]+2*m_s))
# (1,3)
args=np.append(args,-1j*alpha2*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+m_s),np.append(index[1],Kp[1][0]+3*m_s))
# (1,5)
args=np.append(args,np.sqrt(2)*D3*np.ones(m_s))
index=(np.append(index[0],diagonal(m_s)[0]+m_s),np.append(index[1],diagonal(m_s)[1]+5*m_s))
# (1,6)
args=np.append(args,(P2+beta1)/np.sqrt(2)*Kp[0])
index=(np.append(index[0],Kp[1][0]+m_s),np.append(index[1],Kp[1][1]+6*m_s))
# (1,7)
args=np.append(args,1j*np.sqrt(2)*D4*np.ones(m_s))
index=(np.append(index[0],diagonal(m_s)[0]+m_s),np.append(index[1],diagonal(m_s)[1]+7*m_s))
## row 2:
# (2,4)
args=np.append(args,np.sqrt(2)*D3*np.ones(m_s))
index=(np.append(index[0],diagonal(m_s)[0]+2*m_s),np.append(index[1],diagonal(m_s)[1]+4*m_s))
# (2,5)
args=np.append(args,-1j*alpha3*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+2*m_s),np.append(index[1],Kp[1][0]+5*m_s))
# (2,6)
args=np.append(args, 1j*B2*Kc[0])
index=(np.append(index[0],Kc[1][0]+2*m_s),np.append(index[1],Kc[1][1]+6*m_s))
# (2,7)
args=np.append(args, beta2*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+2*m_s),np.append(index[1],Kp[1][0]+7*m_s))
## row 3:
# (3,4)
args=np.append(args,-A5*Kpc[0])
index=(np.append(index[0],Kpc[1][0]+3*m_s),np.append(index[1],Kpc[1][1]+4*m_s))
# (3,5)
args=np.append(args,-1j*(A7-alpha1/np.sqrt(2))*Kp[0])
index=(np.append(index[0],Kp[1][0]+3*m_s),np.append(index[1],Kp[1][1]+5*m_s))
# (3,7)
args=np.append(args,(P2-beta1)/np.sqrt(2)*Kp[0])
index=(np.append(index[0],Kp[1][0]+3*m_s),np.append(index[1],Kp[1][1]+7*m_s))
## row 4:
# (4,5)
args=np.append(args,1j*(A7+alpha1/np.sqrt(2))*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+4*m_s),np.append(index[1],Kp[1][0]+5*m_s))
# (4,6)
args=np.append(args,1j*np.sqrt(2)*D4*np.ones(m_s))
index=(np.append(index[0],diagonal(m_s)[0]+4*m_s),np.append(index[1],diagonal(m_s)[1]+6*m_s))
# (4,7)
args=np.append(args,-(P2+beta1)/np.sqrt(2)*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+4*m_s),np.append(index[1],Kp[1][0]+7*m_s))
## row 5:
# (5,6)
args=np.append(args,-beta2*Kp[0])
index=(np.append(index[0],Kp[1][0]+5*m_s),np.append(index[1],Kp[1][1]+6*m_s))
# (5,7)
args=np.append(args, 1j*B2*Kc[0])
index=(np.append(index[0],Kc[1][0]+5*m_s),np.append(index[1],Kc[1][1]+7*m_s))
## row 6:
# (6,7)
args=np.append(args,-1j*gamma1*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+6*m_s),np.append(index[1],Kp[1][0]+7*m_s))
### Lower diagonal:
args=np.append(args,np.conj(args))
index=(np.append(index[0],index[1]),np.append(index[1],index[0]))
### Diagonal:
# (0,0)
args=np.append(args,(A2+A4)*Kc[0])
index=(np.append(index[0],Kc[1][0]+0),np.append(index[1],Kc[1][1]+0))
# (1,1)
args=np.append(args,(A2+A4)*Kc[0])
index=(np.append(index[0],Kc[1][0]+m_s),np.append(index[1],Kc[1][1]+m_s))
# (2,2)
args=np.append(args,(A2)*Kc[0])
index=(np.append(index[0],Kc[1][0]+2*m_s),np.append(index[1],Kc[1][1]+2*m_s))
# (3,3)
args=np.append(args,(A2+A4)*Kc[0])
index=(np.append(index[0],Kc[1][0]+3*m_s),np.append(index[1],Kc[1][1]+3*m_s))
# (4,4)
args=np.append(args,(A2+A4)*Kc[0])
index=(np.append(index[0],Kc[1][0]+4*m_s),np.append(index[1],Kc[1][1]+4*m_s))
# (5,5)
args=np.append(args,(A2)*Kc[0])
index=(np.append(index[0],Kc[1][0]+5*m_s),np.append(index[1],Kc[1][1]+5*m_s))
# (6,6)
args=np.append(args,(e2)*Kc[0])
index=(np.append(index[0],Kc[1][0]+6*m_s),np.append(index[1],Kc[1][1]+6*m_s))
# (7,7)
args=np.append(args,(e2)*Kc[0])
index=(np.append(index[0],Kc[1][0]+7*m_s),np.append(index[1],Kc[1][1]+7*m_s))
        ### Build the matrix:
H=scipy.sparse.csc_matrix((args,index),shape=(m_b,m_b))
if sparse=='no':
H=H.todense()
### Add potential and band edges:
H[diagonal(m_b)]+=-np.tile(mu,8) + concatenate(((D1+D2+Ev)*np.ones(m_s),(D1-D2+Ev)*np.ones(m_s),(Ev)*np.ones(m_s),
(D1+D2+Ev)*np.ones(m_s),(D1-D2+Ev)*np.ones(m_s),(Ev)*np.ones(m_s),
(Ec)*np.ones(m_s),(Ec)*np.ones(m_s)))
elif crystal=='minimal':
T=(concatenate((e,-tx,-tx,-ty,-ty)),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1))))
G1=(concatenate((P/np.sqrt(6)*ay,-P/np.sqrt(6)*ay,-1j*P/np.sqrt(6)*ax,1j*P/np.sqrt(6)*ax)),
concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
if not(B==0):
B_m=((Mx-1j*My),(diagonal(m_s)))
B_s=(((Mx**2+My**2)*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3),(diagonal(m_s)))
B_k=(concatenate((-2*1j*My_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*My_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
-2*1j*Mx_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*Mx_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3)),concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
### Upper diagonal:
## row 0:
# (0,2)
args=G1[0]
index=(G1[1][0]+0,G1[1][1]+2*m_s)
# (0,4)
args=np.append(args,np.conj(G1[0])*np.sqrt(3))
index=(np.append(index[0],G1[1][1]+0),np.append(index[1],G1[1][0]+4*m_s))
# (0,7)
args=np.append(args,G1[0]*np.sqrt(2))
index=(np.append(index[0],G1[1][0]+0),np.append(index[1],G1[1][1]+7*m_s))
## row 1:
# (1,3)
args=np.append(args,-G1[0]*np.sqrt(3))
index=(np.append(index[0],G1[1][0]+m_s), np.append(index[1],G1[1][1]+3*m_s))
# (1,5)
args=np.append(args,-np.conj(G1[0]))
index=(np.append(index[0],G1[1][1]+m_s),np.append(index[1],G1[1][0]+5*m_s))
# (1,6)
args=np.append(args,np.sqrt(2)*np.conj(G1[0]))
index=(np.append(index[0],G1[1][1]+m_s), np.append(index[1],G1[1][0]+6*m_s))
        ## If there is a magnetic field:
if not(B==0):
## row 0:
# (0,2)
args=np.append(args,P/np.sqrt(6)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+0),np.append(index[1],B_m[1][0]+2*m_s))
# (0,4)
args=np.append(args,P/np.sqrt(2)*B_m[0])
index=(np.append(index[0],B_m[1][0]+0),np.append(index[1],B_m[1][1]+4*m_s))
# (0,7)
args=np.append(args,P/np.sqrt(3)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+0),np.append(index[1],B_m[1][0]+7*m_s))
## row 1:
# (1,3)
args=np.append(args,-P/np.sqrt(2)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+m_s),np.append(index[1],B_m[1][0]+3*m_s))
# (1,5)
args=np.append(args,-P/np.sqrt(6)*B_m[0])
index=(np.append(index[0],B_m[1][0]+m_s),np.append(index[1],B_m[1][1]+5*m_s))
# (1,6)
args=np.append(args,P/np.sqrt(3)*B_m[0])
index=(np.append(index[0],B_m[1][0]+m_s),np.append(index[1],B_m[1][1]+6*m_s))
### Lower diagonal:
args=np.append(args,np.conj(args))
index=(np.append(index[0],index[1]),np.append(index[1],index[0]))
### Diagonal:
# (0,0)
args=np.append(args,gamma0*T[0])
index=(np.append(index[0],T[1][0]+0),np.append(index[1],T[1][1]+0))
# (1,1)
args=np.append(args,gamma0*T[0])
index=(np.append(index[0],T[1][0]+m_s),np.append(index[1],T[1][1]+m_s))
# (2,2)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+2*m_s),np.append(index[1],T[1][1]+2*m_s))
# (3,3)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+3*m_s),np.append(index[1],T[1][1]+3*m_s))
# (4,4)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+4*m_s),np.append(index[1],T[1][1]+4*m_s))
# (5,5)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+5*m_s),np.append(index[1],T[1][1]+5*m_s))
# (6,6)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+6*m_s),np.append(index[1],T[1][1]+6*m_s))
# (7,7)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+7*m_s),np.append(index[1],T[1][1]+7*m_s))
if not(B==0):
# (0,0)
args=np.append(args,gamma0*B_s[0])
index=(np.append(index[0],B_s[1][0]+0),np.append(index[1],B_s[1][1]+0))
args=np.append(args,gamma0*B_k[0])
index=(np.append(index[0],B_k[1][0]+0),np.append(index[1],B_k[1][1]+0))
# (1,1)
args=np.append(args,gamma0*B_s[0])
index=(np.append(index[0],B_s[1][0]+m_s),np.append(index[1],B_s[1][1]+m_s))
args=np.append(args,gamma0*B_k[0])
index=(np.append(index[0],B_k[1][0]+m_s),np.append(index[1],B_k[1][1]+m_s))
# (2,2)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+2*m_s),np.append(index[1],B_s[1][1]+2*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+2*m_s),np.append(index[1],B_k[1][1]+2*m_s))
# (3,3)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+3*m_s),np.append(index[1],B_s[1][1]+3*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+3*m_s),np.append(index[1],B_k[1][1]+3*m_s))
# (4,4)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+4*m_s),np.append(index[1],B_s[1][1]+4*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+4*m_s),np.append(index[1],B_k[1][1]+4*m_s))
# (5,5)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+5*m_s),np.append(index[1],B_s[1][1]+5*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+5*m_s),np.append(index[1],B_k[1][1]+5*m_s))
# (6,6)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+6*m_s),np.append(index[1],B_s[1][1]+6*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+6*m_s),np.append(index[1],B_k[1][1]+6*m_s))
# (7,7)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+7*m_s),np.append(index[1],B_s[1][1]+7*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+7*m_s),np.append(index[1],B_k[1][1]+7*m_s))
        ### Build the matrix:
H=scipy.sparse.csc_matrix((args,index),shape=(m_b,m_b))
if sparse=='no':
H=H.todense()
### Add potential and band edges:
H[diagonal(m_b)]+=-np.tile(mu,8) + concatenate((EF*np.ones(2*m_s),Ecv*np.ones(4*m_s),(Ecv+Evv)*np.ones(2*m_s)))
return (H)
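#%% Usage sketch (illustration only): build a small zincblende InAs cross-section
# and obtain a few eigenvalues near the band gap with a sparse solver. The
# section size, spacing and chemical potential below are arbitrary choices.
#
#     H = Kane_2D_builder(N=(11, 11), dis=2, mu=0, params='InAs',
#                         crystal='zincblende', sparse='yes')
#     E = scipy.sparse.linalg.eigsh(H, k=6, sigma=0, which='LM',
#                                   return_eigenvectors=False)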
|
# Generated by Django 2.1.1 on 2018-09-08 22:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('alert', '0008_auto_20180908_1949'),
]
operations = [
migrations.AddField(
model_name='alert',
name='image',
field=models.ImageField(blank=True, null=True, upload_to=''),
),
migrations.AlterField(
model_name='alertvote',
name='alert',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='alert.Alert'),
),
]
|
#!/usr/bin/env python
import unittest
from jinja2.utils import generate_lorem_ipsum
# Generate content with image
TEST_CONTENT_IMAGE_URL = 'https://testimage.com/test.jpg'
TEST_CONTENT = str(generate_lorem_ipsum(n=3, html=True)) + '<img src="' + TEST_CONTENT_IMAGE_URL + '"/>'+ str(generate_lorem_ipsum(n=2,html=True))
TEST_SUMMARY_IMAGE_URL = 'https://testimage.com/summary.jpg'
TEST_SUMMARY_WITHOUTIMAGE = str(generate_lorem_ipsum(n=1, html=True))
TEST_SUMMARY_WITHIMAGE = TEST_SUMMARY_WITHOUTIMAGE + '<img src="' + TEST_SUMMARY_IMAGE_URL + '"/>'
TEST_CUSTOM_IMAGE_URL = 'https://testimage.com/custom.jpg'
from pelican.contents import Article
import representative_image
class TestRepresentativeImage(unittest.TestCase):
def setUp(self):
super(TestRepresentativeImage, self).setUp()
representative_image.register()
def test_extract_image_from_content(self):
args = {
'content': TEST_CONTENT,
'metadata': {
'summary': TEST_SUMMARY_WITHOUTIMAGE,
},
}
article = Article(**args)
self.assertEqual(article.featured_image, TEST_CONTENT_IMAGE_URL)
def test_extract_image_from_summary(self):
args = {
'content': TEST_CONTENT,
'metadata': {
'summary': TEST_SUMMARY_WITHIMAGE,
},
}
article = Article(**args)
self.assertEqual(article.featured_image, TEST_SUMMARY_IMAGE_URL)
self.assertEqual(article.summary, TEST_SUMMARY_WITHOUTIMAGE)
def test_extract_image_from_summary_with_custom_image(self):
args = {
'content': TEST_CONTENT,
'metadata': {
'summary': TEST_SUMMARY_WITHIMAGE,
'image': TEST_CUSTOM_IMAGE_URL,
},
}
article = Article(**args)
self.assertEqual(article.featured_image, TEST_CUSTOM_IMAGE_URL)
self.assertEqual(article.summary, TEST_SUMMARY_WITHOUTIMAGE)
if __name__ == '__main__':
unittest.main()
|
import gym
import time
from keras.models import Sequential
from keras.layers import Dense, Activation
import collections
import collections.abc
from matplotlib import pyplot as plt
import numpy as np
class BlackBox:
def __init__(self):
self._init_model()
def _init_model(self, shape=[2,2], actions=2):
model = Sequential([
Dense(2, input_dim=4, activation='relu'),
Dense(actions, activation='softmax'),
])
self.model = model
self.shape = shape
self.actions = actions
return
def flatten(self, weights):
w = []
for l in weights:
            if isinstance(l, collections.abc.Iterable):
w = w + self.flatten(l)
else:
w = w + [l]
return w
def unflatten(self, flat_weights):
w = []
i = 0
for l, size in enumerate(self.shape):
layer = self.model.layers[l].get_weights()
params = layer[0]
bias = layer[1]
new_layer = []
new_params = []
new_bias = []
for param in params:
new_params.append(flat_weights[i:i+size])
i += size
for b in bias:
new_bias.append(flat_weights[i])
i += 1
w.append(np.array(new_params))
w.append(np.array(new_bias))
return w
def get_weights(self):
return self.model.get_weights()
def set_weights(self, weights):
self.model.set_weights(weights)
def get_flat_weights(self):
return self.flatten(self.get_weights())
def set_flat_weights(self, flat_weights):
return self.set_weights(self.unflatten(flat_weights))
def produce_action(self, state):
inp = np.array([np.array(state).T])
action_dist = self.model.predict(inp)
action = np.random.choice(self.actions,1,p=action_dist[0])[0]
# print("state: ", state, "actions: ", action_dist, "take: ", action)
return action
def run_sim():
env = gym.make('CartPole-v1')
alpha = 0.1
sigma = 3
bb = BlackBox()
w = bb.get_flat_weights()
pop_size = 200
test = collections.deque(maxlen=10)
fitnesses = []
for generation in range(40):
observation = env.reset()
noise = np.random.randn(pop_size, len(w))
population = w + sigma*noise
F = []
for agent in population:
observation = env.reset()
bb.set_flat_weights(agent)
fitness = 0
actions = collections.defaultdict(int)
for t in range(200):
# time.sleep(0.02)
# env.render()
action = bb.produce_action(np.array(list(observation)))
actions[action] += 1
observation, reward, done, info = env.step(action)
# We will sum position and velocity to get reward
fitness += reward
if done:
# print("Episode finished after {} timesteps".format(t+1))
break
# print("action distribution: ", dict(actions), "fitness: ", fitness)
# print()
F.append(fitness)
# print(
# 'Gen: ', generation,
# '| Net_R: %.1f' % average_reward,
# )
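        # Natural-evolution-strategies update (OpenAI-ES style): the gradient of
        # expected fitness is estimated as the fitness-weighted average of the
        # sampled noise directions, scaled by 1/(pop_size*sigma), and w takes a
        # step of size alpha along it. (Many implementations rank-normalize F
        # first; that refinement is not applied here.)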
w = w + alpha*(1/(pop_size*sigma))*(noise.T*F).T.sum(axis=0)
current_fitness = test_convergence(w, test, env)
fitnesses.append(current_fitness)
test.append(current_fitness)
if (sum(test) / len(test)) > 195:
print("Convergence Reached after {} Generations".format(t+1))
# Cache Model
bb.set_flat_weights(w)
bb.model.save("ES-CartPole-v1.hdf5")
plt.plot(fitnesses)
plt.savefig('fitnesses.png')
plt.show()
def test_convergence(w, test, env, max_steps=200, num_trials=10):
bb = BlackBox()
bb.set_flat_weights(w)
average_fitness = 0
for i in range(num_trials):
fitness = 0
observation = env.reset()
for t in range(max_steps):
action = bb.produce_action(np.array(list(observation)))
observation, reward, done, info = env.step(action)
fitness += reward
if done:
break
average_fitness += fitness / num_trials
print("Current fitness: ", average_fitness)
return average_fitness
def test_model():
env = gym.make('CartPole-v1')
bb = BlackBox()
bb.model.load_weights('ES-CartPole-v1.hdf5')
for generation in range(20):
observation = env.reset()
for t in range(500):
time.sleep(0.01)
env.render()
action = bb.produce_action(np.array(list(observation)))
# print(action)
observation, reward, done, info = env.step(action)
if done:
print("Episode finished after {} timesteps".format(t+1))
break
if __name__ == '__main__':
run_sim()
# test_model()
|
from core.FormatAnalyzer import format_analyse
from core.FormatTokenizer import get_all_format_probabilities
from core.utils import is_ascii, is_noise
from core.utils import divide_consecutive_vars, normalize_index
class FormatPredictResult:
def __init__(self, analyzed_root=None, var_information=None):
self.analyzed_root = analyzed_root
self.var_information = var_information
def format_predictor(format, samples):
format = format.replace("\n", " ").replace("…", " ").replace("...", " ").replace(
"..", " ").replace("\ ", " ").replace("}", "} ").replace(" ", " ")
format = divide_consecutive_vars(format)
format = normalize_index(format)
format = format.replace("{", "").replace("}", "")
tokens = [x for x in format.split(
" ") if x != "" and is_ascii(x) and not is_noise(x)]
tokenize_result = get_all_format_probabilities(tokens)
for to_1d_flag in [False, True]:
for candidate_format in tokenize_result:
rootnode, varinfo = format_analyse(
candidate_format, to_1d_flag)
try:
current_dic = {}
for sample in samples:
sample = sample[0].replace(" ", "[SP] ")
sample = sample.replace("\n", "[NL] ")
# print(samples)
# tokens = [(name,sep)]*
tokens = [(x[:-4], ' ' if x[-4:] == '[SP]' else '\n' if x[-4:] == '[NL]' else 'ERR') for x in
sample.split(" ") if x != ""] # "abc[SP]" -> "abc
# print(tokens)
current_dic = rootnode.verify_and_get_types(
tokens, current_dic)
for k, var in current_dic.items():
varinfo[k].type = var[1]
res = FormatPredictResult(rootnode, varinfo)
# print(str(rootnode))
return res
except Exception as e:
pass
return None
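# Usage sketch (illustration only; the format string and sample shapes merely
# mirror what the code above expects, they are not from the original docs):
#
#     result = format_predictor("{N} {M}", [("3 4\n",)])
#     if result is not None:
#         print(result.analyzed_root, result.var_information)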
|
from turtle import *
turtles = [Turtle() for i in range(3)]
x = 180
bgcolor("#1E0394")
color("#FF5F1F")
pensize(6)
ht()
pu()
fd(x)
seth(90)
pd()
circle(x, 360)
c = ["#FF7A1F", "#FF9C1F"]
for index, t in enumerate(turtles):
t.ht()
t.pensize(5)
t.pu()
t.fd(x)
t.seth(90)
t.circle(x, 120 * index)
t.pd()
t.color(c[0])
t.circle((x * 2 / 3), 120)
t.color(c[1])
t.pensize(4)
t.circle((x * 2 / 6) + 20, 126)
done()
|
"""
Unit tests for bad_column code.
"""
import unittest
import numpy as np
from lsst.eotest.image_utils import bad_column
def make_column(pixel_counts):
"""
Concatenate alternating sequences of 0's and 1's with lengths
given by the entries in pixel_counts. Return a list with the
y-coordinates of the masked (value 1) pixels.
"""
column = []
for i, count in enumerate(pixel_counts):
column += [i % 2]*count
column = np.array(column)
return list(np.where(column != 0)[0])
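# Worked example: make_column((2, 3, 1)) builds the column [0, 0, 1, 1, 1, 0],
# so the masked y-coordinates returned are [2, 3, 4].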
class BadColumnTestCase(unittest.TestCase):
"""
TestCase class for bad_column function.
"""
def setUp(self):
"Create bad and ok columns of masked pixels."
bad_pixel_counts = ((40, 30, 500),
(331, 20, 439),
(144, 60, 3421),
(10, 21, 309),
(0, 3000, 0),
(92, 10, 400, 33))
self.bad_columns = (make_column(x) for x in bad_pixel_counts)
ok_pixel_counts = ((50, 10, 40, 10, 500),
(0,),
(400, 19, 30, 19, 494),
(19, 19, 1, 19, 333))
self.ok_columns = (make_column(x) for x in ok_pixel_counts)
def tearDown(self):
"Nothing to tear down."
pass
def test_bad_column(self):
"Test the output of the bad_column function for the different cases."
threshold = 20
for column in self.bad_columns:
self.assertTrue(bad_column(column, threshold))
for column in self.ok_columns:
self.assertFalse(bad_column(column, threshold))
if __name__ == '__main__':
unittest.main()
|
# Copyright (c) 2014, Matt Layman
import inspect
import os
import stat
import tempfile
import unittest
import mock
from handroll.composers import Composer
from handroll.composers import Composers
from handroll.composers import CopyComposer
from handroll.composers.atom import AtomComposer
from handroll.composers.generic import GenericHTMLComposer
from handroll.composers.md import MarkdownComposer
from handroll.composers.rst import ReStructuredTextComposer
from handroll.composers.sass import SassComposer
from handroll.composers.txt import TextileComposer
from handroll.exceptions import AbortError
class TestComposer(unittest.TestCase):
def test_compose_not_implemented(self):
composer = Composer()
self.assertRaises(
NotImplementedError, composer.compose, None, None, None)
class TestComposers(unittest.TestCase):
def test_selects_composer(self):
composers = Composers()
composer = composers.select_composer_for('sample.md')
self.assertTrue(isinstance(composer, MarkdownComposer))
class TestAtomComposer(unittest.TestCase):
def setUp(self):
site = tempfile.mkdtemp()
self.source_file = os.path.join(site, 'feed.atom')
open(self.source_file, 'w').close()
self.outdir = tempfile.mkdtemp()
self.output_file = os.path.join(self.outdir, 'feed.xml')
def test_composes_feed(self):
source = """{
"title": "Fakity Fake",
"id": "let's pretend this is unique",
"entries": [{
"title": "Sample A",
"updated": "2014-02-23T00:00:00",
"published": "2014-02-22T00:00:00",
"url": "http://some.website.com/a.html",
"summary": "A summary of the sample post"
}]
}"""
with open(self.source_file, 'w') as f:
f.write(source)
composer = AtomComposer()
composer.compose(None, self.source_file, self.outdir)
self.assertTrue(os.path.exists(self.output_file))
def test_must_have_entries(self):
source = """{
"title": "Fakity Fake",
"id": "let's pretend this is unique"
}"""
with open(self.source_file, 'w') as f:
f.write(source)
composer = AtomComposer()
self.assertRaises(
AbortError, composer.compose, None, self.source_file, self.outdir)
@mock.patch('handroll.composers.atom.json')
def test_skips_up_to_date(self, json):
open(self.output_file, 'w').close()
composer = AtomComposer()
composer.compose(None, self.source_file, self.outdir)
self.assertFalse(json.loads.called)
class TestCopyComposer(unittest.TestCase):
@mock.patch('handroll.composers.shutil')
def test_skips_same_files(self, shutil):
marker = 'marker.txt'
source = tempfile.mkdtemp()
source_file = os.path.join(source, marker)
outdir = tempfile.mkdtemp()
open(source_file, 'w').close()
open(os.path.join(outdir, marker), 'w').close()
composer = CopyComposer()
composer.compose(None, source_file, outdir)
self.assertFalse(shutil.copy.called)
@mock.patch('handroll.composers.shutil')
def test_copies_when_content_differs(self, shutil):
marker = 'marker.txt'
source = tempfile.mkdtemp()
source_file = os.path.join(source, marker)
outdir = tempfile.mkdtemp()
open(source_file, 'w').close()
with open(os.path.join(outdir, marker), 'w') as f:
f.write('something different')
composer = CopyComposer()
composer.compose(None, source_file, outdir)
self.assertTrue(shutil.copy.called)
class TestGenericHTMLComposer(unittest.TestCase):
def test_composes_file(self):
catalog = mock.MagicMock()
site = tempfile.mkdtemp()
source_file = os.path.join(site, 'sample.generic')
open(source_file, 'w').close()
outdir = ''
composer = GenericHTMLComposer()
self.assertRaises(
NotImplementedError,
composer.compose, catalog, source_file, outdir)
def test_selects_default_template(self):
catalog = mock.MagicMock()
default = mock.PropertyMock()
type(catalog).default = default
composer = GenericHTMLComposer()
composer.select_template(catalog, {})
self.assertTrue(default.called)
def test_selects_specified_template(self):
catalog = mock.MagicMock()
composer = GenericHTMLComposer()
composer.select_template(catalog, {'template': 'base.j2'})
catalog.get_template.assert_called_once_with('base.j2')
def test_gets_frontmatter(self):
source = inspect.cleandoc("""%YAML 1.1
---
title: A Fake Title
---
The Content
""")
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(source.encode('utf-8'))
composer = GenericHTMLComposer()
data, source = composer._get_data(f.name)
self.assertEqual('A Fake Title', data['title'])
self.assertEqual('The Content', source)
def test_needs_update(self):
site = tempfile.mkdtemp()
output_file = os.path.join(site, 'output.md')
open(output_file, 'w').close()
future = os.path.getmtime(output_file) + 1
source_file = os.path.join(site, 'test.md')
open(source_file, 'w').close()
os.utime(source_file, (future, future))
template = mock.MagicMock()
template.last_modified = future
composer = GenericHTMLComposer()
self.assertTrue(composer._needs_update(None, source_file, output_file))
past = future - 10
os.utime(source_file, (past, past))
self.assertTrue(
composer._needs_update(template, source_file, output_file))
template.last_modified = past
self.assertFalse(
composer._needs_update(template, source_file, output_file))
class TestMarkdownComposer(unittest.TestCase):
def test_generates_html(self):
source = '**bold**'
composer = MarkdownComposer()
html = composer._generate_content(source)
self.assertEqual('<p><strong>bold</strong></p>', html)
def test_composes_no_update(self):
site = tempfile.mkdtemp()
source_file = os.path.join(site, 'test.md')
open(source_file, 'w').close()
source_mtime = os.path.getmtime(source_file)
future = source_mtime + 1
outdir = tempfile.mkdtemp()
output_file = os.path.join(outdir, 'test.html')
open(output_file, 'w').close()
os.utime(output_file, (future, future))
template = mock.MagicMock()
template.last_modified = source_mtime
catalog = mock.MagicMock()
catalog.default = template
composer = MarkdownComposer()
composer.compose(catalog, source_file, outdir)
self.assertFalse(template.render.called)
class TestReStructuredTextComposer(unittest.TestCase):
def test_generates_html(self):
source = '**bold**'
composer = ReStructuredTextComposer()
html = composer._generate_content(source)
expected = '<div class="document">\n' \
'<p><strong>bold</strong></p>\n' \
'</div>\n'
self.assertEqual(expected, html)
class TestSassComposer(unittest.TestCase):
def _make_fake_sass_bin(self):
fake_bin = tempfile.mkdtemp()
fake_sass = os.path.join(fake_bin, 'sass')
with open(fake_sass, 'w') as f:
f.write('#!/usr/bin/env python')
st = os.stat(fake_sass)
os.chmod(fake_sass, st.st_mode | stat.S_IEXEC)
return fake_bin
def test_abort_with_no_sass(self):
"""Test that handroll aborts if ``sass`` is not installed."""
# The fake bin directory has no sass executable.
fake_bin = tempfile.mkdtemp()
self.assertRaises(AbortError, SassComposer, fake_bin)
def test_create(self):
fake_bin = self._make_fake_sass_bin()
composer = SassComposer(fake_bin)
self.assertTrue(isinstance(composer, SassComposer))
def test_build_command(self):
fake_bin = self._make_fake_sass_bin()
composer = SassComposer(fake_bin)
source_file = '/in/sassy.scss'
output_file = '/out/sass.css'
expected = [
os.path.join(fake_bin, 'sass'), '--style', 'compressed',
source_file, output_file]
actual = composer.build_command(source_file, output_file)
self.assertEqual(expected, actual)
@mock.patch('handroll.composers.sass.subprocess')
def test_failed_sass_aborts(self, subprocess):
fake_bin = self._make_fake_sass_bin()
composer = SassComposer(fake_bin)
source_file = '/in/sassy.scss'
output_dir = '/out'
subprocess.Popen.return_value.communicate.return_value = ('boom', '')
subprocess.Popen.return_value.returncode = 1
self.assertRaises(
AbortError, composer.compose, None, source_file, output_dir)
class TestTextileComposer(unittest.TestCase):
def test_generates_html(self):
source = '*bold*'
composer = TextileComposer()
html = composer._generate_content(source)
self.assertEqual('\t<p><strong>bold</strong></p>', html)
|
# coding: utf-8
# In[1]:
# Imports
import numpy as np
import random
import pickle
import os
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Activation, LSTM, TimeDistributed, Dropout
from keras.optimizers import RMSprop
from keras.models import Sequential, load_model
import matplotlib.pyplot as plt
# In[3]:
# Parameters
max_length = 120
epochs = 100
batch_size = 256
# In[4]:
def get_bills(nb_bills=24, mix=True):
all_bills = os.listdir(os.path.join('..', 'data'))
if mix:
return random.sample(all_bills, nb_bills)
else:
return all_bills[:nb_bills]
# In[5]:
# Load Data
def load_data(bills, max_length=max_length, step=1):
chars = set()
sequences, next_chars = [], []
for bill_fn in bills:
with open(os.path.join('..', 'data', bill_fn), 'r') as bill_file:
bill = bill_file.read()
chars |= set(bill)
for i in range(0, len(bill) - max_length, step):
sequences.append(bill[i: i + max_length])
next_chars.append(bill[i + max_length])
sequences.append(bill[-max_length:])
next_chars.append('*END*')
chars = sorted(list(chars)) + ['*END*']
chr_to_int = dict((c, i) for i, c in enumerate(chars))
int_to_chr = dict((i, c) for i, c in enumerate(chars))
return (sequences, next_chars), (len(chars), chr_to_int, int_to_chr)
# In[6]:
# Process Data
def process_data(sequences, next_chars, num_chars, chr_to_int):
X = np.zeros((len(sequences), max_length, num_chars), dtype=np.bool)
y = np.zeros((len(sequences), num_chars), dtype=np.bool)
for i, sequence in enumerate(sequences):
for j, char in enumerate(sequence):
X[i, j, chr_to_int[char]] = 1
y[i, chr_to_int[next_chars[i]]] = 1
return X, y
# In[7]:
# Get Model
def get_model(num_chars, max_length=max_length):
model = Sequential()
model.add(LSTM(512, input_shape=(max_length, num_chars), return_sequences=True))
model.add(Dropout(0.5))
model.add(LSTM(512, return_sequences=True))
model.add(Dropout(0.5))
model.add(LSTM(512, return_sequences=False))
model.add(Dropout(0.5))
model.add(Dense(num_chars))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer=RMSprop(lr=0.001))
return model
# In[8]:
def sample(preds, temperature=1.0):
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
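# A minimal sanity check (added sketch, not part of the original notebook):
# it shows how the temperature in sample() reshapes a toy distribution. The
# preds values below are illustrative assumptions, not model output; lower
# temperatures concentrate the draws on the most likely index.
def _demo_sample_temperature():
    preds = [0.1, 0.2, 0.7]
    for temperature in (0.2, 1.0, 2.0):
        draws = [sample(preds, temperature) for _ in range(1000)]
        # frequency of each index; the last entry dominates as temperature drops
        print(temperature, np.bincount(draws, minlength=3) / 1000.0)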
# In[9]:
# (Run) Load Data
if __name__ == "__main__":
bills = get_bills()
(sequences, next_chars), (num_chars, chr_to_int, int_to_chr) = load_data(bills)
with open(os.path.join('..', 'models', 'char-table.pickle'), 'wb') as table_file:
pickle.dump(chr_to_int, table_file)
X, y = process_data(sequences, next_chars, num_chars, chr_to_int)
print(X.shape, y.shape)
# In[9]:
# (Run) Train
if __name__ == "__main__":
model = get_model(num_chars)
    save1 = ModelCheckpoint(os.path.join('..', 'models', 'bill-gen.h5'), # This will probably be overfit but could still be useful
monitor='loss',
verbose=0,
save_best_only=True)
save2 = ModelCheckpoint(os.path.join('..', 'models', 'bill-gen2.h5'),
monitor='val_loss',
verbose=0,
save_best_only=True)
history = model.fit(X, y,
batch_size=batch_size,
epochs=epochs,
validation_split=.1,
verbose=1,
callbacks=[save1, save2])
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['TrainLoss', 'TestLoss'])
plt.show()
# In[14]:
def generate_bill():
model = load_model(os.path.join('..', 'models', 'bill-gen2.h5'))
bill_fn = random.choice(os.listdir(os.path.join('..', 'data')))
with open(os.path.join('..', 'data', bill_fn), 'r') as bill_file:
rand_bill = bill_file.read()
new_bill = rand_bill[:max_length]
print("Starting w/:\n" + new_bill)
for i in range(15000):
X = np.zeros((1, max_length, num_chars))
for t, char in enumerate(new_bill[-max_length:]):
X[0, t, chr_to_int[char]] = 1
preds = model.predict(X, verbose=0)[0]
next_index = sample(preds, .6)
if int_to_chr[next_index] != '*END*' or "body" == new_bill[-4:]:
new_bill += int_to_chr[next_index]
else:
break
with open('test.html', 'w') as bill_file:
bill_file.write(new_bill)
# In[15]:
if __name__ == "__main__":
generate_bill()
|
# mouse_pos.py
# A module to calculate mouse position
def read_current_mouse_position():
""" Reads the current mouse position on the user's screen
:returns:
A tuple with the (x, y) coordinates of the cursor
"""
import pyautogui
pyautogui.FAILSAFE = False
return pyautogui.position()
def get_mouse_pos(new_x_coord, new_y_coord):
""" Gets the updated mouse position
:param new_x_coord:
The new x coordinate as reported by the controller
:param new_y_coord:
The new y coordinate as reported by the controller
"""
x_change = 0
y_change = 0
# if the joystick returned to its default position (0,0), stop mouse movement
if not (new_x_coord == 0 and new_y_coord == 0):
        x_change = new_x_coord
        y_change = -new_y_coord  # invert the y axis (screen y grows downward)
return (int(x_change), int(y_change))
def get_absolute_pos(x, y, base):
""" Returns the absolute mouse position based on the mouse position of the joystick
:param x:
The new x
:param y:
The new y
:param base:
A tuple containing the base position
"""
# give a small deadzone
new_x = base[0] + (int(x / 2) if abs(x) > 2 else 0)
new_y = base[1] - (int(y / 2) if abs(y) > 2 else 0)
return (new_x, new_y)
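# A hedged usage sketch (the joystick readings below are made up): with a base
# cursor position of (500, 300), a reading of (10, -6) moves the cursor right
# and down by half the reading, while values inside the +/-2 deadzone leave it
# in place.
if __name__ == "__main__":
    base = (500, 300)
    print(get_absolute_pos(10, -6, base))  # -> (505, 303)
    print(get_absolute_pos(1, 2, base))    # -> (500, 300), inside the deadzone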
|
# coding:utf-8
"""
A school is rolling out an unsupervised-exam policy, but some students are not self-disciplined: if two close friends of opposite sex sit in the same exam room, they will chat or even cheat. An exam room therefore must not contain two close opposite-sex friends, and the school wants to fix this by moving some students out.
Since classrooms are limited, each room should hold as many students as possible, i.e. the number of students moved out should be as small as possible. Output the plan that moves out the fewest students and, among those, is lexicographically smallest.
Input
The first line contains two integers n and m: there are n boys and n girls, and m friendship relations.
(1<=n<=500, 1<=m<=100000)
Each of the next m lines contains two integers x and y, meaning boy x and girl y are friends. Boys are numbered [1,n] and girls [n+1,2n].
Output
The first line contains one integer a, the minimum number of students that must be moved out.
The second line contains a integers: the ids of the students to move out, minimizing the count first and the lexicographic order second.
Sample input
2 2
1 3
1 4
Sample output
1
1
"""
import sys
# AC 36% (partial score: the greedy max-degree heuristic below is not optimal)
def solver(n, relations):
"""n表示有n个男生和n个女生,relations表示朋友关系"""
# 生成朋友关系图
friends = {}
for relation in relations:
for s in relation:
friends[s] = []
for nums in relations:
if nums[0] != nums[1]:
friends[nums[0]].append(nums[1])
friends[nums[1]].append(nums[0])
ret = []
while True:
        rela = getMaxOutdegree(friends)  # get the id of the student with the most friends (count > 0)
if rela == -1:
break
ret.append(rela)
        deleteNode(friends, rela)  # remove this student's friendships
ret.sort()
return ret
def getMaxOutdegree(friends):
"""获得朋友数最多的学生编号
@returns -1 未找到
"""
# 根据字典排序
keys = sorted(friends.keys())
maxCount = 0
retS = -1
for s in keys:
if len(friends[s]) > maxCount:
maxCount = len(friends[s])
retS = s
return retS
def deleteNode(friends, rela):
"""删除朋友关系"""
for k,v in friends.items():
if rela in v:
index = v.index(rela)
v.pop(index)
    friends[rela] = []  # also clear the relations of the removed student itself
def printRet(ret):
print(len(ret))
if len(ret) == 0:
print(0)
for n in ret:
print(n)
def test():
n = 2
relations = [[1,3],
[1, 4]]
ret = solver(n, relations)
printRet(ret)
def inputs():
lines = input()
nums = list(map(int, lines.split(" ")))
relations = []
for i in range(nums[1]):
cur = list(map(int, input().split(" ")))
relations.append(cur)
ret = solver(nums[0], relations)
printRet(ret)
if __name__ == '__main__':
test()
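# Added sketch (not part of the original submission): the greedy heuristic
# above explains the partial score. The exact minimum number of students to
# move out is a minimum vertex cover, which by Koenig's theorem equals the
# size of a maximum bipartite matching. A minimal Hungarian-style sketch,
# where friends_of_boy maps each boy id to the list of girl ids he knows:
def max_matching(n, friends_of_boy):
    """Return the size of a maximum matching, i.e. the minimum removals."""
    match_girl = {}  # girl id -> boy id she is currently matched to
    def augment(boy, seen):
        for girl in friends_of_boy.get(boy, []):
            if girl in seen:
                continue
            seen.add(girl)
            # Either the girl is free, or her current partner can be re-matched
            if girl not in match_girl or augment(match_girl[girl], seen):
                match_girl[girl] = boy
                return True
        return False
    return sum(augment(boy, set()) for boy in range(1, n + 1))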
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from app.models.base_model_ import Model
import re # noqa: F401,E501
from app import util
class RequestUser(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, username: str=None, plain_password: str=None, email: str=None, first_name: str=None, last_name: str=None): # noqa: E501
"""RequestUser - a model defined in Swagger
:param username: The username of this RequestUser. # noqa: E501
:type username: str
:param plain_password: The plain_password of this RequestUser. # noqa: E501
:type plain_password: str
:param email: The email of this RequestUser. # noqa: E501
:type email: str
:param first_name: The first_name of this RequestUser. # noqa: E501
:type first_name: str
:param last_name: The last_name of this RequestUser. # noqa: E501
:type last_name: str
"""
self.swagger_types = {
'username': str,
'plain_password': str,
'email': str,
'first_name': str,
'last_name': str
}
self.attribute_map = {
'username': 'username',
'plain_password': 'plain_password',
'email': 'email',
'first_name': 'first_name',
'last_name': 'last_name'
}
self._username = username
self._plain_password = plain_password
self._email = email
self._first_name = first_name
self._last_name = last_name
@classmethod
def from_dict(cls, dikt) -> 'RequestUser':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The RequestUser of this RequestUser. # noqa: E501
:rtype: RequestUser
"""
return util.deserialize_model(dikt, cls)
@property
def username(self) -> str:
"""Gets the username of this RequestUser.
:return: The username of this RequestUser.
:rtype: str
"""
return self._username
@username.setter
def username(self, username: str):
"""Sets the username of this RequestUser.
:param username: The username of this RequestUser.
:type username: str
"""
if username is None:
raise ValueError("Invalid value for `username`, must not be `None`") # noqa: E501
self._username = username
@property
def plain_password(self) -> str:
"""Gets the plain_password of this RequestUser.
:return: The plain_password of this RequestUser.
:rtype: str
"""
return self._plain_password
@plain_password.setter
def plain_password(self, plain_password: str):
"""Sets the plain_password of this RequestUser.
:param plain_password: The plain_password of this RequestUser.
:type plain_password: str
"""
if plain_password is None:
raise ValueError("Invalid value for `plain_password`, must not be `None`") # noqa: E501
self._plain_password = plain_password
@property
def email(self) -> str:
"""Gets the email of this RequestUser.
:return: The email of this RequestUser.
:rtype: str
"""
return self._email
@email.setter
def email(self, email: str):
"""Sets the email of this RequestUser.
:param email: The email of this RequestUser.
:type email: str
"""
if email is None:
raise ValueError("Invalid value for `email`, must not be `None`") # noqa: E501
self._email = email
@property
def first_name(self) -> str:
"""Gets the first_name of this RequestUser.
:return: The first_name of this RequestUser.
:rtype: str
"""
return self._first_name
@first_name.setter
def first_name(self, first_name: str):
"""Sets the first_name of this RequestUser.
:param first_name: The first_name of this RequestUser.
:type first_name: str
"""
self._first_name = first_name
@property
def last_name(self) -> str:
"""Gets the last_name of this RequestUser.
:return: The last_name of this RequestUser.
:rtype: str
"""
return self._last_name
@last_name.setter
def last_name(self, last_name: str):
"""Sets the last_name of this RequestUser.
:param last_name: The last_name of this RequestUser.
:type last_name: str
"""
self._last_name = last_name
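# A hedged usage sketch (the field values are made up, not part of the
# generated module): deserializing an API payload into the model via from_dict.
#
# user = RequestUser.from_dict({
#     'username': 'jdoe',
#     'plain_password': 's3cret',
#     'email': 'jdoe@example.com',
# })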
|
from django.db.models import Q
from rest_framework.generics import ListCreateAPIView, ListAPIView, RetrieveAPIView, RetrieveUpdateDestroyAPIView, \
CreateAPIView
from rest_framework.permissions import IsAuthenticated, AllowAny
from news.models import News, Comment, UserNews
from news.serializers import NewsSerializer, NewsListSerializer, CommentSerializer, UserNewsSerializer
class NewsListView(ListAPIView):
permission_classes = [IsAuthenticated]
serializer_class = NewsListSerializer
def get_queryset(self):
return News.objects.filter(~Q(usernews__user=self.request.user))
class FavoriteNewsListView(ListAPIView):
permission_classes = [IsAuthenticated]
serializer_class = NewsListSerializer
def get_queryset(self):
return News.objects.filter(usernews__user=self.request.user, usernews__liked=True).distinct()
class ReadNewsListView(ListAPIView):
permission_classes = [IsAuthenticated]
serializer_class = NewsListSerializer
def get_queryset(self):
return News.objects.filter(usernews__user=self.request.user, usernews__read=True).distinct()
class NewsDetailView(RetrieveAPIView):
permission_classes = [IsAuthenticated]
serializer_class = NewsSerializer
queryset = News.objects.all()
lookup_field = 'pk'
class UserNewsCreateView(CreateAPIView):
permission_classes = [IsAuthenticated]
serializer_class = UserNewsSerializer
queryset = UserNews.objects.all()
class CommentCreateView(CreateAPIView):
permission_classes = [IsAuthenticated]
serializer_class = CommentSerializer
queryset = Comment.objects.all()
class CommentListView(ListAPIView):
permission_classes = [IsAuthenticated]
serializer_class = CommentSerializer
def get_queryset(self):
news_id = self.kwargs['pk']
return Comment.objects.filter(news_id=news_id)
class CommentDetailView(RetrieveUpdateDestroyAPIView):
permission_classes = [IsAuthenticated]
serializer_class = CommentSerializer
queryset = Comment.objects.all()
lookup_field = 'pk'
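# A hedged urls.py sketch (the route strings are assumptions, not the
# project's actual URLconf) showing how these views might be wired up:
#
# from django.urls import path
# urlpatterns = [
#     path('news/', NewsListView.as_view()),
#     path('news/<int:pk>/', NewsDetailView.as_view()),
#     path('news/<int:pk>/comments/', CommentListView.as_view()),
#     path('comments/<int:pk>/', CommentDetailView.as_view()),
# ]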
|
from importlib import import_module
import aiohttp
from disnake.ext import commands
import disnake
from typing import Optional, Union
from .music.spotify import spotify_client
from utils.db import Database, LocalDatabase
import os
import traceback
from utils.music.models import music_mode
class BotCore(commands.Bot):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        self.session: Optional[aiohttp.ClientSession] = None
        self.db: Optional[Union[LocalDatabase, Database]] = None
self.spotify = spotify_client()
self.config = kwargs.pop('config', {})
self.music = music_mode(self)
self.session = aiohttp.ClientSession()
self.ws_users = {}
self.color = kwargs.pop("embed_color", None)
self.bot_ready = False
self.player_skins = {}
self.default_skin = self.config.get("DEFAULT_SKIN", "default")
self.load_skins()
self.commit = kwargs.get("commit", "N/A")
self.default_prefix = kwargs.get("default_prefix", None)
def load_skins(self):
for skin in os.listdir("./utils/music/skins"):
if not skin.endswith(".py"):
continue
try:
skin_file = import_module(f"utils.music.skins.{skin[:-3]}")
if not hasattr(skin_file, "load"):
print(f"Skin ignorada: {skin} | Função load() não configurada/encontrada...")
continue
self.player_skins[skin[:-3]] = skin_file.load
except Exception:
print(f"Falha ao carregar skin: {traceback.format_exc()}")
        if self.default_skin not in self.player_skins:
self.default_skin = "default"
def check_skin(self, skin: str):
        if skin not in self.player_skins:
return "default"
return skin
async def on_message(self, message: disnake.Message):
if message.content == f"<@{self.user.id}>" or message.content == f"<@!{self.user.id}>":
prefix = (await self.get_prefix(message))[-1]
embed = disnake.Embed(
description=f"Olá, meu prefixo atual é: **{prefix}**\n"
f"Caso queira ver meus comandos de texto use **{prefix}help**\n",
color=self.get_color(message.guild.me)
)
if self.slash_commands:
embed.description += f"Veja também meus comandos de barra usando: **/**"
await message.reply(embed=embed)
return
await self.process_commands(message)
def get_color(self, me: disnake.Member):
if self.color:
return self.color
if me.color.value == 0:
return 0x2F3136
return me.color
async def on_application_command(self, inter: disnake.ApplicationCommandInteraction):
if not self.bot_ready:
await inter.send("O bot ainda não está pronto para uso.", ephemeral=True)
return
if self.db:
# inter.user_data = await bot.db.get_data(inter.author.id, db_name="users")
inter.guild_data = await self.db.get_data(inter.guild.id, db_name="guilds")
else:
# inter.user_data = None
inter.guild_data = None
await self.process_application_commands(inter)
def load_modules(self):
modules_dir = "modules"
load_status = {
"reloaded": [],
"loaded": [],
"error": []
}
for item in os.walk(modules_dir):
files = filter(lambda f: f.endswith('.py'), item[-1])
for file in files:
filename, _ = os.path.splitext(file)
module_filename = os.path.join(modules_dir, filename).replace('\\', '.').replace('/', '.')
try:
self.reload_extension(module_filename)
print(f"{'=' * 50}\n[OK] {self.user} - {filename}.py Recarregado.")
load_status["reloaded"].append(filename)
except (commands.ExtensionAlreadyLoaded, commands.ExtensionNotLoaded):
try:
self.load_extension(module_filename)
print(f"{'=' * 50}\n[OK] {self.user} - {filename}.py Carregado.")
load_status["loaded"].append(filename)
except Exception:
print((f"{'=' * 50}\n[ERRO] {self.user} - Falha ao carregar/recarregar o módulo: {filename} | Erro:"
f"\n{traceback.format_exc()}"))
load_status["error"].append(filename)
except Exception:
print((f"{'=' * 50}\n[ERRO] {self.user} - Falha ao carregar/recarregar o módulo: {filename} | Erro:"
f"\n{traceback.format_exc()}"))
load_status["error"].append(filename)
print(f"{'=' * 50}")
return load_status
|
# coding: utf-8
import os
import numpy as np
import pandas as pd
import os.path as op
import subprocess as sp
import pytest
import sys
from collections import namedtuple
from nibabel.testing import data_path
import nibabel as nb
from fitlins.interfaces.nistats import prepare_contrasts
from fitlins.interfaces.afni import (
create_glt_test_info,
set_intents,
get_afni_design_matrix,
get_glt_rows,
get_afni_intent_info,
get_afni_intent_info_for_subvol,
)
ContrastInfo = namedtuple('ContrastInfo', ('name', 'conditions', 'weights', 'test', 'entities'))
def get_reml_bucket_test_data():
afni_test_dir = "/tmp/afni_reml_test_data"
base_url = "https://afni.nimh.nih.gov/pub/dist/data/afni_ci_test_data/"
brik_url = ".git/annex/objects/Xf/MM/MD5E-s9120000--fcf7ebb9679ae37c9db37c02a927193f.BRIK/MD5E-s9120000--fcf7ebb9679ae37c9db37c02a927193f.BRIK"
head_url = ".git/annex/objects/f4/62/MD5E-s12015--3c2c821cd46d0cd47ae32c1bb8f4d8cc.HEAD/MD5E-s12015--3c2c821cd46d0cd47ae32c1bb8f4d8cc.HEAD"
brick = op.join(afni_test_dir, "reml_bucket.BRIK")
os.makedirs(afni_test_dir, exist_ok=True)
if not os.path.exists(brick):
sp.run(f"wget -O {brick} {op.join(base_url,brik_url)}", shell=True)
head = op.join(afni_test_dir, "reml_bucket.HEAD")
if not os.path.exists(head):
sp.run(f"wget -O {head} {op.join(base_url,head_url)}", shell=True)
return nb.load(brick)
def test_get_afni_design_matrix():
entities = {
"space": "MNI152NLin2009cAsym",
"subject": "01",
"task": "rhymejudgment",
}
contrast_info = [
ContrastInfo(
'a_test',
['trial_type.pseudoword', 'trial_type.word'],
[[2, 5], [1, -5]],
'F',
entities,
)
]
design = pd.DataFrame(
{
"trial_type.pseudoword": [11.2, 1],
"trial_type.word": [20, -1],
"noise": [7, 7],
"drift": [1, 2],
}
)
contrast_info = [c._asdict().copy() for c in contrast_info]
contrasts = prepare_contrasts(contrast_info, design.columns.tolist())
t_r = 2
stim_labels = ["trial_type.pseudoword", "trial_type.word"]
stim_labels_with_tag = ['stim_' + sl for sl in stim_labels]
test_info = create_glt_test_info(design, contrasts)
design_vals = design.to_csv(sep=" ", index=False, header=False)
cols = "; ".join(design.columns)
expected = f"""\
# <matrix
# ni_type = "{design.shape[1]}*double"
# ni_dimen = "{design.shape[0]}"
# RowTR = "{t_r}"
# GoodList = "0..{design.shape[0] - 1}"
# NRowFull = "{design.shape[0]}"
# CommandLine = "{' '.join(sys.argv)}"
# ColumnLabels = "{cols}"
# {test_info}
# Nstim = 2
# StimBots = "0; 1"
# StimTops = "0; 1"
# StimLabels = "{'; '.join(stim_labels_with_tag)}"
# >
{design_vals}
# </matrix>
"""
expected = "\n".join([x.lstrip() for x in expected.splitlines()])
assert expected == get_afni_design_matrix(design, contrasts, stim_labels, t_r)
def test_create_glt_test_info():
entities = {
"space": "MNI152NLin2009cAsym",
"subject": "01",
"task": "rhymejudgment",
}
contrast_info = [
ContrastInfo(
'a_test',
['trial_type.pseudoword', 'trial_type.word'],
[[2, 5], [1, -5]],
'F',
entities,
)
]
design = pd.DataFrame(
{
"trial_type.pseudoword": [11.2, 1],
"trial_type.word": [20, -1],
"noise": [7, 7],
"drift": [1, 2],
}
)
contrast_info = [c._asdict().copy() for c in contrast_info]
contrasts = prepare_contrasts(contrast_info, design.columns.tolist())
expected = """
# Nglt = "1"
# GltLabels = "a_test"
# GltMatrix_000000 = "2; 4; 2; 5; 0; 0; 1; -5; 0; 0; "\
"""
assert expected == create_glt_test_info(design, contrasts)
def test_get_glt_rows():
wt_arrays = (np.array([[0, 1, -1]]), np.array([[0, 1, 1], [-1, -1, -1]]))
assert get_glt_rows(wt_arrays) == [
'GltMatrix_000000 = "1; 3; 0; 1; -1; "',
'GltMatrix_000001 = "2; 3; 0; 1; 1; -1; -1; -1; "',
]
def test_set_intents():
import nibabel as nb
img = nb.Nifti1Image(np.zeros(5), None)
img_list = set_intents([img], [("f test", (2, 10))])
expected = ("f test", (2.0, 10.0), "")
assert expected == img_list[0].header.get_intent()
def test_get_afni_intent_info_for_subvol():
brick = get_reml_bucket_test_data()
intent_info = get_afni_intent_info_for_subvol(brick, 1)
assert intent_info == ("t test", (420,))
def test_get_afni_intent_info():
img = nb.load(op.join(data_path, "example4d+orig.HEAD"))
img.header.info["BRICK_STATSYM"] = "Ftest(2,10);none;none"
expected = [("f test", (2, 10)), ("none", ()), ("none", ())]
assert expected == get_afni_intent_info(img)
img.header.info.pop("BRICK_STATSYM")
assert get_afni_intent_info(img) == [("none", ()), ("none", ()), ("none", ())]
img.header.info["BRICK_STATSYM"] = "Zscore();none;none"
assert get_afni_intent_info(img) == [("z score", ()), ("none", ()), ("none", ())]
with pytest.raises(ValueError):
img.header.info["BRICK_STATSYM"] = "Zscore();none"
get_afni_intent_info(img)
|
from .constants import PERSISTENCE_VERSION
from .inspect import inspect_model, accuracy, plot_training, save_stats, load_stats
from .linalg import column_vector, row_vector, row_vector_list, column_vector_list
from .logs import get_logger
from .preprocessing import split_training_test
from .typehints import Num
__all__ = [
'Num', 'get_logger', 'column_vector', 'row_vector', 'row_vector_list', 'column_vector_list', 'PERSISTENCE_VERSION',
'inspect_model', 'accuracy', 'split_training_test', 'plot_training', 'save_stats', 'load_stats'
]
|
'''Filtering expressions.
These are generated by a __getitem__ call to an `Expr`, when
passed a non-numeric, non-tuple type, (e.g. x[idx_x]).
'''
import numpy as np
from .. import util, blob_ctx
from ..array import extent, tile, distarray
from ..core import LocalKernelResult
from ..util import Assert, join_tuple
from .base import Expr, ListExpr, TupleExpr
from . import base
from traits.api import Instance, PythonValue
class FilterExpr(Expr):
'''Represents an indexing operation.
Attributes:
src: `Expr` to index into
idx: `tuple` (for slicing) or `Expr` (for bool/integer indexing)
'''
src = Instance(Expr)
idx = PythonValue(None, desc="Tuple or Expr")
def __init__(self, *args, **kw):
super(FilterExpr, self).__init__(*args, **kw)
assert not isinstance(self.src, ListExpr)
assert not isinstance(self.idx, ListExpr)
assert not isinstance(self.idx, TupleExpr)
def compute_shape(self):
if isinstance(self.idx, (int, slice, tuple)):
src_shape = self.src.compute_shape()
ex = extent.from_shape(src_shape)
slice_ex = extent.compute_slice(ex, self.idx)
return slice_ex.shape
else:
raise base.NotShapeable
def _evaluate(self, ctx, deps):
src = deps['src']
idx = deps['idx']
assert not isinstance(idx, list)
util.log_debug('Evaluating index: %s', idx)
return eval_index(ctx, src, idx)
def _int_index_mapper(ex, src, idx, dst):
'''Kernel function for indexing via an integer array.
Iterate over entries in ``idx`` and fetch the values
from ``src``, writing into ``dst``.
Args:
ex: `Extent` to process.
src (DistArray):
idx (DistArray):
dst (DistArray):
'''
idx_vals = idx.fetch(extent.drop_axis(ex, -1))
output = []
for dst_idx, src_idx in enumerate(idx_vals):
output.append(src.select(src_idx))
output_ex = extent.create(
([ex.ul[0]] + [0] * (len(dst.shape) - 1)),
([ex.lr[0]] + list(output[0].shape)),
(dst.shape))
#util.log_info('%s %s', output_ex.shape, np.array(output).shape)
output_tile = tile.from_data(np.array(output))
tile_id = blob_ctx.get().create(output_tile).wait().tile_id
return LocalKernelResult(result=[(output_ex, tile_id)])
def _bool_index_mapper(ex, src, idx):
'''Kernel function for boolean indexing.
  Fetches the input data from ``src`` and applies the mask from ``idx``.
Args:
ex: `Extent` to process.
src (DistArray):
idx (DistArray):
'''
val = src.fetch(ex)
mask = idx.fetch(ex)
#util.log_info('\nVal: %s\n Mask: %s', val, mask)
masked_val = np.ma.masked_array(val, mask)
output_tile = tile.from_data(masked_val)
tile_id = blob_ctx.get().create(output_tile).wait().tile_id
return LocalKernelResult(result=[(ex, tile_id)])
def eval_index(ctx, src, idx):
'''
Index an array by another array (boolean or integer).
Args:
ctx: `BlobCtx`
src: :py:class:`DistArray` to read from
idx: `DistArray` of bool or integer index.
Returns:
DistArray: The result of src[idx]
'''
Assert.isinstance(idx, (np.ndarray, distarray.DistArray))
if idx.dtype == np.bool or idx.dtype == np.bool_:
# return a new array masked by `idx`
dst = src.map_to_array(_bool_index_mapper, kw={'src': src, 'idx': idx})
return dst
else:
util.log_info('Integer indexing...')
Assert.eq(len(idx.shape), 1)
# create empty destination of same first dimension as the index array
dst = distarray.create(join_tuple([idx.shape[0]], src.shape[1:]), dtype=src.dtype)
# map over it, fetching the appropriate values for each tile.
return dst.map_to_array(_int_index_mapper, kw={'src': src, 'idx': idx, 'dst': dst})
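# A hedged numpy illustration (plain ndarrays standing in for DistArrays) of
# the two code paths above: integer indexing gathers whole rows of src, while
# the boolean path applies a mask rather than compacting the selection.
#
#   src = np.arange(12).reshape(4, 3)
#   src[np.array([0, 2])]                 # rows 0 and 2, like _int_index_mapper
#   np.ma.masked_array(src, src > 5)      # masked values, like _bool_index_mapper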
|
from setuptools import setup, find_packages
setup(
name="been",
description="A life stream collector.",
version="0.1",
author="Max Goodman",
author_email="c@chromakode.com",
keywords="feed lifestream",
license="BSD",
classifiers=[
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
],
packages=find_packages(),
install_requires=[
"feedparser",
"markdown",
"CouchDB",
"redis",
],
extras_require={
"twitter": ["python-twitter"],
},
entry_points={
"console_scripts": [
"been = been.cli:main",
],
}
)
|
class WarehouseClickAndCollectOption:
DISABLED = "disabled"
LOCAL_STOCK = "local"
ALL_WAREHOUSES = "all"
CHOICES = [
(DISABLED, "Disabled"),
(LOCAL_STOCK, "Local stock only"),
(ALL_WAREHOUSES, "All warehouses"),
]
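# A hedged usage sketch (the model field name is hypothetical): CHOICES is
# shaped for a Django CharField, e.g.
#
# click_and_collect_option = models.CharField(
#     max_length=30,
#     choices=WarehouseClickAndCollectOption.CHOICES,
#     default=WarehouseClickAndCollectOption.DISABLED,
# )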
|
import socket
use_internet_socket = socket.AF_INET
use_tcp_protocol = socket.SOCK_STREAM
with socket.socket(use_internet_socket, use_tcp_protocol) as s:
host = 'bangbangcon.com'
port = 80
s.connect((host, port))
s.sendall(b"GET / HTTP/1.1\r\n")
s.sendall(b"Host: bangbangcon.com\r\n")
s.sendall(b"Connection: close\r\n")
s.sendall(b"\r\n")
response = s.recv(2048)
print("✨ i travelled across the internet to get you this 👇 ✨")
print(response.decode("ascii"))
|
from rest_framework.permissions import BasePermission
class IsAuthenticatedOrOptionsOnly(BasePermission):
"""Доступ если пользователь авторизирован, или
если метод OPTIONS"""
def has_permission(self, request, view):
return bool(
request.method == "OPTIONS" or
request.user and
request.user.is_authenticated
)
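# A hedged usage sketch (the view is hypothetical, not part of this module):
# the permission class plugs into any DRF view via permission_classes, so
# unauthenticated clients can still send OPTIONS preflight requests.
#
# from rest_framework.views import APIView
# from rest_framework.response import Response
#
# class ExampleView(APIView):
#     permission_classes = [IsAuthenticatedOrOptionsOnly]
#
#     def get(self, request):
#         return Response({"ok": True})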
|
from cifar10_data_random_labels import CIFAR10RandomLabels
from cifar10_data_subset import CIFAR10Subset
from torchvision import datasets, transforms
import torch
def get_loader(args):
    CIFAR10_mean, CIFAR10_var = (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)  # per-channel mean and std (despite the name, the second tuple is a std, as transforms.Normalize expects)
transform_train = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CIFAR10_mean, CIFAR10_var),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CIFAR10_mean, CIFAR10_var),
])
if args.random_labels:
kwargs = {'num_workers': 2, 'pin_memory': True}
        ## Here, we can try with or without shuffling the data
train_loader = torch.utils.data.DataLoader(
CIFAR10RandomLabels(root='../../data/random_labels', train=True, download=True,
label_path = args.random_label_path,
transform=transform_train, num_classes=args.num_classes,
corrupt_prob=args.label_corrupt_prob, trainset = True),
batch_size=args.train_bs, shuffle=args.shuffle_random_data, **kwargs)
test_loader = torch.utils.data.DataLoader(
CIFAR10RandomLabels(root='../../data/random_labels', train=False,
label_path = args.random_label_path_test,
transform=transform_test, num_classes=args.num_classes,
corrupt_prob=args.label_corrupt_prob, trainset = False, test_on_noise=args.test_on_noise),
batch_size=args.test_bs, shuffle=False, **kwargs)
elif args.data_subset:
kwargs = {'num_workers': 2, 'pin_memory': True}
train_set = CIFAR10Subset(root='../../data', train=True, download=True, subset=args.subset,
label_path = args.random_label_path, corrupt_prob=args.label_corrupt_prob,
trainset = True, subset_noisy=args.subset_noisy,
transform=transform_train)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.train_bs, shuffle=True, **kwargs)
if args.subset_noisy and args.test_on_noise:
test_set = CIFAR10RandomLabels(root='../../data/random_labels', train=False,
label_path = args.random_label_path_test,
transform=transform_test, num_classes=args.num_classes,
corrupt_prob=args.label_corrupt_prob, trainset = False, test_on_noise=args.test_on_noise)
else:
test_set = datasets.CIFAR10(root='../../data', train=False, download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.test_bs, shuffle=False)
else:
trainset = datasets.CIFAR10(root='../../data', train=True, download=True, transform=transform_train)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.train_bs, shuffle=True)
testset = datasets.CIFAR10(root='../../data', train=False, download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_bs, shuffle=False)
return train_loader, test_loader
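# A hedged usage sketch (the namespace values are assumptions, not project
# defaults): only the flags read by the plain-CIFAR10 branch are needed.
#
# from types import SimpleNamespace
# args = SimpleNamespace(random_labels=False, data_subset=False,
#                        train_bs=128, test_bs=100)
# train_loader, test_loader = get_loader(args)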
|
## @ingroup Core
# redirect.py
#
# Created: Aug 2015, T. Lukacyzk
# Modified: Feb 2016, T. MacDonald
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import os, sys, shutil, copy
# -------------------------------------------------------------------
# Output Redirection
# -------------------------------------------------------------------
## @ingroup Core
class output(object):
""" Temporarily redirects sys.stdout and sys.stderr when used in
a 'with' contextmanager
Example:
with SU2.io.redirect_output('stdout.txt','stderr.txt'):
sys.stdout.write("standard out")
sys.stderr.write("stanrard error")
# code
#: with output redirection
Inputs:
stdout - None, a filename, or a file stream
stderr - None, a filename, or a file stream
            None will not redirect output
Source:
http://stackoverflow.com/questions/6796492/python-temporarily-redirect-stdout-stderr
"""
def __init__(self, stdout=None, stderr=None):
""" Initializes a new output() class
Assumptions:
N/A
Source:
N/A
Inputs:
N/A
Outputs:
N/A
Properties Used:
N/A
"""
_newout = False
_newerr = False
if isinstance(stdout,str):
stdout = open(stdout,'a')
_newout = True
if isinstance(stderr,str):
stderr = open(stderr,'a')
_newerr = True
self._stdout = stdout or sys.stdout
self._stderr = stderr or sys.stderr
self._newout = _newout
self._newerr = _newerr
def __enter__(self):
self.old_stdout, self.old_stderr = sys.stdout, sys.stderr
self.old_stdout.flush(); self.old_stderr.flush()
sys.stdout, sys.stderr = self._stdout, self._stderr
def __exit__(self, exc_type, exc_value, traceback):
self._stdout.flush(); self._stderr.flush()
sys.stdout = self.old_stdout
sys.stderr = self.old_stderr
if self._newout:
self._stdout.close()
if self._newerr:
self._stderr.close()
# -------------------------------------------------------------------
# Folder Redirection
# -------------------------------------------------------------------
## @ingroup Core
class folder(object):
""" Temporarily redirects to a working folder, pulling
and pushing needed files
Example:
folder = 'temp'
pull = ['file1.txt','file2.txt']
link = ['file3.big']
force = True
# original path
import os
            print(os.getcwd())
# enter folder
with SU2.io.redirect_folder(folder,pull,link,force) as push:
                print(os.getcwd())
# code
push.append('file4.txt')
#: with folder redirection
# returned to original path
            print(os.getcwd())
Inputs:
folder - working folder, relative or absolute
pull - list of files to pull (copy to working folder)
link - list of files to link (symbolic link in working folder)
force - True/False overwrite existing files in working folder
Targets:
push - list of files to push (copy to originating path)
Notes:
push must be appended or extended, not overwritten
links in Windows not supported, will simply copy
"""
def __init__(self, folder, pull=None, link=None, force=True ):
''' folder redirection initialization
see help( folder ) for more info
'''
if pull is None: pull = []
if link is None: link = []
if not isinstance(pull,list) : pull = [pull]
if not isinstance(link,list) : link = [link]
origin = os.getcwd()
origin = os.path.abspath(origin).rstrip('/')+'/'
folder = os.path.abspath(folder).rstrip('/')+'/'
self.origin = origin
self.folder = folder
self.pull = copy.deepcopy(pull)
self.push = []
self.link = copy.deepcopy(link)
self.force = force
def __enter__(self):
origin = self.origin # absolute path
folder = self.folder # absolute path
pull = self.pull
push = self.push
link = self.link
force = self.force
# check for no folder change
if folder == origin:
return []
# check, make folder
if not os.path.exists(folder):
os.makedirs(folder)
# copy pull files
for name in pull:
old_name = os.path.abspath(name)
new_name = os.path.split(name)[-1]
new_name = os.path.join(folder,new_name)
if old_name == new_name: continue
if os.path.exists( new_name ):
if force: os.remove( new_name )
else: continue
shutil.copy(old_name,new_name)
# make links
for name in link:
old_name = os.path.abspath(name)
new_name = os.path.split(name)[-1]
new_name = os.path.join(folder,new_name)
if old_name == new_name: continue
if os.path.exists( new_name ):
if force: os.remove( new_name )
else: continue
make_link(old_name,new_name)
# change directory
os.chdir(folder)
# return empty list to append with files to push to super folder
return push
def __exit__(self, exc_type, exc_value, traceback):
origin = self.origin
folder = self.folder
push = self.push
force = self.force
# check for no folder change
if folder == origin:
return
# move assets
for name in push:
old_name = os.path.abspath(name)
name = os.path.split(name)[-1]
new_name = os.path.join(origin,name)
# links
if os.path.islink(old_name):
source = os.path.realpath(old_name)
if source == new_name: continue
if os.path.exists( new_name ):
if force: os.remove( new_name )
else: continue
make_link(source,new_name)
# moves
else:
if old_name == new_name: continue
if os.path.exists( new_name ):
if force: os.remove( new_name )
else: continue
shutil.move(old_name,new_name)
# change directory
os.chdir(origin)
def make_link(src,dst):
""" make_link(src,dst)
makes a relative link
Inputs:
src - source file
dst - destination to place link
Windows links currently unsupported, will copy file instead
"""
assert os.path.exists(src) , 'source file does not exist \n%s' % src
if os.name == 'nt':
# can't make a link in windows, need to look for other options
if os.path.exists(dst): os.remove(dst)
shutil.copy(src,dst)
else:
        # find real file, in case source itself is a link
src = os.path.realpath(src)
# normalize paths
src = os.path.normpath(src)
dst = os.path.normpath(dst)
# check for self referencing
if src == dst: return
# find relative folder path
srcfolder = os.path.join( os.path.split(src)[0] ) + '/'
dstfolder = os.path.join( os.path.split(dst)[0] ) + '/'
srcfolder = os.path.relpath(srcfolder,dstfolder)
src = os.path.join( srcfolder, os.path.split(src)[1] )
# make unix link
if os.path.exists(dst): os.remove(dst)
os.symlink(src,dst)
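# A hedged usage sketch for make_link (the paths are hypothetical): create a
# relative symlink run/mesh.su2 pointing back at assets/mesh.su2; on Windows
# the file is copied instead.
#
# make_link('assets/mesh.su2', 'run/mesh.su2')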
|
from setuptools import setup
setup(
name='vaultauth',
version='0.0.1',
author='Joakim Uddholm',
author_email='tethik@gmail.com',
description='Program to authenticate into vault',
url='https://github.com/Tethik/lame-cli-programs',
py_modules=['vaultauth'],
entry_points = {
'console_scripts': ['vaultauthpy=vaultauth:main'],
},
scripts = ['bin/vaultauth'],
package_data={'': ['README.md']},
include_package_data=True,
install_requires=[
"hvac",
"keyring",
"keyrings.alt"
]
)
|
# Generated by Django 2.2.2 on 2019-06-25 02:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0007_auto_20190624_1405'),
]
operations = [
migrations.AddField(
model_name='post',
name='date',
field=models.DateTimeField(auto_now_add=True, null=True),
),
]
|
#!/usr/bin/env python
import argparse
import contextlib
from collections import defaultdict
import string
import sys
import os
def main():
script_path = os.path.realpath(__file__)
script_dir = os.path.dirname(script_path)
default_input = os.path.join(
script_dir, "UnitTests", "TestData", "gen", "completion"
)
default_output = os.path.join(script_dir, "LanguageServer", "Test", "GenTests.cs")
parser = argparse.ArgumentParser(
description="Generate completion and hover tests",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--ignore",
type=str,
help="comma separated list of tests to disable, of the form <filename>(:<linenum>)",
)
parser.add_argument(
"--only", type=str, help="comma separated list of tests to generate"
)
parser.add_argument(
"-o",
"--out",
nargs="?",
type=argparse.FileType("w"),
default=default_output,
help="output file",
)
parser.add_argument(
"-i",
"--input",
type=str,
default=default_input,
help="location of completions directory",
)
parser.add_argument(
"--table",
nargs="?",
type=argparse.FileType("w"),
default=os.devnull,
help="file to write test names to",
)
args = parser.parse_args()
if args.only:
to_generate = set(args.only.split(","))
else:
to_generate = set(DEFAULT_TEST_FILES)
line_skip = defaultdict(set)
if args.ignore:
for i in args.ignore.split(","):
if ":" not in i:
to_generate.discard(i)
else:
name, line = i.split(":")
try:
line = int(line)
                except ValueError:
print(f"error in format of ignored item {i}", file=sys.stderr)
return
line_skip[name].add(line)
to_generate = sorted(to_generate)
with contextlib.redirect_stdout(args.out):
print(PREAMBLE)
for name in to_generate:
filename = os.path.join(args.input, name + ".py")
ignored_lines = line_skip[name]
create_tests(name, filename, ignored_lines, args.table)
print(POSTAMBLE)
def create_tests(name, filename, ignored_lines, table_f):
camel_name = snake_to_camel(name)
with open(filename) as fp:
lines = fp.read().splitlines()
width = len(str(len(lines)))
tests = []
for i, line in enumerate(lines):
if i in ignored_lines:
continue
line: str = line.strip()
if not line.startswith("#?"):
continue
line = line[2:].strip()
next_line = lines[i + 1]
col = len(next_line)
if " " in line:
maybe_num = line.split(" ", 1)
try:
col = int(maybe_num[0])
line = maybe_num[1]
except ValueError:
pass
filt = next_line[:col].lstrip()
filt = rightmost_token(filt, ". {[(\t@")
args = line.strip()
func_name = "Line_{0:0{pad}}".format(i + 1, pad=width)
func_name = camel_name + "_" + func_name
is_completion = args.startswith("[")
func_name += "_Completion" if is_completion else "_Hover"
tmpl = COMPLETION_TEST if is_completion else HOVER_TEST
print(func_name, file=table_f)
tests.append(
tmpl.format(
name=func_name,
module=csharp_str(name),
line=i + 1,
col=col,
args=csharp_str(args),
filter=csharp_str(filt),
)
)
if tests:
print(CLASS_PREAMBLE.format(name=camel_name))
for t in tests:
print(t)
print(CLASS_POSTAMBLE)
DEFAULT_TEST_FILES = [
"arrays",
"async_",
"basic",
"classes",
"completion",
"complex",
"comprehensions",
"context",
"decorators",
"definition",
"descriptors",
"docstring",
"dynamic_arrays",
"dynamic_params",
"flow_analysis",
"fstring",
"functions",
"generators",
"imports",
"invalid",
"isinstance",
"keywords",
"lambdas",
"named_param",
"on_import",
"ordering",
"parser",
"pep0484_basic",
"pep0484_comments",
"pep0484_typing",
"pep0526_variables",
"precedence",
"recursion",
"stdlib",
"stubs",
"sys_path",
"types",
]
PREAMBLE = """// Python Tools for Visual Studio
// Copyright(c) Microsoft Corporation
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the License); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
// IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABILITY OR NON-INFRINGEMENT.
//
// See the Apache Version 2.0 License for specific language governing
// permissions and limitations under the License.
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using FluentAssertions;
using Microsoft.Python.Analysis;
using Microsoft.Python.Analysis.Analyzer;
using Microsoft.Python.Analysis.Core.Interpreter;
using Microsoft.Python.Analysis.Documents;
using Microsoft.Python.Core.Idle;
using Microsoft.Python.Core.Services;
using Microsoft.Python.Core.Text;
using Microsoft.Python.LanguageServer;
using Microsoft.Python.LanguageServer.Completion;
using Microsoft.Python.LanguageServer.Sources;
using Microsoft.Python.LanguageServer.Tests;
using Microsoft.Python.Parsing;
using Microsoft.Python.Parsing.Tests;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using NSubstitute;
using TestUtilities;
namespace GenTests {"""
POSTAMBLE = """
public class GenTest : LanguageServerTestBase {
private static readonly Dictionary<string, Task<IDocumentAnalysis>> _analysis = new Dictionary<string, Task<IDocumentAnalysis>>();
private static readonly InterpreterConfiguration _interpreter = PythonVersions.LatestAvailable3X;
private static readonly PythonLanguageVersion _version = _interpreter.Version.ToLanguageVersion();
private static readonly CompletionSource _cs = new CompletionSource(new PlainTextDocumentationSource(), ServerSettings.completion);
private static readonly HoverSource _hs = new HoverSource(new PlainTextDocumentationSource());
static GenTest() {
_interpreter.TypeshedPath = TestData.GetDefaultTypeshedPath();
}
protected async Task<IDocumentAnalysis> GetGenAnalysisAsync(string module) {
// Create an analyzer per module. This is slower than creating a single
// analyzer shared between all GenTest instances, but sharing them makes
// the "types" module fail (due to a bug where the name "types" shadows
// a builtin module name).
if (_analysis.TryGetValue(module, out var analysis)) {
return await analysis;
}
var root = TestData.GetPath("TestData", "gen", "completion");
var sm = CreateServiceManager();
sm.AddService(new PythonAnalyzer(sm));
sm.AddService(await PythonInterpreter.CreateAsync(_interpreter, root, sm));
sm.AddService(new RunningDocumentTable(sm));
var src = TestData.GetPath("TestData", "gen", "completion", module + ".py");
analysis = GetAnalysisAsync(File.ReadAllText(src), sm, modulePath: src);
_analysis[module] = analysis;
return await analysis;
}
protected async Task DoCompletionTest(string module, int lineNum, int col, string args, string filter) {
filter = filter.ToLowerInvariant();
var tests = string.IsNullOrWhiteSpace(args) ? new List<string>() : ParseStringList(args).Select(s => s.ToLowerInvariant()).ToList();
var analysis = await GetGenAnalysisAsync(module);
var res = _cs.GetCompletions(analysis, new SourceLocation(lineNum + 1, col + 1));
var items = res?.Completions?.Select(item => item.insertText.ToLowerInvariant())
.Where(t => t.ToLowerInvariant().Contains(filter))
.ToList() ?? new List<string>();
if (tests.Count == 0) {
items.Should().BeEmpty();
} else {
items.Should().Contain(tests);
}
}
protected async Task DoHoverTest(string module, int lineNum, int col, string args) {
var tests = string.IsNullOrWhiteSpace(args)
? new List<string>()
: args.Split(' ', options: StringSplitOptions.RemoveEmptyEntries).Select(s => s.EndsWith("()") ? s.Substring(0, s.Length - 2) : s).ToList();
var analysis = await GetGenAnalysisAsync(module);
var res = _hs.GetHover(analysis, new SourceLocation(lineNum + 1, col + 1));
if (tests.Count == 0) {
res?.contents.value.Should().BeEmpty();
} else {
res.Should().NotBeNull();
res.contents.value.Should().ContainAll(tests);
}
}
protected List<string> ParseStringList(string s) {
var list = new List<string>();
using (var reader = new StringReader(s)) {
var tokenizer = new Tokenizer(_version);
tokenizer.Initialize(reader);
while (!tokenizer.IsEndOfFile) {
var token = tokenizer.GetNextToken();
if (token.Kind == TokenKind.EndOfFile) {
break;
}
switch (token.Kind) {
case TokenKind.Constant when token != Tokens.NoneToken && (token.Value is string || token.Value is AsciiString):
list.Add(token.Image);
break;
}
}
}
return list;
}
}
}
"""
CLASS_PREAMBLE = """ [TestClass]
public class {name}Tests : GenTest {{
public TestContext TestContext {{ get; set; }}
[TestInitialize]
public void TestInitialize() => TestEnvironmentImpl.TestInitialize($"{{TestContext.FullyQualifiedTestClassName}}.{{TestContext.TestName}}");
[TestCleanup]
public void TestCleanup() => TestEnvironmentImpl.TestCleanup();"""
CLASS_POSTAMBLE = """
}"""
COMPLETION_TEST = """
[TestMethod, Priority(0)] public async Task {name}() => await DoCompletionTest({module}, {line}, {col}, {args}, {filter});"""
HOVER_TEST = """
[TestMethod, Priority(0)] public async Task {name}() => await DoHoverTest({module}, {line}, {col}, {args});"""
def snake_to_camel(s):
return string.capwords(s, "_").replace("_", "")
def rightmost_token(s, cs):
for c in cs:
i = s.rfind(c)
if i != -1:
s = s[i + 1 :]
return s
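# For example (an added illustration): rightmost_token("foo.bar(ba", ". {[(\t@")
# strips everything up to the rightmost delimiter and returns "ba", which is
# how the completion filter above is derived from the line under the cursor.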
def csharp_str(s):
if s is None:
return "null"
s = s.replace('"', '""')
return '@"{}"'.format(s)
if __name__ == "__main__":
main()
|
from urllib.request import urlopen
with urlopen('https://www.bred-it.com/') as response :
for line in response :
line = line.decode('utf-8')
print(line)
# if 'EST' in line or 'EDT' in line :
# print(line)
|
import logging
import shutil
import subprocess
import sys
from pathlib import Path
from redash import extensions
from redash.tasks import periodic_job_definitions
from tests import BaseTestCase
logger = logging.getLogger(__name__)
dummy_extension = "redash-dummy"
this_dir = Path(__file__).parent.resolve()
app_dir = this_dir.parent.parent
dummy_path = str(this_dir / dummy_extension)
test_bundle = (
app_dir / "client" / "app" / "extensions" / "wide_footer" / "WideFooter.jsx"
)
class TestExtensions(BaseTestCase):
@classmethod
def setUpClass(cls):
sys.path.insert(0, dummy_path)
@classmethod
def tearDownClass(cls):
sys.path.remove(dummy_path)
def test_working_extension(self):
self.assertIn("working_extension", extensions.extensions.keys())
self.assertEqual(
extensions.extensions.get("working_extension"), "extension loaded"
)
def test_assertive_extension(self):
self.assertNotIn("assertive_extension", extensions.extensions.keys())
def test_not_findable_extension(self):
self.assertNotIn("not_findable_extension", extensions.extensions.keys())
def test_not_importable_extension(self):
self.assertNotIn("not_importable_extension", extensions.extensions.keys())
def test_non_callable_extension(self):
self.assertNotIn("non_callable_extension", extensions.extensions.keys())
def test_dummy_periodic_task(self):
        # need to load the periodic tasks manually since this isn't
        # done automatically on test suite start but is only done as
        # part of the worker configuration
extensions.load_periodic_jobs(logger)
self.assertIn("dummy_periodic_job", extensions.periodic_jobs.keys())
def test_dummy_periodic_task_definitions(self):
jobs = periodic_job_definitions()
from redash_dummy.jobs import job_callback
self.assertIn(job_callback, [job.get("func", None) for job in jobs])
class TestBundles(BaseTestCase):
@classmethod
def setUpClass(cls):
# Install the redash-dummy package temporarily using pip
# in the user's local site package directory under ~/.local/
subprocess.call(["pip", "install", "--user", dummy_path])
@classmethod
def tearDownClass(cls):
subprocess.call(["pip", "uninstall", "-y", "redash-dummy"])
def test_bundle_extensions(self):
# cleaning up after running bundle-extensions again
self.addCleanup(lambda: shutil.rmtree(test_bundle.parent))
assert not test_bundle.exists()
subprocess.run(str(app_dir / "bin" / "bundle-extensions"), check=True)
assert test_bundle.exists()
|
"""
word2vec_2.py:
    1. Build the merged document collection doc_set.txt
    2. Train a Word2Vec model with the gensim package to obtain word vectors:
        word2vec.txt ------------ word vectors (24094*200)
        vocab_set.txt ----------- corpus vocabulary
"""
from gensim.models.word2vec import LineSentence, Word2Vec
import os
PATH_DATA_SOURCE = './data/clause_keywords_emotion.txt'
PATH_DOC_SET = './data/doc_set.txt'
PATH_WORD_VEC_MODEL = './data/word2vec'
PATH_WORD_VEC_TXT = './data/word_vec.txt'
PATH_VOCAB_SET = './data/vocab_set.txt'
# ======================= Write each unique document from fin into fout ==============================================
def get_doc_set(fin, fout):
    # Collection of documents
    doc_set = []
    # Total number of documents
    doc_num = 0
    # Max clause length in the dataset, i.e. the max word count of a single clause
    max_clause_len = 0
    # Index of the previous clause
    idx_clause_pre = 0
    # Content of the current doc
    doc = ''
    # Max doc length in the dataset, i.e. the largest clause index
    max_doc_len = 0
    # Total clause count across unique docs
    num_lines = 0
    # Clause count of the current doc
    cur_lines = 0
for line in fin:
        # Strip leading/trailing whitespace and newlines, then split on commas
line = line.strip().split(',')
if not line:
continue
        # Track the max number of clauses in a single doc
        if int(line[2]) > max_doc_len:
            max_doc_len = int(line[2])
        # line[0]: the index of the doc this clause belongs to
        # Same doc as the previous clause
        if line[0] == idx_clause_pre:
cur_lines += 1
doc = doc + ' ' + line[-1].strip()
            # Track the max length of a single clause
if len(line[-1].strip()) > max_clause_len:
max_clause_len = len(line[-1].strip())
else:
            # If it differs from all saved docs, store it in doc_set
if doc not in doc_set and idx_clause_pre != 0:
doc_set.append(doc)
doc_num += 1
num_lines += cur_lines
            # Reset the running state
cur_lines = 1
doc = line[-1].strip()
idx_clause_pre = line[0]
    # Write the final doc
if doc not in doc_set:
doc_set.append(doc)
doc_num += 1
num_lines += cur_lines
    # Write each doc to fout
for doc in doc_set:
fout.write(doc + '\n')
    # # Max length of a single clause (word count)
    # print(max_clause_len)  # 117
    # # Max length of a single doc (clause count)
    # print(max_doc_len)  # 73
    # # Total clause count
    # print(num_lines)  # 28553
return doc_num
if __name__ == "__main__":
    # Merge the documents in the training data
if not os.path.exists(PATH_DOC_SET):
with open(PATH_DOC_SET, 'wt', encoding='utf-8') as fout:
with open(PATH_DATA_SOURCE, 'rt', encoding='utf-8') as fin:
                # Returns the total number of documents
doc_num = get_doc_set(fin, fout)
    # print(doc_num)  # 1933
    # Load the corpus
sentences = LineSentence(PATH_DOC_SET)
model = Word2Vec(sentences, size=200, sg=1, min_count=1)
model.save(PATH_WORD_VEC_MODEL)
# word2vec.txt:词向量表(24094*200)
# vocab.txt:保存语料库中所有词
model.wv.save_word2vec_format(PATH_WORD_VEC_TXT, fvocab=PATH_VOCAB_SET)
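    # A hedged follow-up sketch: reload the saved model and query nearest
    # neighbours for a corpus word (the query word is an assumption):
    # model = Word2Vec.load(PATH_WORD_VEC_MODEL)
    # print(model.wv.most_similar(positive=[some_word], topn=5))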
|
"""
This is the command-line interface for the NTCIR-10 Math converter package.
"""
from argparse import ArgumentParser
import logging
from logging import getLogger
from pathlib import Path
from sys import stdout
from .converter import convert_judgements, get_judged_identifiers, process_dataset
LOG_FILE = Path("__main__.log")
LOG_FORMAT = "%(asctime)s : %(levelname)s : %(message)s"
ROOT_LOGGER = getLogger()
LOGGER = getLogger(__name__)
def main():
""" Main entry point of the app """
ROOT_LOGGER.setLevel(logging.DEBUG)
file_handler = logging.StreamHandler(LOG_FILE.open("wt"))
formatter = logging.Formatter(LOG_FORMAT)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.DEBUG)
ROOT_LOGGER.addHandler(file_handler)
terminal_handler = logging.StreamHandler(stdout)
terminal_handler.setFormatter(formatter)
terminal_handler.setLevel(logging.INFO)
ROOT_LOGGER.addHandler(terminal_handler)
LOGGER.debug("Parsing command-line arguments")
parser = ArgumentParser(
description="""
Convert NTCIR-10 Math XHTML5 dataset and relevance judgements to the NTCIR-11 Math-2,
and NTCIR-12 MathIR XHTML5 format.
""")
parser.add_argument(
"--dataset", nargs='+', required=True, type=Path, help="""
A path to a directory containing the NTCIR-10 Math XHTML5 dataset, and a path to a
non-existent directory that will contain resulting dataset in the NTCIR-11 Math-2, and
NTCIR-12 MathIR XHTML5 format. If only the path to the NTCIR-10 Math dataset is
specified, the dataset will be read to find out the mapping between element identifiers,
and paragraph identifiers. This is required for converting the relevance judgements.
""")
parser.add_argument(
"--judgements", nargs='+', type=Path, help="""
Paths to the files containing NTCIR-10 Math relevance judgements (odd arguments),
followed by paths to the files that will contain resulting relevance judgements in the
NTCIR-11 Math-2, and NTCIR-12 MathIR format (even arguments).
""")
parser.add_argument(
"--num-workers", type=int, default=1, help="""
The number of processes that will be used for processing the NTCIR-10 Math dataset.
Defaults to %(default)d.
""")
args = parser.parse_args()
LOGGER.debug("Performing sanity checks on the command-line arguments")
input_dir = args.dataset[0]
output_dir = args.dataset[1] / Path("xhtml5") if len(args.dataset) > 1 else None
assert input_dir.exists() and input_dir.is_dir()
if output_dir:
assert not output_dir.exists()
if args.judgements:
assert len(args.judgements) % 2 == 0
assert args.num_workers > 0
if args.judgements:
judged_identifiers = {}
for i in range(len(args.judgements) // 2):
input_file = args.judgements[2 * i]
LOGGER.info(
"Retrieving judged document names, and element identifiers from %s", input_file)
with input_file.open("rt") as f:
for document_name, element_identifier in get_judged_identifiers(f):
LOGGER.debug(
"Document %s, element id %s is judged", document_name, element_identifier)
if document_name not in judged_identifiers:
judged_identifiers[document_name] = set()
judged_identifiers[document_name].add(element_identifier)
else:
judged_identifiers = None
if args.judgements:
LOGGER.info("Processing dataset %s", input_dir)
identifier_map = process_dataset(
input_dir, output_dir, judged_identifiers, args.num_workers)
if args.judgements:
for i in range(len(args.judgements) // 2):
input_file = args.judgements[2 * i]
output_file = args.judgements[2 * i + 1]
LOGGER.info(
"Converting relevance judgements %s -> %s", input_file, output_file)
with input_file.open("rt") as f1, output_file.open("wt") as f2:
convert_judgements(f1, f2, identifier_map)
if __name__ == "__main__":
""" This is executed when run from the command line """
main()
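# Example invocation (a hedged sketch; the package name and paths are
# hypothetical):
#     python -m ntcir_math_converter --dataset /data/ntcir10 /data/ntcir11 \
#         --judgements NTCIR10-qrels.txt NTCIR11-qrels.txt --num-workers 4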
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import binascii
import gzip
import json
import os
import shutil
import sys
import tempfile
import unittest
from tracing.extras.symbolizer import symbolize_trace
_THIS_DIR_PATH = os.path.abspath(os.path.dirname(__file__))
_TRACING_DIR = os.path.abspath(
os.path.join(_THIS_DIR_PATH,
os.path.pardir,
os.path.pardir,
os.path.pardir))
_PY_UTILS_PATH = os.path.abspath(os.path.join(
_TRACING_DIR,
os.path.pardir,
'common',
'py_utils'))
sys.path.append(_PY_UTILS_PATH)
# pylint: disable=import-error
import py_utils.cloud_storage as cloud_storage
def _DownloadFromCloudStorage(path):
print 'Downloading %s from gcs.' % (path)
cloud_storage.GetIfChanged(path, cloud_storage.PARTNER_BUCKET)
def GetJSonCrc(root):
checksum = 0
if isinstance(root, dict):
for key, value in root.iteritems():
checksum = checksum ^ (GetJSonCrc(key) + GetJSonCrc(value))
elif isinstance(root, list):
for value in root:
checksum = checksum ^ GetJSonCrc(value)
else:
checksum = binascii.crc32(json.dumps(root))
return checksum
def GetTraceCrc(filename):
with gzip.open(filename, 'rb') as fd:
content = json.loads(fd.read())
checksum = GetJSonCrc(content)
return checksum
class SymbolizeTraceEndToEndTest(unittest.TestCase):
def _RunSymbolizationOnTrace(self, pre_symbolization, post_symbolization,
extra_options):
trace_presymbolization_path = os.path.join(
_THIS_DIR_PATH, 'data', pre_symbolization)
_DownloadFromCloudStorage(trace_presymbolization_path)
self.assertTrue(os.path.exists(trace_presymbolization_path))
trace_postsymbolization_path = os.path.join(
_THIS_DIR_PATH, 'data', post_symbolization)
_DownloadFromCloudStorage(trace_postsymbolization_path)
self.assertTrue(os.path.exists(trace_postsymbolization_path))
temporary_fd, temporary_trace = tempfile.mkstemp(suffix='.json.gz')
symbolization_options = ['--only-symbolize-chrome-symbols',
'--no-backup',
'--cloud-storage-bucket',
cloud_storage.PARTNER_BUCKET,
temporary_trace]
symbolization_options.extend(extra_options)
# On windows, a pre-built version of addr2line-pdb is provided.
if sys.platform == 'win32':
addr2line_path = os.path.join(
_THIS_DIR_PATH, 'data', 'addr2line-pdb.exe')
_DownloadFromCloudStorage(addr2line_path)
self.assertTrue(os.path.exists(addr2line_path))
symbolization_options += ['--addr2line-executable', addr2line_path]
# Execute symbolization and compare results with the expected trace.
temporary_trace_crc = None
expected_crc = None
try:
shutil.copy(trace_presymbolization_path, temporary_trace)
self.assertTrue(symbolize_trace.main(symbolization_options))
temporary_trace_crc = GetTraceCrc(temporary_trace)
expected_crc = GetTraceCrc(trace_postsymbolization_path)
finally:
os.close(temporary_fd)
if os.path.exists(temporary_trace):
os.remove(temporary_trace)
# Checksums must match.
self.assertTrue(temporary_trace_crc and expected_crc and
temporary_trace_crc == expected_crc)
def testMacv1(self):
if sys.platform != 'darwin':
return
# The corresponding macOS Chrome symbols must be uploaded to
# "gs://chrome-partner-telemetry/desktop-symbolizer-test/61.0.3135.4/mac64/"
# "Google Chrome.dSYM.tar.bz2"
# since the waterfall bots do not have access to the chrome-unsigned bucket.
self._RunSymbolizationOnTrace('mac_trace_v1_presymbolization.json.gz',
'mac_trace_v1_postsymbolization.json.gz',
[])
def testMacv1Breakpad(self):
    # The trace produced by the breakpad symbolizer is slightly different for
    # function names that are omitted. Breakpad produces "<name omitted>"
    # for some function names. See:
# https://cs.chromium.org/chromium/src/breakpad/src/common/dwarf_cu_to_module.cc?l=551&rcl=7a65a47345a86c9e9a3fbc2e92a756a429a0c82f
self._RunSymbolizationOnTrace(
'mac_trace_v1_presymbolization.json.gz',
'mac_trace_v1_breakpad_postsymbolisation.json.gz',
['--use-breakpad-symbols'])
def testWin64v1(self):
if sys.platform != 'win32':
return
# The corresponding Win64 Chrome symbols must be uploaded to
# "gs://chrome-partner-telemetry/desktop-symbolizer-test/61.0.3130.0/"
# "win64-pgo/chrome-win32-syms.zip"
# and the corresponding executables to
# "gs://chrome-partner-telemetry/desktop-symbolizer-test/61.0.3130.0/"
# "win64-pgo/chrome-win64-pgo.zip"
# since the waterfall bots do not have access to the chrome-unsigned bucket.
self._RunSymbolizationOnTrace('windows_trace_v1_presymbolization.json.gz',
'windows_trace_v1_postsymbolization.json.gz',
[])
def testWin64v2(self):
if sys.platform != 'win32':
return
# The corresponding Win64 Chrome symbols must be uploaded to
# "gs://chrome-partner-telemetry/desktop-symbolizer-test/61.0.3142.0/"
# "win64-pgo/chrome-win32-syms.zip"
# and the corresponding executables to
# "gs://chrome-partner-telemetry/desktop-symbolizer-test/61.0.3142.0/"
# "win64-pgo/chrome-win64-pgo.zip"
# since the waterfall bots do not have access to the chrome-unsigned bucket.
self._RunSymbolizationOnTrace('windows_trace_v2_presymbolization.json.gz',
'windows_trace_v2_postsymbolization.json.gz',
[])
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by Ross on 19-3-22
class Solution:
    def findMin(self, nums: list) -> int:
        """Binary search for the minimum of a rotated sorted array."""
        l = 0
        r = len(nums) - 1
        while l < r:
            mid = (l + r) >> 1
            if nums[mid] < nums[r]:
                # The right half is sorted, so the minimum is at mid or to
                # its left: shrink the window to [l, mid].
                r = mid
            else:
                # nums[mid] >= nums[r] means the rotation point (and the
                # minimum) lies strictly to the right of mid.
                l = mid + 1
        return nums[l]
if __name__ == '__main__':
solution = Solution()
print(solution.findMin([3, 4, 5, 1, 2]))
print(solution.findMin([4, 5, 6, 7, 0, 1, 2]))
|
import os
from maya import cmds as mc
from dcc.abstract import afnscene
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
class FnScene(afnscene.AFnScene):
"""
    Overload of AFnScene used to interface with Maya scenes.
"""
__slots__ = ()
def isNewScene(self):
"""
Evaluates whether this is an untitled scene file.
:rtype: bool
"""
return len(mc.file(query=True, sceneName=True)) == 0
def isSaveRequired(self):
"""
Evaluates whether the open scene file has changes that need to be saved.
:rtype: bool
"""
return mc.file(query=True, modified=True)
def currentFilename(self):
"""
Returns the name of the open scene file.
:rtype: str
"""
return os.path.split(self.currentFilePath())[-1]
def currentFilePath(self):
"""
Returns the path of the open scene file.
:rtype: str
"""
if not self.isNewScene():
return os.path.normpath(mc.file(query=True, sceneName=True))
else:
return ''
def currentDirectory(self):
"""
Returns the directory of the open scene file.
:rtype: str
"""
return os.path.split(self.currentFilePath())[0]
def currentProjectDirectory(self):
"""
Returns the current project directory.
:rtype: str
"""
return os.path.normpath(mc.workspace(query=True, directory=True))
def getStartTime(self):
"""
Returns the current start time.
:rtype: int
"""
return int(mc.playbackOptions(query=True, min=True))
def setStartTime(self, startTime):
"""
Updates the start time.
:type startTime: int
:rtype: None
"""
mc.playbackOptions(edit=True, min=startTime)
def getEndTime(self):
"""
Returns the current end time.
:rtype: int
"""
return int(mc.playbackOptions(query=True, max=True))
def setEndTime(self, endTime):
"""
Updates the end time.
:type endTime: int
:rtype: None
"""
mc.playbackOptions(edit=True, max=endTime)
def getTime(self):
"""
Returns the current time.
:rtype: int
"""
return int(mc.currentTime(query=True))
def setTime(self, time):
"""
Updates the current time.
:type time: int
        :rtype: None
"""
mc.currentTime(time, edit=True)
def iterTextures(self, absolute=False):
"""
Returns a generator that yields all texture paths inside the scene.
An optional keyword argument can be used to convert paths to absolute.
:type absolute: bool
:rtype: iter
"""
# Iterate through file nodes
#
for nodeName in mc.ls(type='file'):
# Check if path is valid
#
texturePath = mc.getAttr('%s.fileTextureName' % nodeName)
if self.isNullOrEmpty(texturePath):
continue
# Check if absolute path should be yielded
#
if absolute:
yield self.expandPath(texturePath)
else:
yield os.path.normpath(texturePath)
def updateTextures(self, updates):
"""
Applies all of the texture path updates to the associated file nodes.
Each key-value pair should consist of the original and updates texture paths!
:type updates: dict[str:str]
:rtype: None
"""
# Iterate through file nodes
#
for nodeName in mc.ls(type='file'):
# Check if path is valid
#
texturePath = mc.getAttr('%s.fileTextureName' % nodeName)
if self.isNullOrEmpty(texturePath):
continue
# Check if file node has an update
#
oldPath = os.path.normpath(texturePath)
newPath = updates.get(oldPath)
if newPath is not None:
mc.setAttr('%s.fileTextureName' % nodeName, newPath, type='string')
def reloadTextures(self):
"""
Forces all of the texture nodes to reload.
:rtype: None
"""
# Iterate through file nodes
#
for nodeName in mc.ls(type='file'):
mc.dgdirty('%s.fileTextureName' % nodeName)
def iterFileProperties(self):
"""
Returns a generator that yields file properties as key-value pairs.
:rtype: iter
"""
properties = mc.fileInfo(query=True)
numProperties = len(properties)
for i in range(0, numProperties, 2):
yield properties[i], properties[i + 1].encode('ascii').decode('unicode-escape')
def getFileProperty(self, key, default=None):
"""
Returns a file property value.
An optional default value can be provided if no key is found.
:type key: str
:type default: object
:rtype: Union[str, int, float, bool]
"""
return self.fileProperties().get(key, default)
def setFileProperty(self, key, value):
"""
Updates a file property value.
If the item does not exist it will be automatically added.
:type key: str
:type value: str
:rtype: None
"""
mc.fileInfo(key, value)
def getUpAxis(self):
"""
Returns the up-axis that the scene is set to.
:rtype: str
"""
return mc.upAxis(query=True, axis=True)
def markDirty(self):
"""
Marks the scene as dirty which will prompt the user for a save upon close.
:rtype: None
"""
mc.file(modified=True)
def markClean(self):
"""
Marks the scene as clean which will not prompt the user for a save upon close.
:rtype: None
"""
mc.file(modified=False)
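# Minimal usage sketch (illustrative only): it assumes a running Maya session
# with the dcc package importable and no further abstract members on AFnScene.
if __name__ == '__main__':
    scene = FnScene()
    log.info('Open file: %s (save required: %s)', scene.currentFilePath(), scene.isSaveRequired())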
|
import math
import os
from payton.scene import Scene
from payton.scene.geometry import Cube, Plane
from payton.scene.gui import info_box
def rotate(period, total):
global scene
y = math.radians(period * 100)
y = -y if int(total) % 2 == 0 else y
scene.objects["cube"].rotate_around_x(math.radians(period * 50))
scene.objects["cube"].rotate_around_y(y)
scene.objects["cube"].rotate_around_z(math.radians(period * 150))
scene = Scene()
cube = Cube()
ground = Plane(10, 10)
cube.position = [0, 0, 1.0]
texture_file = os.path.join(os.path.dirname(__file__), "cube.png")
scene.cameras[0].distance_to_target(5)
cube.material.texture = texture_file
scene.add_object("cube", cube)
scene.add_object("ground", ground)
scene.create_clock("rotate", 0.01, rotate)
scene.add_object(
"info",
info_box(
left=10,
top=10,
label="Hit SPACE to start animation",
),
)
scene.run(start_clocks=True)
|
import matplotlib.pyplot as plt
import subprocess
import time
# Name of the package to monitor
# Enter the package name you want to monitor here
# e.g. com.example.test
packageName = "com.example.test"
# Test start time
startTime = time.time()
# File for saving the test results
testResultWriteFile = open("meminfo.txt", 'w')
while True:
    # Elapsed time since the test started
    tempTime = time.time()
    testTime = int(tempTime - startTime)
    # Build the shell pipeline:
    # adb shell dumpsys meminfo : read memory info for the processes running on the Android device
    # grep packageName : keep only the lines matching the package name
    # xargs echo : collapse the result onto a single line
    # tr -d ',' : strip commas
    # tr -d ' ' : strip spaces
    # awk -F " " '{ split($0, array, "K");print array[1]; }' : cut the string at the first "K"
    cmd = ('adb shell dumpsys meminfo | grep ' + packageName +
           " | xargs echo | tr -d ',' | tr -d ' '" +
           ' | awk -F " " \'{ split($0, array, "K"); print array[1]; }\'')
    # Run the pipeline and capture its output (shell=True so the pipes are interpreted)
    fd_popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).stdout
    # Read the result, decode it as UTF-8, and convert from KB to MB
    output = fd_popen.read().strip().decode('utf-8')
    fd_popen.close()
    # An empty result means the process is not running
    tempTotalPSSMemory = int(output) // 1024 if output else 0
    if tempTotalPSSMemory != 0:
        # Plot the new data point
        plt.scatter(testTime, tempTotalPSSMemory)
        plt.pause(0.001)
        # Append the measurement to the result file
        testResultWriteFile.write(str(testTime) + ',' + str(tempTotalPSSMemory) + '\n')
    else:
        # Process is not running; restart the clock
        startTime = time.time()
plt.show()
|
from ij import IJ, ImagePlus, ImageStack
from ij.io import FileSaver
from ij.process import ImageConverter, StackConverter
from mpicbg.ij.integral import Scale
import os
import sys
from os import path, walk
from loci.formats import ImageReader
from loci.formats import ImageWriter
from fiji.util.gui import GenericDialogPlus
from java.awt.event import TextListener
import zipfile
import zlib
def ScaleImageToSize(ip, width, height):
"""Scale image to a specific size using Stephans scaler"""
smaller = ip.scale( width, height )
return smaller
def SaveToZip(zf, ip, baseDir, counter):
fs = FileSaver(ip)
fs.setJpegQuality(75)
fs.saveAsJpeg(baseDir + "/tmp.jpeg")
zipName = str(counter) + ".jpeg"
zf.write(baseDir + "/tmp.jpeg", arcname=zipName)
os.remove(baseDir + "/tmp.jpeg")
def DirList(baseDir):
r = ImageReader()
imgStats = {}
for root, dirs, files in os.walk(str(baseDir)):
for f1 in files:
if f1.endswith(".jpg") or f1.endswith(".jpe") or f1.endswith(".jpeg"):
id = root + "/" + f1
r.setId(id)
if r is None:
print "Couldn\'t open image from file:", id
continue
w = r.getSizeX()
h = r.getSizeY()
imgStats[str(w) + "_" + str(h)] = imgStats.get(str(w) + "_" + str(h), 0)+1
IJ.log("Found image: " + str(id))
#counter += 1
r.close()
#print summary
summary = ''
for k, v in imgStats.iteritems():
dim = k.split("_")
ratio = float(dim[0])/float(dim[1])
IJ.log("Found " + str(v) + " images of dimension " + str(dim[0]) + "x" + str(dim[1]) + " apect ratio " + str(round(ratio, 2)))
summary = summary + "\nFound " + str(v) + " images of dimension " + str(dim[0]) + "x" + str(dim[1]) + " apect ratio " + str(round(ratio, 2))
return summary
def PrepareDatabase(minw, maxw, baseDir, aspectRatio, majorWidth, majorHeight):
outputpath = baseDir + "/" + str(majorWidth) + "_" + str(majorHeight) + "_orig.tif"
#initialize stacks and labels
stackScaled = []
stackOrig = ImageStack(majorWidth, majorHeight)
imageNames = []
for i in range(minw, maxw+1):
stackScaled.append(ImageStack(i, int(round(i/aspectRatio, 0))))
imageNames.append('')
counter = 0
# initialize zip file for originals
zf = zipfile.ZipFile(baseDir + "/originals.zip", mode='w', compression=zipfile.ZIP_DEFLATED, allowZip64=1)
zf.writestr('from_string.txt', 'hello')
zf.close()
zf = zipfile.ZipFile(baseDir + "/originals.zip", mode='a', compression=zipfile.ZIP_DEFLATED, allowZip64=1)
for root, dirs, files in os.walk(str(baseDir)):
for f1 in files:
if f1.endswith(".jpg") or f1.endswith(".jpe") or f1.endswith(".jpeg"):
id = root + "/" + f1
IJ.redirectErrorMessages()
IJ.redirectErrorMessages(1)
imp = IJ.openImage(id)
if imp is None:
print "Couldn\'t open image from file:", id
continue
# skip non RGBimages
if imp.getProcessor().getNChannels() != 3:
print "Converting non RGB image:", id
if imp.getStackSize() > 1:
StackConverter(imp).convertToRGB()
else:
ImageConverter(imp).convertToRGB()
#skip images with different aspect ratio
width = imp.getWidth()
height = imp.getHeight()
                ratio = round(float(width)/float(height), 2) # this makes the ratio filtering approximate; minor variations in image dimensions will be ignored
if ratio != aspectRatio:
IJ.log("Skipping image of size: " + str(width) + "," + str(height))
continue
# now scale the image within a given range
scale = Scale(imp.getProcessor())
IJ.log("Scaling image " + str(counter) + " " + str(id))
for i in range(minw, maxw+1):
stackScaled[i-minw].addSlice(None, ScaleImageToSize(scale, i, int(round(i/aspectRatio, 0))))
imageNames[i-minw] += str(id) + ";"
# save the originals to a temp directory
scaledOrig = ImagePlus(None, ScaleImageToSize(scale, majorWidth, majorHeight))
SaveToZip(zf, scaledOrig, baseDir, counter)
counter += 1
zf.close()
# save the stacks
for i in range(minw, maxw+1):
        impScaled = ImagePlus(str(i) + "_" + str(int(round(i/aspectRatio, 0))), stackScaled[i-minw])
impScaled.show()
#print imageNames
impScaled.setProperty('Info', imageNames[i-minw][:-1])
fs = FileSaver(impScaled)
filepath = baseDir + "/" + str(i) + "_" + str(int(round(i/aspectRatio, 0))) + ".tif"
IJ.log("Saving output stack" + str(filepath))
fs.saveAsTiffStack(filepath)
#IJ.save(impScaled, filepath);
IJ.log("Done")
def DialogAnalyze():
dpi = 300
defaultAspectRatio = 1.41
gd = GenericDialogPlus("Cover Maker")
gd.addMessage("Prepare Image database")
gd.addDirectoryField("Select base directory containing images", "", 20)
gd.showDialog()
if gd.wasCanceled():
print "User canceled dialog!"
return
imageBaseDir = gd.getNextString()
return imageBaseDir
class RatioToDim(TextListener):
def __init__(self, aspRatio, minw, maxw, minh, maxh):
self.aspRatio = aspRatio
self.minw = minw
self.maxw = maxw
self.minh = minh
self.maxh = maxh
def textValueChanged(self, e):
source = e.getSource()
if source == self.aspRatio:
#print "bla " + str(self.minw.getText)# + " " + str(float(source.getText()))
self.minh.setText(str(int(round(float(self.minw.getText())/float(source.getText())))))
self.maxh.setText(str(int(round(float(self.maxw.getText())/float(source.getText())))))
elif source == self.minw:
self.minh.setText(str(int(round(float(source.getText())/float(self.aspRatio.getText()), 0))))
elif source == self.maxw:
self.maxh.setText(str(int(round(float(source.getText())/float(self.aspRatio.getText()), 0))))
def DialogGenerate(imageBaseDir, summary):
dpi = 300
defaultAspectRatio = 1.33
defaultTileWidth = 15
defaultOriginalWidth = 150
defaultOriginalHeight = 113
defaultTileHeight = round(defaultTileWidth/defaultAspectRatio)
gd = GenericDialogPlus("Cover Maker")
gd.addMessage("Prepare Image database")
gd.addDirectoryField("Select base directory containing images", imageBaseDir, 20)
gd.addMessage(summary)
gd.addNumericField("Aspect ratio", defaultAspectRatio, 2)
gd.addNumericField("Original width", defaultOriginalWidth, 0)
gd.addNumericField("Original height", defaultOriginalHeight, 0)
gd.addNumericField("minimal tile width", defaultTileWidth, 0)
gd.addNumericField("maximal tile width", defaultTileWidth, 0)
gd.addNumericField("minimal tile height", defaultTileHeight, 0)
gd.addNumericField("maximal tile height", defaultTileHeight, 0)
fields = gd.getNumericFields()
aspRatio = fields.get(0)
minw = fields.get(3)
maxw = fields.get(4)
minh = fields.get(5)
maxh = fields.get(6)
# resolution and size listener
textListener = RatioToDim(aspRatio, minw, maxw, minh, maxh)
aspRatio.addTextListener(textListener)
minw.addTextListener(textListener)
maxw.addTextListener(textListener)
gd.showDialog()
if gd.wasCanceled():
print "User canceled dialog!"
return
imageBaseDir = gd.getNextString()
aspectRatio = gd.getNextNumber()
majorWidth = gd.getNextNumber()
majorHeight = gd.getNextNumber()
mintilewidth = gd.getNextNumber()
maxtilewidth = gd.getNextNumber()
return int(mintilewidth), int(maxtilewidth), imageBaseDir, float(aspectRatio), int(majorWidth), int(majorHeight)
imageBaseDir = ''
summary = ''
#imageBaseDir = DialogAnalyze()
#summary = DirList(imageBaseDir)
(minw, maxw, imageBaseDir, aspectRatio, majorWidth, majorHeight) = DialogGenerate(imageBaseDir, summary)
PrepareDatabase(minw, maxw, imageBaseDir, aspectRatio, majorWidth, majorHeight)
|
import toml
import sys
from colorama import init as colorama_init
from colorama import Fore, Back, Style
colorama_init()
LINK = Fore.BLUE
ERROR = Fore.RED
CLEAR = Style.RESET_ALL
cfg_filename ='config.toml'
required_fields = [
'repo',
'branch',
]
usage = f'''
You have to configure the deployment settings in config.toml first!
Example:
[hugo-deployer-git]
repo = "<your github page url>"
branch = "master"
name= "<your username>"
email= "test@testtest.com"
message = ""
build-config= "--minify"
For more help, you can check the docs: {LINK}https://github.com/ianre657/hugo-deployer-git{CLEAR}
'''
def shout_and_exit(msg):
print(msg,file=sys.stderr)
print(usage,file=sys.stderr)
sys.exit(1)
def check_env():
try:
with open(cfg_filename, "r") as cfg_toml:
config = toml.loads(cfg_toml.read())
except FileNotFoundError as fne:
shout_and_exit(f"File not found: {cfg_filename}, are you inside hugo website folder?")
except Exception as e:
shout_and_exit(e)
if config.get('hugo-deployer-git',None) is None:
shout_and_exit(f"There're no [hugo-deployer-git] section inside your {cfg_filename}")
config= config['hugo-deployer-git']
lack = [s for s in required_fields if config.get(s,None) is None]
if len(lack) > 0:
s = 's' if len(lack)>1 else ''
lack_str = str(lack) if len(lack)>1 else f'"{lack[0]}"'
shout_and_exit(f'Error in {ERROR}{cfg_filename}{CLEAR}: missing field{s} <{lack_str}> inside your [hugo-deployer-git] block')
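# Hypothetical entry point, added for illustration; the deployer itself may
# call check_env() from elsewhere.
if __name__ == '__main__':
    check_env()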
|
import hashlib
from Tkinter import *
class Application(Frame):
def __init__(self, parent):
def response():
try:
hashvalue.delete('1.0', END)
value = v.get()
message = str(text.get("1.0", 'end-1c'))
if value == 1:
sha1 = hashlib.sha1(message).hexdigest()
hashvalue.insert(INSERT, sha1)
if value == 2:
sha224 = hashlib.sha224(message).hexdigest()
hashvalue.insert(INSERT, sha224)
if value == 3:
sha256 = hashlib.sha256(message).hexdigest()
hashvalue.insert(INSERT, sha256)
if value == 4:
sha384 = hashlib.sha384(message).hexdigest()
hashvalue.insert(INSERT, sha384)
if value == 5:
sha512 = hashlib.sha512(message).hexdigest()
hashvalue.insert(INSERT, sha512)
except ValueError:
pass
Frame.__init__(self, parent)
self.parent = parent
self.parent.title("Python SHA")
self.pack(fill=BOTH, expand=TRUE)
frame1 = Frame(self)
frame1.pack(fill=X)
scrollbar = Scrollbar(frame1)
scrollbar.pack(side=RIGHT, fill=Y)
textlabel = Label(frame1, text="Text", width=6)
textlabel.pack()
text = Text(frame1, wrap=WORD, yscrollcommand=scrollbar.set, borderwidth=3, relief="ridge")
text.pack(side=LEFT, pady=5, padx=5, anchor=W, expand=TRUE, fill=BOTH)
scrollbar.config(command=text.yview)
v = IntVar()
optionslabel = Label(frame1, text="SHA Options\n", width=15)
optionslabel.pack()
Radiobutton(frame1, text="SHA-1", variable=v, value='1').pack(anchor=W, padx=20)
Radiobutton(frame1, text="SHA-224", variable=v, value='2').pack(anchor=W, padx=20)
Radiobutton(frame1, text="SHA-256", variable=v, value='3').pack(anchor=W, padx=20)
Radiobutton(frame1, text="SHA-384", variable=v, value='4').pack(anchor=W, padx=20)
Radiobutton(frame1, text="SHA-512", variable=v, value='5').pack(anchor=W, padx=20)
frame2 = Frame(self)
frame2.pack(fill=X)
gethashbutton = Button(frame1, text="Get Hash", command=response)
gethashbutton.pack(pady=15)
frame3 = Frame(self)
frame3.pack(fill=X)
hashlabel = Label(frame3, text="Hash value")
hashlabel.pack(pady=(15,0))
hashvalue = Text(frame3, wrap=WORD, borderwidth=3, relief="ridge")
hashvalue.pack(side=LEFT, pady=5, padx=5, anchor=W, expand=TRUE, fill=BOTH)
def main():
root = Tk()
root.geometry("800x550")
app = Application(root)
root.mainloop()
if __name__ == '__main__':
main()
|
from Jumpscale import j
class Package(j.baseclasses.threebot_package):
def start(self):
DOMAIN = self._package.install_kwargs.get("domain", "testnet.threefoldtoken.io")
for port in (443, 80):
website = self.openresty.get_from_port(port)
website.domain = DOMAIN
website.configure()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-09 17:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('submissions', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AppropriationAccountBalances',
fields=[
('appropriation_account_balances_id', models.AutoField(primary_key=True, serialize=False)),
('budget_authority_unobligat_fyb', models.DecimalField(blank=True, decimal_places=0, max_digits=21, null=True)),
('adjustments_to_unobligated_cpe', models.DecimalField(decimal_places=0, max_digits=21)),
('budget_authority_appropria_cpe', models.DecimalField(decimal_places=0, max_digits=21)),
('borrowing_authority_amount_cpe', models.DecimalField(blank=True, decimal_places=0, max_digits=21, null=True)),
('contract_authority_amount_cpe', models.DecimalField(blank=True, decimal_places=0, max_digits=21, null=True)),
('spending_authority_from_of_cpe', models.DecimalField(blank=True, decimal_places=0, max_digits=21, null=True)),
('other_budgetary_resources_cpe', models.DecimalField(blank=True, decimal_places=0, max_digits=21, null=True)),
('budget_authority_available_cpe', models.DecimalField(decimal_places=0, max_digits=21)),
('gross_outlay_amount_by_tas_cpe', models.DecimalField(decimal_places=0, max_digits=21)),
('deobligations_recoveries_r_cpe', models.DecimalField(decimal_places=0, max_digits=21)),
('unobligated_balance_cpe', models.DecimalField(decimal_places=0, max_digits=21)),
('status_of_budgetary_resour_cpe', models.DecimalField(decimal_places=0, max_digits=21)),
('obligations_incurred_total_cpe', models.DecimalField(decimal_places=0, max_digits=21)),
('drv_approp_avail_pd_start_date', models.DateField(blank=True, null=True)),
('drv_approp_avail_pd_end_date', models.DateField(blank=True, null=True)),
('drv_approp_account_exp_status', models.CharField(blank=True, max_length=10, null=True)),
('tas_rendering_label', models.CharField(blank=True, max_length=22, null=True)),
('drv_obligations_unpaid_amount', models.DecimalField(blank=True, decimal_places=0, max_digits=21, null=True)),
('drv_other_obligated_amount', models.DecimalField(blank=True, decimal_places=0, max_digits=21, null=True)),
('reporting_period_start', models.DateField(blank=True, null=True)),
('reporting_period_end', models.DateField(blank=True, null=True)),
('create_date', models.DateTimeField(blank=True, null=True)),
('update_date', models.DateTimeField(blank=True, null=True)),
('create_user_id', models.CharField(blank=True, max_length=50, null=True)),
('update_user_id', models.CharField(blank=True, max_length=50, null=True)),
('submission_process', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='submissions.SubmissionProcess')),
],
options={
'db_table': 'appropriation_account_balances',
'managed': True,
},
),
migrations.CreateModel(
name='TreasuryAppropriationAccount',
fields=[
('treasury_account_identifier', models.IntegerField(primary_key=True, serialize=False)),
('tas_rendering_label', models.CharField(max_length=22)),
('allocation_transfer_agency_id', models.CharField(blank=True, max_length=3, null=True)),
('responsible_agency_id', models.CharField(max_length=3)),
('beginning_period_of_availa', models.CharField(blank=True, max_length=4, null=True)),
('ending_period_of_availabil', models.CharField(blank=True, max_length=4, null=True)),
('availability_type_code', models.CharField(blank=True, max_length=1, null=True)),
('main_account_code', models.CharField(max_length=4)),
('sub_account_code', models.CharField(max_length=3)),
('drv_approp_avail_pd_start_date', models.DateField(blank=True, null=True)),
('drv_approp_avail_pd_end_date', models.DateField(blank=True, null=True)),
('drv_approp_account_exp_status', models.CharField(blank=True, max_length=10, null=True)),
('create_date', models.DateTimeField(blank=True, null=True)),
('update_date', models.DateTimeField(blank=True, null=True)),
('create_user_id', models.CharField(blank=True, max_length=50, null=True)),
('update_user_id', models.CharField(blank=True, max_length=50, null=True)),
],
options={
'db_table': 'treasury_appropriation_account',
'managed': True,
},
),
migrations.AddField(
model_name='appropriationaccountbalances',
name='treasury_account_identifier',
field=models.ForeignKey(db_column='treasury_account_identifier', on_delete=django.db.models.deletion.DO_NOTHING, to='accounts.TreasuryAppropriationAccount'),
),
]
|
"""Define tests for v3 Sensor objects."""
# pylint: disable=unused-argument
import aiohttp
import pytest
from simplipy import API
from tests.common import TEST_PASSWORD, TEST_SYSTEM_ID, TEST_USERNAME
@pytest.mark.asyncio
async def test_properties_v3(aresponses, v3_server):
"""Test that v3 sensor properties are created properly."""
async with aiohttp.ClientSession() as session:
simplisafe = await API.async_from_credentials(
TEST_USERNAME, TEST_PASSWORD, session=session
)
await simplisafe.async_verify_2fa_email()
systems = await simplisafe.async_get_systems()
system = systems[TEST_SYSTEM_ID]
entry_sensor = system.sensors["825"]
assert not entry_sensor.error
assert not entry_sensor.low_battery
assert not entry_sensor.offline
assert not entry_sensor.settings["instantTrigger"]
assert not entry_sensor.trigger_instantly
assert not entry_sensor.triggered
siren = system.sensors["236"]
assert not siren.triggered
temperature_sensor = system.sensors["320"]
assert temperature_sensor.temperature == 67
# Ensure that attempting to access the temperature attribute of a
# non-temperature sensor throws an error:
with pytest.raises(AttributeError):
assert siren.temperature == 42
aresponses.assert_plan_strictly_followed()
|
import numpy as np
def get_loss(data, pred_result, label, model):
    '''
    data: (2,100)
    label: (1,100), entries are +1/-1
    pred_result: (1,100), raw prediction scores
    '''
error_index = np.where(label[0]*pred_result[0] <= 0)
error_data = data[:, error_index[0]]
error_label = label[:, error_index[0]]
error_pred = pred_result[:, error_index[0]]
loss = - np.sum(error_pred * error_label) / np.linalg.norm(model.w, ord=2)
if error_index[0].size:
select_index = np.random.randint(0, error_data.shape[1])
select_sample = error_data[:, select_index]
return [select_sample, error_label[:, select_index], loss]
else:
return 0
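# Minimal usage sketch (illustrative only): `_FakeModel` is a hypothetical
# stand-in exposing a weight vector `w`, which is all get_loss reads.
if __name__ == '__main__':
    class _FakeModel:
        w = np.array([1.0, -1.0])
    data = np.random.randn(2, 100)
    label = np.sign(np.random.randn(1, 100)).reshape(1, -1)
    pred_result = (_FakeModel.w @ data).reshape(1, -1)
    print(get_loss(data, pred_result, label, _FakeModel))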
|
# -*- coding: utf-8 -*-
import py
from test_interp import RunTests
from ctypes import cdll
class TestJNI(RunTests):
def test_static_method_call(self):
cls = self.getclass('''
class CallMe {
static int x=1;
public static native void callit(int j);
public static void callme(int j){x=j;}
public static void main(String[] args)
{
System.loadLibrary( "test_native" );
System.out.println(x);
callit(2);
System.out.println(x);
}
}
''', "")
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], "1\n2\n")
def test_jni_add(self):
py.test.skip("in-progress test_jni_add")
        # works but can't capture the print
cls = self.getclass('''
public class Add {
int number;
public native void calc(int j);
public static void main(String[] args)
{
System.loadLibrary( "test_native" );
Add a = new Add();
a.number = 40;
a.calc(1);
}
}
''', "Add")
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], "41 \n")
def test_jni_fac(self):
cls = self.getclass('''
class Fac {
public native int fun();
public int i;
public static void main(String[] args)
{
System.loadLibrary( "test_native" );
Fac fac = new Fac();
fac.i = 7;
System.out.println(fac.fun());
}
}
''')
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], "720\n")
def test_jni_methodcallA(self):
cls = self.getclass('''
class Sum {
static int x=1;
static int y=2;
public native void cCode();
public void set_it(int a, int b)
{
x = a;
y = b;
}
public static void main(String[] args)
{
System.loadLibrary( "test_native" );
Sum s = new Sum();
System.out.println(x+y);
s.cCode();
System.out.println(x+y);
}
}
''')
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], "3\n7\n")
def test_jni_var_args(self):
cls = self.getclass('''
class Va {
public native void nativeMethod();
public native int nativeMethod2();
public void method(int a, int b)
{
System.out.println(a+b);
}
public int method2(int a, int b)
{
return a+b;
}
public static void main(String[] args)
{
System.loadLibrary( "test_native" );
Va va = new Va();
int x = va.nativeMethod2();
System.out.println(x);
}
}
''')
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], "3\n")
def test_strlen(self):
cls = self.getclass('''
class StrLen {
public static native int strlen( String s );
public static void main(String[] args)
{
System.loadLibrary( "test_native" );
int len = strlen("Hallo Peter");
System.out.println(len);
}
}
''')
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], "11\n")
def test_intArray(self):
cls = self.getclass('''
class IntArray {
private native int sumArray(int[] arr);
public static void main(String[] args) {
IntArray p = new IntArray();
int arr[] = new int[10];
for (int i = 0; i < 10; i++) {
arr[i] = i;
}
int sum = p.sumArray(arr);
System.out.println("sum = " + sum);
}
static {
System.loadLibrary("test_native");
}
}
''')
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], "sum = 45\n")
def test_ObjectArrayTest(self):
cls = self.getclass('''
class ObjectArrayTest {
private static native int[][] initInt2DArray(int size);
public static void main(String[] args) {
int[][] i2arr = initInt2DArray(3);
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
System.out.print(" " + i2arr[i][j]);
}
System.out.println();
}
}
static {
System.loadLibrary("test_native");
}
}
''')
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], " 0 1 2\n 1 2 3\n 2 3 4\n")
def test_ObjectArrayTest2(self):
cls = self.getclass('''
class ObjectArrayTest2 {
private static native Object[] initObjArray(int size);
public static void main(String[] args) {
Object[] objArr = initObjArray(3);
A obj;
for (int i = 0; i < 3; i++) {
obj = (A) objArr[i];
System.out.println(obj.x);
}
}
static {
System.loadLibrary("test_native");
}
}
class A{
int x;
}
''')
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], "41\n42\n43\n")
def test_ObjectArrayTest3(self):
    # this test uses another init on the C level
cls = self.getclass('''
class ObjectArrayTest3 {
private static native Object[] initObjArray(int size);
public static void main(String[] args) {
Object[] objArr = initObjArray(3);
A obj;
for (int i = 0; i < 3; i++) {
obj = (A) objArr[i];
System.out.println(obj.x);
}
}
static {
System.loadLibrary("test_native");
}
}
class A{
int x = 42;
}
''')
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], "41\n41\n41\n")
def test_ObjectArrayTest4(self):
    # this test inits the String array with "C" instead of null
cls = self.getclass('''
class ObjectArrayTest4 {
private static native String[] initObjArray(int size);
public static void main(String[] args) {
String[] strArr = initObjArray(3);
//String[] strArr2 = new String[3];
for (int i = 0; i < strArr.length; i++)
{
System.out.println(strArr[i]);
//System.out.println(strArr2[i]);
}
}
static {
System.loadLibrary("test_native");
}
}
''')
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], "C\nC\nC\n")
def test_ReadFile(self):
# from http://java.sun.com/developer/onlineTraining/Programming/JDCBook/jniexamp.html
data = ""
fobj = open("/home/stupsi/pypy_kram/jvm/jvm/test_classes/ReadFile.java", "r")
for line in fobj:
data += line
fobj.close()
cls = self.getclass('''
import java.util.*;
class ReadFile {
//Native method declaration
native byte[] loadFile(String name);
//Load the library
static {
System.loadLibrary("test_native");
}
public static void main(String args[]) {
byte buf[];
//Create class instance
ReadFile mappedFile=new ReadFile();
//Call native method to load ReadFile.java
buf=mappedFile.loadFile("/home/stupsi/pypy_kram/jvm/jvm/test_classes/ReadFile.java");
//Print contents of ReadFile.java
for(int i=0;i<buf.length;i++) {
System.out.print((char)buf[i]);
}
}
}
''', "ReadFile")
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], data)
# tests throw on java level
def test_native_Exception(self):
cls = self.getclass('''
public class NativeExc {
static{ System.loadLibrary("test_native");}
public native static void nativeMethod() throws NativeException;
public static void callBack() throws Exception
{
throw new NativeException("callBack: Error on Javaside");
}
public static void main(String[] args)
{
try{
nativeMethod();
}
catch(NativeException e)
{
System.out.println("catch:NativeException");
}
finally
{
System.out.println("finally");
}
}
}
class NativeException extends Exception {
public NativeException(String s){super(s);}
}
''', "NativeExc")
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], "catch:NativeException\nfinally\n")
# tests JNI-Env-fct ThrowNew
def test_native_Exception2(self):
cls = self.getclass('''
public class NativeExc2 {
static{ System.loadLibrary("test_native");}
public native static void nativeMethod() throws NativeException;
public static void main(String[] args)
{
try{
nativeMethod();
}
catch(NativeException e)
{
System.out.println("catch:NativeException");
}
finally
{
System.out.println("finally");
}
}
}
class NativeException extends Exception {
public NativeException(String s){super(s);}
}
''', "NativeExc2")
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], "catch:NativeException\nfinally\n")
#TODO: use String constructor
# tests JNI-Env-fct Throw
def test_native_Exception3(self):
cls = self.getclass('''
public class NativeExc3 {
static{ System.loadLibrary("test_native");}
public native static void nativeMethod() throws NativeException;
public static void main(String[] args)
{
try{
nativeMethod();
}
catch(NativeException e)
{
System.out.println("catch:NativeException");
}
finally
{
System.out.println("finally");
}
}
}
class NativeException extends Exception {
public NativeException(){super();}
public NativeException(String s){super(s);}
}
''', "NativeExc3")
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], "catch:NativeException\nfinally\n")
#tests ExceptionOccured, ExceptionCheck and ExceptionClear
def test_native_Exception4(self):
cls = self.getclass('''
public class NativeExc4 {
static{ System.loadLibrary("test_native");}
public native static boolean nativeMethod() throws NativeException;
public static void main(String[] args)
{
boolean b = false;
try{
b = nativeMethod();
}
catch(NativeException e)
{
System.out.println("catch:NativeException");
}
finally
{
System.out.println("finally");
}
System.out.println(b);
}
}
class NativeException extends Exception {
public NativeException(String s){super(s);}
}
''', "NativeExc4")
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], "finally\ntrue\n")
def test_ArrayHandler(self):
# from http://java.sun.com/developer/onlineTraining/Programming/JDCBook/jnistring.html
cls = self.getclass('''
public class ArrayHandler {
public native String[] returnArray();
static{
System.loadLibrary("test_native");
}
public static void main(String args[]) {
String ar[];
ArrayHandler ah= new ArrayHandler();
ar = ah.returnArray();
for (int i=0; i<5; i++) {
System.out.println("array element"+i+
"=" + ar[i]);
}
}
}''',"ArrayHandler")
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], "array element0=first\narray element1=second\narray element2=third\narray element3=fourth\narray element4=fifth\n")
# uses threading JNI-Env functions which are not implemented
def test_ArrayManipulation(self):
py.test.skip("in-progress test_ArrayManipulation")
cls = self.getclass('''
public class ArrayManipulation {
private int arrayResults[][];
Boolean lock=new Boolean(true);
int arraySize=-1;
public native void manipulateArray(
int[][] multiplier, Boolean lock);
static{
System.loadLibrary("test_native");
}
public void sendArrayResults(int results[][]) {
arraySize=results.length;
arrayResults=new int[results.length][];
System.arraycopy(results,0,arrayResults,
0,arraySize);
}
public void displayArray() {
for (int i=0; i<arraySize; i++) {
for(int j=0; j <arrayResults[i].length;j++) {
System.out.println("array element "+i+","+j+
"= " + arrayResults[i][j]);
}
}
}
public static void main(String args[]) {
int[][] ar = new int[3][3];
int count=3;
for(int i=0;i<3;i++) {
for(int j=0;j<3;j++) {
ar[i][j]=count;
}
count++;
}
ArrayManipulation am= new ArrayManipulation();
am.manipulateArray(ar, am.lock);
am.displayArray();
}
}
''',"ArrayManipulation")
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], "array element 0,0= 12\narray element 0,1= 0\narray element 0,2= 12\narray element 1,0= 16\narray element 1,1= 0\narray element 1,2= 16\narray element 2,0= 20\narray element 2,1= 0\narray element 2,2= 20\n")
def test_StringTest(self):
py.test.skip("in-progress test_StringTest")
        # works but can't capture the print
cls = self.getclass('''
class Prompt {
// native method that prints a prompt and reads a line
private native String getLine(String prompt);
public static void main(String args[]) {
Prompt p = new Prompt();
String input = p.getLine("Type a line: ");
System.out.println("User typed: " + input);
}
static {
System.loadLibrary("test_native");
}
}
''')
string = "libtest_native.so"
self.loader.extern_libs[string] = cdll.LoadLibrary("/usr/lib/"+string)
self.run(cls, [], "User typed: \n")
|
"""Util functions for Model Evaluation pipelines."""
import os
import pathlib
from typing import Any, Dict, List, Tuple
def get_sdk_pipeline_and_parameters(
project: str,
location: str,
root_dir: str,
model_name: str,
target_column_name: str,
prediction_type: str,
batch_predict_gcs_source_uris: List[str],
batch_predict_instances_format: str,
batch_predict_machine_type: str = 'n1-standard-16',
batch_predict_starting_replica_count: int = 25,
batch_predict_max_replica_count: int = 25,
class_names: List[str] = ['0', '1'],
dataflow_machine_type: str = 'n1-standard-4',
dataflow_max_num_workers: int = 25,
dataflow_disk_size_gb: int = 50,
encryption_spec_key_name: str = '') -> Tuple[str, Dict[str, Any]]:
"""Get the evaluation sdk pipeline and parameters.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
root_dir: The root GCS directory for the pipeline components.
model_name: The Vertex model resource name to be imported and used for batch
prediction.
target_column_name: The target column name.
prediction_type: The type of prediction the Model is to produce.
"classification" or "regression".
batch_predict_gcs_source_uris: Google Cloud Storage URI(-s) to your
instances to run batch prediction on. May contain wildcards. For more
information on wildcards, see
https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. For
more details about this input config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_instances_format: The format in which instances are given,
must be one of the Model's supportedInputStorageFormats. If not set,
default to "jsonl". For more details about this input config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_machine_type: The type of machine for running batch prediction
on dedicated resources. If the Model supports DEDICATED_RESOURCES this
config may be provided (and the job will use these resources). If the
Model doesn't support AUTOMATIC_RESOURCES, this config must be provided.
For more details about the BatchDedicatedResources, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources.
For more details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_starting_replica_count: The number of machine replicas used at
the start of the batch operation. If not set, Vertex AI decides starting
number, not greater than `max_replica_count`. Only used if `machine_type`
is set.
batch_predict_max_replica_count: The maximum number of machine replicas the
batch operation may be scaled to. Only used if `machine_type` is set.
Default is 10.
class_names: The list of class names that the ground truth can be, in the
same order they appear in the batch predict predictions output.
dataflow_machine_type: The dataflow machine type for evaluation components.
dataflow_max_num_workers: The max number of Dataflow workers for evaluation
components.
dataflow_disk_size_gb: Dataflow worker's disk size in GB for evaluation
components.
encryption_spec_key_name: The KMS key name.
Returns:
    Tuple of pipeline_definition_path and parameter_values.
"""
if prediction_type == 'regression':
prediction_score_column = 'prediction.value'
prediction_label_column = ''
  elif prediction_type == 'classification':
    prediction_score_column = 'prediction.scores'
    prediction_label_column = 'prediction.classes'
  else:
    raise ValueError('Unsupported prediction_type: %s' % prediction_type)
parameter_values = {
'project':
project,
'location':
location,
'root_dir':
root_dir,
'model_name':
model_name,
'target_column_name':
target_column_name,
'prediction_type':
prediction_type,
'class_names':
class_names,
'batch_predict_gcs_source_uris':
batch_predict_gcs_source_uris,
'batch_predict_instances_format':
batch_predict_instances_format,
'batch_predict_machine_type':
batch_predict_machine_type,
'batch_predict_starting_replica_count':
batch_predict_starting_replica_count,
'batch_predict_max_replica_count':
batch_predict_max_replica_count,
'prediction_score_column':
prediction_score_column,
'prediction_label_column':
prediction_label_column,
'dataflow_machine_type':
dataflow_machine_type,
'dataflow_max_num_workers':
dataflow_max_num_workers,
'dataflow_disk_size_gb':
dataflow_disk_size_gb,
'encryption_spec_key_name':
encryption_spec_key_name,
}
pipeline_definition_path = os.path.join(
pathlib.Path(__file__).parent.resolve(), 'templates', 'sdk_pipeline.json')
return pipeline_definition_path, parameter_values
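# Minimal usage sketch (illustrative only; every argument value below is a
# placeholder, not a value shipped with this module):
if __name__ == '__main__':
  path, params = get_sdk_pipeline_and_parameters(
      project='my-gcp-project',
      location='us-central1',
      root_dir='gs://my-bucket/pipeline-root',
      model_name='projects/123/locations/us-central1/models/456',
      target_column_name='label',
      prediction_type='classification',
      batch_predict_gcs_source_uris=['gs://my-bucket/data/*.jsonl'],
      batch_predict_instances_format='jsonl')
  print(path)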
|
from .bandit_base import AdversarialBandit, Bandit
from .corrupted_bandits import CorruptedLaws, CorruptedNormalBandit
from .stochastic_bandits import BernoulliBandit, NormalBandit
|
from AwsS3 import AwsS3
from CVFileMixer import CVFileMixer
class AwsS3ML:
def __init__(self, aws_profile_name=None):
self.S3 = AwsS3(aws_profile_name)
def read_cv_parquets(self, parquet_name, n_splits, validation_index, s3_bucket, s3_path, delete_local=True):
if '.parquet' in parquet_name: parquet_name = parquet_name.replace('.parquet', '')
file_names = ['{}_{}.parquet'.format(parquet_name, i) for i in range(n_splits)]
print(file_names)
df_list = [self.S3.read_parquet(name, s3_bucket, s3_path, delete_local) for name in file_names]
return CVFileMixer.get_cv_train_validation(df_list, validation_index)
if __name__ == '__main__':
    s3 = AwsS3ML()
    # Smoke test with placeholder arguments: 10 CV splits, fold 0 held out
    # (the empty strings for bucket/path are placeholders from the original).
    s3.read_cv_parquets('abcd.parquet', 10, 0, '', '')
|
import sys
import pandas as pd
import sqlalchemy
import os
def load_data(messages_filepath, categories_filepath):
    '''
    Load data function
    Arguments:
        messages_filepath -> path to the messages csv file
        categories_filepath -> path to the categories csv file
    Output:
        df -> merged dataset loaded as a pandas DataFrame
    '''
# load datasets
messages_df = pd.read_csv(messages_filepath)
categories_df = pd.read_csv(categories_filepath)
df=messages_df.merge(categories_df,left_on='id',right_on='id')
return df
def clean_data(df):
    '''
    Input:
        df -> dataframe combining the messages and categories datasets
    Output:
        df -> cleaned pandas DataFrame ready for training
    '''
categories = df['categories'].str.split(';', expand=True)
row = categories.iloc[0, :]
category_colnames = row.apply(lambda x: x[:-2])
categories.columns = category_colnames
for column in categories:
# set each value to be the last character of the string
categories[column] = categories[column].str[-1]
# convert column from string to numeric
categories[column] = categories[column].astype(int)
df.drop('categories', axis=1,inplace=True)
df = pd.concat([df, categories], axis=1)
df.drop_duplicates(inplace=True)
return df
def save_data(df, database_filename):
    '''
    :param df: DataFrame to save
    :param database_filename: path of the .db file to write
    Saves the DataFrame to a `messages` table, replacing the table if it
    already exists so the script can be re-run.
    '''
    engine = sqlalchemy.create_engine('sqlite:///' + database_filename)
    df.to_sql('messages', engine, index=False, if_exists='replace')
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
|
from PIL import Image
import os
import random
import json
import time
from io import BytesIO
import base64
import numpy as np
from gym_donkeycar.core.sim_client import SDClient
from TelemetryPack import TelemetryPack
'''Code Reference:
https://github.com/tawnkramer/sdsandbox/blob/master/src/test_client.py
'''
DEFAULT_GYM_CONFIG = {
'racer_name': 'RL_User',
'bio': 'Triton-AI',
'country': 'US',
"guid": "RL",
'body_style': 'f1',
'body_rgb': (0, 0, 0),
'car_name': 'RL_Model',
'font_size': 50,
"fov": 0,
"fish_eye_x": 0.0,
"fish_eye_y": 0.0,
"img_w": 160,
"img_h": 120,
"img_d": 3,
"img_enc": 'JPG',
"offset_x": 0,
"offset_y": 0,
"offset_z": 0,
"rot_x": 0,
# "rot_y": 180,
'scene_name': 'donkey-circuit_launch-v0',
'host': '127.0.0.1',
# 'sim_host':'donkey-sim.roboticist.dev',
'port': 9091,
'artificial_latency': 0
}
DEFAULT_LIDAR_CONFIG = {
"degPerSweepInc": "2",
"degAngDown": "0",
"degAngDelta": "0",
"numSweepsLevels": "1",
"maxRange": "50.0",
"noise": "0",
"offset_x": "0",
"offset_y": "0",
"offset_z": "0",
"rot_x": "0"
}
GYM_DICT = {
'car': {
'car_name': 'TritonRacer',
'font_size': 50,
'racer_name': 'Triton AI',
'bio': 'Something',
'country': 'US',
'body_style': 'car01',
'body_rgb': [24, 43, 73],
'guid': 'some_random_string'},
# Which is the default connection profile? "local" or "remote"?
'default_connection': 'local',
# default_connection: 'remote'
'local_connection': {
# roboracingleague_1 | generated_track | generated_road | warehouse | sparkfun_avc | waveshare
'scene_name': 'donkey-circuit-launch-track-v0',
# Use "127.0.0.1" for simulator running on local host.
'host': '127.0.0.1',
'port': 9091,
'artificial_latency': 0}, # Ping the remote simulator whose latency you would like to match with, and put the ping in millisecond here.
'remote_connection': {
'scene_name': 'generated_track',
'host': '127.0.0.1', # Use the actual host name for remote simulator.
'port': 9091,
'artificial_latency': 0}, # Besides the ping to the remote simulator, how many MORE delay would you like to add?
    'lidar': {
        'enabled': False,
        'deg_inc': 2,  # Degree increment between each ray of the lidar
        'max_range': 50.0,  # Max range of the lidar laser
        # The fields below are read by GymInterface.__init__; the values are
        # assumed defaults mirroring DEFAULT_LIDAR_CONFIG.
        'num_sweeps_levels': 1,
        'offset_x': 0,
        'offset_y': 0,
        'offset_z': 0},
    # Camera overrides; GymInterface.__init__ expects this section. An empty
    # dict falls back on the camera values in DEFAULT_GYM_CONFIG.
    'camera': {},
}
class GymInterface(SDClient):
'''Talking to the donkey gym'''
    def __init__(self, poll_socket_sleep_time=0.01, gym_config=GYM_DICT):
        # Default to the nested GYM_DICT profile: the flat DEFAULT_GYM_CONFIG
        # lacks the 'car'/'*_connection'/'lidar'/'camera' sections read below.
        # Copy so the module-level default dict is not mutated by update().
        self.gym_config = dict(DEFAULT_GYM_CONFIG)
connection_config = gym_config['local_connection'] if gym_config[
'default_connection'] == 'local' else gym_config['remote_connection']
self.gym_config.update(gym_config['car'])
self.lidar_config = gym_config['lidar']
self.gym_config.update(connection_config)
self.cam_config = gym_config['camera']
self.gym_config.update(self.cam_config)
self.deg_inc = gym_config['lidar']['deg_inc']
self.max_range = gym_config['lidar']['max_range']
self.num_scan_lines = gym_config['lidar']['num_sweeps_levels']
self.lidar_offset_y = gym_config['lidar']['offset_y']
self.lidar_offset_x = gym_config['lidar']['offset_x']
self.lidar_offset_z = gym_config['lidar']['offset_z']
DEFAULT_LIDAR_CONFIG['degPerSweepInc'] = str(self.deg_inc)
DEFAULT_LIDAR_CONFIG['maxRange'] = str(self.max_range)
DEFAULT_LIDAR_CONFIG['numSweepsLevels'] = str(self.num_scan_lines)
DEFAULT_LIDAR_CONFIG['offset_y'] = str(self.lidar_offset_y)
DEFAULT_LIDAR_CONFIG['offset_x'] = str(self.lidar_offset_x)
DEFAULT_LIDAR_CONFIG['offset_z'] = str(self.lidar_offset_z)
SDClient.__init__(
self, self.gym_config['host'], self.gym_config['port'], poll_socket_sleep_time=poll_socket_sleep_time)
self.load_scene(self.gym_config['scene_name'])
self.send_config()
self.last_image = None
self.car_loaded = False
self.latency = self.gym_config['artificial_latency']
self.tele = TelemetryPack()
self.lidar = None
self.hsv = [0, 1, 1]
def step(self, steering, throttle, braking, reset):
# steering = args[0]
# throttle = args[1]
# breaking = args[2]
# if breaking is None:
# breaking = 0.0
# reset = args[3]
self.send_controls(steering, throttle, braking)
if reset:
self.reset_car()
return self.last_image, self.tele, self.lidar, self.tele.pos_x, self.tele.pos_y, self.tele.pos_z, self.tele.speed, self.tele.cte
def onStart(self):
print(
f'CAUTION: Confirm your artificial latency setting: {self.latency}ms.')
def onShutdown(self):
self.stop()
def getName(self):
return 'Gym Interface'
    def __try_get(self, packet, key, type_required):
        # Fetch `key` from the packet and coerce it to the required type;
        # return None when the field is missing or cannot be converted.
        try:
            val = packet.get(key)
            return type_required(val)
        except (TypeError, ValueError):
            return None
def on_msg_recv(self, json_packet):
if json_packet['msg_type'] == "need_car_config":
self.send_config()
elif json_packet['msg_type'] == "car_loaded":
print('Car loaded.')
self.car_loaded = True
elif json_packet['msg_type'] == "telemetry":
# print(json_packet)
# 1000 for ms -> s
time.sleep(self.gym_config['artificial_latency'] / 1000.0)
imgString = json_packet["image"]
image = Image.open(BytesIO(base64.b64decode(imgString)))
self.last_image = np.asarray(image, dtype=np.uint8)
# Telemetry, New since 21.04.05
to_extract = ['steering_angle', 'throttle', 'speed', 'pos_x',
'pos_y', 'pos_z', 'hit', 'time',
'accel_x', 'accel_y', 'accel_z',
'gyro_x', 'gyro_y', 'gyro_z', 'gyro_w',
'pitch', 'yaw', 'roll',
'cte', 'activeNode', 'totalNodes',
'vel_x', 'vel_y', 'vel_z', 'on_road',
'progress_on_shortest_path']
types = [float, float, float, float,
float, float, str, float,
float, float, float,
float, float, float, float,
float, float, float,
float, int, int,
float, float, float, int,
float]
vals = []
for i in range(len(to_extract)):
vals.append(self.__try_get(
json_packet, to_extract[i], types[i]))
self.tele = TelemetryPack(*tuple(vals))
if "lidar" in json_packet:
self.lidar = json_packet["lidar"]
def send_config(self):
'''
send three config messages to setup car, racer, and camera
'''
print('Sending configs...')
print('Sending racer info')
# Racer info
msg = {'msg_type': 'racer_info',
'racer_name': self.gym_config['racer_name'],
'car_name': self.gym_config['car_name'],
'bio': self.gym_config['bio'],
'country': self.gym_config['country'],
'guid': self.gym_config['guid']}
self.send_now(json.dumps(msg))
time.sleep(2.0)
print('Sending car config')
# Car config
msg = {"msg_type": "car_config",
"body_style": self.gym_config['body_style'],
"body_r": self.gym_config['body_rgb'][0].__str__(),
"body_g": self.gym_config['body_rgb'][1].__str__(),
"body_b": self.gym_config['body_rgb'][2].__str__(),
"car_name": self.gym_config['car_name'],
"font_size": self.gym_config['font_size'].__str__()}
self.send_now(json.dumps(msg))
#this sleep gives the car time to spawn. Once it's spawned, it's ready for the camera config.
time.sleep(2.0)
# Camera config
print('Sending camera config')
msg = {"msg_type": "cam_config",
"fov": self.gym_config['fov'].__str__(),
"fish_eye_x": self.gym_config['fish_eye_x'].__str__(),
"fish_eye_y": self.gym_config['fish_eye_y'].__str__(),
"img_w": self.gym_config['img_w'].__str__(),
"img_h": self.gym_config['img_h'].__str__(),
"img_d": self.gym_config['img_d'].__str__(),
"img_enc": self.gym_config['img_enc'],
"offset_x": self.gym_config['offset_x'].__str__(),
"offset_y": self.gym_config['offset_y'].__str__(),
"offset_z": self.gym_config['offset_z'].__str__(),
"rot_x": self.gym_config['rot_x'].__str__()
}
self.send_now(json.dumps(msg))
print(
f"Gym Interface: Camera resolution ({self.gym_config['img_w']}, {self.gym_config['img_h']}).")
if self.lidar_config['enabled']:
print('Sending LiDAR config')
msg = {'msg_type': "lidar_config"}
msg.update(DEFAULT_LIDAR_CONFIG)
self.send_now(json.dumps(msg))
    def send_controls(self, steering, throttle, braking):
        msg = {"msg_type": "control",
               "steering": steering.__str__(),
               "throttle": throttle.__str__(),
               "brake": braking.__str__()}
        self.send(json.dumps(msg))
''' Would you like some RGB?
import colorsys
self.hsv[0] += 0.005
if self.hsv[0] > 1 : self.hsv[0] = 0
rgb = colorsys.hsv_to_rgb(*(tuple(self.hsv)))
msg = { "msg_type" : "car_config",
"body_r" : int(rgb[0] * 255).__str__(),
"body_g" : int(rgb[1] * 255).__str__(),
"body_b" : int(rgb[2] * 255).__str__(),
"body_style" : self.gym_config['body_style'],
"car_name" : self.gym_config['car_name'],
"font_size" : self.gym_config['font_size'].__str__() }
self.send_now(json.dumps(msg))
'''
#this sleep lets the SDClient thread poll our message and send it out.
# time.sleep(self.poll_socket_sleep_sec)
def load_scene(self, scene):
print(f'Loading scene: {scene}')
msg = {"msg_type": "load_scene", "scene_name": scene}
self.send_now(json.dumps(msg))
def reset_car(self):
print('Resetting car...')
msg = {'msg_type': 'reset_car'}
self.send(json.dumps(msg))
def teleport_car(self,x,y,z):
print('Teleporting Car...')
msg = {
"msg_type": "set_position",
"pos_x": str(int(x)),
"pos_y": str(y),
"pos_z": str(int(z)),
}
print(msg)
self.send_now(json.dumps(msg))
def send_camera_config(self, config_dict):
"""
config_dict: a dictionary, e.g.
{"img_w" : 160,
"img_h" : 120,
"img_d" : 3,
"img_enc" : 'JPG',
"offset_x" : 0.0,
"offset_y" : 3,
"offset_z" : 1.0,
"rot_x" : 0.0,}
"""
print("Gym Interface: Sending custom camera config...")
msg = {"msg_type": "cam_config", }
msg.update(config_dict)
self.send_now(json.dumps(msg))
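# Minimal usage sketch (illustrative only): it assumes a Donkey simulator is
# already listening on the host/port configured in GYM_DICT.
if __name__ == '__main__':
    interface = GymInterface(gym_config=GYM_DICT)
    interface.onStart()
    # Drive straight at a modest throttle for a few steps, then shut down.
    for _ in range(10):
        image, tele, lidar, x, y, z, speed, cte = interface.step(
            steering=0.0, throttle=0.2, braking=0.0, reset=False)
        time.sleep(0.05)
    interface.onShutdown()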
|
"""
Collect log events for completed AWS lambda runs and publish status report
"""
from __future__ import print_function
import base64
import json
import logging
import os
import zlib
import boto3
# message formatting in separate module, perhaps one day we can support customizing
# format through some kind of context-specific formatting hook.
from format_request_events import create_message_subject, create_message_body
def get_env_var(name, default_value=None):
"""Get the value of an environment variable, if defined"""
if name in os.environ:
return os.environ[name]
elif default_value is not None:
return default_value
else:
raise RuntimeError('Required environment variable %s not found' % name)
# Get configuration from environment variables
topic_arn = get_env_var('REPORTING_TOPIC_ARN')
# Configure local logging
log_level = get_env_var('LOG_LEVEL', 'INFO')
log = logging.getLogger()
log.setLevel(log_level)
def decompress_string(value):
"""
Convert base64-encoded, compressed data to a string
"""
data = base64.b64decode(value)
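    # wbits=47 (32+15) tells zlib to auto-detect a gzip or zlib header, which
    # fits CloudWatch Logs subscription payloads (gzip-compressed); 4096 is the
    # initial output buffer size.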
    return zlib.decompress(data, 47, 4096).decode()
def unpack_subscription_event(event):
"""
Convert and parse cloudwatch log subscription event
"""
payload = decompress_string(event['awslogs']['data'])
event = json.loads(payload)
return event
def get_run_events(requestId, context):
"""
Get cloudwatch log events for the specified lambda request
    Assumes log events are formatted with the default Lambda log format, i.e. '<level> <timestamp> <requestId> ...'.
"""
logs = boto3.client('logs')
log_group_name = context['log_group_name']
log_stream_name = context['log_stream_name']
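    # '[level,ts,id=...]' is a space-delimited CloudWatch filter pattern: it
    # matches events whose third field equals the request id, as produced by
    # the default Lambda Python log line "[LEVEL] <timestamp> <requestId> ...".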
log_filter = '[level,ts,id=%s,...]' % requestId
results = logs.filter_log_events(
logGroupName=log_group_name,
logStreamNames=[log_stream_name],
filterPattern=log_filter,
interleaved=True)
events = results['events']
# get additional batches
while 'nextToken' in results:
results = logs.filter_log_events(
logGroupName=log_group_name,
logStreamNames=[log_stream_name],
filterPattern=log_filter,
interleaved=True,
nextToken=results['nextToken'])
events.extend(results['events'])
return events
def analyze_run_events(requestId, events, context):
"""
Collect information about request execution from log events.
"""
assert len(events) > 0, "No events found for request %s" % requestId
errors = 0
warnings = 0
startts = 0
endts = 0
for event in events:
if 'message' in event:
message = event['message']
if message.startswith('START'):
startts = event['timestamp']
elif message.startswith('END'):
endts = event['timestamp']
elif message.startswith('[ERROR]'):
errors = errors + 1
elif message.startswith('[WARNING]'):
warnings = warnings + 1
assert startts > 0, "No START event found in request log trace %s" % requestId
assert endts > 0, "No END event found in request log trace %s" % requestId
duration = endts - startts
return { 'start': startts, 'end': endts, 'duration': duration, 'errors': errors, 'warnings': warnings }
def create_topic_message(info, context):
"""
Create a report message for a request execution
"""
subject = create_message_subject(info, context)
defaultMessage = create_message_body(info, context)
# TODO define alternate messages for other protocols, e.g. SMS
return (subject, defaultMessage)
def publish_run_info(info, context):
"""
Publish job execution report to SNS topic.
If context.dry_run is True, dumps subject and message to stdout instead of
publishing to the topic.
"""
(subject,message) = create_topic_message(info, context)
if info['errors'] > 0:
status = 'error'
elif info['warnings'] > 0:
status = 'warning'
else:
status = 'success'
attributes = {
'function': {
'DataType': 'String',
'StringValue': context['function_name']
},
'status': {
'DataType': 'String',
'StringValue': status
},
'errors': {
'DataType': 'String',
'StringValue': str(info['errors'])
},
'warnings': {
'DataType': 'String',
'StringValue': str(info['warnings'])
},
}
print_level = logging.INFO if context['dry_run'] else logging.DEBUG
log.log(print_level, "SUBJECT: %s", subject)
log.log(print_level, "ATTRIBUTES:\n%s", json.dumps(attributes))
log.log(print_level, "BODY\n%s", message)
if context['dry_run']:
# return dummy publish response
response = { 'MessageId': '12345' }
else:
# publish to topic
sns = boto3.client('sns')
response = sns.publish(
TopicArn=topic_arn,
Subject=subject,
Message=message,
MessageAttributes=attributes)
log.info('Published message %s to target topic %s', response['MessageId'], topic_arn)
return response
def process_lambda_run(requestId, context):
"""
Process CloudWatch log events for a lambda function run
"""
log.debug('Processing log events for %s request %s', context['function_name'], requestId)
events = get_run_events(requestId, context)
log.debug('Found %d events', len(events))
info = analyze_run_events(requestId, events, context)
publish_run_info(dict(info,
requestId=requestId,
events=events),
context)
def get_request_ids(events, context):
"""
Get request IDs from a set of lambda log events
"""
ids = []
for event in events:
if ('extractedFields' in event):
fields = event['extractedFields']
if 'type' in fields and fields['type'] == 'END' and 'requestId' in fields:
ids.append(fields['requestId'])
# should always be at least one END event
assert len(ids) > 0, "No END events found in message stream."
# shouldn't be any dupes
assert len(ids) == len(set(ids)), "Found duplicate request ids"
return ids
def process_lambda_events(events, context):
"""
Process a set of Lambda log events, running `process_lambda_run` for each END event.
It's possible that the log subscription could be configured to send all events
for a particular run to the handler, but I haven't seen anything that guarantees this. So
for now we only look at END requests, then explicitly collect all the others through a
filter_log_events query. It's highly recommended to add a filter to the log subscription
that only looks at 'END' events, to avoid including other request events that will only be
discarded here.
"""
ids = get_request_ids(events, context)
log.debug("Processing events for %d runs", len(ids))
for request_id in ids:
process_lambda_run(request_id, context)
def get_lambda_tags(function_name):
"""
    Get all tags and values associated with the specified function name.
    Currently a stub that returns no tags, so display_name falls back to the
    plain function name.
"""
    return {}
#
# lambda entry point
#
def lambda_handler(handler_event, handler_context):
"""
Process a CloudWatch Log trigger.
"""
dry_run = os.getenv('DRY_RUN', 'false').lower() == 'true'
subscription_event = unpack_subscription_event(handler_event)
log.debug('Event data: %s', json.dumps(subscription_event))
log_group_name = subscription_event['logGroup']
log_stream_name = subscription_event['logStream']
if log_group_name.startswith('/aws/lambda/'):
        function_name = log_group_name[len('/aws/lambda/'):]
else:
raise RuntimeError('Log group %s is not a lambda' % log_group_name)
display_name = get_lambda_tags(function_name).get('DISPLAY_NAME', function_name)
# set up handler context
context = {
'log_group_name': log_group_name,
'log_stream_name': log_stream_name,
'function_name': function_name,
'display_name': display_name,
'dry_run': dry_run
}
process_lambda_events(subscription_event['logEvents'], context)
return 'Mischief managed.'
|
"""用来把正方形的图片割成圆形带黑色边框有透明背景的图,省得一张一张搞PS"""
from os import system
from PIL import Image as Image
def transparent(img: Image.Image, W: int, H: int, L: float):
    if W != H:
        raise Exception('This image is not square!')
R = W / 2
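    # Pixels outside the inscribed circle of radius R become fully transparent;
    # the ring of width L*R just inside the rim is painted solid black to form
    # the border.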
for x in range(W):
if x % 10 == 0: print('{0:.2f}%\r'.format((x + 1) / W * 100), end='')
for y in range(H):
if (x - R) ** 2 + (y - R) ** 2 > R ** 2:
img.putpixel((x, y), (255, 255, 255, 0))
elif (x - R) ** 2 + (y - R) ** 2 > (R - L * R) ** 2:
img.putpixel((x, y), (0, 0, 0, 255))
print('100.00%')
if __name__ == '__main__':
src_path = './Haruka-3a.png'
save_path = ['./Haruka-3', '.png']
L = 0.02
img = Image.open(src_path)
img = img.convert('RGBA')
W, H = img.size
transparent(img, W, H, L)
img.save(save_path[0] + save_path[1])
    # The following was meant to shrink the file, but for PNG it seems to make it larger instead; never mind, just save it directly.
    # img.save(save_path[0] + '-tmp' + save_path[1])
    # system(f'ffmpeg -i "{save_path[0] + "-tmp" + save_path[1]}" -pred mixed "{save_path[0] + save_path[1]}"')  # pass the image through ffmpeg to shrink it a bit
    # system(f'del "{save_path[0] + "-tmp" + save_path[1]}"')  # delete the temporary file
|
import re
from typing import Iterable, List, Optional, Set
from prefixdate import parse_formats
NUMBERS = re.compile(r"\d+")
def extract_years(text: str, default: Optional[str] = None) -> Set[str]:
"""Try to locate year numbers in a string such as 'circa 1990'. This will fail if
any numbers that don't look like years are found in the string, a strong indicator
that a more precise date is encoded (e.g. '1990 Mar 03')."""
years: Set[str] = set()
for match in NUMBERS.finditer(text):
year = match.group()
number = int(year)
        if number < 1800 or number > 2100:
if default is not None:
return set([default])
return set()
years.add(year)
return years
def parse_date(
text: Optional[str], formats: Iterable[str], default: Optional[str] = None
) -> Iterable[str]:
"""Parse a date two ways: first, try and apply a set of structured formats and
return a partial date if any of them parse correctly. Otherwise, apply `extract_years`
on the remaining string."""
if text is None:
return []
parsed = parse_formats(text, formats)
if parsed.text is not None:
return [parsed.text]
default = default or text
return extract_years(text, default)
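# Hedged usage examples (illustrative expected values based on the functions
# above; the prefixdate results assume its usual ISO-prefix normalization):
#   extract_years("circa 1990")                   -> {"1990"}
#   extract_years("1990 Mar 03")                  -> set()   # "03" is not year-like
#   parse_date("2001-05", ["%Y-%m"])              -> ["2001-05"]
#   parse_date("born around 1985", ["%Y-%m-%d"])  -> {"1985"}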
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
pkg_name = 'radpress'
version = __import__(pkg_name).__version__
PROJECT_DIR = os.path.dirname(__file__)
# get requires from requirements/global.txt file.
requires_file_name = os.path.join(PROJECT_DIR, 'requirements', 'global.txt')
with open(requires_file_name) as requires_file:
    install_requires = [line.strip() for line in requires_file]
setup(
name=pkg_name,
version=version,
description='Simple reusable blog application',
long_description=open(os.path.join(PROJECT_DIR, 'README.rst')).read(),
author=u'Gökmen Görgen',
author_email='gokmen@radity.com',
license='MIT',
url='https://github.com/gkmngrgn/radpress',
packages=find_packages(exclude=['venv', 'demo', 'docs']),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
classifiers=[
'Topic :: Software Development :: Libraries :: Python Modules',
'Framework :: Django',
'Environment :: Web Environment',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
]
)
|
## Number of People in the Bus
## 7 kyu
## https://www.codewars.com/kata/5648b12ce68d9daa6b000099
def number(bus_stops):
    people_on_bus = 0
    for got_on, got_off in bus_stops:
        people_on_bus += got_on - got_off
    return people_on_bus
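# Example from the kata description: 10+3+5 board, 0+5+8 leave, 5 remain.
#   number([[10, 0], [3, 5], [5, 8]])  ->  5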
|
from django.contrib import admin
from base.admin import PhotoAdminAbtract
from kendama.forms import KendamaForm
from kendama.models import KendamaTrick, TrickPlayer, Combo, ComboPlayer, Kendama, ComboTrick, Ladder, LadderCombo
class BaseKendamaAdmin(admin.ModelAdmin):
list_display = ('name', 'creator', 'difficulty', 'created_at')
list_filter = ('difficulty',)
search_fields = ('name', 'creator__user__username')
date_hierarchy = 'created_at'
autocomplete_fields = ('creator',)
list_select_related = ('creator',)
def get_changeform_initial_data(self, request):
return {'creator': request.user.profil}
class TrickPlayerInline(admin.TabularInline):
model = TrickPlayer
@admin.register(KendamaTrick)
class KendamaTrickAdmin(BaseKendamaAdmin):
inlines = (TrickPlayerInline,)
class ComboTrickInline(admin.TabularInline):
model = ComboTrick
show_change_link = True
class ComboPlayerInline(admin.TabularInline):
model = ComboPlayer
@admin.register(Combo)
class ComboAdmin(BaseKendamaAdmin):
inlines = (ComboTrickInline, ComboPlayerInline)
@admin.register(ComboTrick)
class ComboTrickAdmin(admin.ModelAdmin):
list_display = ('combo', 'trick', 'order')
list_select_related = ('combo', 'trick')
class BasePlayerFrequencyAdmin(admin.ModelAdmin):
list_filter = ('frequency',)
search_fields = ('trick__name', 'player__username')
date_hierarchy = 'created_at'
autocomplete_fields = ('trick', 'player')
def get_changeform_initial_data(self, request):
return {'player': request.user.profil}
@admin.register(TrickPlayer)
class TrickPlayerAdmin(BasePlayerFrequencyAdmin):
list_display = ('id', 'trick', 'player', 'frequency', 'created_at')
autocomplete_fields = ('trick', 'player')
list_select_related = ('trick', 'player')
@admin.register(ComboPlayer)
class ComboPlayerAdmin(BasePlayerFrequencyAdmin):
    list_display = ('id', 'combo', 'player', 'frequency', 'created_at')
    # Override the base search_fields, which reference 'trick__name' and do not
    # exist on ComboPlayer.
    search_fields = ('combo__name', 'player__username')
    autocomplete_fields = ('combo', 'player')
    list_select_related = ('combo', 'player')
@admin.register(Kendama)
class KendamaAdmin(PhotoAdminAbtract):
list_display = ('thumbnail', 'name', 'owner', 'created_at')
search_fields = ('name', 'owner__username')
list_select_related = ('owner',)
form = KendamaForm
def get_changeform_initial_data(self, request):
return {'owner': request.user.profil}
class LadderComboInline(admin.TabularInline):
model = LadderCombo
show_change_link = True
@admin.register(Ladder)
class LadderAdmin(BaseKendamaAdmin):
inlines = (LadderComboInline,)
|
from comet_ml import Experiment
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader, Subset
from Typhoon.utils.dataset import load_har
from Typhoon.core.model import Typhoon
from Typhoon.utils.trainer import NeuralNetworkClassifier
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
(x_train, y_train), (x_test, y_test) = load_har(True)
y_train -= 1
y_test -= 1
y_train = y_train.flatten()
y_test = y_test.flatten()
x_train = torch.tensor(x_train).float()
x_test = torch.tensor(x_test).float()
y_train = torch.tensor(y_train).long()
y_test = torch.tensor(y_test).long()
train_val_ds = TensorDataset(x_train, y_train)
test_ds = TensorDataset(x_test, y_test)
n_samples = len(train_val_ds)
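# Deterministic 70/30 split by index (no shuffling); together with the fixed
# seed above this keeps runs reproducible.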
train_size = int(n_samples * 0.7)
train_idx = list(range(0, train_size))
val_idx = list(range(train_size, n_samples))
train_ds = Subset(train_val_ds, train_idx)
val_ds = Subset(train_val_ds, val_idx)
train_loader = DataLoader(train_ds, batch_size=128, shuffle=False)
val_loader = DataLoader(val_ds, batch_size=128, shuffle=False)
test_loader = DataLoader(test_ds, batch_size=128, shuffle=False)
in_feature = 9
seq_len = 128
n_heads = 32
factor = 32
num_class = 6
clf = NeuralNetworkClassifier(
Typhoon(in_feature, seq_len, n_heads, factor, num_class, num_layers=6, d_model=128, dropout_rate=0.2),
nn.CrossEntropyLoss(),
optim.Adam, {"lr": 0.000001, "betas": (0.9, 0.98), "eps": 4e-09, "weight_decay": 5e-4}, Experiment()
)
clf.experiment_tag = "har_dataset"
clf.num_class = num_class
clf.fit(
{"train": train_loader,
"val": val_loader},
epochs=200
)
clf.evaluate(test_loader)
clf.confusion_matrix(test_ds)
clf.save_to_file("save_params_test/")
|
# -*- coding: utf-8 -*-
"""
@author: 2series
"""
# NOTE: `get_partitions` is the partition helper distributed with this problem
# set (ps1_partition.py); it is assumed to be importable from the same folder.
from ps1_partition import get_partitions

def brute_force_cow_transport(cows, limit=10):
"""
Finds the allocation of cows that minimizes the number of spaceship trips
via brute force. The brute force algorithm should follow the following method:
1. Enumerate all possible ways that the cows can be divided into separate trips
2. Select the allocation that minimizes the number of trips without making any trip
that does not obey the weight limitation
Does not mutate the given dictionary of cows.
Parameters:
cows - a dictionary of name (string), weight (int) pairs
limit - weight limit of the spaceship (an int)
Returns:
A list of lists, with each inner list containing the names of cows
transported on a particular trip and the overall list containing all the
trips
"""
    # Enumerate every partition of the herd into trips, keep only partitions in
    # which each trip obeys the weight limit, and track the one with the fewest
    # trips. The input dictionary is never mutated.
    best_trips = None
    for partition in get_partitions(list(cows.keys())):
        if any(sum(cows[name] for name in trip) > limit for trip in partition):
            continue
        if best_trips is None or len(partition) < len(best_trips):
            best_trips = partition
    return best_trips
#print(brute_force_cow_transport({'Milkshake': 40, 'MooMoo': 50, 'Lotus': 40, \
# 'Miss Bella': 25, 'Horns': 25, 'Boo': 20}, 100))
#[Out]: [['MooMoo', 'Horns', 'Miss Bella'], ['Milkshake', 'Lotus', 'Boo']]
#print(brute_force_cow_transport({'Betsy': 65, 'Buttercup': 72, 'Daisy': 50}, 75))
#[Out]: [['Buttercup'], ['Daisy'], ['Betsy']]
|
#!/usr/bin/env python
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import super
from future import standard_library
standard_library.install_aliases()
import pika
import json
from pymongo import MongoClient
from leopold.consumer import Consumer, logger
class StdouterrConsumer(Consumer):
def __init__(self, amqp_url, exchange, exchange_type, queue, routing_key,
mongodb_url, mongodb_name):
super(StdouterrConsumer, self).__init__(amqp_url, exchange,
exchange_type,
queue, routing_key)
self._mongodb_url = mongodb_url
self._mongodb_name = mongodb_name
self._client = MongoClient(self._mongodb_url)
self._db = self._client[self._mongodb_name]
self._col = self._db['stdouterr']
        # create_index is idempotent (ensure_index is deprecated in pymongo 3.x)
        self._col.create_index([('job_id', 1), ('datetime', 1)])
def execute_callback(self, ch, method, properties, body):
logger.info("body: %s" % body)
        # save results in mongodb (insert_one replaces the deprecated insert)
        self._col.insert_one(json.loads(body))
    def __del__(self):
        self._client.close()
if __name__ == "__main__":
amqp_url = "amqp://guest:guest@localhost:5672/%2F"
exchange = ""
exchange_type = "direct"
queue = "stdouterr"
routing_key = "stdouterr"
mongodb_url = "mongodb://localhost/"
mongodb_name = "mozart"
status_worker = StdouterrConsumer(amqp_url, exchange, exchange_type,
queue, routing_key, mongodb_url,
mongodb_name)
try:
status_worker.run()
except KeyboardInterrupt:
status_worker.stop()
|