max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
app/services/crud_service.py | palazzem/gello | 44 | 12772951 | <reponame>palazzem/gello<filename>app/services/crud_service.py
# -*- coding: utf-8 -*-
#
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache 2 License.
#
# This product includes software developed at Datadog
# (https://www.datadoghq.com/).
#
# Copyright 2018 Datadog, Inc.
#
"""crud_service.py
Service-helpers for creating and updating data.
"""
class CRUDService(object):
    """Abstract persistent-storage service exposing CRUD hooks.

    Concrete services override these methods with real persistence
    logic; the base implementations are deliberate no-ops.
    """

    def create(self):
        """Create and persist a new record to the database."""
        return None

    def update(self):
        """Update an already-persisted record."""
        return None

    def delete(self):
        """Delete a persisted record."""
        return None
| 1.828125 | 2 |
.circleci/regenerate.py | anandj91/vision | 9 | 12772952 | <filename>.circleci/regenerate.py
#!/usr/bin/env python
import jinja2
import os.path
# Render .circleci/config.yml from the Jinja template that lives next to
# this script.
here = os.path.dirname(__file__)
environment = jinja2.Environment(
    loader=jinja2.FileSystemLoader(here),
    lstrip_blocks=True,
    autoescape=False,
)
# Open first, then render inside the `with`, matching the original's
# behavior if rendering fails.
with open(os.path.join(here, 'config.yml'), 'w') as out:
    out.write(environment.get_template('config.yml.in').render())
| 2.21875 | 2 |
waves_gateway/service/transaction_service_converter_proxy_impl.py | NeolithEra/WavesGatewayFramework | 25 | 12772953 | <reponame>NeolithEra/WavesGatewayFramework
"""
AssetTransactionServiceConverterProxyImpl
"""
from typing import Optional, cast
from waves_gateway.common import Injectable
from waves_gateway.model import Transaction, TransactionAttempt
from waves_gateway.service.token import COIN_TRANSACTION_SERVICE, COIN_INTEGER_CONVERTER_SERVICE, \
ASSET_INTEGER_CONVERTER_SERVICE
from .attempt_list_converter_service import AttemptListConverterService
from .transaction_service import TransactionService
from .integer_converter_service import IntegerConverterService
from .asset_transaction_service_impl import AssetTransactionServiceImpl
from .token import COIN_TRANSACTION_SERVICE_CONVERTER_PROXY, WAVES_TRANSACTION_SERVICE_CONVERTER_PROXY
@Injectable(
    COIN_TRANSACTION_SERVICE_CONVERTER_PROXY,
    deps=[COIN_INTEGER_CONVERTER_SERVICE, COIN_TRANSACTION_SERVICE, AttemptListConverterService])
@Injectable(
    WAVES_TRANSACTION_SERVICE_CONVERTER_PROXY,
    deps=[ASSET_INTEGER_CONVERTER_SERVICE, AssetTransactionServiceImpl, AttemptListConverterService])
class TransactionServiceConverterProxyImpl(TransactionService):
    """
    Proxy TransactionService that reverts the amount conversion on a
    TransactionAttempt before delegating to the wrapped TransactionService,
    and converts the resulting Transaction's amounts back to integers.
    """

    def __init__(self, integer_converter_service: IntegerConverterService, transaction_service: TransactionService,
                 attempt_list_converter_service: AttemptListConverterService) -> None:
        self._integer_converter_service = integer_converter_service
        self._transaction_service = transaction_service
        self._attempt_list_converter_service = attempt_list_converter_service

    def send_coin(self, attempt: TransactionAttempt, secret: Optional[str]) -> Transaction:
        """Revert the attempt's conversion, send it, and int-convert the result."""
        reverted = self._attempt_list_converter_service.revert_attempt_conversion(attempt)
        sent = self._transaction_service.send_coin(reverted, secret)
        return self._integer_converter_service.convert_transaction_to_int(sent)
| 2.0625 | 2 |
src/icolos/config_containers/container.py | jharrymoore/Icolos | 11 | 12772954 | <gh_stars>10-100
import abc
import json
import os
class ConfContainer(object, metaclass=abc.ABCMeta):
    """Abstract container around a JSON-backed configuration dictionary.

    ``conf`` may be a dict, a JSON string, or a path to a JSON file.
    """

    @abc.abstractmethod
    def __init__(self, conf):
        # Strings are either a file path (read + strip newlines, then parse)
        # or raw JSON text (parse directly); dicts are stored as-is.
        if isinstance(conf, str):
            if os.path.isfile(conf):
                with open(conf) as handle:
                    raw = handle.read()
                conf = raw.replace("\r", "").replace("\n", "")
            conf = json.loads(conf)
        self._conf = conf

    def get_as_dict(self):
        """Return the underlying configuration dictionary."""
        return self._conf

    def get(self, key, default=None):
        """Return the value for *key*, or *default* when absent."""
        return self._conf.get(key, default)

    def __getitem__(self, item):
        # Delegate through get_as_dict() so subclasses may override it.
        return self.get_as_dict()[item]

    def get_as_string(self):
        """Return the configuration serialized as a JSON string."""
        return json.dumps(self._conf)

    def validate(self):
        raise NotImplementedError(
            "This functions needs to be implemented by child classes."
        )
| 3.125 | 3 |
vocabulary/models.py | ketzu/klang-api | 0 | 12772955 | <reponame>ketzu/klang-api
from datetime import datetime, timezone
from django.db import models
# Create your models here.
from users.models import User
class Vocable(models.Model):
    """A single vocabulary entry together with its translation."""
    vocab = models.CharField(max_length=200)
    translation = models.CharField(max_length=200)

    def __str__(self):
        return "{} ({})".format(self.vocab, self.translation)
class StudiedVocable(models.Model):
    """Per-user study state for a :class:`Vocable`.

    Tracks when the user last studied the vocable and how many times it
    was answered correctly.
    """
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    vocab = models.ForeignKey(Vocable, on_delete=models.CASCADE)
    # Fix: null=True is required so an unstudied vocable can be stored with
    # no timestamp; blank=True alone only relaxes form validation, and
    # __str__ below explicitly handles the None (unstudied) case.
    last_studied = models.DateTimeField(blank=True, null=True)
    correct_studied = models.IntegerField(default=0)

    class Meta:
        # one study record per (user, vocable) pair
        unique_together = ('user', 'vocab')

    def __str__(self):
        if self.last_studied is not None:
            timediff = (datetime.now(timezone.utc) - self.last_studied)
            if timediff.days >= 1:
                return f"{self.vocab.vocab} ({timediff.days} days ago)"
            else:
                return f"{self.vocab.vocab} ({timediff.total_seconds()/3600:.1f} hours ago)"
        return f"{self.vocab.vocab} (unstudied)"
class Set(models.Model):
    """A named collection of vocables (a study set)."""
    # display name of the set
    name = models.CharField(max_length=200)
    # a vocable may belong to many sets; reverse accessor is `vocable.sets`
    vocabs = models.ManyToManyField(Vocable, related_name='sets', blank=True)

    def __str__(self):
        return self.name
| 2.65625 | 3 |
spacegraphcats/cdbg/label_cdbg.py | mogproject/spacegraphcats | 0 | 12772956 | #! /usr/bin/env python
"""
Build an index that can be used to retrieve individual reads or contigs
by cDBG node ID; produce a SQLite database for fast retrieval.
Briefly, this script creates a sqlite database with a single table,
'sequences', where a query like this:
SELECT DISTINCT sequences.offset FROM sequences WHERE label ...
can be executed to return the offset of all sequences with the given
label. Here, 'label' is the cDBG ID to which the sequence belongs.
The script extract_reads_by_frontier_sqlite.py is a downstream script to
extract the reads with a frontier search.
Specifically,
* walk through the contigs assembled from the cDBG;
* build a DBG cover using khmer tags, such that every k-mer in the DBG
is within distance d=40 of a tag;
* label each tag with the cDBG node ID from the contig;
* save for later use.
"""
import sys
import os
import argparse
import screed
import khmer
import collections
import sqlite3
from spacegraphcats.search import search_utils
# graph settings
DEFAULT_KSIZE = 31
DEFAULT_MEMORY = 1e9
def main(argv=sys.argv[1:]):
    """Index reads by cDBG node ID into a SQLite database.

    Pass 1 walks the assembled cDBG contigs, tags them with khmer, and
    labels each tag with the contig's cDBG node ID.  Pass 2 walks the
    BGZF-compressed read file and records, for every read, its file offset
    together with every cDBG label its tags map to.  Returns 0 on success.

    NOTE(review): the default ``argv=sys.argv[1:]`` is evaluated once at
    import time, not per call — confirm this is intended.
    """
    p = argparse.ArgumentParser()
    p.add_argument('catlas_prefix', help='catlas prefix')
    p.add_argument('reads')
    p.add_argument('savename')
    p.add_argument('-k', '--ksize', default=DEFAULT_KSIZE, type=int)
    p.add_argument('-M', '--memory', default=DEFAULT_MEMORY,
                   type=float)
    args = p.parse_args(argv)

    # always start from a fresh database file
    dbfilename = args.savename
    if os.path.exists(dbfilename):
        print('removing existing db {}'.format(dbfilename))
        os.unlink(dbfilename)

    db = sqlite3.connect(dbfilename)
    cursor = db.cursor()
    cursor.execute('CREATE TABLE sequences (offset INTEGER, label INTEGER)');
    db.commit()

    # @CTB support different sizes.
    # size the khmer Nodegraph from the memory budget (split over 4 tables)
    graph_tablesize = int(args.memory * 8.0 / 4.0)
    ng = khmer.Nodegraph(args.ksize, graph_tablesize, 4)

    basename = os.path.basename(args.catlas_prefix)
    contigfile = os.path.join(args.catlas_prefix, "contigs.fa.gz")

    # pass 1: tag each contig; label every tag with its cDBG node ID
    total_bp = 0
    watermark_size = 1e6
    watermark = watermark_size
    print('walking catlas cDBG contigs: {}'.format(contigfile))
    n = 0
    tags_to_label = collections.defaultdict(int)
    for contig in screed.open(contigfile):
        n += 1
        # progress report roughly every watermark_size bp
        if total_bp >= watermark:
            print('... {:5.2e} bp thru contigs'.format(int(watermark)),
                  file=sys.stderr)
            watermark += watermark_size
        total_bp += len(contig.sequence)

        # contigs shorter than k carry no k-mers: nothing to tag
        if len(contig.sequence) < args.ksize:
            continue

        # contig names are the cDBG node IDs
        cdbg_id = int(contig.name)
        ng.consume_and_tag(contig.sequence)
        tags = ng.get_tags_for_sequence(contig.sequence)
        # NOTE(review): a tag shared by several contigs keeps only the last
        # cDBG ID assigned — confirm tags are unique per contig.
        for t in tags:
            tags_to_label[t] = cdbg_id

    ###

    # pass 2: map each read (by its BGZF offset) to the labels of its tags
    total_bp = 0
    watermark_size = 1e7
    watermark = watermark_size
    print('walking read file: {}'.format(args.reads))
    n = 0

    # relax durability settings to speed up the bulk INSERTs
    cursor.execute('PRAGMA cache_size=1000000')
    cursor.execute('PRAGMA synchronous = OFF')
    cursor.execute('PRAGMA journal_mode = MEMORY')

    # some sqlite installs start in transactions
    try:
        cursor.execute('BEGIN TRANSACTION')
    except sqlite3.OperationalError:
        pass

    reader = search_utils.BgzfReader(args.reads)
    for record, offset in search_utils.iterate_bgzf(reader):
        n += 1
        if total_bp >= watermark:
            print('... {:5.2e} bp thru reads'.format(int(watermark)),
                  file=sys.stderr)
            watermark += watermark_size
        total_bp += len(record.sequence)

        if len(record.sequence) < args.ksize:
            continue

        # one row per distinct label touched by this read's tags
        tags = ng.get_tags_for_sequence(record.sequence)
        labels = set([ tags_to_label[t] for t in tags ])
        for lb in labels:
            cursor.execute('INSERT INTO sequences (offset, label) VALUES (?, ?)', (offset, lb))

    db.commit()
    db.close()
    print('done!')
    return 0
if __name__ == '__main__':
    # propagate main()'s return code as the process exit status
    sys.exit(main())
| 2.828125 | 3 |
RunJobArgo.py | GNUBrinux/pilot | 13 | 12772957 | # Class definition:
# RunJobMira
# [Add description here]
# Instances are generated with RunJobFactory via pUtil::getRunJob()
# Implemented as a singleton class
# http://stackoverflow.com/questions/42558/python-and-the-singleton-pattern
# Import relevant python/pilot modules
from RunJobHPC import RunJobHPC # Parent RunJobHPC class
import Site, pUtil, Job, Node, RunJobUtilities
from pUtil import tolog, isAnalysisJob, readpar, getExperiment
from FileStateClient import updateFileStates, dumpFileStates
from ErrorDiagnosis import ErrorDiagnosis # import here to avoid issues seen at BU with missing module
from PilotErrors import PilotErrors
from datetime import datetime
from MessageInterface import MessageInterface
from ArgoJob import ArgoJob, ArgoJobStatus
from BalsamJob import BalsamJob
from SiteInformation import SiteInformation
# Standard python modules
import os, sys, commands, time, optparse, shlex, stat
import traceback
import atexit, signal
class RunJobArgo(RunJobHPC):
    """RunJob subclass that submits Alpgen event-generation work to ARGO/Balsam
    via RabbitMQ and polls the message queue for the job's status.

    Written for Python 2 (``except Exception, e`` syntax, ``commands`` module).
    """

    # private data members
    __runjob = "RunJobArgo"             # String defining the sub class
    __instance = None                   # Singleton instance holder (None until first __new__)
    __error = PilotErrors()             # PilotErrors object

    # public data members
    process = ""  # zjet, wjet, wqq, wcjet, etc.
    base_filename = "alpout"  # should be the same as in the input cards

    # controls for warmup
    warmup_phase0_number_events = None
    warmup_phase0_number_iterations = None
    warmup_phase1_number_events = None
    warmup_wall_minutes = None
    warmup_preprocess = 'alpgen_warmup_presubmit.sh'
    warmup_preprocess_args = None

    # controls for event generation (weighted gen + unweighting)
    evtgen_phase0_number_events = None
    evtgen_phase0_number_iterations = None
    evtgen_phase1_number_events = None
    evtgen_nodes = None
    evtgen_processes_per_node = None
    evtgen_wall_minutes = None
    evtgen_executable = 'alpgenCombo.sh'
    evtgen_scheduler_args = '--mode=script'
    evtgen_preprocess = 'alpgen_presubmit.sh'
    evtgen_postprocess = 'alpgen_postsubmit.sh'

    working_path = None
    input_url = None
    output_url = None
    pdf_filename = 'cteq6l1.tbl'
    username = None
    serial_site = 'argo_cluster'      # Balsam site for serial (condor) steps
    parallel_site = None              # Balsam site for the parallel event-gen step
    group_identifier = None

    athena_input_card_executable = 'get_alpgen_input_card.py'
    athena_postprocess = 'alpgen_create_input_cards.py'
    athena_postprocess_log = 'alpgen_create_input_cards.log'
    ecm = None
    run_number = None
    job_config = None
    evgen_job_opts = None
    athena_input_card_name = 'input_card.mode_1.dat' # card output by Generate_trf

    grid_ftp_server = 'atlasgridftp02.hep.anl.gov'
    grid_ftp_protocol = 'gsiftp://'
    job_working_path = '/grid/atlas/hpc/argo/jobs'

    # NOTE(review): mutable class-level default; setup() rebinds it per
    # instance via self.argo_job, so the shared list is never mutated here.
    argo_job = []

    # Required methods
    def __init__(self):
        """ Default initialization """
        pass

    def __new__(cls, *args, **kwargs):
        """ Override the __new__ method to make the class a singleton """
        # NOTE(review): delegates to super(RunJobHPC, ...) rather than
        # super(RunJobArgo, ...), skipping RunJobHPC.__new__ — confirm intended.
        if not cls.__instance:
            cls.__instance = super(RunJobHPC, cls).__new__(cls, *args, **kwargs)
        return cls.__instance

    def getRunJob(self):
        """ Return a string with the experiment name """
        return self.__runjob

    def getRunJobFileName(self):
        """ Return the filename of the module """
        return super(RunJobArgo, self).getRunJobFileName()

    # def argumentParser(self):  <-- see example in RunJob.py

    def allowLoopingJobKiller(self):
        """ Should the pilot search for looping jobs? """
        # The pilot has the ability to monitor the payload work directory. If there are no updated files within a certain
        # time limit, the pilot will consider the as stuck (looping) and will kill it. The looping time limits are set
        # in environment.py (see e.g. loopingLimitDefaultProd)
        return False

    def get_argo_job(self, job):
        """Assemble and return the ArgoJob (input-cards + warmup + event-gen
        BalsamJobs) for the given pilot job.

        Relies on the instance attributes populated by setup().
        """

        ##-----------------------
        # create argo job
        ##-----------------------

        argo_job = ArgoJob()
        argo_job.input_url = None #self.GRID_FTP_PROTOCOL + self.GRID_FTP_SERVER + self.job_path
        if self.input_url is not None:
            argo_job.input_url = self.input_url
        argo_job.output_url = self.grid_ftp_protocol + self.grid_ftp_server + self.job_path
        if self.output_url is not None:
            argo_job.output_url = self.output_url
        argo_job.username = self.username
        argo_job.group_identifier = self.group_identifier

        ##-----------------------
        # create get alpgen input cards balsam job
        ##-----------------------

        # input card filenames for imode 0/1/2
        input_file_imode0 = self.base_filename + '.input.0'
        input_file_imode1 = self.base_filename + '.input.1'
        input_file_imode2 = self.base_filename + '.input.2'

        input_cards_job = BalsamJob()
        input_cards_job.executable = self.athena_input_card_executable
        input_cards_job.executable_args = ('-e ' + self.ecm
                                           + ' -r ' + self.run_number
                                           + ' -o ' + self.job_config
                                           + ' -j ' + self.evgen_job_opts)
        input_cards_job.output_files = [input_file_imode0,
                                        input_file_imode1,
                                        input_file_imode2,
                                        self.athena_postprocess_log]
        input_cards_job.nodes = 1
        input_cards_job.processes_per_node = 1
        input_cards_job.wall_minutes = 0 # running on condor cluster so does not need time
        input_cards_job.username = self.username
        input_cards_job.target_site = self.serial_site
        input_cards_job.postprocess = self.athena_postprocess
        input_cards_job.postprocess_args = (' -i ' + self.athena_input_card_name + ' -p ' + self.process
                                            + ' -n ' + str(self.evtgen_phase1_number_events)
                                            + ' --log-filename=' + str(self.athena_postprocess_log))
        # warmup controls are optional; only forward the ones that were set
        if self.warmup_phase0_number_events is not None:
            input_cards_job.postprocess_args += ' --wmp-evts-itr=' + str(self.warmup_phase0_number_events)
        if self.warmup_phase0_number_iterations is not None:
            input_cards_job.postprocess_args += ' --wmp-nitr=' + str(self.warmup_phase0_number_iterations)
        if self.warmup_phase1_number_events is not None:
            input_cards_job.postprocess_args += ' --wmp-evts=' + str(self.warmup_phase1_number_events)
        argo_job.add_job(input_cards_job)

        ##-----------------------
        # create warm-up job
        ##-----------------------

        # create grid filenames
        grid1 = self.base_filename + '.grid1'
        grid2 = self.base_filename + '.grid2'

        # create warmup balsam job
        warmup = BalsamJob()
        warmup.executable = self.process + 'gen90_mpi'
        warmup.executable_args = input_file_imode0
        warmup.input_files = [input_file_imode0]
        warmup.output_files = [grid1,grid2]
        warmup.nodes = 1
        warmup.processes_per_node = 1
        warmup.wall_minutes = 0 # running on condor cluster so does not need time
        warmup.username = self.username
        warmup.target_site = self.serial_site
        warmup.preprocess = self.warmup_preprocess
        argo_job.add_job(warmup)

        ##-----------------------
        # create event generation job
        ##-----------------------

        # create executable
        alpgen_exe = self.process + 'gen90_mpi_ramdisk_nomrstpdfs'
        if 'argo_cluster' in self.parallel_site: # no ramdisk needed on argo_cluster
            alpgen_exe = self.process + 'gen90_mpi'

        # create filenames
        unw = self.base_filename + '.unw.gz'
        unw_par = self.base_filename + '_unw.par'
        wgt = self.base_filename + '.wgt'
        wgt_par = self.base_filename + '.par'
        directoryList_before = 'directoryList_before.txt'
        directoryList_after = 'directoryList_after.txt'

        # create event gen balsam job
        evtgen = BalsamJob()
        evtgen.executable = self.evtgen_executable
        evtgen.executable_args = (alpgen_exe + ' ' + input_file_imode1 + ' '
                                  + input_file_imode2 + ' ' + str(self.evtgen_processes_per_node))
        evtgen.input_files = [grid1,
                              grid2,
                              input_file_imode1,
                              input_file_imode2]
        evtgen.output_files = [unw,
                               unw_par,
                               directoryList_before,
                               directoryList_after,
                               self.evtgen_postprocess + '.out',
                               self.evtgen_postprocess + '.err',
                               ]
        evtgen.preprocess = self.evtgen_preprocess
        evtgen.postprocess = self.evtgen_postprocess
        evtgen.postprocess_args = self.base_filename
        evtgen.nodes = self.evtgen_nodes
        evtgen.processes_per_node = self.evtgen_processes_per_node
        evtgen.wall_minutes = self.evtgen_wall_minutes
        evtgen.username = self.username
        evtgen.scheduler_args = self.evtgen_scheduler_args
        evtgen.target_site = self.parallel_site
        argo_job.add_job(evtgen)

        return argo_job

    def setup(self, job, jobSite, thisExperiment):
        """ prepare the setup and get the run command list """
        # Parses job.jobPars into the Alpgen/ARGO options, populates the
        # instance attributes and builds self.argo_job.
        # Returns (error code, possibly-updated job).

        # start setup time counter
        t0 = time.time()
        ec = 0

        # split up the job parameters to be able to loop over the tasks
        jobParameters = job.jobPars.split("\n")[0]
        jobTrf = job.trf.split("\n")[0]

        parser = optparse.OptionParser(description=' program to submit alpgen jobs like a pilot')
        parser.add_option('-p','--process',dest='process',help='Alpgen Process, i.e. zjet, wjet, wqq, etc.')
        parser.add_option('-n','--nevts',dest='nevts',help='Number of weighted events requested in input file for weighted event generation',type='int')
        parser.add_option('-g','--group-id',dest='group_identifier',help='User specified string that helps the user group jobs together.')
        parser.add_option('-e','--ecm',dest='ecm',help='Center of Mass Energy.')
        parser.add_option('-r','--run-number',dest='run_number',help='Run Number')
        parser.add_option('-c','--jobConfig',dest='jobConfig',help='Job Options that will used from the Job Config tarball, i.e. MC12JobOptions/MC12.<Run Number>.<description>.py')
        parser.add_option('-j','--evgenJobOpts',dest='evgenJobOpts',help='Job Config tarball, i.e. MC12JobOpts-XX-YY-ZZ.tar.gz')
        parser.add_option('','--dev',dest='dev',help='For development only.',action='store_true',default=False)
        parser.add_option('-q','--status-queue',dest='enable_status_queue',help='Enable the setting of the message queue parameter in the ArgoJob, which means ARGO will not send message updates for this job to the queue with its job ID.',action='store_true',default=False)
        #parser.add_option('-a','--warmup-evts',dest='warmup_evts',help='For Warmup Step: Three numbers seperated by commas giving the number of events per iteration, number of iterations, and final number of events to generate. Example: "10000,10,1000000"')
        parser.add_option('-b','--evtgen-evts',dest='evtgen_evts',help='For Event Generation Step: The number of events to generation in the event generation step. The ouput of unweighted events tends to be less so request more than you want. For example W+0jets gives you 70\%, W+1jet gives you 16%, W+2jet gives you 5%, W+3jet gives you 1%, and so on.', type='int')
        parser.add_option('-o','--num-nodes',dest='numnodes',help='number of nodes to use on destination machine',type='int')
        parser.add_option('-u','--ranks-per-node',dest='ranks_per_node',help='number of MPI ranks per node to use on destination machine',type='int')
        parser.add_option('-t','--wall-time',dest='walltime',help='The wall time to submit to the queue in minutes.',type='int')
        parser.add_option('-s','--site',dest='site',help='Balsam site name on which to run the event generation')
        parser.add_option('-x','--no-submit',dest='submit',help='do not submit the message to ARGO. For testing purposes.',action='store_false',default=True)
        parser.add_option('','--wmp-evts-itr',dest='wm_evts_per_itr',help='Warmup: Number of weighted events per interation.')
        parser.add_option('','--wmp-nitr',dest='wm_nitr',help='Warmup: Number of iterations')
        parser.add_option('','--wmp-evts',dest='wm_evts',help='Warmup: Number of final events to produce.')

        try:
            options, args = parser.parse_args(shlex.split(jobParameters))
        except:
            # optparse calls sys.exit on bad args; report as a setup failure
            ec = self.__error.ERR_SETUPFAILURE
            job.pilotErrorDiag = "Failure to parse job arguments"
            tolog("Failure to parse job arguments for ARGO job")
            return ec, job

        tolog("ARGO job will be launched with next parameters: %s" % jobParameters)

        # copy the parsed options onto the instance for get_argo_job()
        self.process = options.process
        self.username = 'pilot, %s' % job.prodUserID[:120] #os.environ['USER']
        self.group_identifier = options.group_identifier
        self.ecm = options.ecm
        self.run_number = options.run_number
        self.job_config = options.jobConfig
        self.evgen_job_opts = options.evgenJobOpts
        self.warmup_phase0_number_events = options.wm_evts_per_itr
        self.warmup_phase0_number_iterations = options.wm_nitr
        self.warmup_phase1_number_events = options.wm_evts
        self.evtgen_phase1_number_events = options.evtgen_evts
        self.evtgen_nodes = options.numnodes
        self.evtgen_processes_per_node = options.ranks_per_node
        self.evtgen_wall_minutes = options.walltime
        self.parallel_site = options.site
        self.dev = options.dev

        # per-job working directory on the grid-ftp endpoint
        self.job_path = os.path.join(self.job_working_path,job.jobId)
        tolog("ARGO job path: %s" % self.job_path)

        self.argo_job = self.get_argo_job(job)

        if options.dev:
            job.serial_site = 'argo_cluster_dev'

        # verify that the multi-trf job is setup properly
        os.chdir(jobSite.workdir)
        tolog("Current job workdir is %s" % os.getcwd())

        job.timeSetup = int(time.time() - t0)
        tolog("Total setup time: %d s" % (job.timeSetup))

        return ec, job

    def executePayload(self, thisExperiment, job):
        """Submit the ARGO job over RabbitMQ and poll its status queue until
        the job reaches a terminal state.

        Returns (res, job, getstatusoutput_was_interrupted) where res is
        (exit code, status string, exit message).
        """

        t0 = os.times()
        res_tuple = None

        # loop over all run commands (only >1 for multi-trfs)
        getstatusoutput_was_interrupted = False
        job_status = None
        tolog("About to launch ARGO job")
        # Poll MQ for Job Status
        try:
            # Initiate MQ interface and send job
            self.argo_job.job_status_routing_key = '%s_job_status' % job.jobId #'status_' + jobID
            si = SiteInformation()
            mi = MessageInterface()
            mi.host = 'atlasgridftp02.hep.anl.gov'
            mi.port = 5671

            # prefer the rabbitmq-specific cert/key next to the proxy cert,
            # overridden by X509_USER_CERT/X509_USER_KEY when present
            mi.ssl_cert = si.getSSLCertificate() #'/grid/atlas/hpc/pilot_certs/xrootdsrv-cert.pem'
            proxy_cert_path = si.getSSLCertificate()
            mi.ssl_cert = os.path.dirname(proxy_cert_path) + "/rabbitmq-cert.pem"
            if 'X509_USER_CERT' in os.environ.keys():
                mi.ssl_cert = os.environ['X509_USER_CERT'] #'/users/hpcusers/balsam_dev/gridsecurity/jchilders/xrootdsrv-cert.pem'

            mi.ssl_key = mi.ssl_cert #'/grid/atlas/hpc/pilot_certs/xrootdsrv-key.pem'
            mi.ssl_key = os.path.dirname(proxy_cert_path) + "/rabbitmq-key.pem"
            if 'X509_USER_KEY' in os.environ.keys():
                mi.ssl_key = os.environ['X509_USER_KEY'] #'/users/hpcusers/balsam_dev/gridsecurity/jchilders/xrootdsrv-key.pem'

            #mi.ssl_ca_certs = os.path.dirname(proxy_cert_path) + "/rabbitmq-cacerts.pem"
            mi.ssl_ca_certs = '/grid/atlas/hpc/pilot_certs/cacerts.pem'
            #if 'X509_CA_CERTS' in os.environ.keys():
            #    mi.ssl_ca_certs = os.environ['X509_CA_CERTS'] #'/users/hpcusers/balsam_dev/gridsecurity/jchilders/cacerts.pem'
            #tolog("CA certs: %s" % (mi.ssl_ca_certs))
            ca_certs = os.path.dirname(proxy_cert_path) + "/rabbitmq-cacerts.pem"
            if os.path.isfile(ca_certs):
                mi.ssl_ca_certs = ca_certs

            mi.exchange_name = 'argo_users'

            #Create queue to get messages about ARGO Job status from MQ
            tolog('Opening connection with MQ')
            mi.open_blocking_connection()
            tolog('Create queue [%s] to retrieve messages with job status' % self.argo_job.job_status_routing_key)
            mi.create_queue(self.argo_job.job_status_routing_key, self.argo_job.job_status_routing_key)

            # submit ARGO job to MQ
            #tolog('Opening connection with MQ')
            #mi.open_blocking_connection()
            routing_key = 'argo_job'
            if self.dev:
                routing_key = 'argo_job_dev'
            tolog('Sending msg with job to ARGO')
            mi.send_msg(self.argo_job.serialize(), routing_key)
            tolog(' done sending ')

            # Waiting till job done or failed
            ARGO_err_msg = ''
            while True:
                time.sleep(5)
                # non-blocking poll of the per-job status queue
                message = mi.receive_msg(self.argo_job.job_status_routing_key, True)
                if message[2]:
                    tolog ("Got message from queue [%s]: method [%s], properties [%s], body [ %s ]" % (self.argo_job.job_status_routing_key, message[0], message[1], message[2]))
                    job_status = ArgoJobStatus.get_from_message(message[2])
                    job.hpcStatus = job_status.state
                    # push the updated HPC state back to the pilot server
                    rt = RunJobUtilities.updatePilotServer(job, self.getPilotServer(), self.getPilotPort())
                    tolog("Extracted state: %s" % job_status.state)
                    if job_status.state == job_status.HISTORY:
                        res_tuple = (0, "Done")
                        break
                    elif job_status.is_failed():
                        # accumulate failure messages; keep polling until a
                        # terminal state arrives
                        res_tuple = (1, "Failed")
                        ARGO_err_msg = ARGO_err_msg + ' ' + job_status.message
                    elif job_status.state == job_status.FAILED:
                        # NOTE(review): likely shadowed by is_failed() above,
                        # and uses the module-global runJob rather than self —
                        # confirm this branch is reachable/intended.
                        res_tuple = (1, "Failed")
                        ARGO_err_msg = ARGO_err_msg + ' ' + job_status.message
                        runJob.failJob(1, 0, job, ins=job.inFiles, pilotErrorDiag=ARGO_err_msg)
                        break
                time.sleep(5)
            mi.close()
            tolog(' closing connection to MQ')

            tolog("Job State: %s" % (job_status.state))
            #job.timeExe = int(fork_job.finished - fork_job.started)

            ####################################################

        except Exception, e:
            tolog("!!FAILED!!3000!! Failed to run command %s" % str(e))
            getstatusoutput_was_interrupted = True
            res_tuple = (1, "Failed")
            self.failJob(0, self.__error.ERR_GENERALERROR, job, pilotErrorDiag=str(e))
        else:
            if res_tuple[0] == 0:
                tolog("ARGO Job finished")
            else:
                tolog("ARGO Job failed: res = %s" % (str(res_tuple)))

        t1 = os.times()
        # index 4 of os.times() is elapsed wall-clock time
        job.timeExe = int(round(t1[4] - t0[4]))

        tolog("Original exit code: %s" % (res_tuple[0]))
        if res_tuple[0] != None:
            # get the exit code of the payload
            tolog("Exit code: %s (returned from OS)" % (res_tuple[0]%255))
            res0 = res_tuple[0]%255
            if job_status:
                exitMsg = job_status.message
            else:
                exitMsg = res_tuple[1]
        else:
            tolog("Exit code: None (returned from OS, Job was canceled or interrupted)")
            res0 = None
            exitMsg = "Job was canceled by internal call"

        # check the job report for any exit code that should replace the res_tuple[0]
        res = (res0, res_tuple[1], exitMsg)

        # dump an extract of the payload output
        tolog("NOTE: For %s output, see files %s, %s" % (job.payload, job.stdout, job.stderr))

        # JEM job-end callback
        try:
            from JEMstub import notifyJobEnd2JEM
            notifyJobEnd2JEM(job, tolog)
        except:
            pass # don't care (fire and forget)

        return res, job, getstatusoutput_was_interrupted
if __name__ == "__main__":
tolog("Starting RunJobArgo")
# Get error handler
error = PilotErrors()
# Get runJob object
runJob = RunJobArgo()
# Define a new parent group
os.setpgrp()
# Protect the runJob code with exception handling
hP_ret = False
try:
# always use this filename as the new jobDef module name
import newJobDef
jobSite = Site.Site()
return_tuple = runJob.argumentParser()
tolog("argumentParser returned: %s" % str(return_tuple))
jobSite.setSiteInfo(return_tuple)
# jobSite.setSiteInfo(argParser(sys.argv[1:]))
# reassign workdir for this job
jobSite.workdir = jobSite.wntmpdir
if runJob.getPilotLogFilename() != "":
pUtil.setPilotlogFilename(runJob.getPilotLogFilename())
# set node info
node = Node.Node()
node.setNodeName(os.uname()[1])
node.collectWNInfo(jobSite.workdir)
# redirect stder
sys.stderr = open("%s/runjob.stderr" % (jobSite.workdir), "w")
tolog("Current job workdir is: %s" % os.getcwd())
tolog("Site workdir is: %s" % jobSite.workdir)
# get the experiment object
thisExperiment = getExperiment(runJob.getExperiment())
tolog("RunJob will serve experiment: %s" % (thisExperiment.getExperiment()))
# set the cache (used e.g. by LSST)
#if runJob.getCache():
# thisExperiment.setCache(runJob.getCache())
#JR = JobRecovery()
try:
job = Job.Job()
job.setJobDef(newJobDef.job)
job.workdir = jobSite.workdir
job.experiment = runJob.getExperiment()
# figure out and set payload file names
job.setPayloadName(thisExperiment.getPayloadName(job))
except Exception, e:
pilotErrorDiag = "Failed to process job info: %s" % str(e)
tolog("!!WARNING!!3000!! %s" % (pilotErrorDiag))
runJob.failJob(0, error.ERR_UNKNOWN, job, pilotErrorDiag=pilotErrorDiag)
# prepare for the output file data directory
# (will only created for jobs that end up in a 'holding' state)
job.datadir = runJob.getParentWorkDir() + "/PandaJob_%s_data" % (job.jobId)
# register cleanup function
atexit.register(runJob.cleanup, job)
# to trigger an exception so that the SIGTERM signal can trigger cleanup function to run
# because by default signal terminates process without cleanup.
def sig2exc(sig, frm):
""" signal handler """
error = PilotErrors()
runJob.setGlobalPilotErrorDiag("!!FAILED!!3000!! SIGTERM Signal %s is caught in child pid=%d!\n" % (sig, os.getpid()))
tolog(runJob.getGlobalPilotErrorDiag())
if sig == signal.SIGTERM:
runJob.setGlobalErrorCode(error.ERR_SIGTERM)
elif sig == signal.SIGQUIT:
runJob.setGlobalErrorCode(error.ERR_SIGQUIT)
elif sig == signal.SIGSEGV:
runJob.setGlobalErrorCode(error.ERR_SIGSEGV)
elif sig == signal.SIGXCPU:
runJob.setGlobalErrorCode(error.ERR_SIGXCPU)
elif sig == signal.SIGBUS:
runJob.setGlobalErrorCode(error.ERR_SIGBUS)
elif sig == signal.SIGUSR1:
runJob.setGlobalErrorCode(error.ERR_SIGUSR1)
else:
runJob.setGlobalErrorCode(error.ERR_KILLSIGNAL)
runJob.setFailureCode(runJob.getGlobalErrorCode)
# print to stderr
print >> sys.stderr, runJob.getGlobalPilotErrorDiag()
raise SystemError(sig)
signal.signal(signal.SIGTERM, sig2exc)
signal.signal(signal.SIGQUIT, sig2exc)
signal.signal(signal.SIGSEGV, sig2exc)
signal.signal(signal.SIGXCPU, sig2exc)
signal.signal(signal.SIGBUS, sig2exc)
# see if it's an analysis job or not
analysisJob = isAnalysisJob(job.trf.split(",")[0])
if analysisJob:
tolog("User analysis job")
else:
tolog("Production job")
tolog("runJobArgo received a job with prodSourceLabel=%s" % (job.prodSourceLabel))
# setup starts here ................................................................................
# update the job state file
job.jobState = "setup"
#_retjs = JR.updateJobStateTest(job, jobSite, node, mode="test")
# send [especially] the process group back to the pilot
job.setState([job.jobState, 0, 0])
rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort())
# prepare the setup and get the run command list
ec, job = runJob.setup(job, jobSite, thisExperiment)
if ec != 0:
tolog("!!WARNING!!2999!! runJob setup failed: %s" % (job.pilotErrorDiag))
runJob.failJob(0, ec, job, pilotErrorDiag=job.pilotErrorDiag)
tolog("Setup has finished successfully")
# job has been updated, display it again
job.displayJob()
# (setup ends here) ................................................................................
tolog("Setting stage-in state until all input files have been copied")
job.setState(["stagein", 0, 0])
# send the special setup string back to the pilot (needed for the log transfer on xrdcp systems)
rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort())
# stage-in .........................................................................................
# update the job state file
job.jobState = "stagein"
#_retjs = JR.updateJobStateTest(job, jobSite, node, mode="test")
# update copysetup[in] for production jobs if brokerage has decided that remote I/O should be used
if job.transferType == 'direct':
tolog('Brokerage has set transfer type to \"%s\" (remote I/O will be attempted for input files, any special access mode will be ignored)' %\
(job.transferType))
RunJobUtilities.updateCopysetups('', transferType=job.transferType)
# stage-in all input files (if necessary)
job, ins, statusPFCTurl, usedFAXandDirectIO = runJob.stageIn(job, jobSite, analysisJob)
if job.result[2] != 0:
tolog("Failing job with ec: %d" % (ec))
runJob.failJob(0, job.result[2], job, ins=ins, pilotErrorDiag=job.pilotErrorDiag)
# after stageIn, all file transfer modes are known (copy_to_scratch, file_stager, remote_io)
# consult the FileState file dictionary if cmd3 should be updated (--directIn should not be set if all
# remote_io modes have been changed to copy_to_scratch as can happen with ByteStream files)
# and update the run command list if necessary.
# in addition to the above, if FAX is used as a primary site mover and direct access is enabled, then
# the run command should not contain the --oldPrefix, --newPrefix options but use --usePFCTurl
#if job.inFiles != ['']:
# runCommandList = RunJobUtilities.updateRunCommandList(runCommandList, runJob.getParentWorkDir(), job.jobId, statusPFCTurl, analysisJob, usedFAXandDirectIO)
# (stage-in ends here) .............................................................................
# change to running state since all input files have been staged
tolog("Changing to running state since all input files have been staged")
job.setState(["running", 0, 0])
rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort())
# update the job state file
job.jobState = "running"
#_retjs = JR.updateJobStateTest(job, jobSite, node, mode="test")
# run the job(s) ...................................................................................
# Set ATLAS_CONDDB if necessary, and other env vars
RunJobUtilities.setEnvVars(jobSite.sitename)
# execute the payload
res, job, getstatusoutput_was_interrupted = runJob.executePayload(thisExperiment, job)
tolog("Check ARGO output: %s" % runJob.job_path)
# if payload leaves the input files, delete them explicitly
if ins:
ec = pUtil.removeFiles(job.workdir, ins)
# payload error handling
ed = ErrorDiagnosis()
if res[0] == None:
job.jobState = "cancelled"
job.setState(["cancelled", 0, 0])
rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort())
#else:
# job = ed.interpretPayload(job, res, getstatusoutput_was_interrupted, current_job_number, runCommandList, runJob.getFailureCode())
if job.result[1] != 0 or job.result[2] != 0:
runJob.failJob(job.result[1], job.result[2], job, pilotErrorDiag=job.pilotErrorDiag)
# stage-out ........................................................................................
# update the job state file
tolog(runJob.getOutputDir())
job.jobState = "stageout"
#_retjs = JR.updateJobStateTest(job, jobSite, node, mode="test")
# verify and prepare and the output files for transfer
ec, pilotErrorDiag, outs, outsDict = RunJobUtilities.prepareOutFiles(job.outFiles, job.logFile, runJob.job_path)
if ec:
# missing output file (only error code from prepareOutFiles)
runJob.failJob(job.result[1], ec, job, pilotErrorDiag=pilotErrorDiag)
tolog("outsDict: %s" % str(outsDict))
# update the current file states
updateFileStates(outs, runJob.getParentWorkDir(), job.jobId, mode="file_state", state="created")
dumpFileStates(runJob.getParentWorkDir(), job.jobId)
# create xml string to pass to dispatcher for atlas jobs
outputFileInfo = {}
if outs or (job.logFile and job.logFile != ''):
# get the datasets for the output files
dsname, datasetDict = runJob.getDatasets(job)
# re-create the metadata.xml file, putting guids of ALL output files into it.
# output files that miss guids from the job itself will get guids in PFCxml function
# first rename and copy the trf metadata file for non-build jobs
if not pUtil.isBuildJob(outs):
runJob.moveTrfMetadata(job.workdir, job.jobId)
# create the metadata for the output + log files
ec, job, outputFileInfo = runJob.createFileMetadata(list(outs), job, outsDict, dsname, datasetDict, jobSite.sitename, analysisJob=analysisJob)
if ec:
runJob.failJob(0, ec, job, pilotErrorDiag=job.pilotErrorDiag)
# move output files from workdir to local DDM area
finalUpdateDone = False
if outs:
tolog("Setting stage-out state until all output files have been copied")
job.setState(["stageout", 0, 0])
rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort())
# stage-out output files
ec, job, rf, latereg = runJob.stageOut(job, jobSite, outs, analysisJob, dsname, datasetDict, outputFileInfo)
# error handling
if job.result[0] == "finished" or ec == error.ERR_PUTFUNCNOCALL:
rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort(), final=True)
else:
rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort(), final=True, latereg=latereg)
if ec == error.ERR_NOSTORAGE:
# update the current file states for all files since nothing could be transferred
updateFileStates(outs, runJob.getParentWorkDir(), job.jobId, mode="file_state", state="not_transferred")
dumpFileStates(runJob.getParentWorkDir(), job.jobId)
finalUpdateDone = True
if ec != 0:
runJob.sysExit(job, rf)
# (stage-out ends here) .......................................................................
job.setState(["finished", 0, 0])
if not finalUpdateDone:
rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort(), final=True)
runJob.sysExit(job)
except Exception, errorMsg:
error = PilotErrors()
if runJob.getGlobalPilotErrorDiag() != "":
pilotErrorDiag = "Exception caught in runJobArgo: %s" % (runJob.getGlobalPilotErrorDiag())
else:
pilotErrorDiag = "Exception caught in runJobArgo: %s" % str(errorMsg)
if 'format_exc' in traceback.__all__:
pilotErrorDiag += ", " + traceback.format_exc()
try:
tolog("!!FAILED!!3001!! %s" % (pilotErrorDiag))
except Exception, e:
if len(pilotErrorDiag) > 10000:
pilotErrorDiag = pilotErrorDiag[:10000]
tolog("!!FAILED!!3001!! Truncated (%s): %s" % (e, pilotErrorDiag))
else:
pilotErrorDiag = "Exception caught in runJob: %s" % (e)
tolog("!!FAILED!!3001!! %s" % (pilotErrorDiag))
# # restore the proxy if necessary
# if hP_ret:
# rP_ret = proxyguard.restoreProxy()
# if not rP_ret:
# tolog("Warning: Problems with storage can occur since proxy could not be restored")
# else:
# hP_ret = False
# tolog("ProxyGuard has finished successfully")
tolog("sys.path=%s" % str(sys.path))
cmd = "pwd;ls -lF %s;ls -lF;ls -lF .." % (runJob.getPilotInitDir())
tolog("Executing command: %s" % (cmd))
out = commands.getoutput(cmd)
tolog("%s" % (out))
job = Job.Job()
job.setJobDef(newJobDef.job)
job.pilotErrorDiag = pilotErrorDiag
job.result[0] = "failed"
if runJob.getGlobalErrorCode() != 0:
job.result[2] = runJob.getGlobalErrorCode()
else:
job.result[2] = error.ERR_RUNJOBEXC
tolog("Failing job with error code: %d" % (job.result[2]))
# fail the job without calling sysExit/cleanup (will be called anyway)
runJob.failJob(0, job.result[2], job, pilotErrorDiag=pilotErrorDiag, docleanup=False)
| 2.03125 | 2 |
LoadHandler.py | Oriolus/GithubLoadScheduler | 0 | 12772958 | <reponame>Oriolus/GithubLoadScheduler
from EntityLoader import EntityLoader, LoadResult
from SimplePageableBehaviour import SimplePageableBehaviour
from ObjectQueue import QueueRepository, ObjectHistoryRepository, ObjectQueue, QueueEntry, QueueState, MAX_RETRY_COUNT
from uuid import uuid4
from json import dumps
from copy import deepcopy
from threading import local
from datetime import datetime
from tzlocal import get_localzone
from config import Config
class LoadHandler(object):
    """Pulls one entry out of the load queue, performs the GitHub page load
    for it and routes the outcome back into the queue.

    Success closes the entry and, when the loader reports a follow-up page,
    enqueues a copy of the entry pointing at that next page. Failure
    increments the retry counter and re-queues the entry (or closes it after
    MAX_RETRY_COUNT attempts); on HTTP 403/429 every entry for the same
    token is shifted to back off from the rate limit.
    """

    def __init__(self, logger, config: Config = None):
        """logger: a logging.Logger; config: optional application Config."""
        self.__object_queue = ObjectQueue(config)
        self.__queue_repository = QueueRepository()  # type: QueueRepository
        self.__obj_history_repository = ObjectHistoryRepository()  # type: ObjectHistoryRepository
        self.__logger = logger
        self.__config = config  # type: Config
        # Per-thread storage for the correlation uuid of the job in flight.
        self.__thread_local_store = local()

    def _handle_ok(self, queue_object: QueueEntry, load_result: LoadResult):
        """Close the entry as processed and enqueue the next page, if any."""
        cur_uuid = self.__thread_local_store.cur_uuid
        self.__logger.debug('LoadHandler._handle_ok: start. uuid: {}'.format(cur_uuid))
        queue_object.updated_at = datetime.now(get_localzone())
        queue_object.closed_at = datetime.now(get_localzone())
        queue_object.state = QueueState.PROCESSED.value
        self.__object_queue.enqueue_ok(queue_object)
        if load_result.next_load_context:
            # Clone the finished entry so the next page inherits its settings.
            _new_entry = deepcopy(queue_object)
            _headers = deepcopy(load_result.next_load_context.headers)
            # Strip credentials before persisting. pop() (rather than del)
            # tolerates contexts that carry no Authorization header.
            _headers.pop('Authorization', None)
            _new_entry.headers = dumps(_headers)
            _new_entry.params = dumps(load_result.next_load_context.params)
            _new_entry.url = load_result.next_load_context.url
            self.__queue_repository.add_entry(_new_entry)
            self.__logger.debug('LoadHandler._handle_ok: added next page. uuid: {}'.format(cur_uuid))
        self.__logger.debug('LoadHandler._handle_ok: enqueue done. uuid: {}'.format(cur_uuid))

    def _handle_error(self, queue_object: QueueEntry, load_result: LoadResult, error_text: str):
        """Record a failed attempt; retry, give up, or back off on rate limits."""
        cur_uuid = self.__thread_local_store.cur_uuid
        # TODO: use transaction
        self.__logger.debug('LoadHandler._handle_error: start. uuid: {}'.format(cur_uuid))
        queue_object.state = QueueState.UNPROCESSED.value
        queue_object.updated_at = datetime.now(get_localzone())
        queue_object.error = error_text
        queue_object.retry_count += 1
        if queue_object.retry_count >= MAX_RETRY_COUNT:
            # Give up: close the entry with its last error recorded.
            queue_object.closed_at = datetime.now(get_localzone())
            self.__object_queue.enqueue_with_error(queue_object)
            self.__logger.debug('LoadHandler._handle_error: enqueued with error. uuid: {}'.format(cur_uuid))
        else:
            self.__object_queue.move_to_end_with_error(queue_object)
            self.__logger.debug('LoadHandler._handle_error: moved to end with error. uuid: {}'.format(cur_uuid))
        if load_result and load_result.resp_status in (403, 429):
            # Rate-limited / forbidden: postpone all pending work for this token.
            self.__queue_repository.shift_by_token(queue_object.token_id)
            self.__logger.debug('LoadHandler._handle_error: token_id: {}, object shifted. uuid: {}'.format(
                queue_object.token_id, cur_uuid
            ))

    def handle(self, object_queue_id: int):
        """Load the queue entry with the given id and dispatch the outcome."""
        self.__thread_local_store.cur_uuid = uuid4()
        _cur_uuid = self.__thread_local_store.cur_uuid
        self.__logger.debug('LoadHandler.handle: start. uuid: {}'.format(_cur_uuid))
        current_obj = self.__queue_repository.by_id(object_queue_id)
        if current_obj:
            try:
                self.__logger.info('type: {}, token_id: {}, url: {}. uuid: {}'.format(
                    current_obj.entry_type,
                    current_obj.token_id,
                    current_obj.url,
                    _cur_uuid)
                )
                load_result = EntityLoader(SimplePageableBehaviour(
                    current_obj.token,
                    self.__config.gh_per_page if self.__config else 100,
                    self.__logger,
                    current_obj.entry_type,
                    current_obj.url,
                    current_obj.headers,
                    current_obj.params,
                    current_obj.token_id,
                    str(_cur_uuid)
                )).load()
                self.__logger.debug('LoadHandler.handle: loaded. uuid: {}'.format(_cur_uuid))
                if load_result:
                    if load_result.resp_status < 400:
                        self._handle_ok(current_obj, load_result)
                    else:
                        self._handle_error(current_obj, load_result, load_result.resp_text_data)
            except Exception as ex:
                self._handle_error(current_obj, None, str(ex))
                self.__logger.error('type: {}, url: {}, error: {}. uuid: {}'
                                    .format(current_obj.entry_type, current_obj.url, str(ex), _cur_uuid))
        else:
            # Logger.warn() is deprecated; warning() is the supported name.
            self.__logger.warning('there is no object in object_queue with object_id: {}'.format(object_queue_id))
        self.__logger.debug('LoadHandler.handle: end. uuid: {}'.format(_cur_uuid))
| 2.171875 | 2 |
.archive/rinex_reader.py | jordan-mazur/georinex | 66 | 12772959 | <gh_stars>10-100
"""
Read navigation and observation rinex files
"""
from . import Path
import logging
import numpy as np
from datetime import datetime
import xarray
from io import BytesIO
from time import time
import re
# %% Navigation file
def rinexnav(fn, ofn=None):
    """
    Reads RINEX 2.11 NAV files.

    Parses one ephemeris record (an epoch line plus 7 continuation lines,
    29 floats total) per satellite/epoch and returns them as a 2-D
    xarray.DataArray indexed by epoch. If *ofn* is given the result is also
    written to that NetCDF file under group ``NAV``.

    http://gage14.upc.es/gLAB/HTML/GPS_Navigation_Rinex_v2.11.html
    """
    fn = Path(fn).expanduser()

    startcol = 3  # column where numerical data starts on continuation lines
    N = 7  # number of continuation lines per record
    sv = []
    epoch = []
    raws = ""

    with fn.open("r") as f:
        # Skip the header, which has a non-constant number of rows.
        while True:
            if "END OF HEADER" in f.readline():
                break
        # Now read the data records.
        for l in f:
            # PRN, format I2
            # http://gage.upc.edu/sites/default/files/gLAB/HTML/GPS_Navigation_Rinex_v2.11.html
            sv.append(int(l[:2]))
            # Two-digit year, format I2; pivot at 80 (good till year 2180).
            year = int(l[3:5])
            if 80 <= year <= 99:
                year += 1900
            elif year < 80:
                year += 2000
            epoch.append(
                datetime(
                    year=year,
                    month=int(l[6:8]),
                    day=int(l[9:11]),
                    hour=int(l[12:14]),
                    minute=int(l[15:17]),
                    second=int(l[17:20]),  # seconds and fraction read in parts
                    microsecond=int(l[21]) * 100000,  # single tenths digit
                )
            )
            # Concatenate this record's numeric payload into one long string.
            raw = l[22:80]
            for _ in range(N):
                raw += f.readline()[startcol:80]
            # One record per SV, space separated.
            raws += raw + " "

    # Fortran 'D' exponents -> 'E'; re-insert spaces swallowed by minus signs.
    raws = raws.replace("D", "E")
    raws = re.sub(r"(\d-)", r" -", raws)
    raws = re.sub(r"\n", r" ", raws)

    lista = [float(i) for i in raws.split(" ") if len(i) != 0]
    # BUGFIX: the original used len(lista) / 29, which is float division on
    # Python 3 and makes reshape() raise TypeError. -1 lets numpy infer the
    # record count (29 ephemeris values per record).
    sat_info = np.array(lista).reshape((-1, 29))

    nav = xarray.DataArray(
        data=np.concatenate((np.atleast_2d(sv).T, sat_info), axis=1),
        coords={
            "t": epoch,
            "data": [
                "sv",
                "SVclockBias",
                "SVclockDrift",
                "SVclockDriftRate",
                "IODE",
                "Crs",
                "DeltaN",
                "M0",
                "Cuc",
                "Eccentricity",
                "Cus",
                "sqrtA",
                "TimeEph",
                "Cic",
                "OMEGA",
                "CIS",
                "Io",
                "Crc",
                "omega",
                "OMEGA DOT",
                "IDOT",
                "CodesL2",
                "GPSWeek",
                "L2Pflag",
                "SVacc",
                "SVhealth",
                "TGD",
                "IODC",
                "TransTime",
                "FitIntvl",
            ],
        },
        dims=["t", "data"],
    )

    if ofn:
        ofn = Path(ofn).expanduser()
        print("saving NAV data to", ofn)
        # Append when the file already exists so OBS and NAV can share it.
        wmode = "a" if ofn.is_file() else "w"
        nav.to_netcdf(ofn, group="NAV", mode=wmode)

    return nav
# %% Observation File
def rinexobs(fn, ofn=None):
    """
    Program overview:
    1) scan the whole file for the header and other information using scan(lines)
    2) each epoch is read and the information is put in a 4-D xarray.DataArray
    3) rinexobs can also be sped up if an h5 file is provided,
       also rinexobs can save the rinex file as an h5. The header will
       be returned only if specified.

    rinexobs() returns the data in a 4-D xarray.DataArray,
    [Parameter, Sat #, time, data/loss of lock/signal strength]
    """
    # open file, get header info, possibly speed up reading data with a premade h5 file
    fn = Path(fn).expanduser()
    with fn.open("r") as f:
        tic = time()
        lines = f.read().splitlines(True)
        header, version, headlines, headlength, obstimes, sats, svset = scan(lines)
    print(fn, "is a RINEX", version, "file.", fn.stat().st_size // 1000, "kB.")
    # Pick the data source by extension: pre-converted NetCDF/HDF5 caches are
    # loaded directly; otherwise the text blocks are parsed.
    # NOTE(review): even for .nc/.h5 caches the file is first opened as text
    # and scanned above -- presumably callers only pass RINEX text here;
    # confirm.
    if fn.suffix == ".nc":
        data = xarray.open_dataarray(str(fn), group="OBS")
    elif fn.suffix == ".h5":
        logging.warning("HDF5 is deprecated in this program, please use NetCDF format")
        import pandas

        data = pandas.read_hdf(fn, key="OBS")
    else:
        data = processBlocks(lines, header, obstimes, svset, headlines, headlength, sats)
    print("finished in {:.2f} seconds".format(time() - tic))

    # write an h5 file if specified
    if ofn:
        ofn = Path(ofn).expanduser()
        print("saving OBS data to", ofn)
        # Append when the file already exists so OBS and NAV can share it.
        if ofn.is_file():
            wmode = "a"
        else:
            wmode = "w"
        data.to_netcdf(ofn, group="OBS", mode=wmode)

    return data, header
# this will scan the document for the header info and for the line on
# which each block starts
def scan(L):
    """Scan all lines of a RINEX 2 OBS file.

    Returns (header, version, headlines, headlength, obstimes, sats, svset):
    the parsed header dict, the RINEX version, the line index and line count
    of each epoch header, the epoch datetimes, the per-epoch GPS PRN lists,
    and the set of every PRN seen anywhere in the file.
    """
    header = {}
    # Capture header info: label lives in columns 60-80, content in 0-60.
    for i, l in enumerate(L):
        if "END OF HEADER" in l:
            i += 1  # skip to data
            break
        if l[60:80].strip() not in header:  # Header label
            header[l[60:80].strip()] = l[:60]  # don't strip for fixed-width parsers
            # string with info
        else:
            header[l[60:80].strip()] += " " + l[:60]
            # concatenate to the existing string

    verRinex = float(header["RINEX VERSION / TYPE"][:9])  # %9.2f
    # list with x,y,z cartesian
    header["APPROX POSITION XYZ"] = [float(i) for i in header["APPROX POSITION XYZ"].split()]
    # observation types
    header["# / TYPES OF OBSERV"] = header["# / TYPES OF OBSERV"].split()
    # turn into int number of observations
    header["# / TYPES OF OBSERV"][0] = int(header["# / TYPES OF OBSERV"][0])
    header["INTERVAL"] = float(header["INTERVAL"][:10])

    headlines = []
    headlength = []
    obstimes = []
    sats = []
    svset = set()
    # %% walk the data section epoch by epoch
    while i < len(L):
        if len(L[i].split()) > header["# / TYPES OF OBSERV"][0]:  # then its headerline
            if int(L[i][28]) in (0, 1, 5, 6):  # CHECK EPOCH FLAG STATUS
                headlines.append(i)
                # Fixed-column epoch fields: yy mm dd hh mm ss.sssssss
                year, month, day, hour = L[i][1:3], L[i][4:6], L[i][7:9], L[i][10:12]
                minute, second = L[i][13:15], L[i][16:26]
                obstimes.append(_obstime([year, month, day, hour, minute, second]))
                # ONLY GPS SATELLITES (prefix 'G') are kept
                numsvs = int(L[i][29:32])  # Number of visible satellites %i3
                # number of lines in the epoch header: 12 SV ids fit per line
                headlength.append(1 + (numsvs - 1) // 12)
                if numsvs > 12:
                    sv = []
                    for s in range(numsvs):
                        if s > 0 and s % 12 == 0:
                            i += 1  # every 12th sat will add new headline row
                        if L[i][33 + (s % 12) * 3 - 1] == "G":
                            sv.append(int(L[i][33 + (s % 12) * 3 : 35 + (s % 12) * 3]))
                    sats.append(sv)
                    # NOTE(review): i was already advanced inside the loop;
                    # this extra skip assumes one observation line per SV --
                    # confirm against files with >5 observation types.
                    i += numsvs + 1
                else:
                    sats.append(
                        [
                            int(L[i][33 + s * 3 : 35 + s * 3])
                            for s in range(numsvs)
                            if L[i][33 + s * 3 - 1] == "G"
                        ]
                    )  # list of satellites (PRN numbers)
                    i += numsvs + 1
        else:  # there was a comment or some header info
            flag = int(L[i][28])
            if flag != 4:
                print(flag)
            skip = int(L[i][30:32])
            i += skip + 1
    # %% get every SV that appears at any time in the file, for master index
    for sv in sats:
        svset = svset.union(set(sv))

    return header, verRinex, headlines, headlength, obstimes, sats, svset
def processBlocks(lines, header, obstimes, svset, ihead, headlength, sats):
    """Parse every epoch's observation block into one 4-D xarray.DataArray
    with dims [obs type, sv, time, (data, lli, ssi)].

    lines: raw file lines; ihead/headlength: epoch-header positions/sizes
    from scan(); sats: per-epoch PRN lists; svset: all PRNs in the file.
    """
    obstypes = header["# / TYPES OF OBSERV"][1:]
    # Pre-fill with NaN; sized max(svset)+1 on the SV axis so rows can be
    # indexed directly by PRN number (unused PRNs are dropped at the end).
    blocks = np.nan * np.ones((len(obstypes), max(svset) + 1, len(obstimes), 3))

    for i in range(len(ihead)):
        # nsats x observation lines; 5 observables fit per line
        linesinblock = len(sats[i]) * int(
            np.ceil(header["# / TYPES OF OBSERV"][0] / 5.0)
        )
        block = "".join(lines[ihead[i] + headlength[i] : ihead[i] + linesinblock + headlength[i]])
        bdf = _block2df(block, obstypes, sats[i], len(sats[i]))
        blocks[:, sats[i], i, :] = bdf

    blocks = xarray.DataArray(
        data=blocks,
        coords={
            "obs": obstypes,
            "sv": np.arange(max(svset) + 1),
            "t": obstimes,
            "type": ["data", "lli", "ssi"],
        },
        dims=["obs", "sv", "t", "type"],
    )
    blocks = blocks[:, list(svset), :, :]  # remove unused SV numbers

    return blocks
def _obstime(fol):
year = int(fol[0])
if 80 <= year <= 99:
year += 1900
elif year < 80: # because we might pass in four-digit year
year += 2000
return datetime(
year=year,
month=int(fol[1]),
day=int(fol[2]),
hour=int(fol[3]),
minute=int(fol[4]),
second=int(float(fol[5])),
microsecond=int(float(fol[5]) % 1 * 100000),
)
def _block2df(block, obstypes, svnames, svnum):
"""
input: block of text corresponding to one time increment INTERVAL of RINEX file
output: 2-D array of float64 data from block.
"""
assert isinstance(svnum, int)
N = len(obstypes)
S = 3 # stride
sio = BytesIO(block.encode("ascii"))
barr = np.genfromtxt(sio, delimiter=(svnum, 1, 1) * 5).reshape((svnum, -1), order="C")
# iLLI = [obstypes.index(l) for l in ('L1','L2')]
data = barr[:, 0 : N * S : S].T
lli = barr[:, 1 : N * S : S].T # [:,iLLI]
ssi = barr[:, 2 : N * S : S].T
data = np.stack((data, lli, ssi), 2) # Nobs x Nsat x 3
return data
| 2.953125 | 3 |
app1/migrations/0006_flight_squack.py | awzdevelopers/FlightStrips_django | 0 | 12772960 | # Generated by Django 2.0.3 on 2020-02-18 03:28
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.3: adds a nullable 20-char ``squack``
    # column to the ``flight`` model.
    # NOTE(review): "squack" is presumably the transponder squawk code --
    # confirm the spelling is intentional before renaming anywhere.

    dependencies = [
        ('app1', '0005_flight_printed'),
    ]

    operations = [
        migrations.AddField(
            model_name='flight',
            name='squack',
            field=models.CharField(max_length=20, null=True),
        ),
    ]
| 1.695313 | 2 |
staff_management_models/staff_group_payments/class_serializers/staff_worker_payment_group_serializer.py | reimibeta/django-staff-management-models | 0 | 12772961 | from rest_flex_fields import FlexFieldsModelSerializer
from rest_framework import serializers
# from rest_framework_utils.key_related_field import key_related_field
from staff_management_models.staff_group_payments.class_models.staff_worker_payment import StaffWorkerPayment, \
StaffWorkerPaymentGroup
from staff_management_models.staff_group_payments.class_serializers.staff_worker_payment_serializer import \
StaffWorkerPaymentSerializer
class StaffWorkerPaymentGroupSerializer(FlexFieldsModelSerializer):
    """Serializer for StaffWorkerPaymentGroup.

    By default the related payments are rendered as primary keys; clients
    may request ``?expand=staff_worker_payment`` to inline the full
    StaffWorkerPaymentSerializer representation (drf-flex-fields).
    """

    # staff_phone = serializers.PrimaryKeyRelatedField(read_only=True, many=True)
    # Related payments as a read-only list of primary keys.
    staff_worker_payment = serializers.PrimaryKeyRelatedField(read_only=True, many=True)

    class Meta:
        model = StaffWorkerPaymentGroup
        fields = [
            'id',
            'url',
            'pay_date',
            'staff_worker_payment'
        ]
        # Fields that can be expanded to nested serializers on request.
        expandable_fields = {
            # 'staff_phone': (StaffPhoneSerializer, {'many': True}),
            'staff_worker_payment': (StaffWorkerPaymentSerializer, {'many': True})
        }
| 1.8125 | 2 |
python-algorithm/leetcode/problem_1337.py | isudox/nerd-algorithm | 5 | 12772962 | """1337. The K Weakest Rows in a Matrix
https://leetcode.com/problems/the-k-weakest-rows-in-a-matrix/
"""
import collections
from typing import List
class Solution:
    def kWeakestRows(self, mat: List[List[int]], k: int) -> List[int]:
        """Return the indices of the k weakest rows, weakest first.

        A row is weaker when it contains fewer soldiers (leading 1s);
        ties are broken by the smaller row index. If k exceeds the row
        count, every row index is returned.
        """

        def soldiers(row: List[int]) -> int:
            # Soldiers always precede civilians, so count until the first 0.
            strength = 0
            for cell in row:
                if cell == 0:
                    break
                strength += 1
            return strength

        ranked = sorted(range(len(mat)), key=lambda idx: (soldiers(mat[idx]), idx))
        return ranked[:k]
| 3.4375 | 3 |
proofreader/utils/print_table.py | elifesciences/proofreader-python | 1 | 12772963 | <gh_stars>1-10
from collections import defaultdict
class PrintTable(object):
    """Render rows of strings as an ASCII table.

    Usage:
        table = PrintTable(['name', 'count'])
        table.add_row(['foo', '3'])
        print(table)
    """

    COLUMN_MARK = '+'
    COLUMN_SEP = '|'
    DASH = '-'
    PADDING = 1

    def __init__(self, headers):
        # type: (List[str]) -> None
        """headers: list of column title strings."""
        self.headers = headers
        self._rows = []
        self._col_widths = []

    def add_row(self, row):
        # type: (List[str]) -> None
        """Append one row (a list of strings, one cell per column)."""
        self._rows.append(row)

    @property
    def col_widths(self):
        # type: () -> defaultdict
        """Widest cell length per column index, over headers and all rows.

        :return: defaultdict
        """
        widths = defaultdict(int)
        for current in [self.headers] + self._rows:
            for idx, cell in enumerate(current):
                widths[idx] = max(widths[idx], len(cell))
        return widths

    def _marker_line(self):
        # type: () -> str
        """Generate a correctly sized horizontal separator line.

        e.g.
            '+------------------+---------+----------+---------+'

        :return: str
        """
        segments = [self.DASH * (width + 2 * self.PADDING)
                    for _, width in sorted(self.col_widths.items())]
        return self.COLUMN_MARK + self.COLUMN_MARK.join(segments) + self.COLUMN_MARK + '\n'

    @property
    def pad(self):
        # type: () -> str
        return self.PADDING * ' '

    def _row_to_str(self, row):
        # type: (List[str]) -> str
        """Format one row as a spaced, separator-delimited line.

        e.g.
            ['some', 'foo', 'bar'] --> '| some | foo | bar |'

        :param row: list
        :return: str
        """
        cells = []
        for col, width in self.col_widths.items():
            left, right = self._split_int(width - len(row[col]))
            cells.append(' ' * (left + self.PADDING) + row[col] + ' ' * (right + self.PADDING))
        return self.COLUMN_SEP + self.COLUMN_SEP.join(cells) + self.COLUMN_SEP + '\n'

    @staticmethod
    def _split_int(num):
        # type: (int) -> tuple
        """Split num into two halves, the first taking any remainder."""
        half, remainder = divmod(num, 2)
        return half + remainder, half

    def _table_to_str(self):
        # type: () -> str
        """Assemble the complete formatted table string.

        :return: str
        """
        border = self._marker_line()
        pieces = [border, self._row_to_str(self.headers), border]
        pieces.extend(self._row_to_str(row) for row in self._rows)
        pieces.append(border)
        return ''.join(pieces)

    def __str__(self):
        return self._table_to_str()
| 3.359375 | 3 |
align_to_wig_20180130.py | ethanagbaker/Rend_seq_core_scripts | 1 | 12772964 | <filename>align_to_wig_20180130.py
'''
Generates pile-up files (read counts that map along the genome, with position in nucleotides) in wig format. Deals with mismatches (from non-templated addition during reverse transcription) and possible multiple alignments, with the explicit purpose of correctly taking into account close paralogs (e.g., EF-Tu).
Input:
1. An align file (standard bowtie output). Assumes -k at most 2 as a bowtie option.
2. Name of resulting wig files
3. Chromosome name (for wig header)
Output: wig files
The structure of the source file (bowtie default) contains 11 columns of information.
0. The read name, which is completely unique.
1. The RefSeq strand of alignment, which is either '+' for forward or '-' for reverse.
2. The RefSeq chromosome name.
3. The RefSeq position of the 5' end.
4. The sequencing read in nucleotides.
5. The sequencing quality score.
6. number of other instances the same sequence aligned against the same reference characters (generally not the number of other alignments).
7. Mismatches in the alignment.
'''
# import required packages
import sys, random, copy
def write_wig(inputFile, output_header, chrom_name):
    """Convert a bowtie .align file into four wig pile-up files.

    inputFile: bowtie default output (at most 2 alignments per read, -k 2).
    output_header: prefix for the four wig files (_5_f, _3_f, _5_r, _3_r).
    chrom_name: chromosome name written into each wig header.
    """
    ### Set initial variables ###
    # position -> count of mapped read ends, per strand and per read end
    forward_5, forward_3 = {}, {}
    reverse_5, reverse_3 = {}, {}

    ### Run file ###
    # NOTE(review): the handle is never closed; a `with` block would be
    # cleaner, though harmless for a one-shot script.
    inFile = open(inputFile, 'r')
    n_line = 0
    for line in inFile:
        n_line = n_line + 1
        # storing previous lines to assess number of alignments: bowtie -k 2
        # can emit two consecutive lines per read, so a two-line look-back
        # window is kept (previous and pprevious).
        if n_line > 2:
            pprevious_length = previous_length
            pprevious_fiveprime = previous_fiveprime
            pprevious_strand = previous_strand
            pprevious_n_other_alignments = previous_n_other_alignments
            pprevious_read_name = previous_read_name
            pprevious_mismatch_info = previous_mismatch_info
        if n_line > 1:
            previous_length = length
            previous_fiveprime = fiveprime
            previous_strand = strand
            previous_n_other_alignments = n_other_alignments
            previous_read_name = read_name
            previous_mismatch_info = mismatch_info

        # parsing each line of the bowtie output (tab-separated columns,
        # see the module docstring for the column layout).
        fields = line.split('\t')
        read_name = fields[0]
        length = len(fields[4])
        fiveprime = int(fields[3])
        strand = str(fields[1])
        n_other_alignments = fields[6]
        mismatch_info = fields[7]

        if n_line > 2:
            # if a single alignment, do as usual:
            if (previous_read_name != pprevious_read_name) and (previous_read_name != read_name):
                (forward_5, forward_3, reverse_5, reverse_3) = count_read(forward_5, forward_3, reverse_5, reverse_3, previous_mismatch_info, previous_fiveprime, previous_length, previous_strand)
            # if two alignments, keep only if a single one of the reads has a non-template mismatch
            elif previous_read_name == pprevious_read_name:
                # both alignments have no mismatches: ambiguous, drop the read
                if (previous_mismatch_info == '\n') and (pprevious_mismatch_info == '\n'):
                    pass
                # one alignment has a mismatch, but not the other: map the one without mismatch
                elif previous_mismatch_info == '\n':
                    (forward_5, forward_3, reverse_5, reverse_3) = count_read(forward_5, forward_3, reverse_5, reverse_3, previous_mismatch_info, previous_fiveprime, previous_length, previous_strand)
                elif pprevious_mismatch_info == '\n':
                    (forward_5, forward_3, reverse_5, reverse_3) = count_read(forward_5, forward_3, reverse_5, reverse_3, pprevious_mismatch_info, pprevious_fiveprime, pprevious_length, pprevious_strand)
                # exactly one has a 5'-end (non-template addition) mismatch: keep that one
                elif (previous_mismatch_info[0] == '0') and not (pprevious_mismatch_info[0] == '0'):
                    (forward_5, forward_3, reverse_5, reverse_3) = count_read(forward_5, forward_3, reverse_5, reverse_3, previous_mismatch_info, previous_fiveprime, previous_length, previous_strand)
                elif not (previous_mismatch_info[0] == '0') and (pprevious_mismatch_info[0] == '0'):
                    (forward_5, forward_3, reverse_5, reverse_3) = count_read(forward_5, forward_3, reverse_5, reverse_3, pprevious_mismatch_info, pprevious_fiveprime, pprevious_length, pprevious_strand)

    ### Output ###
    # NOTE(review): the last one or two alignments are never evaluated,
    # because a line is only processed once two newer lines have been read
    # -- confirm whether trailing reads should be flushed after the loop.
    writeOutput_wig(forward_5, output_header + '_5_f.wig', chrom_name)
    writeOutput_wig(forward_3, output_header + '_3_f.wig', chrom_name)
    writeOutput_wig(reverse_5, output_header + '_5_r.wig', chrom_name)
    writeOutput_wig(reverse_3, output_header + '_3_r.wig', chrom_name)
def count_read(forward_5, forward_3, reverse_5, reverse_3, mismatch_info, fiveprime, length, strand):
    """Tally the 5' and 3' ends of one aligned read into the pile-up dicts.

    Only reads 15-44 nt long are counted, and only when the alignment is
    perfect, mismatches solely an N in the reference, or carries a single
    5'-end mismatch attributable to non-templated addition by reverse
    transcription. Returns the four (mutated) dictionaries.
    """
    # Classify the bowtie mismatch descriptor.
    if mismatch_info == '\n':
        mismatch = 0                     # perfect alignment
    elif mismatch_info.startswith('0'):
        mismatch = 1                     # mismatch at read position 0: non-templated addition
    elif '>N' in mismatch_info:
        mismatch = 0                     # reference N -- not a real mismatch
    else:
        mismatch = 2                     # genuine internal mismatch -> reject

    # Discard reads outside the accepted length range or with real mismatches.
    if not (14 < length < 45) or mismatch == 2:
        return (forward_5, forward_3, reverse_5, reverse_3)

    if strand == '+':
        if mismatch == 1:
            # Trim the non-templated base: shift the 5' end inward.
            fiveprime += 1
            length -= 1
        forward_5[fiveprime + 1] = forward_5.get(fiveprime + 1, 0) + 1
        forward_3[fiveprime + length] = forward_3.get(fiveprime + length, 0) + 1
    elif strand == '-':
        if mismatch == 1:
            # On the reverse strand only the read length shrinks.
            length -= 1
        reverse_5[fiveprime + length] = reverse_5.get(fiveprime + length, 0) + 1
        reverse_3[fiveprime + 1] = reverse_3.get(fiveprime + 1, 0) + 1

    return (forward_5, forward_3, reverse_5, reverse_3)
def writeOutput_wig(dictionary, File_name, chrom_name):
    """Write a {position: read_count} dictionary as a wig file.

    Emits the standard wig header (track line plus variableStep with the
    given chromosome name), then one 'position<TAB>count' line per entry
    in ascending position order.
    """
    # BUGFIX: the original did dictionary.items() followed by .sort(), which
    # only works on Python 2 (items() is a view, not a list, on Python 3).
    # sorted() behaves identically on both.
    entries = sorted(dictionary.items())
    # 'with' guarantees the file is closed even if a write fails.
    with open(File_name, 'w') as outFile:
        outFile.write('track type=wiggle_0\n')
        outFile.write('variableStep chrom=' + chrom_name + '\n')
        for position, read_counts in entries:
            outFile.write(str(position) + '\t' + str(read_counts) + '\n')
if __name__ == '__main__':
    ### input from command call ###
    # Usage: python align_to_wig_20180130.py <align_file> <output_prefix> <chrom_name>
    # name of .align file (bowtie default output)
    alignFile = sys.argv[1]
    # name (prefix) of resulting .wig files
    output_header = sys.argv[2]
    # chromosome name for wig file header
    chrom_name = sys.argv[3]

    ### generating the .wig files ###
    write_wig(alignFile, output_header, chrom_name)
| 3.0625 | 3 |
Code/Web_app/mail_test.py | gokul-koganti/SRIJAS | 1 | 12772965 | <filename>Code/Web_app/mail_test.py
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from socket import gaierror
import sys
import smtplib
import json
# Load SMTP credentials from the local configuration file. A context manager
# guarantees the handle is closed even if parsing fails (the original leaked
# the open file object).
with open('parameters.json', "r") as f:
    mail_params = json.loads(f.read())
password = mail_params['email_password']

# Connection settings: Gmail SMTP with STARTTLS on port 587.
smtp_server = "smtp.gmail.com"
login = "<EMAIL>"
sender = "<EMAIL>"
receiver = "<EMAIL>"
# The message body is taken verbatim from the first CLI argument.
message = sys.argv[1]
port = 587

# Build a plain-text MIME message.
msg = MIMEMultipart()
msg['From'] = sender
msg['To'] = receiver
msg['Subject'] = 'Post deployment test Results'

body = message
msg.attach(MIMEText(body, 'plain'))
text = msg.as_string()

try:
    server = smtplib.SMTP(smtp_server, port)
    # NOTE(review): SMTP(host, port) above already connects; this second
    # connect() opens a fresh socket and is likely redundant -- kept as-is
    # to avoid changing runtime behaviour.
    server.connect(smtp_server, port)
    server.ehlo()
    server.starttls()  # upgrade the connection to TLS before authenticating
    server.ehlo()
    server.login(login, password)
    server.sendmail(sender, receiver, text)
    server.quit()
    # tell the script to report if your message was sent or which errors need to be fixed
    print('Sent')
except (gaierror, ConnectionRefusedError):
    print('Failed to connect to the server. Bad connection settings?')
except smtplib.SMTPServerDisconnected as e:
    print('Failed to connect to the server. Wrong user/password?')
    print(str(e))
except smtplib.SMTPException as e:
    print('SMTP error occurred: ' + str(e))
| 2.890625 | 3 |
blog/migrations/0001_initial.py | marcanuy/django-categories-example-app | 1 | 12772966 | # Generated by Django 2.2.13 on 2020-08-30 23:18
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.13: initial migration for the blog app.
    # Creates the MPTT-backed Category tree (lft/rght/tree_id/level are
    # django-mptt bookkeeping columns) and the Item model, which may
    # optionally point into a Category.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, unique=True)),
                ('slug', models.SlugField(blank=True, max_length=200, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('lft', models.PositiveIntegerField(editable=False)),
                ('rght', models.PositiveIntegerField(editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('level', models.PositiveIntegerField(editable=False)),
                ('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='blog.Category')),
            ],
            options={
                'verbose_name_plural': 'Categories',
            },
        ),
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('slug', models.SlugField(max_length=200)),
                ('category', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.Category')),
            ],
        ),
    ]
| 1.742188 | 2 |
woke/woke/e_ast_parsing/__init__.py | kevinkanak/woke-readme | 7 | 12772967 | <gh_stars>1-10
# TODO: Implement woke/ast_parsing
# assignees: michprev, manfm
| 1.171875 | 1 |
FLaREON/__init__.py | sidgurun/FLaREON | 0 | 12772968 | import os
import os.path
#import imp
import sys
import shutil
import urllib
import numpy as np
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.neighbors import KNeighborsRegressor
import pickle
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Check_if_DATA_files_are_found():
    """Return truthy only when every file named in DATA/List_of_DATA_files
    exists inside the package's DATA directory."""
    package_dir , _ = os.path.split( __file__ )

    list_path = package_dir + '/DATA/List_of_DATA_files'

    everything_present = True
    with open( list_path ) as listing:
        for raw_line in listing.readlines():
            data_file = raw_line.strip('\n')
            # Multiplicative AND: a single missing file flips the result to 0.
            everything_present = everything_present * os.path.isfile( package_dir + '/DATA/' + data_file )

    return everything_present
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Download_data():
    """Download every data file listed in DATA/List_of_DATA_files from the
    FLaReON web server and move it into the package's DATA directory.

    Exits the interpreter (sys.exit) if, after downloading, the files are
    still not found on disk.
    """
    this_dir, this_filename = os.path.split(__file__)

    arxiv_with_file_names = this_dir + '/DATA/List_of_DATA_files'
    file_where_to_store_data = this_dir + '/DATA/'

    print('This package is stored in ', this_dir, '(Please, note that we are not spying you.)')

    http_url = 'http://www.cefca.es/people/~sidgurung/ShouT/ShouT/DATA/'

    with open(arxiv_with_file_names) as fd:
        for line in fd.readlines():
            arxiv_name = line.strip('\n')

            print('Downloaing...', http_url + arxiv_name)
            # BUGFIX: urllib.URLopener() is the Python 2 API and raises
            # AttributeError on Python 3; urlretrieve is the supported path.
            urllib.request.urlretrieve(http_url + arxiv_name, arxiv_name)
            print('--> Done!')

            print('Moving Downloaded file to', file_where_to_store_data)
            shutil.move(arxiv_name, file_where_to_store_data + arxiv_name)
            print('--> Done')

    if Check_if_DATA_files_are_found():
        print('\nHey man, looks like everything is done! That is brilliant!')
    else:
        print('This is weird... We just downloaded everthing but the files are not found...Exiting...')
        print('Error. Human is dead. Mismatch.')
        sys.exit()
    return
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def load_machine_fesc( Machine , property_name , Geometry , INSIDE_BICONE=True ):
    '''
    Load a pre-trained (pickled) machine-learning estimator.

    Parameters
    ----------
    Machine : string
        Algorithm code: 'KN', 'Grad', 'Tree' or 'Forest'.
    property_name : string
        Target the model predicts: 'KKK', 'CCC', 'LLL' or 'f_esc'.
    Geometry : string
        'Thin_Shell', 'Galactic_Wind' or 'Bicone_X_Slab'.
    INSIDE_BICONE : optional bool
        Only used for 'Bicone_X_Slab': selects the face-on (True) or
        edge-on (False) model.

    Returns
    -------
    The unpickled estimator object.
    '''
    Geometry_Set = [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab' ]
    geo_code = [ 'thin' , 'wind' , 'Bicone_X_Slab' ]

    Property_Set = [ 'KKK' , 'CCC' , 'LLL' , 'f_esc' ]
    assert property_name in Property_Set , "Houston we've got a problem, Error Code = 23452345.7523"

    index = np.where( Geometry == np.array(Geometry_Set) )[0][0]

    this_dir, this_filename = os.path.split(__file__)

    filename_root = 'DATA/finalized_model_' + geo_code[index] + '_f_esc_' + Machine + '_' + property_name
    if Geometry == 'Bicone_X_Slab':
        filename_root += '_Inside_Bicone_' + str(INSIDE_BICONE)

    filename = os.path.join( this_dir , filename_root + '.sav' )

    # BUGFIX: the handle was opened via pickle.load(open(filename, 'rb'))
    # and never closed; use a context manager so it is always released.
    with open( filename , 'rb' ) as model_file:
        loaded_model = pickle.load( model_file )

    return loaded_model
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Analytic_f_esc_Thin_Shell( V_Arr , logNH_Arr , ta_Arr ):
    """Analytic fit to the Thin Shell escape fraction.

    Evaluates f_esc = sech( sqrt( K1 * ta**K2 ) ), where K1 and K2 are
    power laws in the expansion velocity whose coefficients depend on the
    hydrogen column density. Coefficients come from an MCMC fit to the
    radiative-transfer grids.
    """
    # Column density in units of 1e18 cm^-2 (and its log10).
    NH18 = 10 ** ( logNH_Arr - 18 )
    y = np.log10( NH18 )

    # MCMC best-fit coefficients (stored as log10 of their absolute value).
    c11 , c12 , c13 = 10**(1.90526) , -10**(2.0399) , 10**(2.34829)
    c21 , c22 , c23 , c24 = 10**(-3.138837) , -10**(-1.92151) , 10**(-1.1860205000000001) , -10**(-0.1480042)
    c3 = 10**(0.0530715)
    c4 = 10**(-2.743455)

    # Amplitude (C1, quadratic in y) and exponent (C2, cubic in y) of the
    # velocity power law.
    C1 = y**2 * c11 + y * c12 + c13
    C2 = c21*y*y*y + c22*y*y + c23*y + c24

    K1 = C1 * ( V_Arr ** C2 )
    K2 = c3 * ( V_Arr ** c4 )

    return 1. / np.cosh( np.sqrt( K1 * ( ta_Arr ** K2 ) ) )
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Analytic_f_esc_Wind( V_Arr , logNH_Arr , ta_Arr ):
    """Analytic fit to the Galactic Wind escape fraction.

    f_esc = sech( sqrt( K1 * ta**K2 ) ), with K1 and K2 power laws in the
    expansion velocity; coefficients from an MCMC fit to the RT grids.
    """
    # Column density in units of 1e18 cm^-2 (and its log10).
    NH18 = 10 ** ( logNH_Arr - 18 )
    y = np.log10( NH18 )

    # MCMC best-fit coefficients (stored as log10 of their absolute value).
    c11 , c12 = 10**(0.4852541) , 10**(-0.2006394)
    c21 , c22 = 10**(-1.912059) , -10**(-0.6380347)
    c3 = 10**(0.046314074999999996)
    c4 = 10**(-1.782037)

    # Amplitude and exponent of the velocity power law.
    C1 = c11 * ( NH18 ** c12 )
    C2 = c21 * y**2 + c22 * y

    K1 = C1 * V_Arr ** C2
    K2 = c3 * V_Arr ** c4

    return 1./ np.cosh( np.sqrt( K1 * ta_Arr ** K2 ) )
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def RT_f_esc_Analytic( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr=None , Machine_Learning_Algorithm=None ):
    """Escape fraction through the analytic fitting formulae.

    Only 'Thin_Shell' and 'Galactic_Wind' have analytic fits. The extra
    keyword arguments are accepted for signature compatibility with the
    other backends and ignored here.
    """
    analytic_backends = { 'Thin_Shell' : Analytic_f_esc_Thin_Shell ,
                          'Galactic_Wind' : Analytic_f_esc_Wind }

    assert Geometry in [ 'Thin_Shell' , 'Galactic_Wind' ] , 'The geometry ' + Geometry + ' is nor supported in MODE=Analytic , only Thin_Shell and Galactic_Wind'

    V_Arr = np.atleast_1d( V_Arr )
    logNH_Arr = np.atleast_1d( logNH_Arr )
    ta_Arr = np.atleast_1d( ta_Arr )

    return analytic_backends[ Geometry ]( V_Arr , logNH_Arr , ta_Arr )
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def fesc_of_ta_Thin_and_Wind( ta , CCC , KKK ):
    """Parametric dust law f_esc(ta) = sech( sqrt( CCC * ta**KKK ) )."""
    damping = np.sqrt( CCC * ( ta ** KKK ) )
    return 1. / np.cosh( damping )
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def fesc_of_ta_Bicone( ta , CCC , KKK , LLL ):
    """Bicone dust law: a sech profile scaled by the plateau amplitude LLL."""
    damping = np.sqrt( CCC * ( ta ** KKK ) )
    return LLL * 1. / np.cosh( damping )
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def RT_f_esc_Machine_Parameter( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr=None , Machine_Learning_Algorithm='Tree' ):
    """Escape fraction via machine-learning prediction of the dust-law parameters.

    For every (V, logNH) pair a pre-trained estimator predicts the
    parameters CCC and KKK (plus the amplitude LLL for the bicone) of the
    parametric dust law, which is then evaluated at the matching ta.

    Parameters mirror RT_f_esc; Machine_Learning_Algorithm selects which
    pickled estimator family to load (e.g. 'Tree', 'Forest', 'KN').

    Returns
    -------
    f_esc_Arr : 1-D numpy array with one escape fraction per input entry.
    """
    logNH_Arr = np.atleast_1d( logNH_Arr )
    ta_Arr = np.atleast_1d( ta_Arr )
    V_Arr = np.atleast_1d( V_Arr )
    if Geometry in [ 'Thin_Shell' , 'Galactic_Wind' ] :
        # Feature matrix for the estimators: one row per object, columns (V, logNH).
        Coor_matrix = np.zeros( len(V_Arr) * 2 ).reshape( len(V_Arr) , 2 )
        Coor_matrix[ : , 0 ] = V_Arr
        Coor_matrix[ : , 1 ] = logNH_Arr
        CCC_machine = load_machine_fesc( Machine_Learning_Algorithm , 'CCC' , Geometry )
        KKK_machine = load_machine_fesc( Machine_Learning_Algorithm , 'KKK' , Geometry )
        CCC_model_Arr = CCC_machine.predict( Coor_matrix )
        KKK_model_Arr = KKK_machine.predict( Coor_matrix )
        f_esc_Arr = fesc_of_ta_Thin_and_Wind( ta_Arr , CCC_model_Arr , KKK_model_Arr )
    if Geometry in [ 'Bicone_X_Slab' ] :
        assert not Inside_Bicone_Arr is None , 'Inside_Bicone_Arr give is None or none Inside_Bicone_Arr was given. If the geometry is Bicone_X_Slab it is necesary to give a Inside_Bicone_Arr'
        Inside_Bicone_Arr = np.atleast_1d( Inside_Bicone_Arr )
        f_esc_Arr = np.zeros( len(V_Arr) )
        ##################
        # Face-on sight lines: models trained inside the bicone.
        if sum( Inside_Bicone_Arr ) > 0 :
            Coor_matrix = np.zeros( sum( Inside_Bicone_Arr ) * 2 ).reshape( sum( Inside_Bicone_Arr ) , 2 )
            Coor_matrix[ : , 0 ] = V_Arr[ Inside_Bicone_Arr ]
            Coor_matrix[ : , 1 ] = logNH_Arr[ Inside_Bicone_Arr ]
            CCC_machine_in = load_machine_fesc( Machine_Learning_Algorithm , 'CCC' , Geometry , INSIDE_BICONE=True )
            KKK_machine_in = load_machine_fesc( Machine_Learning_Algorithm , 'KKK' , Geometry , INSIDE_BICONE=True )
            LLL_machine_in = load_machine_fesc( Machine_Learning_Algorithm , 'LLL' , Geometry , INSIDE_BICONE=True )
            CCC_model_in_Arr = CCC_machine_in.predict( Coor_matrix )
            KKK_model_in_Arr = KKK_machine_in.predict( Coor_matrix )
            LLL_model_in_Arr = LLL_machine_in.predict( Coor_matrix )
            f_esc_Arr[ Inside_Bicone_Arr ] = fesc_of_ta_Bicone( ta_Arr[ Inside_Bicone_Arr ] , CCC_model_in_Arr , KKK_model_in_Arr , LLL_model_in_Arr )
        ##################
        # Edge-on sight lines: models trained outside the bicone.
        if sum( ~Inside_Bicone_Arr ) > 0 :
            Coor_matrix = np.zeros( sum( ~Inside_Bicone_Arr ) * 2 ).reshape( sum( ~Inside_Bicone_Arr ) , 2 )
            Coor_matrix[ : , 0 ] = V_Arr[ ~Inside_Bicone_Arr ]
            Coor_matrix[ : , 1 ] = logNH_Arr[ ~Inside_Bicone_Arr ]
            CCC_machine_out = load_machine_fesc( Machine_Learning_Algorithm , 'CCC' , Geometry , INSIDE_BICONE=False )
            KKK_machine_out = load_machine_fesc( Machine_Learning_Algorithm , 'KKK' , Geometry , INSIDE_BICONE=False )
            LLL_machine_out = load_machine_fesc( Machine_Learning_Algorithm , 'LLL' , Geometry , INSIDE_BICONE=False )
            CCC_model_out_Arr = CCC_machine_out.predict( Coor_matrix )
            KKK_model_out_Arr = KKK_machine_out.predict( Coor_matrix )
            LLL_model_out_Arr = LLL_machine_out.predict( Coor_matrix )
            f_esc_Arr[ ~Inside_Bicone_Arr ] = fesc_of_ta_Bicone( ta_Arr[ ~Inside_Bicone_Arr ] , CCC_model_out_Arr , KKK_model_out_Arr , LLL_model_out_Arr )
    return f_esc_Arr
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def RT_f_esc_Machine_Values( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr=None , Machine_Learning_Algorithm='Tree' ):
    """Escape fraction predicted directly by a machine-learning model.

    Unlike RT_f_esc_Machine_Parameter, the estimator here predicts f_esc
    itself from the features (V, logNH, log10 ta).

    Parameters mirror RT_f_esc. Returns a 1-D numpy array of escape
    fractions, one per input entry.
    """
    logNH_Arr = np.atleast_1d( logNH_Arr )
    ta_Arr = np.atleast_1d( ta_Arr )
    V_Arr = np.atleast_1d( V_Arr )
    if Geometry in [ 'Thin_Shell' , 'Galactic_Wind' ] :
        loaded_model = load_machine_fesc( Machine_Learning_Algorithm , 'f_esc' , Geometry )
        # Feature matrix: one row per object, columns (V, logNH, log10 ta).
        Coor_matrix = np.zeros( len(V_Arr) * 3 ).reshape( len(V_Arr) , 3 )
        Coor_matrix[ : , 0 ] = V_Arr
        Coor_matrix[ : , 1 ] = logNH_Arr
        Coor_matrix[ : , 2 ] = np.log10(ta_Arr)
        f_esc_Arr = loaded_model.predict( Coor_matrix )
    if Geometry in [ 'Bicone_X_Slab' ] :
        # NOTE(review): unlike RT_f_esc_Machine_Parameter this branch does not
        # assert Inside_Bicone_Arr is not None — confirm callers always pass a
        # boolean mask for the bicone geometry.
        Inside_Bicone_Arr = np.atleast_1d( Inside_Bicone_Arr )
        f_esc_Arr = np.zeros( len(V_Arr) )
        ##################
        # Face-on sight lines: model trained inside the bicone.
        if sum( Inside_Bicone_Arr ) > 0 :
            loaded_model_inside = load_machine_fesc( Machine_Learning_Algorithm , 'f_esc' , Geometry , INSIDE_BICONE=True )
            Coor_matrix = np.zeros( sum( Inside_Bicone_Arr ) * 3 ).reshape( sum( Inside_Bicone_Arr ) , 3 )
            Coor_matrix[ : , 0 ] = V_Arr[ Inside_Bicone_Arr ]
            Coor_matrix[ : , 1 ] = logNH_Arr[ Inside_Bicone_Arr ]
            Coor_matrix[ : , 2 ] = np.log10(ta_Arr)[ Inside_Bicone_Arr ]
            f_esc_Arr[ Inside_Bicone_Arr ] = loaded_model_inside.predict( Coor_matrix )
        ##################
        # Edge-on sight lines: model trained outside the bicone.
        if sum( ~Inside_Bicone_Arr ) > 0 :
            loaded_model_outside = load_machine_fesc( Machine_Learning_Algorithm , 'f_esc' , Geometry , INSIDE_BICONE=False )
            Coor_matrix = np.zeros( sum( ~Inside_Bicone_Arr ) * 3 ).reshape( sum( ~Inside_Bicone_Arr ) , 3 )
            Coor_matrix[ : , 0 ] = V_Arr[ ~Inside_Bicone_Arr ]
            Coor_matrix[ : , 1 ] = logNH_Arr[ ~Inside_Bicone_Arr ]
            Coor_matrix[ : , 2 ] = np.log10(ta_Arr)[ ~Inside_Bicone_Arr ]
            f_esc_Arr[ ~Inside_Bicone_Arr ] = loaded_model_outside.predict( Coor_matrix )
    return f_esc_Arr
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Linear_2D_interpolator( X_prob , Y_prob , X_grid , Y_grid , Field_in_grid ):
    """Bilinearly interpolate Field_in_grid at the point (X_prob, Y_prob).

    Parameters
    ----------
    X_prob , Y_prob : float
        Coordinates of the evaluation point.
    X_grid , Y_grid : 1-D sorted arrays
        Grid axes; Field_in_grid[i, j] is the field at (X_grid[i], Y_grid[j]).
    Field_in_grid : array
        Field values tabulated on the grid nodes.

    Returns
    -------
    The bilinear combination of the four surrounding grid nodes.

    Notes
    -----
    BUGFIX: the previous ``np.where(grid < prob)`` lookup raised IndexError
    when the probe sat exactly on (or below) the first grid node. The cell
    index is now found with np.searchsorted and clipped into the grid, so
    edge probes work and out-of-range probes extrapolate from the edge cell.
    """
    # Lower-left node of the cell containing (or nearest to) the probe.
    INDEX_X = int( np.clip( np.searchsorted( X_grid , X_prob ) - 1 , 0 , len( X_grid ) - 2 ) )
    INDEX_Y = int( np.clip( np.searchsorted( Y_grid , Y_prob ) - 1 , 0 , len( Y_grid ) - 2 ) )

    # Cell size along each axis.
    dX_grid = X_grid[ INDEX_X + 1 ] - X_grid[ INDEX_X ]
    dY_grid = Y_grid[ INDEX_Y + 1 ] - Y_grid[ INDEX_Y ]

    # Fractional position of the probe inside the cell (in [0, 1] when the
    # probe lies within the grid).
    Xprob_X0 = ( X_prob - X_grid[ INDEX_X ] ) * 1. / dX_grid
    Yprob_Y0 = ( Y_prob - Y_grid[ INDEX_Y ] ) * 1. / dY_grid

    # Bilinear weights: the area of the sub-rectangle opposite each node.
    Area1 = ( 1. - Xprob_X0 ) * ( 1. - Yprob_Y0 )
    Area2 = ( 1. - Xprob_X0 ) * ( Yprob_Y0 )
    Area3 = ( Xprob_X0 ) * ( Yprob_Y0 )
    Area4 = ( Xprob_X0 ) * ( 1. - Yprob_Y0 )

    Field1 = Field_in_grid[ INDEX_X , INDEX_Y ]
    Field2 = Field_in_grid[ INDEX_X , INDEX_Y + 1 ]
    Field3 = Field_in_grid[ INDEX_X + 1 , INDEX_Y + 1 ]
    Field4 = Field_in_grid[ INDEX_X + 1 , INDEX_Y ]

    return Area1 * Field1 + Area2 * Field2 + Area3 * Field3 + Area4 * Field4
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Linear_3D_interpolator( X_prob , Y_prob , Z_prob , X_grid , Y_grid , Z_grid , Field_in_grid ):
    """Trilinearly interpolate Field_in_grid at (X_prob, Y_prob, Z_prob).

    Parameters
    ----------
    X_prob , Y_prob , Z_prob : float
        Coordinates of the evaluation point.
    X_grid , Y_grid , Z_grid : 1-D sorted arrays
        Grid axes; Field_in_grid[i, j, k] is the field at
        (X_grid[i], Y_grid[j], Z_grid[k]).
    Field_in_grid : array
        Field values tabulated on the grid nodes.

    Returns
    -------
    The trilinear combination of the eight surrounding grid nodes.

    Notes
    -----
    BUGFIX: the previous ``np.where(grid < prob)`` lookup raised IndexError
    when a probe sat exactly on (or below) the first grid node. The cell
    index is now found with np.searchsorted and clipped into the grid, so
    edge probes work and out-of-range probes extrapolate from the edge cell.
    """
    # Lower corner of the cell containing (or nearest to) the probe.
    INDEX_X = int( np.clip( np.searchsorted( X_grid , X_prob ) - 1 , 0 , len( X_grid ) - 2 ) )
    INDEX_Y = int( np.clip( np.searchsorted( Y_grid , Y_prob ) - 1 , 0 , len( Y_grid ) - 2 ) )
    INDEX_Z = int( np.clip( np.searchsorted( Z_grid , Z_prob ) - 1 , 0 , len( Z_grid ) - 2 ) )

    # Cell size along each axis.
    dX_grid = X_grid[ INDEX_X + 1 ] - X_grid[ INDEX_X ]
    dY_grid = Y_grid[ INDEX_Y + 1 ] - Y_grid[ INDEX_Y ]
    dZ_grid = Z_grid[ INDEX_Z + 1 ] - Z_grid[ INDEX_Z ]

    # Fractional position of the probe inside the cell.
    Xprob_X0 = ( X_prob - X_grid[ INDEX_X ] ) * 1. / dX_grid
    Yprob_Y0 = ( Y_prob - Y_grid[ INDEX_Y ] ) * 1. / dY_grid
    Zprob_Z0 = ( Z_prob - Z_grid[ INDEX_Z ] ) * 1. / dZ_grid

    # Trilinear weights: the volume of the sub-box opposite each node.
    Vol1 = ( 1. - Xprob_X0 ) * ( 1. - Yprob_Y0 ) * ( 1. - Zprob_Z0 )
    Vol2 = ( 1. - Xprob_X0 ) * ( Yprob_Y0 ) * ( 1. - Zprob_Z0 )
    Vol3 = ( 1. - Xprob_X0 ) * ( Yprob_Y0 ) * ( Zprob_Z0 )
    Vol4 = ( 1. - Xprob_X0 ) * ( 1. - Yprob_Y0 ) * ( Zprob_Z0 )
    Vol5 = ( Xprob_X0 ) * ( 1. - Yprob_Y0 ) * ( 1. - Zprob_Z0 )
    Vol6 = ( Xprob_X0 ) * ( Yprob_Y0 ) * ( 1. - Zprob_Z0 )
    Vol7 = ( Xprob_X0 ) * ( Yprob_Y0 ) * ( Zprob_Z0 )
    Vol8 = ( Xprob_X0 ) * ( 1. - Yprob_Y0 ) * ( Zprob_Z0 )

    Field1 = Field_in_grid[ INDEX_X , INDEX_Y , INDEX_Z ]
    Field2 = Field_in_grid[ INDEX_X , INDEX_Y + 1 , INDEX_Z ]
    Field3 = Field_in_grid[ INDEX_X , INDEX_Y + 1 , INDEX_Z + 1 ]
    Field4 = Field_in_grid[ INDEX_X , INDEX_Y , INDEX_Z + 1 ]
    Field5 = Field_in_grid[ INDEX_X + 1 , INDEX_Y , INDEX_Z ]
    Field6 = Field_in_grid[ INDEX_X + 1 , INDEX_Y + 1 , INDEX_Z ]
    Field7 = Field_in_grid[ INDEX_X + 1 , INDEX_Y + 1 , INDEX_Z + 1 ]
    Field8 = Field_in_grid[ INDEX_X + 1 , INDEX_Y , INDEX_Z + 1 ]

    return Vol1 * Field1 + Vol2 * Field2 + Vol3 * Field3 + Vol4 * Field4 + Vol5 * Field5 + Vol6 * Field6 + Vol7 * Field7 + Vol8 * Field8
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def load_Grid_fesc( Geometry , MODE , INSIDE_BICONE=True ):
    """Load the pre-computed escape-fraction grid dictionary.

    Parameters
    ----------
    Geometry : 'Thin_Shell', 'Galactic_Wind' or 'Bicone_X_Slab'.
    MODE : 'Parameters' (fitted dust-law parameters) or 'values' (raw f_esc).
    INSIDE_BICONE : only used for 'Bicone_X_Slab'; True selects the face-on grid.

    Returns
    -------
    dict with the grid axes and tabulated values.
    """
    geometry_options = [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab' ]
    geometry_codes = [ 'Thin_Shell' , 'Wind' , 'Bicone_X_Slab' ]

    geo_index = np.where( Geometry == np.array( geometry_options ) )[0][0]

    grid_name = 'DATA/Dictonary_' + geometry_codes[ geo_index ] + '_Grid_f_esc_' + MODE
    if Geometry == 'Bicone_X_Slab':
        grid_name += '_Inside_Bicone_' + str( INSIDE_BICONE )

    package_dir , _ = os.path.split( __file__ )
    grid_path = os.path.join( package_dir , grid_name + '.npy' )

    return np.load( grid_path , allow_pickle=True ).item()
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Interpolate_f_esc_Arrays_2D_grid( V_Arr , logNH_Arr , ta_Arr , Grid_Dictionary , Geometry ):
    """Escape fraction from bilinear interpolation of the fitted curve
    parameters over the (V, logNH) grid, then evaluation of the parametric
    dust law at each ta."""
    grid_V = Grid_Dictionary[ 'V_Arr' ]
    grid_logNH = Grid_Dictionary[ 'logNH_Arr' ]
    grid_logta = Grid_Dictionary[ 'logta_Arr' ]   # not needed here; read kept for dict-shape parity
    grid_params = Grid_Dictionary[ 'Grid' ]

    n_obj = len( V_Arr )

    # One interpolated parameter tuple per object.
    params = [ Linear_2D_interpolator( V_Arr[ k ] , logNH_Arr[ k ] , grid_V , grid_logNH , grid_params )
               for k in range( n_obj ) ]

    ###################
    if Geometry in [ 'Thin_Shell' , 'Galactic_Wind' ] :
        CCC_vals = np.array( [ p[0] for p in params ] )
        KKK_vals = np.array( [ p[1] for p in params ] )
        f_esc_Arr = fesc_of_ta_Thin_and_Wind( ta_Arr , CCC_vals , KKK_vals )

    ###################
    if Geometry in [ 'Bicone_X_Slab' ] :
        CCC_vals = np.array( [ p[0] for p in params ] )
        KKK_vals = np.array( [ p[1] for p in params ] )
        LLL_vals = np.array( [ p[2] for p in params ] )
        f_esc_Arr = fesc_of_ta_Bicone( ta_Arr , CCC_vals , KKK_vals , LLL_vals )

    return f_esc_Arr
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Interpolate_fesc_Arrays_3D_grid( V_Arr , logNH_Arr , ta_Arr , Grid_Dictionary ):
    """Escape fraction from direct trilinear interpolation of the raw
    f_esc grid in (V, logNH, log10 ta)."""
    grid_V = Grid_Dictionary[ 'V_Arr' ]
    grid_logNH = Grid_Dictionary[ 'logNH_Arr' ]
    grid_logta = Grid_Dictionary[ 'logta_Arr' ]
    grid_values = Grid_Dictionary[ 'Grid' ]

    # The grid axis is in log10 of the dust optical depth.
    logta_Arr = np.log10( ta_Arr )

    return np.array( [ Linear_3D_interpolator( V_Arr[ k ] , logNH_Arr[ k ] , logta_Arr[ k ] ,
                                               grid_V , grid_logNH , grid_logta , grid_values )
                       for k in range( len( V_Arr ) ) ] )
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def RT_f_esc_Interpolation_Values( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr=None , Machine_Learning_Algorithm=None ):
    """Escape fraction from direct interpolation of the raw f_esc grids.

    Parameters mirror RT_f_esc; Machine_Learning_Algorithm is accepted only
    so every backend shares one call signature and is ignored here.

    Returns
    -------
    f_esc_Arr : 1-D numpy array of the interpolated escape fractions.
    """
    logNH_Arr = np.atleast_1d( logNH_Arr )
    ta_Arr = np.atleast_1d( ta_Arr )
    V_Arr = np.atleast_1d( V_Arr )

    if Geometry in [ 'Thin_Shell' , 'Galactic_Wind' ] :
        DATA_DICTIONAY = load_Grid_fesc( Geometry , 'values' )
        f_esc_Arr = Interpolate_fesc_Arrays_3D_grid( V_Arr , logNH_Arr , ta_Arr , DATA_DICTIONAY )

    if Geometry in [ 'Bicone_X_Slab' ] :
        # BUGFIX: previously the whole branch was silently skipped when
        # Inside_Bicone_Arr was None, leaving f_esc_Arr unbound and raising
        # an opaque UnboundLocalError at the return; fail loudly instead.
        assert Inside_Bicone_Arr is not None , 'If the geometry is Bicone_X_Slab it is necesary to give a Inside_Bicone_Arr'

        Inside_Bicone_Arr = np.atleast_1d( Inside_Bicone_Arr )

        f_esc_Arr = np.zeros( len(logNH_Arr) )

        ##############
        # Face-on sight lines: grid computed inside the bicone.
        if sum( Inside_Bicone_Arr ) > 0:
            DATA_DICTIONAY_in = load_Grid_fesc( Geometry , 'values' , INSIDE_BICONE=True )
            f_esc_Arr[ Inside_Bicone_Arr ] = Interpolate_fesc_Arrays_3D_grid( V_Arr[Inside_Bicone_Arr] , logNH_Arr[Inside_Bicone_Arr] , ta_Arr[Inside_Bicone_Arr] , DATA_DICTIONAY_in )

        ##############
        # Edge-on sight lines: grid computed outside the bicone.
        if sum( ~Inside_Bicone_Arr ) > 0:
            DATA_DICTIONAY_out = load_Grid_fesc( Geometry , 'values' , INSIDE_BICONE=False )
            f_esc_Arr[ ~Inside_Bicone_Arr ] = Interpolate_fesc_Arrays_3D_grid( V_Arr[~Inside_Bicone_Arr] , logNH_Arr[~Inside_Bicone_Arr] , ta_Arr[~Inside_Bicone_Arr] , DATA_DICTIONAY_out )

    return f_esc_Arr
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def RT_f_esc_Interpolation_Parameters( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr=None , Machine_Learning_Algorithm=None ):
    """Escape fraction from interpolation of the fitted dust-law parameters.

    Parameters mirror RT_f_esc; Machine_Learning_Algorithm is accepted only
    so every backend shares one call signature and is ignored here.

    Returns
    -------
    f_esc_Arr : 1-D numpy array of escape fractions.
    """
    logNH_Arr = np.atleast_1d( logNH_Arr )
    ta_Arr = np.atleast_1d( ta_Arr )
    V_Arr = np.atleast_1d( V_Arr )

    if Geometry in [ 'Thin_Shell' , 'Galactic_Wind' ] :
        DATA_DICTIONAY = load_Grid_fesc( Geometry , 'Parameters' )
        f_esc_Arr = Interpolate_f_esc_Arrays_2D_grid( V_Arr , logNH_Arr , ta_Arr , DATA_DICTIONAY , Geometry )

    if Geometry in [ 'Bicone_X_Slab' ] :
        # Fail loudly when the face-on/edge-on mask is missing, as the other
        # Bicone_X_Slab backends do.
        assert Inside_Bicone_Arr is not None , 'If the geometry is Bicone_X_Slab it is necesary to give a Inside_Bicone_Arr'

        Inside_Bicone_Arr = np.atleast_1d( Inside_Bicone_Arr )

        f_esc_Arr = np.zeros( len(logNH_Arr) )

        ##############
        # CONSISTENCY FIX: guard with sum(...) > 0 as in
        # RT_f_esc_Interpolation_Values, so an empty sub-sample does not
        # trigger a pointless grid load from disk.
        if sum( Inside_Bicone_Arr ) > 0 :
            DATA_DICTIONAY_in = load_Grid_fesc( Geometry , 'Parameters' , INSIDE_BICONE=True )
            f_esc_Arr[ Inside_Bicone_Arr ] = Interpolate_f_esc_Arrays_2D_grid( V_Arr[Inside_Bicone_Arr] , logNH_Arr[Inside_Bicone_Arr] , ta_Arr[Inside_Bicone_Arr] , DATA_DICTIONAY_in , Geometry )

        ##############
        if sum( ~Inside_Bicone_Arr ) > 0 :
            DATA_DICTIONAY_out = load_Grid_fesc( Geometry , 'Parameters' , INSIDE_BICONE=False )
            f_esc_Arr[ ~Inside_Bicone_Arr ] = Interpolate_f_esc_Arrays_2D_grid( V_Arr[~Inside_Bicone_Arr] , logNH_Arr[~Inside_Bicone_Arr] , ta_Arr[~Inside_Bicone_Arr] , DATA_DICTIONAY_out , Geometry )

    return f_esc_Arr
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def pre_treatment_f_esc( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr , MODE ):
    """Sanitize and clamp the outflow parameters before evaluating f_esc.

    Non-finite (nan/±inf) entries are masked out; the surviving values are
    clipped just inside the parameter range covered by the pre-computed
    grids (10 < V < 1000 km/s, 17 < logNH < 22, and for MODE='Raw' also
    -2.5 < log10(ta) < -0.25).

    Returns
    -------
    V_Arr_used, logNH_Arr_used, ta_Arr_used : float arrays of the clean entries.
    In_Bool_used : boolean array (face-on flags) for the clean entries.
    mask_good : boolean array over the input marking which entries survived.
    """
    V_Arr = np.atleast_1d( V_Arr ).astype(float)
    logNH_Arr = np.atleast_1d( logNH_Arr ).astype(float)
    ta_Arr = np.atleast_1d( ta_Arr ).astype(float)

    # Keep only rows where every parameter is finite.
    mask_good = np.isfinite( V_Arr ) * np.isfinite( logNH_Arr ) * np.isfinite( ta_Arr )
    assert sum( mask_good ) != 0 , 'All the V-logNH-ta combinations are np.nan, -np.inf or np.inf'

    V_Arr_used = V_Arr[ mask_good ]
    logNH_Arr_used = logNH_Arr[ mask_good ]
    ta_Arr_used = ta_Arr[ mask_good ]

    #============================================#
    if Geometry in [ 'Bicone_X_Slab' ] :
        # The bicone grid has a hole at low V / high NH: move those points to
        # the nearest covered edge (V = 100 km/s or logNH = 20.5) depending on
        # which side of the diagonal cut in the (logNH, V) plane they fall.
        bool1 = V_Arr_used < 100.0
        bool2 = logNH_Arr_used >= 20.5

        aux_V_arr = logNH_Arr_used * ( -100/1.5 ) + ( 100 - ( -100/1.5 * 20.5 ) )

        bool_aux = V_Arr_used > aux_V_arr

        V_Arr_used[ bool1 * bool2 * bool_aux ] = 100.000001
        logNH_Arr_used[ bool1 * bool2 * ~bool_aux ] = 20.5
    #============================================#

    # Clip V and logNH just inside the grid boundaries.
    V_Arr_used[ V_Arr_used <= 10.00 ] = 10.000001
    V_Arr_used[ V_Arr_used >= 1000 ] = 999.9999

    logNH_Arr_used[ logNH_Arr_used <= 17.0 ] = 17.0000001
    logNH_Arr_used[ logNH_Arr_used >= 22.0 ] = 21.9999

    if MODE == 'Raw':
        # The raw grids only cover -2.5 <= log10(ta) <= -0.25.
        ta_Arr_used[ ta_Arr_used <= 10**(-2.5) ] = 10**(-2.499999)
        ta_Arr_used[ ta_Arr_used >= 10**(-0.25) ] = 10**(-0.2500001)

    if Inside_Bicone_Arr is None :
        # BUGFIX: the default must be a *boolean* mask. A plain np.ones()
        # yields a float array that breaks downstream boolean indexing
        # (e.g. f_esc_Arr[~mask] raises TypeError on floats).
        Inside_Bicone_Arr = np.ones( len(V_Arr) , dtype=bool )
    In_Bool_used = np.atleast_1d( Inside_Bicone_Arr )[ mask_good ]

    return V_Arr_used , logNH_Arr_used , ta_Arr_used , In_Bool_used , mask_good
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def RT_f_esc( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr=None , MODE='Parametrization' , Algorithm='Intrepolation' , Machine_Learning_Algorithm='Tree' ):
    '''
    Compute the Lyman-alpha escape fraction for a set of outflow configurations.

    Parameters
    ----------
    Geometry : string
        Outflow geometry: 'Thin_Shell', 'Galactic_Wind' or 'Bicone_X_Slab'.
    V_Arr : 1-D sequence of float
        Expansion velocity of the outflow [km/s].
    logNH_Arr : 1-D sequence of float
        log10 of the outflow neutral hydrogen column density [cm**-2].
    ta_Arr : 1-D sequence of float
        Dust optical depth of the outflow.
    Inside_Bicone_Arr : optional 1-D sequence of bool
        For 'Bicone_X_Slab' only: True marks a face-on sight line, False an
        edge-on one.
    MODE : optional string
        'Parametrization' (default): dust law a la Neufeld et al. 1990 with
        grid-fitted parameters; 'Raw': direct use of the RT Monte Carlo
        output; 'Analytic': analytic fitting formula.
    Algorithm : optional string
        'Intrepolation' (default; note the spelling) for direct linear
        interpolation or 'Machine_Learning'. Ignored when MODE='Analytic'.
    Machine_Learning_Algorithm : optional string
        'Tree' (default), 'Forest' or 'KN'.

    Returns
    -------
    f_esc_Arr : 1-D numpy array
        Escape fraction per input entry; np.nan where the inputs were not
        finite.
    '''
    assert MODE in [ 'Parametrization' , 'Raw' , 'Analytic'] , 'The requested mode ' + MODE + ' is not available. The modes supported are : Parametrization , Raw , Analytic'
    assert Algorithm in [ 'Intrepolation' , 'Machine_Learning' ] , 'The requested algorithm ' + Algorithm + ' is not available. The algorithms supported are : Intrepolation , Machine_Learning'
    assert Geometry in [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab' ] , 'The requested geoemtry ' + Geometry + ' is not available. The geometries supported are : Thin_Shell , Galactic_Wind , Bicone_X_Slab'

    V_Arr_used , logNH_Arr_used , ta_Arr_used , In_Bool_used , mask_good = pre_treatment_f_esc( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr , MODE )

    # Backend lookup table: (MODE, Algorithm) -> implementation.
    # MODE='Analytic' ignores the algorithm choice.
    backends = {
        ( 'Parametrization' , 'Intrepolation' ) : RT_f_esc_Interpolation_Parameters ,
        ( 'Parametrization' , 'Machine_Learning' ) : RT_f_esc_Machine_Parameter ,
        ( 'Raw' , 'Intrepolation' ) : RT_f_esc_Interpolation_Values ,
        ( 'Raw' , 'Machine_Learning' ) : RT_f_esc_Machine_Values ,
        ( 'Analytic' , 'Intrepolation' ) : RT_f_esc_Analytic ,
        ( 'Analytic' , 'Machine_Learning' ) : RT_f_esc_Analytic ,
    }
    backend = backends[ ( MODE , Algorithm ) ]

    # Entries rejected by the pre-treatment stay np.nan.
    f_esc_Arr = np.full( len( mask_good ) , np.nan )
    f_esc_Arr[ mask_good ] = backend( Geometry , V_Arr_used , logNH_Arr_used , ta_Arr_used , Inside_Bicone_Arr=In_Bool_used , Machine_Learning_Algorithm=Machine_Learning_Algorithm )

    return f_esc_Arr
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def define_RT_parameters():
    """Return (nu0, Dv): the Lya central frequency [Hz] and the Doppler
    frequency width for T = 1e4 K gas (thermal velocity 12.85 km/s)."""
    temperature_1e4K = 1.                              # T / 1e4 K
    lya_frequency = 2.46777 * 1.e15                    # nu0 [Hz]
    thermal_velocity = 12.85 * np.sqrt(temperature_1e4K)   # Vth [km/s]
    doppler_width = thermal_velocity * lya_frequency * 1. / ( 3 * (10**5) )   # c = 3e5 km/s
    return lya_frequency , doppler_width
#==============================================================================#
def convert_x_into_lamda( x ):
    """Convert a frequency shift x (Doppler units) into wavelength [m]."""
    nu0 , Dv = define_RT_parameters()
    frequency = x * Dv + nu0
    return 3. * 1.e8 / frequency
def convert_lamda_into_x( lamda ):
    """Convert a wavelength [m] into the frequency shift x in Doppler units."""
    nu0 , Dv = define_RT_parameters()
    frequency = 3. * 1.e8 / lamda
    return ( frequency - nu0 ) / Dv
#==============================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def load_Grid_Line( Geometry , INSIDE_BICONE=None ):
    '''
    Load the dictionary describing the grid of pre-computed Lya line profiles.

    Parameters
    ----------
    Geometry : string
        'Thin_Shell', 'Galactic_Wind' or 'Bicone_X_Slab'.
    INSIDE_BICONE : optional boolean
        Only meaningful for 'Bicone_X_Slab': True selects the face-on grid,
        False the edge-on one.

    Returns
    -------
    loaded_model : dict with entries
        'V_Arr'     : expansion velocities of the grid [km/s],
        'logNH_Arr' : log10 column densities [c.g.s.],
        'logta_Arr' : log10 dust optical depths,
        'x_Arr'     : frequency axis in Doppler units,
        'Grid'      : LyaRT output; Grid[i, j, k, :] is the line profile
                      evaluated on 'x_Arr' for (V_Arr[i], logNH_Arr[j],
                      logta_Arr[k]).
    '''
    assert Geometry in [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab' ] , 'The requested geoemtry ' + Geometry + ' is not available. The geometries supported are : Thin_Shell , Galactic_Wind , Bicone_X_Slab'

    geometry_options = [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab' ]
    geometry_codes = [ 'Thin_Shell' , 'Wind' , 'Bicone_X_Slab' ]

    geo_index = np.where( Geometry == np.array( geometry_options ) )[0][0]

    grid_name = 'DATA/Dictonary_' + geometry_codes[ geo_index ] + '_Grid_Lines'
    if Geometry == 'Bicone_X_Slab':
        grid_name += '_In_Bicone_' + str( INSIDE_BICONE )

    package_dir , _ = os.path.split( __file__ )
    grid_path = os.path.join( package_dir , grid_name + '.npy' )

    return np.load( grid_path , allow_pickle=True ).item()
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Interpolate_Lines_Arrays_3D_grid( V_Arr , logNH_Arr , logta_Arr , x_Arr , Grid_Dictionary ):
    """Interpolate one normalised Lya line profile per (V, logNH, logta) triplet.

    Each grid profile is trilinearly interpolated in parameter space,
    resampled onto x_Arr (zero outside the tabulated frequency range) and
    normalised so its integral over x_Arr is one. Returns an array of
    shape (len(V_Arr), len(x_Arr)).
    """
    grid_cube = Grid_Dictionary['Grid']
    v_axis    = Grid_Dictionary['V_Arr']
    x_axis    = Grid_Dictionary['x_Arr']
    nh_axis   = Grid_Dictionary['logNH_Arr']
    ta_axis   = Grid_Dictionary['logta_Arr']

    profiles = np.zeros( ( len(V_Arr) , len(x_Arr) ) )

    for row, ( vv , nh , ta ) in enumerate( zip( V_Arr , logNH_Arr , logta_Arr ) ):
        raw_line  = Linear_3D_interpolator( vv , nh , ta , v_axis , nh_axis , ta_axis , grid_cube )
        resampled = np.interp( x_Arr , x_axis , raw_line , left=0.0 , right=0.0 )
        norm      = np.trapz( resampled , x_Arr )
        profiles[row] = np.absolute( resampled / norm )

    return profiles
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Interpolate_Lines_Arrays_3D_grid_MCMC( V_Value , logNH_Value , logta_Value , x_Arr , Grid_Dictionary ):
    """Single-sample version of Interpolate_Lines_Arrays_3D_grid, for MCMC/PSO loops.

    Interpolates one profile at (V_Value, logNH_Value, logta_Value),
    resamples it onto x_Arr (zero outside the grid's frequency range) and
    returns it normalised to unit integral over x_Arr.
    """
    grid_cube = Grid_Dictionary['Grid']
    v_axis    = Grid_Dictionary['V_Arr']
    x_axis    = Grid_Dictionary['x_Arr']
    nh_axis   = Grid_Dictionary['logNH_Arr']
    ta_axis   = Grid_Dictionary['logta_Arr']

    raw_line  = Linear_3D_interpolator( V_Value , logNH_Value , logta_Value , v_axis , nh_axis , ta_axis , grid_cube )
    resampled = np.interp( x_Arr , x_axis , raw_line , left=0.0 , right=0.0 )

    return np.absolute( resampled / np.trapz( resampled , x_Arr ) )
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def pre_treatment_Line_profile_MCMC( Geometry , V_Value , logNH_Value , ta_Value ):
    """Sanitise a single (V, logNH, ta) sample before grid interpolation.

    Clamps each parameter just inside the tabulated grid edges and, for the
    Bicone_X_Slab geometry, resolves the low-V / high-logNH corner that is
    absent from the grid by moving the sample to the nearest valid edge.

    Returns the (possibly clamped) values plus a flag that is True only when
    every input was finite.
    """
    Bool_good = np.isfinite( V_Value ) * np.isfinite( logNH_Value ) * np.isfinite( ta_Value )

    if Geometry in [ 'Bicone_X_Slab' ]:
        if V_Value <= 100.0 and logNH_Value >= 20.5 :
            # Diagonal frontier of the grid in the (logNH, V) plane.
            slope = -100/1.5
            frontier_V = logNH_Value * slope + ( 100 - ( slope * 20.5 ) )
            if V_Value > frontier_V :
                V_Value = 100.0001
            else :
                logNH_Value = 20.4999999

    # Push samples sitting exactly on (or beyond) the grid edges slightly inside.
    if V_Value <= 10.0 : V_Value = 10.000001
    if V_Value >= 1000.0 : V_Value = 999.999999
    if logNH_Value < 17.0 : logNH_Value = 17.000001
    if logNH_Value >= 22.0 : logNH_Value = 21.999999
    if ta_Value < 10**(-3.75 ) : ta_Value = 10**(-3.749999999)
    if ta_Value >= 10**(-0.125) : ta_Value = 10**(-0.125000001)

    return V_Value , logNH_Value , ta_Value , Bool_good
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def RT_Line_Profile_MCMC( Geometry , wavelength_Arr , V_Value , logNH_Value , ta_Value , DATA_LyaRT ):
    '''
    Return one and only one Lyman alpha line profile for a given outflow properties.
    This function is especial to run MCMCs or PSO.

    Parameters
    ----------
    Geometry : string
        The outflow geometry to use: Options: 'Thin_Shell',
        'Galactic_Wind' , 'Bicone_X_Slab'.
    wavelength_Arr : 1-D sequence of floats
        Array with the wavelength vales where the line
        profile is computed. The units are meters, i.e.,
        amstrongs * 1.e-10.
    V_Value : float
        Value of the expansion velocity of the outflow. The unit
        are km/s.
    logNH_Value : float
        Value of the logarithim of the outflow neutral hydrogen
        column density. The units of the colum density are in c.g.s,
        i.e, cm**-2.
    ta_Value : float
        Value of the dust optic depth of the outflow.
    DATA_LyaRT : Dictionay
        This dictonary have all the information of the grid.
        This dictionary can be loaded with the function :
        load_Grid_Line, for example:

        DATA_LyaRT = load_Grid_Line( 'Thin_Shell' )

    Returns
    -------
    line_Arr : 1-D sequence of float
        The Lyman alpha line profile. If any of the inputs is not
        finite, an array of NaNs with the length of wavelength_Arr.
    '''
    V_Value , logNH_Value , ta_Value , Bool_good = pre_treatment_Line_profile_MCMC( Geometry , V_Value , logNH_Value , ta_Value )

    if Bool_good :
        logta_Value = np.log10( ta_Value )
        x_Arr = convert_lamda_into_x( wavelength_Arr )
        line_Arr = Interpolate_Lines_Arrays_3D_grid_MCMC( V_Value , logNH_Value , logta_Value , x_Arr , DATA_LyaRT )
    else :
        # BUG FIX: the original used len(x_Arr) here, but x_Arr was only
        # defined inside the Bool_good branch, so non-finite inputs raised a
        # NameError instead of returning NaNs. convert_lamda_into_x is assumed
        # element-wise (same output length), consistent with its use above.
        line_Arr = np.ones( len( wavelength_Arr ) ) * np.nan

    return line_Arr
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def pre_treatment_Line_profile( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr ):
    """Vectorised sanitisation of (V, logNH, ta) samples prior to interpolation.

    Keeps only samples where all three parameters are finite, clamps each
    surviving value just inside the grid edges and, for Thin_Shell and
    Bicone_X_Slab, resolves the low-V / high-logNH corner that is absent
    from the grid.

    Returns the cleaned parameter arrays, the bicone flags restricted to the
    surviving samples (all ones when Inside_Bicone_Arr is None), and the
    boolean mask of surviving samples.
    """
    V_Arr     = np.atleast_1d( V_Arr     ).astype(float)
    logNH_Arr = np.atleast_1d( logNH_Arr ).astype(float)
    ta_Arr    = np.atleast_1d( ta_Arr    ).astype(float)

    mask_good = np.isfinite( V_Arr ) * np.isfinite( logNH_Arr ) * np.isfinite( ta_Arr )
    assert sum( mask_good ) != 0 , 'All the V-logNH-ta combinations are np.nan, -np.inf or np.inf'

    V_Arr_used     = V_Arr[ mask_good ]
    logNH_Arr_used = logNH_Arr[ mask_good ]
    ta_Arr_used    = ta_Arr[ mask_good ]

    #============================================#
    if Geometry in ['Thin_Shell' , 'Bicone_X_Slab']:
        low_V   = V_Arr_used < 100.0
        high_NH = logNH_Arr_used >= 20.5
        # Diagonal frontier of the grid in the (logNH, V) plane.
        frontier_V = logNH_Arr_used * ( -100/1.5 ) + ( 100 - ( -100/1.5 * 20.5 ) )
        above = V_Arr_used > frontier_V
        V_Arr_used[     low_V * high_NH *  above ] = 100.000001
        logNH_Arr_used[ low_V * high_NH * ~above ] = 20.499999
    #============================================#

    # Push values sitting on (or beyond) the grid edges slightly inside.
    V_Arr_used[     V_Arr_used     <= 10           ] = 10.000001
    V_Arr_used[     V_Arr_used     >= 1000         ] = 999.9999
    logNH_Arr_used[ logNH_Arr_used <= 17.0         ] = 17.0000001
    logNH_Arr_used[ logNH_Arr_used >= 22.0         ] = 21.9999
    ta_Arr_used[    ta_Arr_used    <= 10**(-3.75)  ] = 10**(-3.74999999)
    ta_Arr_used[    ta_Arr_used    >= 10**(-0.125) ] = 10**(-0.125000000001)

    if Inside_Bicone_Arr is None :
        Inside_Bicone_Arr = np.ones( len(V_Arr) )
    else :
        Inside_Bicone_Arr = np.atleast_1d( Inside_Bicone_Arr )

    In_Bool_used = Inside_Bicone_Arr[ mask_good ]

    return V_Arr_used , logNH_Arr_used , ta_Arr_used , In_Bool_used , mask_good
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def RT_Line_Profile( Geometry , wavelength_Arr , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr=None ):
    '''
    Return the Lyman alpha line profile for a given outflow properties.

    Parameters
    ----------
    Geometry : string
        The outflow geometry to use: Options: 'Thin_Shell',
        'Galactic_Wind' , 'Bicone_X_Slab'.
    wavelength_Arr : 1-D sequence of floats
        Array with the wavelength vales where the line
        profile is computed. The units are meters, i.e.,
        amstrongs * 1.e-10.
    V_Arr : 1-D sequence of float
        Array with the expansion velocity of the outflow. The unit
        are km/s.
    logNH_Arr : 1-D sequence of float
        Array with the logarithim of the outflow neutral hydrogen
        column density. The units of the colum density are in c.g.s,
        i.e, cm**-2.
    ta_Arr : 1-D sequence of float
        Array with the dust optic depth of the outflow.
    Inside_Bicone_Arr : optional 1-D sequence of bool
        This is useless if the geometry is not Bicone_X_Slab.
        An Array with booleans, indicating if the bicone is face-on
        or edge-on. If True then the bicone is face-on. If false the
        bicone is edge-on. The probability of being face on is
        np.cos( np.pi/4 ).

        .. versionadded:: 0.0.3

    Returns
    -------
    lines_Arr : 2-D sequence of float
        The Lyman alpha line profiles. lines_Arr[i] is the line profile
        computed at the wavelengths wavelength_Arr for wich V_Arr[i] ,
        logNH_Arr[i] , ta_Arr[i] , Inside_Bicone_Arr[i]. Rows whose
        inputs were not finite are left filled with NaN.
    '''
    assert Geometry in [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab' ] , 'The requested geoemtry ' + Geometry + ' is not available. The geometries supported are : Thin_Shell , Galactic_Wind , Bicone_X_Slab'

    V_Arr = np.atleast_1d( V_Arr )
    logNH_Arr = np.atleast_1d( logNH_Arr )
    ta_Arr = np.atleast_1d( ta_Arr )

    x_Arr = convert_lamda_into_x( wavelength_Arr )

    # Output starts as all-NaN; rows are filled only for finite samples.
    lines_Arr = np.zeros( len(V_Arr) * len( x_Arr ) ).reshape( len(V_Arr) , len( x_Arr ) ) * np.nan

    # Keep only finite samples, clamped just inside the grid edges.
    V_Arr_used , logNH_Arr_used , ta_Arr_used , In_Bool_used , mask_good = pre_treatment_Line_profile( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr )

    logta_Arr_used = np.log10( ta_Arr_used )

    ##############################
    if Geometry in [ 'Thin_Shell' , 'Galactic_Wind' ] :
        # Single grid: interpolate all surviving samples at once.
        DATA_LyaRT = load_Grid_Line( Geometry )
        tmp_lines_Arr = Interpolate_Lines_Arrays_3D_grid( V_Arr_used , logNH_Arr_used , logta_Arr_used , x_Arr , DATA_LyaRT )
    ##############################
    if Geometry in [ 'Bicone_X_Slab' ] :
        assert not Inside_Bicone_Arr is None , 'Error. Human is dead. Mismatch. \nIf the goemetry is Bicone_X_Slab then it is compulsory to define Inside_Bicone_Arr when colling this function.'
        Inside_Bicone_Arr = np.atleast_1d( Inside_Bicone_Arr )
        tmp_lines_Arr = np.zeros( len( V_Arr_used ) * len( x_Arr ) ).reshape( len( V_Arr_used ) , len( x_Arr ) )
        # Face-on (inside) and edge-on (outside) sightlines live in separate
        # grids; interpolate each subset against its own grid, then merge.
        DATA_LyaRT_in = load_Grid_Line( Geometry , INSIDE_BICONE=True )
        DATA_LyaRT_out = load_Grid_Line( Geometry , INSIDE_BICONE=False )
        lines_Arr_in = Interpolate_Lines_Arrays_3D_grid( V_Arr_used[ In_Bool_used] , logNH_Arr_used[ In_Bool_used] , logta_Arr_used[ In_Bool_used] , x_Arr , DATA_LyaRT_in )
        lines_Arr_out = Interpolate_Lines_Arrays_3D_grid( V_Arr_used[~In_Bool_used] , logNH_Arr_used[~In_Bool_used] , logta_Arr_used[~In_Bool_used] , x_Arr , DATA_LyaRT_out )
        tmp_lines_Arr[ In_Bool_used] = lines_Arr_in
        tmp_lines_Arr[~In_Bool_used] = lines_Arr_out

    # Scatter the computed rows back into their original positions.
    lines_Arr[ mask_good ] = tmp_lines_Arr

    return lines_Arr
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Print_the_grid_edges():
    """Print the parameter ranges covered by the escape-fraction and line-profile grids.

    Informational only; returns None. The print() calls below are valid in
    both Python 2 (single parenthesised argument) and Python 3, unlike the
    original Python-2-only `print 'x'` statements.
    """
    print ('')
    print (' Hi,')
    print ('')
    print (' The expanssion velocity V_exp and neutral hydrogen column density logNH are the same in the escape fraction and line profile grids. However, the optical depth of dust tau_a is different.')
    print ('')
    print (' V_exp [ km/s ] = [ 0 , 10 , ... , 90 , 100 , 150 , 200 , ... , 950 , 1000 ]')
    print ('')
    print (' Bicone_X_Slab :')
    print ('')
    print (' For V_exp < 100 km/s the logNH [ cm**-2 ] = [ 17.0 , 17.25 , ... , 20.25 , 20.5 ]')
    print (' ')
    print (' For V_exp >= 100 km/s the logNH [ cm**-2 ] = [ 17.0 , 17.25 , ... , 21.75 , 22.0 ]')
    print ('')
    print (' Thin_Shell and Galactic_Wind :')
    print ('')
    print (' logNH [ cm**-2 ] = [ 17.0 , 17.25 , ... , 21.75 , 22.0 ]')
    print ('')
    print (' ')
    print (' For the escape fraction : tau_a = [ -3. , -2. , -1.5 , -1.0 , -0.75 , -0.5 , -0.25 , -0.0 ]')
    print (' ')
    print (' For the line profile : tau_a = [ -0.125 , -0.25 , -0.375 , -0.5 , -0.625 , -0.75 , -0.875 , -1.0 , -1.125 , -1.25 , -1.375 , -1.5 , -1.75 , -2.0 , -2.25 , -2.5 , -2.75 , -3.0 , -3.25 , -3.5 , -3.75 ]')
    print ('')
    print (' Have a nice day!')
    print (' El. PSY. CONGROO.')
    print ('')
    return
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Test_1( ):
    """Smoke-test every supported configuration of RT_f_esc and RT_Line_Profile.

    Downloads the grid data if it is missing, then evaluates random parameter
    draws for each geometry / mode / algorithm combination, printing a
    success or failure line per configuration. Diagnostic only: failures are
    printed, never raised. NOTE: Python-2 print statements (trailing commas
    suppress the newline).
    """
    print '\nChecking if all the files are found...',
    bool_files = Check_if_DATA_files_are_found()
    print 'Done!'
    if bool_files :
        print ' Every file was found. that is great!'
    if not bool_files :
        print ' Missing files.... Let us download them... ;)'
        Download_data()
    print '\n Now that we are sure that the data is downloaded in your machine...'
    print '\n Let us check every different configuration for computing the escape fraction and the line profiles.'
    Geometry_set = [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab' ]
    ML_codes_set = [ 'Tree' , 'Forest' , 'KN' ]
    MODE_set = [ 'Parametrization' , 'Raw' , 'Analytic' ]
    Algorithm_set = [ 'Intrepolation' , 'Machine_Learning' ]
    # First, check that the escape fractions work
    N_points = int( 1e4 )
    # Random draws spanning the whole grid ranges (V, logNH, log tau_a).
    V_Arr = np.random.rand( N_points ) * 1000 + 0.0
    logNH_Arr = np.random.rand( N_points ) * 5 + 17.0
    logta_Arr = np.random.rand( N_points ) * 4.5 - 4.0
    In_Arr = np.random.rand( N_points ) > 0.5
    print '\nComputing', N_points , 'random configurations of escape fraction with each algorithms...\n'
    for Geo in Geometry_set:
        for Mod in MODE_set :
            if not Mod in [ 'Analytic' ]:
                for Algo in Algorithm_set:
                    if Algo in [ 'Intrepolation' , 'Machine_Learning' ]:
                        if Algo == 'Machine_Learning' :
                            # One run per machine-learning backend.
                            for machine in ML_codes_set :
                                try:
                                    print ' Running : ' , Geo , Mod , Algo , machine ,
                                    fff = RT_f_esc( Geo , V_Arr , logNH_Arr , 10**logta_Arr , Inside_Bicone_Arr=In_Arr , MODE=Mod , Algorithm=Algo , Machine_Learning_Algorithm=machine)
                                    # A configuration passes when no NaNs come back.
                                    assert np.sum( np.isnan( fff ) ) == 0
                                    print '--> Success!!'
                                except:
                                    print '--> ERROR. HUMAN IS DEAD. MISMATCH!!'
                        if Algo != 'Machine_Learning' :
                            try:
                                print ' Running : ' , Geo , Mod , Algo ,
                                fff = RT_f_esc( Geo , V_Arr , logNH_Arr , 10**logta_Arr , Inside_Bicone_Arr=In_Arr , MODE=Mod , Algorithm=Algo )
                                assert np.sum( np.isnan( fff ) ) == 0
                                print '--> Success!!'
                            except:
                                print '--> ERROR. HUMAN IS DEAD. MISMATCH!!'
            if Mod in [ 'Analytic' ]:
                # Analytic mode takes no Algorithm / bicone arguments.
                try:
                    print ' Running : ' , Geo , Mod ,
                    fff = RT_f_esc( Geo , V_Arr , logNH_Arr , 10**logta_Arr , MODE=Mod )
                    assert np.sum( np.isnan( fff ) ) == 0
                    print '--> Success!!'
                except:
                    print '--> ERROR. HUMAN IS DEAD. MISMATCH!!'
    # Fewer draws for the (more expensive) line-profile checks.
    N_points = int( 1e3 )
    print '\nComputing', N_points , 'random configurations of line profile with each algorithms...\n'
    V_Arr = np.random.rand( N_points ) * 1000 + 0
    logNH_Arr = np.random.rand( N_points ) * 5 + 17.0
    logta_Arr = np.random.rand( N_points ) * 5.5 - 4.75
    In_Arr = np.random.rand( N_points ) > 0.5
    # +/- 20 Angstrom window around Lya, expressed in meters.
    wavelength_Arr = np.linspace( 1215.68 - 20 , 1215.68 + 20 , 1000 ) * 1e-10
    RUN_TEST_Lines = True
    if RUN_TEST_Lines :
        for Geo in Geometry_set:
            print ' Running : ' , Geo ,
            try:
                qq = RT_Line_Profile( Geo , wavelength_Arr , V_Arr , logNH_Arr , 10**logta_Arr , Inside_Bicone_Arr=In_Arr )
                assert np.sum( np.isnan( qq ) ) == 0
                print '--> Success!!'
            except:
                print '--> ERROR. HUMAN IS DEAD. MISMATCH!!'
    return
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Test_2( ):
    """Plot demo line profiles and escape fractions for each geometry.

    Opens matplotlib windows (blocks on show()); intended as a visual sanity
    check after installation, not an automated test. NOTE: Python-2 prints.
    """
    from pylab import *
    print '\n Let us make some plots. This will show you just a glimpse of what LyaRT;Grid can do. Just wait for it...'
    # Plot some nice line profiles
    print '\n Plotting some line profiles...'
    # +/- 20 Angstrom window around Lya, in meters.
    wavelength_Arr = np.linspace( 1215.68 - 20 , 1215.68 + 20 , 1000 ) * 1e-10
    V_Arr = np.array( [ 10 , 50 , 100 , 200 , 300 ] )
    logNH_Arr = np.array( [ 20.0 ] * len( V_Arr ) )
    logta_Arr = np.array( [ -1. ] * len( V_Arr ) )
    # All sightlines face-on (array of True).
    Inside_Bicone_Arr = np.zeros( len(V_Arr) ) == 0
    cm = get_cmap( 'rainbow' )
    for geo in [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab' ]:
        qq = RT_Line_Profile( geo , wavelength_Arr , V_Arr , logNH_Arr , 10.**logta_Arr , Inside_Bicone_Arr=Inside_Bicone_Arr )
        figure()
        ax_ax = subplot(111)
        # One curve per expansion velocity, coloured along the rainbow map.
        for i in range( 0 ,len( V_Arr ) ):
            ax_ax.plot( wavelength_Arr*1e10 , qq[i] , color=cm( i*1./( len(V_Arr) -1 ) ) , label=r'$\rm V_{exp} = '+ str(V_Arr[i]) +'km/s$ ' , lw=2 )
        texto = r'$\rm N_{H} = 10^{20} cm^{-2}$' + '\n' + r'$\rm \tau_{a} = 0.1$'
        ax_ax.text( .95 , 0.45 , texto , verticalalignment='top', horizontalalignment='right', transform=ax_ax.transAxes, fontsize=20 )
        ax_ax.set_title( r'$\rm Geometry = $' + geo , size=20 )
        ax_ax.set_ylabel( r'$\rm Flux [a.u.]$' , size=20 )
        ax_ax.set_xlabel( r'$\rm Wavelength [\AA]$' , size=20 )
        ax_ax.set_xlim( 1212.5 , 1222.5 )
        ax_ax.legend(loc=0)
    print '\n Plotting some escape fractions...'
    logta_Arr = np.linspace( -2 , 0.5 , 20 )
    logNH_Arr = [20.0] * len( logta_Arr )
    for geo in [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab' ] :
        figure()
        ax_ax = subplot(111)
        for i in range( 0 , len(V_Arr) ):
            V_Arr_tmp = [ V_Arr[i] ] * len( logta_Arr )
            # Solid curve: all sightlines inside the bicone (face-on).
            Inside_Bicone_Arr = np.zeros( len( logta_Arr ) ) == 0
            f_esc = RT_f_esc( geo , V_Arr_tmp , logNH_Arr , 10**logta_Arr , Inside_Bicone_Arr=Inside_Bicone_Arr)
            ax_ax.plot( logta_Arr , f_esc , color=cm( i*1./( len(V_Arr) -1 ) ) , label=r'$\rm V_{exp} = '+ str(V_Arr[i]) +'km/s$ ' , lw=2 )
            # Dashed curve: all sightlines outside the bicone (edge-on).
            Inside_Bicone_Arr = np.zeros( len( logta_Arr ) ) == 1
            f_esc = RT_f_esc( geo , V_Arr_tmp , logNH_Arr , 10**logta_Arr , Inside_Bicone_Arr=Inside_Bicone_Arr)
            ax_ax.semilogy( logta_Arr , f_esc , '--' , color=cm( i*1./( len(V_Arr) -1 ) ) , lw=2 )
        ax_ax.set_xlabel( r'$\rm \log \tau_a$' , size=20 )
        ax_ax.set_ylabel( r'$f_{\rm esc} ^{\rm Ly \alpha} $' , size=20 )
        texto = r'$\rm N_{H} = 10^{20} cm^{-2}$'
        ax_ax.text( .5 , 0.05 , texto , verticalalignment='bottom', horizontalalignment='left', transform=ax_ax.transAxes, fontsize=20 )
        ax_ax.set_title( r'$\rm Geometry = $' + geo , size=20 )
        legend( loc=0 )
    show()
    return
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Test_Installation( Make_Plots=True ):
    """Run the installation self-tests; optionally show the plotting demo.

    Warnings are silenced because the tests deliberately probe edge
    configurations of the grids.
    """
    import warnings
    warnings.filterwarnings("ignore")

    Test_1()
    if Make_Plots :
        Test_2()
    return
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
# Library module: importing it has no side effects and there is no CLI entry point.
if __name__ == '__main__':
    pass
| 2.578125 | 3 |
keybert/_highlight.py | aucan/KeyBERT | 0 | 12772969 | from rich.console import Console
from rich.highlighter import RegexHighlighter
import string
class NullHighlighter(RegexHighlighter):
    """Rich highlighter that matches nothing, so only explicit [markup] tags colour the output."""
    base_style = ""
    highlights = [r""]
def clean_text(astr):
    """Strip punctuation, collapse whitespace, lowercase, and pad with single spaces.

    The surrounding spaces let callers match whole words with str.replace.
    """
    without_punct = astr.translate(str.maketrans('', '', string.punctuation))
    normalized = ' '.join(without_punct.split()).lower()
    return f' {normalized} '
def highlight_document(doc, keywords):
    """Print *doc* to the console with every keyword wrapped in black-on-yellow markup.

    *keywords* is an iterable of (keyword, score) pairs; scores are ignored.
    Both the document and each keyword are normalised with clean_text before
    matching, so matching is punctuation- and case-insensitive.
    """
    text = clean_text(doc)
    for keyword, _score in keywords:
        padded = clean_text(keyword)
        marked = f' [black on #FFFF00]{padded.strip()}[/] '
        text = text.replace(padded, marked)
    Console(highlighter=NullHighlighter()).print(text.strip())
| 2.796875 | 3 |
tutorial/courses/serializers.py | MaggieChege/Courses-DRF | 0 | 12772970 | from rest_framework import serializers
from . import models
class ReviewSerializer(serializers.ModelSerializer):
    """Serialises Review instances for the API."""
    class Meta:
        # The reviewer's email is accepted on input but never exposed in responses.
        extra_kwargs = {
            'email':{'write_only':True }
        }
        fields=('id',
                'course',
                'name',
                'email',
                'review',
                'rating',
                'created_at'
        )
        model = models.Review
class CourseSerializer(serializers.ModelSerializer):
    """Serialises Course instances, exposing only the title and hyperlink."""
    class Meta:
        fields = ('title',
                  'url'
        )
        model =models.Course
| 2.078125 | 2 |
1299.replace-elements-with-greatest-element-on-right-side.py | dely2p/Leetcode | 1 | 12772971 | <reponame>dely2p/Leetcode
#
# @lc app=leetcode id=1299 lang=python3
#
# [1299] Replace Elements with Greatest Element on Right Side
#
# @lc code=start
class Solution:
    def replaceElements(self, arr: List[int]) -> List[int]:
        """Replace arr[i] with the greatest element to its right; the last element becomes -1.

        Single reverse pass carrying the running suffix maximum: O(n) time,
        replacing the original O(n^2) max-of-slice per element (which also
        kept a never-used `index` counter).
        """
        result = [0] * len(arr)
        suffix_max = -1
        for i in range(len(arr) - 1, -1, -1):
            result[i] = suffix_max
            suffix_max = max(suffix_max, arr[i])
        return result
# @lc code=end
| 3.390625 | 3 |
app/api/__init__.py | mortimer2015/simple-flask-web | 1 | 12772972 | # -*- coding: UTF-8 -*-
__author__ = 'hunter'
from flask import Blueprint
from app.util.response_util import code_handle
from app.util.login_util import login
# Blueprint for the JSON API; registered into the app by the application factory.
api = Blueprint('api', __name__)

# Normalise response codes after every request; enforce login before each one.
api.after_app_request(code_handle)
api.before_app_request(login)

# Imported last, for side effects: the view/error modules attach their
# handlers to `api`, so importing earlier would be a circular import.
from . import views, errors
detective/detectiveProject/server.py | TzvetomirTz/FortWatch | 0 | 12772973 | <reponame>TzvetomirTz/FortWatch
import flask
from ml import model
import os
from flask import Flask, request
from ml import model
from werkzeug.utils import secure_filename
# Single Flask application instance for the detector micro-service.
app = Flask(__name__)
@app.route("/check/", methods=['POST'])
def checkImage():
    """Save the uploaded image next to this file and return the model's people count.

    NOTE(review): the fixed "sample.jpg" name means concurrent requests
    overwrite each other -- confirm this service handles one request at a time.
    """
    uploaded = request.files['image']
    module_dir = os.path.dirname(os.path.realpath(__file__))
    target_path = os.path.join(module_dir, secure_filename("sample.jpg"))
    uploaded.save(target_path)
    return str(model.hmPeopleIn(target_path))
@app.route("/health/", methods=["GET"])
def checkServerHealth():
    """Liveness probe: a 200 response with this fixed body means the service is up."""
    return "Up and running!"
# Bind on all interfaces so the service is reachable from outside the host/container.
# NOTE(review): no TLS or auth here -- assumed to run on a trusted network.
app.run(host="0.0.0.0")
| 2.0625 | 2 |
start.py | Victor804/Sky-s-Land | 1 | 12772974 | from game_files.character import Character
from game_files.menu import Menu
from game_files.game import Game
# Show the main menu, then dispatch on the button the player clicked.
menu = Menu()
button = menu.main()

if (button == "play"):
    # Start a new game and draw its map.
    game = Game()
    game.maps_draw()
elif (button == "saves"):
    menu.saves()
elif (button == "options"):
    menu.options()
| 2.78125 | 3 |
card/card.py | nju161250102/NJUSystem_Server | 1 | 12772975 | <gh_stars>1-10
# -*- coding:utf-8 -*-
from flask import Blueprint
from flask import request
from flask import Response
import requests
import json
import datetime
from bs4 import BeautifulSoup
cardModule = Blueprint('card', __name__)
# 登录并从校园卡自助服务系统导入数据
# Log in and import transaction data from the campus-card self-service system
@cardModule.route('/import/', methods=['POST'])
def import_data():
    """Log in to CER with the supplied credentials/captcha and scrape all card
    transactions between startMonth and endMonth.

    Request body (JSON): name, password, code (captcha), startMonth, endMonth
    (both "YYYY/MM"). Returns a JSON object {"details": [...]} with one entry
    per transaction row.

    BUG FIX: the original `while True` loop never checked the pager, so it
    looped forever, discarded every parsed row and returned nothing; the
    pagination/accumulation logic below mirrors oa_record().
    """
    req_data = json.loads(request.data)
    post_data = {
        "IDToken1": req_data["name"],
        "IDToken2": req_data["password"],
        "inputCode": req_data["code"],
        "encoded": "false"
    }
    # Re-use the captcha cookie previously issued by /ver_code
    jar = requests.utils.cookiejar_from_dict(request.cookies)
    # Disable redirects so the login cookies can be captured
    login_req = requests.post("http://cer.nju.edu.cn/amserver/UI/Login", data=post_data, cookies=jar, allow_redirects=False)
    jar = requests.utils.cookiejar_from_dict(login_req.cookies)
    card_number_req = requests.get('http://cpay.nju.edu.cn/pay/ykt/cardstatus', cookies=jar)
    card_number = json.loads(card_number_req.content)["data"]["cardno"]

    details = []
    page_num = 0
    while True:
        # Build the POST payload for the current result page
        post_data = {
            "beginDate": req_data["startMonth"] + "/01",
            "endDate": req_data["endMonth"] + "/01",
            "cardId": card_number,
            "serialType": 1,
            "page": page_num,
        }
        r1 = requests.post("https://oa.nju.edu.cn/ecard/njuLogin", data=post_data, cookies=jar, allow_redirects=False)
        r2 = requests.post(
            "https://oa.nju.edu.cn/ecard/web/" + r1.cookies.get(
                "LOGIN") + "/1?p_p_id=querydetail&p_p_action=1&p_p_state=maximized&p_p_mode=view&_querydetail_struts_action=%2Fext%2Fecardtransactionquerydetail_result"
            , data=post_data, cookies=r1.cookies, verify=False)
        # Parse the HTML result table
        soup = BeautifulSoup(r2.text, "html.parser")
        trs = soup.find_all(name='tr', attrs={"class": ["tr_1", "tr_2"]})
        for tr in trs:
            _soup = BeautifulSoup(str(tr), "html.parser")
            tds = _soup.find_all(name='td')
            details.append({
                "transName": tds[0].get_text(strip=True),
                "termName": tds[1].get_text(strip=True),
                "transTime": tds[3].get_text(strip=True),
                "amount": tds[4].get_text(strip=True),
                "balance": tds[5].get_text(strip=True),
            })
        # Stop when the "current/total" pager shows the last page
        page_soup = soup.find_all(name='td', attrs={"class": "text_brown", "align": "center"})
        page_flag = page_soup[0].get_text(strip=True)
        if page_flag.split('/')[0] == page_flag.split('/')[1]:
            break
        page_num += 1

    return json.dumps({"details": details}, ensure_ascii=False)
# Gather campus-card information from three upstream endpoints
@cardModule.route('/info/', methods=['GET'])
def info():
    """Aggregate holder name, balance, first bound bank card and card status.

    Forwards the caller's session cookies to the three campus endpoints.
    NOTE(review): upstream response schemas are assumed from the field names
    used below -- confirm against the actual APIs.
    """
    jar = requests.utils.cookiejar_from_dict(request.cookies)
    # Holder name and balance
    r1 = requests.get('http://mapp.nju.edu.cn/mobile/getCardInfo.mo', cookies=jar)
    # print r1.content
    # Bound bank cards
    r2 = requests.get('http://cpay.nju.edu.cn/pay/bankcard/list', cookies=jar)
    # print r2.content
    # Card status flags
    r3 = requests.get('http://cpay.nju.edu.cn/pay/ykt/cardstatus', cookies=jar)
    # print r3.content
    data = json.loads(r3.content)["data"]
    # Later checks win when several flags are set: frozen < locked < lost
    status = "正常"
    if data["frozenflag"] != "0":
        status = "冻结"
    if data["lockflag"] != "0":
        status = "锁定"
    if data["lossflag"] != "0":
        status = "挂失"
    res_data = {
        "name": json.loads(r1.content, encoding='utf-8')["data"]["name"],
        "balance": json.loads(r1.content, encoding='utf-8')["data"]["balance"],
        "bankCardNo": json.loads(r2.content, encoding='utf-8')["data"]["bankcards"][0]["bankcardno"],
        # Python 2 idiom: decode the UTF-8 byte string to unicode for json.dumps
        "status": status.decode('utf-8')
    }
    return json.dumps(res_data, ensure_ascii=False)
# Endpoint https://oa.nju.edu.cn/ecard/web/ -- the upstream service is very slow
@cardModule.route('/record/', methods=['GET'])
def oa_record():
    """Scrape the OA e-card portal for transactions in [from, to] and summarise.

    Query params: from / to as "YYYY-MM-DD". Returns JSON with the raw rows
    ("details"), per-day spending buckets ("daily"), and formatted income /
    expense totals.
    """
    # Read the query parameters and convert to the upstream date format (YYYY/MM/DD)
    start_date = request.args.get("from").replace("-", "/")
    end_date = request.args.get("to").replace("-", "/")
    jar = requests.utils.cookiejar_from_dict(request.cookies)
    details = []
    date_num = datetime.datetime.strptime(end_date, '%Y/%m/%d') - datetime.datetime.strptime(start_date, '%Y/%m/%d')
    # One spending bucket per day in the requested range (inclusive)
    daily = [0] * (date_num.days + 1)
    page_num = 0
    income = 0
    expense = 0
    # Fetch the campus-card number
    r1 = requests.get('http://cpay.nju.edu.cn/pay/ykt/cardstatus', cookies=jar)
    card_no = json.loads(r1.content)["data"]["cardno"]
    while True:
        # Build the POST payload for the current result page
        post_data = {
            "beginDate": start_date,
            "endDate": end_date,
            "cardId": card_no,
            "serialType": 1,
            "page": page_num,
        }
        r = requests.post("https://oa.nju.edu.cn/ecard/njuLogin", data=post_data, cookies=jar, allow_redirects=False)
        rr = requests.post(
            "https://oa.nju.edu.cn/ecard/web/"+r.cookies.get("LOGIN")+"/1?p_p_id=querydetail&p_p_action=1&p_p_state=maximized&p_p_mode=view&_querydetail_struts_action=%2Fext%2Fecardtransactionquerydetail_result"
            , data=post_data, cookies=r.cookies, verify=False)
        # Parse the HTML result table
        soup = BeautifulSoup(rr.text, "html.parser")
        trs = soup.find_all(name='tr', attrs={"class": ["tr_1", "tr_2"]})
        for tr in trs:
            _soup = BeautifulSoup(str(tr), "html.parser")
            tds = _soup.find_all(name='td')
            item = {
                "transName": tds[0].get_text(strip=True),
                "termName": tds[1].get_text(strip=True),
                "transTime": tds[3].get_text(strip=True),
                "amount": tds[4].get_text(strip=True),
                "balance": tds[5].get_text(strip=True),
            }
            # Negative amounts are expenses; accumulate into the daily buckets
            if float(item["amount"]) < 0:
                expense += (- float(item["amount"]))
                delta = datetime.datetime.strptime(item["transTime"], '%Y-%m-%d %H:%M:%S') - datetime.datetime.strptime(start_date, '%Y/%m/%d')
                daily[delta.days] += (- float(item["amount"]))
            else:
                income += float(item["amount"])
            details.append(item)
        # Stop when the "current/total" pager text shows the last page
        page_soup = soup.find_all(name='td', attrs={"class": "text_brown", "align": "center"})
        page_flag = page_soup[0].get_text(strip=True)
        if page_flag.split('/')[0] == page_flag.split('/')[1]:
            break
        page_num += 1
    return json.dumps({
        "details": details,
        "daily": daily,
        "income": "%.2f" % income,
        "expense": "%.2f" % expense
    }, ensure_ascii=False)
# Fetch the captcha image and forward its cookies; the captcha belongs to the
# login page http://cer.nju.edu.cn/amserver/UI/Login
@cardModule.route('/ver_code', methods=['GET'])
def cer_code():
    """Proxy the CER captcha image, mirroring the upstream session cookies onto our response."""
    upstream = requests.get("http://cer.nju.edu.cn/amserver/verify/image.jsp")
    image_resp = Response(upstream.content, mimetype="image/jpg")
    for name, val in upstream.cookies.items():
        image_resp.set_cookie(name, val)
    return image_resp
# Number of whole days between two timestamps.
# time1: "YYYY-mm-dd"   time2: "yy-MM-dd HH:MM"
# Returns (time2 - time1).days
def compare_date(time1, time2):
    """Return the whole-day component of (time2 - time1)."""
    start = datetime.datetime.strptime(time1, '%Y-%m-%d')
    end = datetime.datetime.strptime(time2, '%y-%m-%d %H:%M')
    return (end - start).days
| 2.640625 | 3 |
medical_prescription/exam/models/newexam.py | ristovao/2017.2-Receituario-Medico | 11 | 12772976 | # Django
from django.db import models
# Local Django
from exam import constants
class NewExam(models.Model):
    """An exam entered free-form (not picked from a predefined catalogue)."""
    # Free-text description; length limit reuses the shared TUSS description constant.
    exam_description = models.CharField(max_length=constants.DESC_TUSS_MAX_LENGTH)
| 1.90625 | 2 |
cogs/ciphers.py | Whykiller/FrontStreetInformer-Discord-Bot | 0 | 12772977 | from assets.variables_and_imports import *
"""All Cipher classes that en-/decrypt"""
class Ciphers(commands.Cog):
    """Some general ciphers to play around with!"""

    def __init__(self) -> None:
        """Builds the letter<->index lookup tables used by the ciphers."""
        self.alphabet_dict = {c: i for i, c in enumerate(string.ascii_lowercase)}
        self.numbers_first_dict = {i: c for i, c in enumerate(string.ascii_lowercase)}

    def _map_letters(self, message: str, transform) -> str:
        """Apply *transform* (letter index -> letter index) to each letter of *message*.

        Non-letters (spaces, digits, punctuation) pass through unchanged.
        BUG FIX: the original indexed alphabet_dict with every character, so
        any space or digit raised KeyError (every multi-word message crashed),
        and it appended a space after each character because single characters
        were treated as whole words.
        """
        out = []
        for ch in message.lower():
            idx = self.alphabet_dict.get(ch)
            out.append(ch if idx is None else self.numbers_first_dict[transform(idx)])
        return "".join(out)

    @commands.command(pass_context=True)
    async def caesar(self, ctx, *args: str):
        """This encrypts your messages in the caesar variety, which shifts your letters by 3 and returns it."""
        message = " ".join(args).strip()
        new_message = self._map_letters(message, lambda i: (i + 3) % 26)
        await ctx.send(f"Your secret message reads:\n{new_message}")

    @commands.command(pass_context=True)
    async def atbash(self, ctx, *args: str):
        """This converts your message into the Atbash cipher, which takes the reverse of each letter."""
        message = " ".join(args).strip()
        new_message = self._map_letters(message, lambda i: 25 - i)
        await ctx.send(f"Your secret message reads:\n {new_message}")
# Ad-hoc manual check of which characters the ciphers treat as punctuation.
if __name__ == "__main__":
    print(string.punctuation)
| 3.28125 | 3 |
ta2-eval/d3m_ta2s_eval/ta3ta2api/__init__.py | tonyjo/ubc_primitives | 0 | 12772978 | import sys, os
sys.path.append(os.getcwd()+'/d3m_ta2s_eval/ta3ta2api')
import pipeline_pb2 as pipeline__pb2
import primitive_pb2 as primitive__pb2
import problem_pb2 as problem__pb2
import value_pb2 as value__pb2
| 1.546875 | 2 |
visualizer.py | sgillen/sensel-visualizer | 0 | 12772979 | <gh_stars>0
#!/usr/bin/env python
##########################################################################
# MIT License
#
# Copyright (c) 2013-2017 Sensel, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
##########################################################################
#sgillen: you'll need to install the sensel API to run this program (https://github.com/sensel/sensel-api)
#once you have that installed simply call "python visualizer.py" and follow the prompts
#if you want to mess around with how sensitive the visualizer is change the MAX_FORCE variable defined right after all the imports
#started with the sensel API example 3, modified it to what you see now, this program visualizes and saves the raw force data coming from the sensel device
#there is some code to save the data as a numpy array or .mat file but it is commented out right now as these files tend to be rather large
#imports
#==============================================================================
#Sensel imports
import sys
sys.path.append('../sensel-api/sensel-lib-wrappers/sensel-lib-python')
import sensel
import binascii
import threading
import time
#matplotlib/numpy imports
#from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
import matplotlib.animation as animation
import scipy.io
#define some global variables
#==============================================================================
#MAX_FORCE is a constant you can tweak to change how sensitive the visualizer is.
#MAX_FORCE = 8192 #This is allegedly the maximum allowable force
#MAX_FORCE = 500 # this is the maximum force I've seen in tests, when pushing very very hard
MAX_FORCE = 12.5 # this is what I've found makes the animations look nice
# we store each array we get from the sensel here
force_image_list = []
#sensel functions, ripped from example 3
#==============================================================================
def waitForEnter():
    """Block until the user presses Enter, then signal shutdown via the flag.

    Intended to run on a helper thread; sets the module-level
    `enter_pressed` flag once the user hits Enter.
    """
    global enter_pressed
    # raw_input blocks this thread; when it returns we flag the exit.
    raw_input("Press Enter to exit...")
    enter_pressed = True
def openSensel():
    """Open the first attached Sensel device.

    Returns the device handle, or None when no device is connected.
    """
    error, device_list = sensel.getDeviceList()
    if device_list.num_devices == 0:
        return None
    error, device_handle = sensel.openDeviceByID(device_list.devices[0].idx)
    return device_handle
def initFrame():
    """Configure the open device for pressure scanning and allocate a frame.

    Uses the module-level `handle`; returns the allocated frame object
    ready to receive scans.
    """
    sensel.setFrameContent(handle, sensel.FRAME_CONTENT_PRESSURE_MASK)
    err, new_frame = sensel.allocateFrameData(handle)
    sensel.startScanning(handle)
    return new_frame
def closeSensel(frame):
    """Release the frame buffer, stop scanning, and close the device handle.

    Tears down in the reverse order of openSensel/initFrame.
    """
    sensel.freeFrameData(handle, frame)
    sensel.stopScanning(handle)
    sensel.close(handle)
#scan frames is what is called during the animation, it pulls data from the sensel,
#puts it into a 2D numpy array, and then updates the animation
#==============================================================================
def scanFrames(dummy, frame, info):
    """FuncAnimation callback: drain all pending frames from the Sensel device.

    Every available frame is unpacked from the ctypes force_array into a
    (num_cols, num_rows) numpy array and appended to the global
    force_image_list; only the most recent frame is pushed to the image
    artist `im` for display.

    dummy -- frame counter supplied by FuncAnimation (unused)
    frame -- sensel frame buffer allocated by initFrame()
    info  -- sensor info struct (provides num_cols / num_rows)
    """
    global force_image_list
    force_image = np.zeros((info.num_cols, info.num_rows))
    error = sensel.readSensor(handle)
    (error, num_frames) = sensel.getNumAvailableFrames(handle)
    # Get all the available frames from the device; each one is copied into
    # the recording list so the session can be replayed/saved later.
    for n in range(num_frames):
        error = sensel.getFrame(handle, frame)
        # force_array is a flat, row-major ctypes buffer; transpose into
        # (col, row) order as it is copied out.
        for i in range(info.num_cols):
            for j in range(info.num_rows):
                force_image[i,j] = frame.force_array[j*info.num_cols + i]
        force_image_list.append(np.copy(force_image))
    # Display only the most recent frame (force_image holds the last one
    # written by the loop above; all zeros if no frames were available).
    im.set_array(force_image)
    return [im]
#this is sort of a janky wrapper function that allows us to save our animation to an mp4
def saveFrames(i):
    """FuncAnimation callback used during export: replay recorded frame i."""
    recorded = force_image_list[i]
    im.set_array(recorded)
    return [im]
#main method
#==============================================================================
if __name__ == '__main__':
    # Ask for a file name; leaving it blank disables saving entirely.
    file_name = raw_input("Enter a filename to save this session under (for example typing in movie will result in movie.mp4 and movie.csv files)\nleave this blank if you don't want to save anything \n")

    # Open the sensel and make sure that it worked.
    handle = openSensel()
    if handle is None:
        print "error opening sensor! exiting!"
        # NOTE(review): execution continues even when handle is None, so the
        # next call will fail -- consider sys.exit() here.
    (error, info) = sensel.getSensorInfo(handle)
    frame = initFrame()

    # Set up the canvas on which we will plot; vmax=MAX_FORCE controls
    # display sensitivity (see the constant defined above).
    fig = plt.figure()
    im = plt.imshow(np.zeros((info.num_cols, info.num_rows)), animated=True, vmin=0, vmax=MAX_FORCE)

    # Set up formatting for the movie files (requires ffmpeg on the PATH).
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)

    print "sensel is open and streaming data, x out figure one to quit and save data"

    # Launch the live animation; scanFrames records into force_image_list.
    # This continues until the user closes the figure window.
    ani = animation.FuncAnimation(fig, scanFrames , fargs = (frame,info), interval=50, blit=True)
    plt.show()

    if file_name:
        print "saving session, this may take a minute"
        # Replay the recorded frames through saveFrames to write the mp4.
        ani = animation.FuncAnimation(fig, saveFrames, frames=len(force_image_list), interval=50, blit=True)
        ani.save(file_name + ".mp4" , writer=writer)

    #    #this copies our list of force images into a 3d array (so we can save an load it more naturally)
    #    force_image_3d = np.array(force_image_list)

    #    #this will save the output as a numpy array
    #    np.save(file_name + ".npy", force_image_3d)

    #    #this will save this output as .mat file (for use with matlab)
    #    scipy.io.savemat(file_name + ".mat" , mdict={'sensel_data': force_image_3d})

    # Close up the device cleanly.
    closeSensel(frame)
    print "all done, the sensel is closed"
| 1.09375 | 1 |
jtmri/scripts/kidney_depth.py | jthacker/jtmri | 1 | 12772980 | import numpy as np
def distance_from_region(label_mask, distance_mask=None, scale=1, ord=2):
    """Find the distance at every point in an image from a set of labeled points.

    Parameters
    ==========
    label_mask : ndarray
        A boolean mask designating the points to find the distance from. A True
        value indicates that the pixel is in the region, a False value indicates
        it is not.
    distance_mask : ndarray
        A boolean mask indicating at which points to calculate the distance.
        Defaults to every point.
    scale : int or array-like
        Scale the calculated distance to another distance measure (eg. to
        millimeters); may be per-axis.
    ord : int
        Order of norm to use when calculating distance. See np.linalg.norm for
        more details. (Name kept for API compatibility, though it shadows the
        builtin.)

    Returns
    =======
    distances : ndarray
        A masked array of the same size as label_mask, masked by distance_mask.
        If label_mask contains no True points, every requested distance is +inf
        (previously this case raised a ValueError).
    """
    if distance_mask is None:
        distance_mask = np.ones(label_mask.shape, dtype=bool)
    assert label_mask.shape == distance_mask.shape
    scale = np.array(scale)
    output = np.zeros(label_mask.shape)
    indxs = np.indices(label_mask.shape)
    # Coordinate lists: X = points to evaluate, Y = labeled region points.
    X = indxs[:, distance_mask].T
    Y = indxs[:, label_mask].T
    if len(Y) == 0:
        # No labeled points: distance is undefined; report +inf rather than
        # crashing on min() of an empty array.
        output[distance_mask] = np.inf
    else:
        for x in X:
            # Distance from x to the nearest labeled point, after scaling.
            output[tuple(x)] = np.linalg.norm(scale * (x - Y), ord=ord, axis=1).min()
    return np.ma.array(output, mask=np.logical_not(distance_mask))
def contours(distances, contours=10):
    """Partition `distances` into equal-width bands between its min and max.

    Returns (band_masks, edges): band_masks[k] is a plain bool ndarray
    selecting unmasked points with edges[k] <= d < edges[k] + step; `edges`
    are the `contours` evenly spaced band boundaries.  Note the topmost edge
    value itself falls outside every band.
    """
    lo = distances.min()
    hi = distances.max()
    edges, step = np.linspace(lo, hi, contours, retstep=True)
    valid = np.logical_not(np.ma.getmaskarray(distances))
    band_masks = []
    for lower in edges[:-1]:
        in_band = valid & (distances >= lower) & (distances < (lower + step))
        band_masks.append(np.ma.getdata(in_band))
    return band_masks, edges
def plot_by_contours(arr, contour_masks, contour_vals, ax=None):
    """Plot the mean of `arr` within each contour band against band distance.

    Returns (line, x, y) where x are the lower band edges (contour_vals[:-1])
    and y the per-band means of `arr`.  A new figure/axes is created when
    `ax` is not supplied.
    """
    if ax is None:
        import pylab as pl
        _, ax = pl.subplots()
    band_edges = contour_vals[:-1]
    band_means = np.array([arr[m].mean() for m in contour_masks])
    ax.set_xlabel('Distance from surface (mm)')
    ax.set_ylabel('Mean R2* value')
    line = ax.plot(band_edges, band_means, 'o--')[0]
    return line, band_edges, band_means
def plot_by_distance(arr, distances, ax=None):
    """Scatter-plot values of `arr` against their (unmasked) distances.

    Returns the list of Line2D artists produced by ax.plot.  A new
    figure/axes is created when `ax` is not supplied.
    """
    assert arr.shape == distances.shape
    if ax is None:
        import pylab
        _, ax = pylab.subplots()
    valid = np.logical_not(np.ma.getmaskarray(distances))
    xs = distances[valid].ravel()
    ys = arr[valid].ravel()
    return ax.plot(xs, ys, 'o')
| 3.28125 | 3 |
mltrain-nips-2017/yang_shi/vqa-mxnet-gluon/VQAtrainIter.py | gopala-kr/ds-notebooks | 1 | 12772981 | <reponame>gopala-kr/ds-notebooks
import numpy as np
import mxnet as mx
import bisect
class VQAtrainIter(mx.io.DataIter):
    """Bucketing data iterator for VQA training.

    Question sentences are grouped into length buckets and right-padded with
    `invalid_label`; each batch pairs a bucketed question slice with the
    corresponding image features (`img`) and answer labels (`answer`).
    `layout` controls whether text/image batches are batch-major ('N' at
    position 0) or time-major ('N' at position 1).
    """
    def __init__(self, img, sentences, answer, batch_size, buckets=None, invalid_label=-1,
                 text_name='text', img_name='image', label_name='softmax_label', dtype='float32', layout='NTC'):
        super(VQAtrainIter, self).__init__()
        # Default buckets: every sentence length occurring at least
        # batch_size times in the corpus.
        if not buckets:
            buckets = [i for i, j in enumerate(np.bincount([len(s) for s in sentences]))
                       if j >= batch_size]
        buckets.sort()

        # Pad each sentence up to its bucket's length; sentences longer than
        # the largest bucket are discarded.
        ndiscard = 0
        self.data = [[] for _ in buckets]
        for i in range(len(sentences)):
            buck = bisect.bisect_left(buckets, len(sentences[i]))
            if buck == len(buckets):
                ndiscard += 1
                continue
            buff = np.full((buckets[buck],), invalid_label, dtype=dtype)
            buff[:len(sentences[i])] = sentences[i]
            self.data[buck].append(buff)

        self.data = [np.asarray(i, dtype=dtype) for i in self.data]
        self.answer = answer
        self.img = img

        print("WARNING: discarded %d sentences longer than the largest bucket."%ndiscard)

        self.batch_size = batch_size
        self.buckets = buckets
        self.text_name = text_name
        self.img_name = img_name
        self.label_name = label_name
        self.dtype = dtype
        self.invalid_label = invalid_label
        self.nd_text = []
        self.nd_img = []
        self.ndlabel = []
        self.major_axis = layout.find('N')
        self.default_bucket_key = max(buckets)

        # Advertise data/label shapes for the largest bucket so the module
        # can bind with enough capacity for any batch.
        if self.major_axis == 0:
            self.provide_data = [(text_name, (batch_size, self.default_bucket_key)),
                                 (img_name, (batch_size, self.default_bucket_key))]
            self.provide_label = [(label_name, (batch_size, self.default_bucket_key))]
        elif self.major_axis == 1:
            self.provide_data = [(text_name, (self.default_bucket_key, batch_size)),
                                 (img_name, (self.default_bucket_key, batch_size))]
            self.provide_label = [(label_name, (self.default_bucket_key, batch_size))]
        else:
            # Fixed: the original message had an unfilled %s placeholder.
            raise ValueError("Invalid layout %s: Must be NT (batch major) or TN (time major)" % layout)

        # (bucket index, batch start offset) pairs for every full batch;
        # trailing partial batches are dropped.
        self.idx = []
        for i, buck in enumerate(self.data):
            self.idx.extend([(i, j) for j in range(0, len(buck) - batch_size + 1, batch_size)])
        self.curr_idx = 0

        self.reset()

    def reset(self):
        """Rewind iteration and rebuild the per-bucket NDArray views."""
        self.curr_idx = 0
        self.nd_text = []
        self.nd_img = []
        self.ndlabel = []
        for buck in self.data:
            # NOTE(review): each bucket is paired with the *full* img/answer
            # arrays; slicing in next() assumes their row order matches the
            # bucketed sentences -- confirm against the data pipeline.
            # (Removed a dead `label = np.empty_like(...)` assignment that was
            # immediately overwritten.)
            self.nd_text.append(mx.ndarray.array(buck, dtype=self.dtype))
            self.nd_img.append(mx.ndarray.array(self.img, dtype=self.dtype))
            self.ndlabel.append(mx.ndarray.array(self.answer, dtype=self.dtype))

    def next(self):
        """Return the next mx.io.DataBatch; raise StopIteration at epoch end."""
        if self.curr_idx == len(self.idx):
            raise StopIteration
        i, j = self.idx[self.curr_idx]
        self.curr_idx += 1

        # Transpose text/image to time-major when layout puts 'N' second.
        if self.major_axis == 1:
            img = self.nd_img[i][j:j + self.batch_size].T
            text = self.nd_text[i][j:j + self.batch_size].T
            label = self.ndlabel[i][j:j + self.batch_size]
        else:
            img = self.nd_img[i][j:j + self.batch_size]
            text = self.nd_text[i][j:j + self.batch_size]
            label = self.ndlabel[i][j:j + self.batch_size]
        data = [text, img]

        return mx.io.DataBatch(data, [label],
                               bucket_key=self.buckets[i],
                               provide_data=[(self.text_name, text.shape), (self.img_name, img.shape)],
                               provide_label=[(self.label_name, label.shape)])
Emotif/motif_filtering.py | YichaoOU/Emotif_Alpha | 0 | 12772982 | <filename>Emotif/motif_filtering.py
from __future__ import division
import os
import sys
import argparse
import shutil
import re
from utils import *
import warnings
warnings.filterwarnings("ignore")
import sys
import numpy as np
from scipy.io.arff import loadarff as la
from sklearn import svm
from sklearn import datasets
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.tree import DecisionTreeClassifier as DTC
def _rf_importance_filter(jid, criterion, tag, top, arff, fileList):
    """Shared implementation of the RandomForest importance filters.

    Fits a RandomForestClassifier with the given split `criterion` on the
    ARFF data, ranks features by squared importance, writes the full ranking
    (<jid>_<tag>_total_ranking.tsv) and the top-`top` list
    (<jid>_<tag>_top.csv), registers both files in fileList, and returns the
    top feature names.
    """
    data = la(arff)
    # All columns except the trailing "Class" column are features.
    features = list(data[1])[:-1]
    Y = np.array(data[0]["Class"])
    X = np.array(map(lambda x: list(x), data[0][features].tolist()))
    clf = RFC(criterion=criterion)
    clf.fit(X, Y)
    # ranking: column index -> [importance, feature name, importance^2]
    ranking = {}
    for idx, importance in enumerate(clf.feature_importances_):
        ranking[idx] = [importance, features[idx], importance * importance]
    rank_list = sorted(ranking, key=lambda x: ranking[x][2], reverse=True)
    total_ranking_file = jid + "_" + tag + "_total_ranking.tsv"
    total_ranking = open(total_ranking_file, "wb")
    print >>total_ranking, len(rank_list), "feature \t square of weight value"
    for pos in rank_list:
        print >>total_ranking, ranking[pos][1], "\t", ranking[pos][2]
    # Close explicitly: the original versions leaked these handles.
    total_ranking.close()
    top_motifs = map(lambda x: ranking[x][1], rank_list[0:top])
    top_list_file = jid + "_" + tag + "_top.csv"
    top_list = open(top_list_file, "wb")
    print >>top_list, ",".join(top_motifs)
    top_list.close()
    fileList.append(top_list_file)
    fileList.append(total_ranking_file)
    return top_motifs

def RF_gini_filter(jid, confDict, arff, fileList):
    """Rank motifs by RandomForest Gini importance; return the top motifs."""
    top = int(confDict['RF_gini_filter']['top'])
    return _rf_importance_filter(jid, 'gini', 'gini', top, arff, fileList)

def RF_entropy_filter(jid, confDict, arff, fileList):
    """Rank motifs by RandomForest entropy importance; return the top motifs."""
    top = int(confDict['RF_entropy_filter']['top'])
    return _rf_importance_filter(jid, 'entropy', 'entropy', top, arff, fileList)
def prefilter(jid, confDict):
    """Filter motifs by positive/negative sequence coverage.

    Reads the positive/negative sequence and hit files named in confDict,
    keeps motifs whose negative coverage is at most `maxnegcov` and positive
    coverage at least `minposcov`, writes filtered hit files plus a coverage
    CSV, and moves every output into a <jid>_coverage_filter folder.

    Returns (output_folder, filterPosHitFile, filterNegHitFile).  Relies on
    filterHits/writeFiltFiles/writeCovFiltFiles imported via `from utils
    import *` -- their exact contracts are not visible here.
    """
    pos_seq = confDict['input']['pos_seq']
    neg_seq = confDict['input']['neg_seq']
    output_folder = jid + '_coverage_filter'
    pos_mhit = confDict['input']['pos_mhit']
    neg_mhit = confDict['input']['neg_mhit']
    maxNegCov = float(confDict['coverage_filter']['maxnegcov'])
    minPosCov = float(confDict['coverage_filter']['minposcov'])
    os.makedirs(output_folder)
    fileList = []
    totalNumPosSeqs = general_utils.findNumSeqs(pos_seq)
    totalNumNegSeqs = general_utils.findNumSeqs(neg_seq)
    print 'numPosSeqs:', totalNumPosSeqs,'numNegSeqs:', totalNumNegSeqs
    # Dict between motif names and their seq lists.
    posMotifDict = readHitFile(pos_mhit)
    negMotifDict = readHitFile(neg_mhit)
    # for k in negMotifDict.keys():
    # print "Neg Check, ",k,len(negMotifDict[k])
    print 'maxNegCov:', maxNegCov,'minPosCov:', minPosCov
    filterPosHitFile = jid + '_pos_filter_hitFile_maxNegCov_' + str(int(maxNegCov*100)) + '.mhit'
    filterNegHitFile = jid + '_neg_filter_hitFile_maxNegCov_' + str(int(maxNegCov*100)) + '.mhit'
    fileList.append(filterPosHitFile)
    fileList.append(filterNegHitFile)
    filtMotifList = filterHits(posMotifDict, negMotifDict, maxNegCov, minPosCov, totalNumPosSeqs, totalNumNegSeqs)
    # Write hit files restricted to the motifs that passed the filter.
    writeFiltFiles(filtMotifList, filterPosHitFile, filterNegHitFile, posMotifDict, negMotifDict)
    # Write the pos and neg coverage of the filtered motifs.
    covInfoFileName = jid + '_cov_info.csv'
    fileList.append(covInfoFileName)
    writeCovFiltFiles(filtMotifList, posMotifDict, negMotifDict, totalNumPosSeqs, totalNumNegSeqs, covInfoFileName)
    #posFiltFileName = jid + '_pos_filtered_hits_' + str(minNegCov)
    #fileList.append(posFiltFileName)
    #negFiltFileName = jid + '_neg_filtered_hits_' + str(minNegCov)
    #fileList.append(negFiltFileName)
    # Move all outputs into the results folder.
    for outFile in fileList:
        shutil.move(outFile, output_folder)
    return output_folder,filterPosHitFile,filterNegHitFile
def readHitFile(hitFile):
    """Parse a .mhit file into {motif name: [hit lines]}.

    Lines containing '>' are headers; the text after '>' names the motif,
    and every subsequent non-header line is appended to that motif's list.
    """
    motifDict = {}
    current = ''
    with open(hitFile, 'rb') as handler:
        for line in handler:
            line = line.strip()
            if re.search(r'>', line):
                current = line[1:]
                motifDict.setdefault(current, [])
                continue
            motifDict[current].append(line)
    return motifDict
def hit2hit(in_pos_hit, in_neg_hit, out_pos_hit, out_neg_hit, mName):
    """Subset two .mhit files down to the motifs named in mName.

    Every motif in mName is written (with its positive hits) to out_pos_hit;
    motifs that also have negative hits are written to out_neg_hit.  A motif
    missing from the positive file raises KeyError, as before.
    """
    posMotifDict = readHitFile(in_pos_hit)
    negMotifDict = readHitFile(in_neg_hit)
    pos_hit = open(out_pos_hit, "wb")
    neg_hit = open(out_neg_hit, "wb")
    for motif in mName:
        print >>pos_hit, (">" + motif)
        print >>pos_hit, "\n".join(posMotifDict[motif])
        # A motif may have no negative hits at all; skip it in the neg file.
        # (Replaced deprecated dict.has_key with the `in` operator.)
        if motif in negMotifDict:
            print >>neg_hit, (">" + motif)
            print >>neg_hit, "\n".join(negMotifDict[motif])
    # Close explicitly: the original leaked both handles.
    pos_hit.close()
    neg_hit.close()
def subPwmFile(pwmFileName, selected_motifs_name, allPwmFile):
    """Write a MEME-format PWM file containing only the selected motifs.

    Emits the standard MEME header, then copies every 'MOTIF <name>' section
    of allPwmFile whose name appears in selected_motifs_name.
    """
    pwmFile = open(pwmFileName, 'wb')
    pwmFile.write('MEME version 4.4\nALPHABET= ACGT\nstrands: + -\nBackground letter frequencies (from web form):\nA 0.25000 C 0.25000 G 0.25000 T 0.25000\n\n')
    copying = False
    with open(allPwmFile, 'rb') as handler:
        for line in handler:
            line = line.strip()
            if re.search(r'MOTIF', line):
                # Start (or stop) copying depending on whether this motif
                # was selected.
                motifName = line.split()[1]
                copying = motifName in selected_motifs_name
                if copying:
                    pwmFile.write(line + '\n')
                continue
            if copying:
                pwmFile.write(line + '\n')
    pwmFile.close()
| 2.5 | 2 |
673. Number of Longest Increasing Subsequence/main.py | Competitive-Programmers-Community/LeetCode | 2 | 12772983 | <filename>673. Number of Longest Increasing Subsequence/main.py
class Solution:
    def findNumberOfLIS(self, nums):
        """Return the number of longest strictly-increasing subsequences.

        :type nums: List[int]
        :rtype: int

        dp[i]  -- length of the longest increasing subsequence ending at i
        cnt[i] -- number of such subsequences of that length ending at i
        O(n^2) time, O(n) space.  (Removed leftover debug prints.)
        """
        if not nums:
            return 0
        n = len(nums)
        dp = [1] * n
        cnt = [1] * n
        for i in range(1, n):
            for j in range(i):
                if nums[j] < nums[i]:
                    if dp[i] < dp[j] + 1:
                        # Found a strictly longer subsequence ending at i;
                        # its count resets to the count at j.
                        dp[i] = dp[j] + 1
                        cnt[i] = cnt[j]
                    elif dp[i] == dp[j] + 1:
                        # Another distinct subsequence of the same best length.
                        cnt[i] += cnt[j]
        best = max(dp)
        return sum(c for c, d in zip(cnt, dp) if d == best)
| 3.328125 | 3 |
backend/www/test/auth_test.py | xuantan/viewfinder | 645 | 12772984 | #!/usr/bin/env python
#
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Account authorization tests for Facebook and Google accounts.
"""
__authors__ = ['<EMAIL> (<NAME>)',
'<EMAIL> (<NAME>)']
import json
import mock
import os
import time
import unittest
import urllib
from copy import deepcopy
from cStringIO import StringIO
from tornado import httpclient, options
from tornado.ioloop import IOLoop
from urlparse import urlparse
from viewfinder.backend.base import message, util
from viewfinder.backend.db.contact import Contact
from viewfinder.backend.db.identity import Identity
from viewfinder.backend.db.notification import Notification
from viewfinder.backend.db.settings import AccountSettings
from viewfinder.backend.db.user import User
from viewfinder.backend.op.fetch_contacts_op import FetchContactsOperation
from viewfinder.backend.www import auth
from viewfinder.backend.www.test import service_base_test
from viewfinder.backend.www.www_util import GzipEncode
@unittest.skipIf('NO_NETWORK' in os.environ, 'no network')
class AuthTestCase(service_base_test.ServiceBaseTestCase):
"""Initializes the test datastore and the viewfinder schema.
"""
  def setUp(self):
    """Build per-test fixtures: canned Google/Facebook/Viewfinder user dicts,
    a mobile device dict, and a prospective user created during asset setup."""
    super(AuthTestCase, self).setUp()
    self._CreateSimpleTestAssets()
    self._google_user_dict = {'family_name': 'Kimball', 'name': '<NAME>', 'locale': 'en',
                              'gender': 'male', 'email': '<EMAIL>',
                              'link': 'https://plus.google.com/id',
                              'given_name': 'Andrew', 'id': 'id', 'verified_email': True}
    self._facebook_user_dict = {'first_name': 'Andrew', 'last_name': 'Kimball', 'name': '<NAME>',
                                'id': 'id', 'link': 'http://www.facebook.com/andrew.kimball.50',
                                'timezone':-7, 'locale': 'en_US', 'email': '<EMAIL>',
                                'picture': {'data': {'url': 'http://foo.com/pic.jpg',
                                                     'is_silhouette': False}},
                                'verified': True}
    self._viewfinder_user_dict = {'name': '<NAME>', 'given_name': 'Andrew', 'email': '<EMAIL>'}
    self._mobile_device_dict = {'name': 'Andy\'s IPhone', 'version': '1.0', 'platform': 'IPhone 4S',
                                'os': 'iOS 5.0.1', 'push_token': 'push_token',
                                'device_uuid': '926744AC-8540-4103-9F3F-C84AA2F6D648',
                                'test_udid': '7d527095d4e0539aba40c852547db5da00000000',
                                'country': 'US', 'language': 'en'}
    # The prospective user (unregistered, created via a share) plus the dict
    # later used to register that same account.
    self._prospective_user, _, _ = self._CreateProspectiveUser()
    self._register_user_dict = {'email': self._prospective_user.email,
                                'name': '<NAME>',
                                'given_name': 'Jimmy',
                                'family_name': 'John'}
  def tearDown(self):
    """Reset the --freeze_new_accounts kill switch so later tests see the default."""
    super(AuthTestCase, self).tearDown()
    options.options.freeze_new_accounts = False
  def testRegisterWithCookie(self):
    """Register user, overriding current logged-in user.

    Covers overriding a registered user, overriding a prospective user, and
    registering the prospective account while another user is logged in.
    """
    # Override registered user.
    user, device_id = self._tester.RegisterGoogleUser(self._google_user_dict)
    google_cookie = self._GetSecureUserCookie(user, device_id)
    user2, _ = self._tester.RegisterFacebookUser(self._facebook_user_dict,
                                                 self._mobile_device_dict,
                                                 user_cookie=google_cookie)
    self.assertNotEqual(user.user_id, user2.user_id)

    # Override prospective user.
    cookie = self._GetSecureUserCookie(self._prospective_user, self._prospective_user.webapp_dev_id)
    user, _ = self._tester.RegisterViewfinderUser(self._viewfinder_user_dict, user_cookie=cookie)
    self.assertNotEqual(self._prospective_user.user_id, user.user_id)

    # Override with registration of prospective user.
    user, _ = self._tester.RegisterViewfinderUser(self._register_user_dict, user_cookie=self._cookie)
    self.assertNotEqual(user.user_id, self._user.user_id)
  def testEmailAlertSettings(self):
    """Test that email/push alert settings are updated properly during registration."""
    # Asserts the persisted AccountSettings row for the prospective user.
    def _ValidateAlerts(email_alerts, push_alerts):
      settings = self._RunAsync(AccountSettings.QueryByUser, self._client, self._prospective_user.user_id, None)
      self.assertEqual(settings.email_alerts, email_alerts)
      self.assertEqual(settings.sms_alerts, AccountSettings.SMS_NONE)
      self.assertEqual(settings.push_alerts, push_alerts)

    # Skip cleanup validation of alerts because a new device is created in this test that did not receive
    # notifications sent as part of setUp() call.
    self._skip_validation_for = ['Alerts']

    # Register a prospective user using the web device.
    cookie = self._GetSecureUserCookie(self._prospective_user, self._prospective_user.webapp_dev_id)
    _ValidateAlerts(AccountSettings.EMAIL_ON_SHARE_NEW, AccountSettings.PUSH_NONE)
    user, device_id = self._tester.RegisterViewfinderUser(self._register_user_dict)
    _ValidateAlerts(AccountSettings.EMAIL_ON_SHARE_NEW, AccountSettings.PUSH_NONE)

    # Update the user's email alert setting and validate the changed setting.
    self._tester.UpdateUser(cookie, settings_dict={'email_alerts': 'none'})
    _ValidateAlerts(AccountSettings.EMAIL_NONE, AccountSettings.PUSH_NONE)

    # Login and register a new mobile device and validate that email alerts were turned off
    # and push alerts turned on.
    self._tester.UpdateUser(cookie, settings_dict={'email_alerts': 'on_share_new'})
    self._tester.LoginViewfinderUser(self._register_user_dict, self._mobile_device_dict)
    _ValidateAlerts(AccountSettings.EMAIL_NONE, AccountSettings.PUSH_ALL)

    # Turn off push alerts, and then re-login, and validate that they were not turned back on.
    self._tester.UpdateUser(cookie, settings_dict={'push_alerts': 'none'})
    self._tester.LoginViewfinderUser(self._register_user_dict)
    self._tester.LoginViewfinderUser(self._register_user_dict, self._mobile_device_dict)
    _ValidateAlerts(AccountSettings.EMAIL_NONE, AccountSettings.PUSH_NONE)
  def testSmsAlertSettings(self):
    """Test that SMS/push alert settings are updated properly during registration."""
    # NOTE: _ValidateAlerts closes over prospective_user, which is bound
    # later in this method (late binding is intentional here).
    def _ValidateAlerts(sms_alerts, push_alerts):
      settings = self._RunAsync(AccountSettings.QueryByUser, self._client, prospective_user.user_id, None)
      self.assertEqual(settings.email_alerts, AccountSettings.EMAIL_NONE)
      self.assertEqual(settings.sms_alerts, sms_alerts)
      self.assertEqual(settings.push_alerts, push_alerts)

    # Skip cleanup validation of alerts because a new device is created in this test that did not receive
    # notifications sent as part of setUp() call.
    self._skip_validation_for = ['Alerts']

    # Create prospective user with mobile phone.
    ident_key = 'Phone:+14251234567'
    vp_id, ep_ids = self._tester.ShareNew(self._cookie,
                                          [(self._episode_id, self._photo_ids)],
                                          [ident_key])
    prospective_ident = self._RunAsync(Identity.Query, self._client, ident_key, None)
    prospective_user = self._RunAsync(User.Query, self._client, prospective_ident.user_id, None)
    register_user_dict = {'phone': prospective_user.phone,
                          'name': '<NAME>',
                          'given_name': 'Jimmy',
                          'family_name': 'John'}

    # Register a prospective user using the web device.
    cookie = self._GetSecureUserCookie(prospective_user, prospective_user.webapp_dev_id)
    _ValidateAlerts(AccountSettings.SMS_ON_SHARE_NEW, AccountSettings.PUSH_NONE)
    user, device_id = self._tester.RegisterViewfinderUser(register_user_dict)
    _ValidateAlerts(AccountSettings.SMS_ON_SHARE_NEW, AccountSettings.PUSH_NONE)

    # Login and register a new mobile device and validate that SMS alerts were turned off
    # and push alerts turned on.
    self._tester.LoginViewfinderUser(register_user_dict, self._mobile_device_dict)
    _ValidateAlerts(AccountSettings.SMS_NONE, AccountSettings.PUSH_ALL)

    # Turn off push alerts, and then re-login, and validate that they were not turned back on.
    self._tester.UpdateUser(cookie, settings_dict={'push_alerts': 'none'})
    self._tester.LoginViewfinderUser(register_user_dict)
    self._tester.LoginViewfinderUser(register_user_dict, self._mobile_device_dict)
    _ValidateAlerts(AccountSettings.SMS_NONE, AccountSettings.PUSH_NONE)
  def testMultipleAuthorities(self):
    """Test multiple authorities that authenticate same identity.

    The identity's `authority` should track the most recent login method.
    """
    # Login as Google user, then as Viewfinder user with same email, then again as same Google user.
    self._tester.RegisterGoogleUser({'name': '<NAME>', 'email': '<EMAIL>', 'verified_email': True})

    self._tester.LoginViewfinderUser({'email': '<EMAIL>'},
                                     self._mobile_device_dict)
    identity = self._RunAsync(Identity.Query, self._client, 'Email:<EMAIL>', None)
    self.assertEqual(identity.authority, 'Viewfinder')
    self.assertEqual(identity.expires, 0)

    self._tester.LoginGoogleUser({'email': '<EMAIL>', 'verified_email': True})
    identity = self._RunAsync(Identity.Query, self._client, 'Email:<EMAIL>', None)
    self.assertEqual(identity.authority, 'Google')
  def testLoginWithCookie(self):
    """Test successful login override of current logged-in user.

    Login should succeed regardless of whose cookie (same user, different
    user, or a prospective user) accompanies the request.
    """
    # Login with cookie from same user.
    user, device_id = self._tester.RegisterFacebookUser(self._facebook_user_dict, self._mobile_device_dict)
    facebook_cookie = self._GetSecureUserCookie(user, device_id)
    self._tester.LoginFacebookUser(self._facebook_user_dict, self._mobile_device_dict, user_cookie=facebook_cookie)

    # Login with cookie from different user.
    user, device_id = self._tester.RegisterGoogleUser(self._google_user_dict)
    google_cookie = self._GetSecureUserCookie(user, device_id)
    self._tester.LoginFacebookUser(self._facebook_user_dict, self._mobile_device_dict, user_cookie=google_cookie)

    # Login with cookie from prospective user.
    cookie = self._GetSecureUserCookie(self._prospective_user, self._prospective_user.webapp_dev_id)
    self._tester.LoginFacebookUser(self._facebook_user_dict, user_cookie=cookie)
  def testErrorFormat(self):
    """Test that error returned by the service handler is properly formed.

    Asserts the exact JSON error envelope: {'error': {'id', 'method',
    'message'}}.
    """
    ident_dict = {'key': 'Email:<EMAIL>', 'authority': 'FakeViewfinder'}
    auth_info_dict = {'identity': ident_dict['key']}

    url = self._tester.GetUrl('/login/viewfinder')
    request_dict = _CreateRegisterRequest(self._mobile_device_dict, auth_info_dict, synchronous=False)
    # 403 is expected since no account exists for this identity.
    response = _SendAuthRequest(self._tester, url, 'POST', request_dict=request_dict, allow_errors=[403])
    self.assertEqual(json.loads(response.body),
                     {'error': {'id': 'NO_USER_ACCOUNT',
                                'method': 'login',
                                'message': 'We can\'t find your Viewfinder account. Are you sure you used ' +
                                           '<EMAIL> to sign up?'}})
  def testLoginWithProspective(self):
    """ERROR: Try to log into a prospective user account."""
    # Prospective users have no registered credentials, so login must 403.
    self.assertRaisesHttpError(403, self._tester.LoginViewfinderUser, self._register_user_dict)
  def testLinkWithProspective(self):
    """ERROR: Try to link another identity to a prospective user.

    Linking requires a fully registered account, so this must 403.
    """
    # Link with cookie from prospective user, using Facebook account that is not yet linked.
    cookie = self._GetSecureUserCookie(self._prospective_user, self._prospective_user.webapp_dev_id)
    self.assertRaisesHttpError(403, self._tester.LinkFacebookUser, self._facebook_user_dict, user_cookie=cookie)
  def testLinkAlreadyLinked(self):
    """ERROR: Try to link a Google account that is already linked to a different Viewfinder account."""
    user, device_id = self._tester.RegisterFacebookUser(self._facebook_user_dict)
    facebook_cookie = self._GetSecureUserCookie(user, device_id)
    self._tester.RegisterGoogleUser(self._google_user_dict)
    # The Google identity already belongs to another user, so linking it to
    # the Facebook user's account must be rejected with a 403.
    self.assertRaisesHttpError(403, self._tester.LinkGoogleUser, self._google_user_dict,
                               self._mobile_device_dict, user_cookie=facebook_cookie)
  def testUpdateFriendAttribute(self):
    """Update name of a user and ensure that each friend is notified."""
    # Create a prospective user by sharing with an email.
    vp_id, ep_ids = self._tester.ShareNew(self._cookie,
                                          [(self._episode_id, self._photo_ids)],
                                          ['Email:<EMAIL>', self._user2.user_id])

    # Register the user and verify friends are notified.
    self._tester.RegisterGoogleUser(self._google_user_dict)
    response_dict = self._tester.QueryNotifications(self._cookie2, 1, scan_forward=False)
    # User id 5 is the newly registered user; friends receive an invalidation
    # for that user record.
    self.assertEqual(response_dict['notifications'][0]['invalidate'], {u'users': [5]})
  def testRegisterContact(self):
    """Register an identity that is the target of a contact, which will
    be bound to a user_id as a result.
    """
    # Create a contact.
    user_dict = {'name': '<NAME>', 'email': '<EMAIL>', 'verified_email': True}
    identity_key = 'Email:%s' % user_dict['email']
    contact_dict = Contact.CreateContactDict(self._user.user_id,
                                             [(identity_key, None)],
                                             util._TEST_TIME,
                                             Contact.GMAIL,
                                             name=user_dict['name'])
    self._UpdateOrAllocateDBObject(Contact, **contact_dict)

    # Register the new user.
    user, device_id = self._tester.RegisterGoogleUser(user_dict)
    # The contact's owner should be notified that the contact registered.
    response_dict = self._tester.QueryNotifications(self._cookie, 1, scan_forward=False)
    self.assertEqual([notify_dict['name'] for notify_dict in response_dict['notifications']],
                     ['first register contact'])
  def testRegisterProspectiveContact(self):
    """Register an identity that is the target of a contact (that is still a prospective user).

    Both existing users hold a contact for the prospective user's email;
    each should receive the appropriate notifications on registration.
    """
    for user_id in [self._user.user_id, self._user2.user_id]:
      # Create several contacts.
      identity_key = 'Email:%s' % self._prospective_user.email
      contact_dict = Contact.CreateContactDict(user_id,
                                               [(identity_key, None)],
                                               util._TEST_TIME,
                                               Contact.GMAIL,
                                               name='Mr. John')
      self._UpdateOrAllocateDBObject(Contact, **contact_dict)

    # Register the prospective user.
    user, device_id = self._tester.RegisterViewfinderUser(self._register_user_dict)

    # Expect friend & contact notifications.
    response_dict = self._tester.QueryNotifications(self._cookie, 2, scan_forward=False)
    self.assertEqual([notify_dict['name'] for notify_dict in response_dict['notifications']],
                     ['register friend', 'first register contact'])

    # Expect only contact notification.
    response_dict = self._tester.QueryNotifications(self._cookie2, 1, scan_forward=False)
    self.assertEqual([notify_dict['name'] for notify_dict in response_dict['notifications']],
                     ['first register contact'])

    # Expect only friend notification.
    cookie = self._GetSecureUserCookie(self._prospective_user, self._prospective_user.webapp_dev_id)
    response_dict = self._tester.QueryNotifications(cookie, 2, scan_forward=False)
    self.assertEqual([notify_dict['name'] for notify_dict in response_dict['notifications']],
                     ['register friend', 'share_new'])
  def testNewIdentityOnly(self):
    """Register existing user and device, but create new identity via link."""
    user, device_id = self._tester.RegisterGoogleUser(self._google_user_dict, self._mobile_device_dict)
    cookie = self._GetSecureUserCookie(user, device_id)
    # Re-use the existing device id so that only the identity is new.
    self._mobile_device_dict['device_id'] = device_id
    self._tester.LinkFacebookUser(self._facebook_user_dict, self._mobile_device_dict, cookie)
  def testNewDeviceOnly(self):
    """Register existing user and identity, but create new device as part of login."""
    self._tester.RegisterGoogleUser(self._google_user_dict)
    # Passing a device dict (with no device_id) on login registers a new device.
    self._tester.LoginGoogleUser(self._google_user_dict, self._mobile_device_dict)
  def testDuplicateToken(self):
    """Register device with push token that is already in use by another device."""
    # Both registrations share the same push_token from self._mobile_device_dict;
    # the second registration should still succeed.
    self._tester.RegisterGoogleUser(self._google_user_dict, self._mobile_device_dict)
    self._tester.RegisterFacebookUser(self._facebook_user_dict, self._mobile_device_dict)
  def testAsyncRequest(self):
    """Send async register request.

    With synchronous=False the response returns immediately and the
    fetch_contacts operation completes in the background.
    """
    ident_dict = {'key': 'Email:<EMAIL>', 'authority': 'FakeViewfinder'}
    auth_info_dict = {'identity': ident_dict['key']}

    url = self._tester.GetUrl('/link/fakeviewfinder')
    request_dict = _CreateRegisterRequest(self._mobile_device_dict, auth_info_dict, synchronous=False)
    response = _SendAuthRequest(self._tester, url, 'POST', request_dict=request_dict, user_cookie=self._cookie)
    response_dict = json.loads(response.body)
    self._validate = False

    # Wait until notification is written by the background fetch_contacts op.
    while True:
      notification = self._RunAsync(Notification.QueryLast, self._client, response_dict['user_id'])
      if notification.name == 'fetch_contacts':
        self.assertEqual(notification.op_id, response_dict['headers']['op_id'])
        break
      # Poll every 100ms until the async operation completes.
      self._RunAsync(IOLoop.current().add_timeout, time.time() + .1)
def testDeviceNoUser(self):
    """ERROR: Try to register existing device without existing user."""
    user, device_id = self._tester.RegisterGoogleUser(self._google_user_dict, self._mobile_device_dict)
    # Attempt to register a *new* user claiming the already-owned device id.
    self._mobile_device_dict['device_id'] = device_id
    self.assertRaisesHttpError(403, self._tester.RegisterFacebookUser, self._facebook_user_dict,
                               self._mobile_device_dict)
def testDeviceNotOwned(self):
    """ERROR: Try to register existing device that is not owned by the
    existing user.
    """
    self._tester.RegisterGoogleUser(self._google_user_dict, self._mobile_device_dict)
    # 1000 is a device id the registered user does not own.
    self._mobile_device_dict['device_id'] = 1000
    self.assertRaisesHttpError(403, self._tester.RegisterGoogleUser, self._google_user_dict,
                               self._mobile_device_dict)
def testRegisterFreezeNewAccounts(self):
    """ERROR: Verify that attempt to register fails if --freeze_new_accounts
    is true. This is the kill switch the server can throw to stop the tide
    of incoming account registrations.
    """
    options.options.freeze_new_accounts = True
    exc = self.assertRaisesHttpError(403, self._tester.RegisterGoogleUser, self._google_user_dict,
                                     self._mobile_device_dict)
    # The error body carries the canned freeze message.
    error_dict = json.loads(exc.response.body)
    self.assertEqual(error_dict['error']['message'], auth._FREEZE_NEW_ACCOUNTS_MESSAGE)

    self.assertRaisesHttpError(403, self._tester.RegisterFacebookUser, self._facebook_user_dict)
def testLoginWithUnboundIdentity(self):
    """ERROR: Try to login with an identity that exists, but is not bound to a user."""
    # Create the identity row directly in the DB without binding it to any user.
    self._UpdateOrAllocateDBObject(Identity, key='Email:<EMAIL>')
    self.assertRaisesHttpError(403,
                               self._tester.LoginViewfinderUser,
                               self._viewfinder_user_dict,
                               self._mobile_device_dict)
def testBadRequest(self):
    """ERROR: Verify that various malformed and missing register fields result
    in a bad request (400) error.
    """
    # Missing request dict.
    url = self.get_url('/register/facebook') + '?' + urllib.urlencode({'access_token': 'dummy'})
    self.assertRaisesHttpError(400, _SendAuthRequest, self._tester, url, 'POST', request_dict='')

    # Malformed request dict.
    self.assertRaisesHttpError(400, _SendAuthRequest, self._tester, url, 'POST', request_dict={'device': 'foo'})
def testRegisterExisting(self):
    """ERROR: Try to register a user that already exists."""
    self._tester.RegisterViewfinderUser(self._viewfinder_user_dict)
    # Second registration of the same Viewfinder user must be rejected.
    self.assertRaisesHttpError(403,
                               self._tester.RegisterViewfinderUser,
                               self._viewfinder_user_dict,
                               self._mobile_device_dict)
def testLogout(self):
    """Ensure that logout sends back a cookie with an expiration time."""
    url = self._tester.GetUrl('/logout')
    response = _SendAuthRequest(self._tester, url, 'GET', user_cookie=self._cookie)
    # Logout redirects to the site root...
    self.assertEqual(response.code, 302)
    self.assertEqual(response.headers['location'], '/')
    # ...and expires the user cookie (an "expires" attribute makes it non-session).
    self.assertIn('user', response.headers['Set-Cookie'])
    self.assertIn('expires', response.headers['Set-Cookie'])
    self.assertIn('Domain', response.headers['Set-Cookie'])
def testSessionCookie(self):
    """Test "use_session_cookie" option in auth request."""
    # First register a user, requesting a session cookie.
    auth_info_dict = {'identity': 'Email:<EMAIL>',
                      'name': '<NAME>',
                      'given_name': 'Andy',
                      'password': '<PASSWORD>'}
    url = self._tester.GetUrl('/register/viewfinder')
    request_dict = _CreateRegisterRequest(None, auth_info_dict)
    response = _SendAuthRequest(self._tester, url, 'POST', request_dict=request_dict)
    # Registration alone does not log the user in, so no cookie yet.
    self.assertNotIn('Set-Cookie', response.headers)

    identity = self._tester._RunAsync(Identity.Query, self._client, auth_info_dict['identity'], None)

    # Verify the identity with "use_session_cookie": the returned cookie must
    # have no "expires" attribute (i.e. it is a browser-session cookie).
    url = self._tester.GetUrl('/verify/viewfinder')
    request_dict = {'headers': {'version': message.MAX_SUPPORTED_MESSAGE_VERSION,
                                'synchronous': True},
                    'identity': identity.key,
                    'access_token': identity.access_token,
                    'use_session_cookie': True}
    response = _SendAuthRequest(self._tester, url, 'POST', request_dict=request_dict)
    self.assertNotIn('expires', response.headers['Set-Cookie'])
    cookie_user_dict = self._tester.DecodeUserCookie(self._tester.GetCookieFromResponse(response))
    self.assertTrue(cookie_user_dict.get('is_session_cookie', False))

    # Now log in and request a session cookie.
    del auth_info_dict['name']
    del auth_info_dict['given_name']
    url = self._tester.GetUrl('/login/viewfinder')
    request_dict = _CreateRegisterRequest(None, auth_info_dict, synchronous=False)
    # Without the option the cookie is persistent ("expires" present)...
    response = _SendAuthRequest(self._tester, url, 'POST', request_dict=request_dict)
    self.assertIn('expires', response.headers['Set-Cookie'])

    # ...with the option it is a session cookie again.
    request_dict['use_session_cookie'] = True
    response = _SendAuthRequest(self._tester, url, 'POST', request_dict=request_dict)
    self.assertNotIn('expires', response.headers['Set-Cookie'])
    cookie = self._tester.GetCookieFromResponse(response)
    cookie_user_dict = self._tester.DecodeUserCookie(cookie)
    self.assertTrue(cookie_user_dict.get('is_session_cookie', False))

    # Now use the session cookie to make a service request and verify it's preserved.
    request_dict = {'headers': {'version': message.MAX_SUPPORTED_MESSAGE_VERSION, 'synchronous': True}}
    headers = {'Content-Type': 'application/json',
               'X-Xsrftoken': 'fake_xsrf',
               'Cookie': '_xsrf=fake_xsrf;user=%s' % cookie}
    response = self._RunAsync(self.http_client.fetch,
                              self._tester.GetUrl('/service/query_followed'),
                              method='POST',
                              body=json.dumps(request_dict),
                              headers=headers)
    cookie_user_dict = self._tester.DecodeUserCookie(self._tester.GetCookieFromResponse(response))
    self.assertTrue(cookie_user_dict.get('is_session_cookie', False))
def _CreateRegisterRequest(device_dict=None, auth_info_dict=None, synchronous=True,
                           version=message.MAX_SUPPORTED_MESSAGE_VERSION):
    """Returns a new AUTH_REQUEST dict that has been populated with information from the
    specified dicts.

    "device" and "auth_info" keys are added only when the corresponding dict is
    not None (via util.SetIfNotNone). When "synchronous" is true, the server is
    asked to complete the operation before responding.
    """
    request_dict = {'headers': {'version': version}}
    util.SetIfNotNone(request_dict, 'device', device_dict)
    util.SetIfNotNone(request_dict, 'auth_info', auth_info_dict)
    if synchronous:
        request_dict['headers']['synchronous'] = True

    return request_dict
def _AddMockJSONResponse(mock_client, url, response_dict):
    """Add a mapping entry to the mock client such that requests to
    "url" will return an HTTP response containing the JSON-formatted
    "response_dict".
    """
    def _CreateResponse(request):
        # Build a synthetic 200 response whose body is the serialized dict.
        return httpclient.HTTPResponse(request, 200,
                                       headers={'Content-Type': 'application/json'},
                                       buffer=StringIO(json.dumps(response_dict)))

    mock_client.map(url, _CreateResponse)
def _SendAuthRequest(tester, url, http_method, user_cookie=None, request_dict=None, allow_errors=None):
    """Sends request to auth service. If "request_dict" is defined, dumps it as a JSON body.
    If "user_cookie" is defined, automatically adds a "Cookie" header. Raises an HTTPError if
    an HTTP error is returned, unless the error code is part of the "allow_errors" set. Returns
    the HTTP response object on success.
    """
    headers = {'Content-Type': 'application/json',
               'Content-Encoding': 'gzip'}
    if user_cookie is not None:
        headers['Cookie'] = 'user=%s' % user_cookie

    # All requests are expected to have xsrf cookie/header.
    headers['X-Xsrftoken'] = 'fake_xsrf'
    # Use the "in" operator rather than the deprecated dict.has_key(), which
    # was removed in Python 3; behavior is identical.
    headers['Cookie'] = headers['Cookie'] + ';_xsrf=fake_xsrf' if 'Cookie' in headers else '_xsrf=fake_xsrf'

    # Skip the contacts update during tests so fetch_contacts is a no-op.
    with mock.patch.object(FetchContactsOperation, '_SKIP_UPDATE_FOR_TEST', True):
        response = tester._RunAsync(tester.http_client.fetch, url, method=http_method,
                                    body=None if request_dict is None else GzipEncode(json.dumps(request_dict)),
                                    headers=headers, follow_redirects=False)

    if response.code >= 400:
        if allow_errors is None or response.code not in allow_errors:
            response.rethrow()

    return response
def _AuthFacebookOrGoogleUser(tester, action, user_dict, ident_dict, device_dict, user_cookie):
    """Registers a user, identity, and device using the auth web service. The interface to Facebook
    or Google is mocked, with the contents of "user_dict" returned in lieu of what the real service
    would return. If "device_dict" is None, then simulates the web experience; else simulates the
    mobile device experience. If "user_cookie" is not None, then simulates case where calling user
    is already logged in when registering the new user. Returns the HTTP response that was returned
    by the auth service.
    """
    if device_dict is None:
        # Web client: first request redirects to the OAuth provider.
        url = tester.GetUrl('/%s/%s' % (action, ident_dict['authority'].lower()))
        response = _SendAuthRequest(tester, url, 'GET', user_cookie=user_cookie)
        assert response.code == 302, response.code

        # Invoke authentication again, this time sending code.
        url = tester.GetUrl('/%s/%s?code=code' % (action, ident_dict['authority'].lower()))
        response = _SendAuthRequest(tester, url, 'GET', user_cookie=user_cookie)
        assert response.code == 302, response.code
        assert response.headers['location'].startswith('/view')
    else:
        # Mobile client: POST with the authority-specific token parameter.
        if ident_dict['authority'] == 'Facebook':
            url = tester.GetUrl('/%s/facebook?access_token=access_token' % action)
        else:
            url = tester.GetUrl('/%s/google?refresh_token=refresh_token' % action)

        request_dict = _CreateRegisterRequest(device_dict)
        response = _SendAuthRequest(tester, url, 'POST', user_cookie=user_cookie, request_dict=request_dict)

    return response
def _ValidateAuthUser(tester, action, user_dict, ident_dict, device_dict, user_cookie, auth_response):
    """Validates an auth action that has taken place and resulted in the HTTP response given
    by "auth_response".
    """
    validator = tester.validator

    # Validate the response from a GET (device_dict is None) or POST to auth service.
    if device_dict is None:
        # Get the id of the user that should have been created by the registration.
        actual_identity = tester._RunAsync(Identity.Query, validator.client, ident_dict['key'], None)
        actual_user_id = actual_identity.user_id
    else:
        # Extract the user_id and device_id from the JSON response.
        response_dict = json.loads(auth_response.body)
        actual_op_id = response_dict['headers']['op_id']
        actual_user_id = response_dict['user_id']
        actual_device_id = response_dict.get('device_id', None)

        # Verify that the cookie in the response contains the correct information.
        cookie_user_dict = tester.DecodeUserCookie(tester.GetCookieFromResponse(auth_response))
        assert cookie_user_dict['user_id'] == actual_user_id, (cookie_user_dict, actual_user_id)
        assert device_dict is None or 'device_id' not in device_dict or \
               cookie_user_dict['device_id'] == device_dict['device_id'], \
               (cookie_user_dict, device_dict)

    actual_user = tester._RunAsync(User.Query, validator.client, actual_user_id, None)
    if device_dict is None:
        # If no mobile device was used, then web device id is expected.
        actual_device_id = actual_user.webapp_dev_id

    # Get notifications that were created. There could be up to 2: a register_user notification and
    # a fetch_contacts notification (in link case).
    notification_list = tester._RunAsync(Notification.RangeQuery,
                                         tester.validator.client,
                                         actual_user_id,
                                         range_desc=None,
                                         limit=3,
                                         col_names=None,
                                         scan_forward=False)
    if device_dict is None:
        # In the web case op_id was not in the response; recover it from notifications.
        actual_op_id = notification_list[1 if action == 'link' else 0].op_id

    # Determine what the registered user's id should have been.
    if user_cookie is None or action != 'link':
        expected_user_id = None
    else:
        expected_user_id, device_id = tester.GetIdsFromCookie(user_cookie)

    expected_identity = validator.GetModelObject(Identity, ident_dict['key'], must_exist=False)
    if expected_identity is not None:
        # Identity already existed, so expect registered user's id to equal the user id of that identity.
        expected_user_id = expected_identity.user_id

    # Verify that identity is linked to expected user.
    assert expected_user_id is None or expected_user_id == actual_user_id, \
        (expected_user_id, actual_user_id)

    # Validate the device if it should have been created.
    if device_dict is None:
        expected_device_dict = None
    else:
        expected_device_dict = deepcopy(device_dict)
        if 'device_id' not in device_dict:
            expected_device_dict['device_id'] = actual_device_id

    # Re-map picture element for Facebook authority (Facebook changed format in Oct 2012).
    scratch_user_dict = deepcopy(user_dict)
    if ident_dict['authority'] == 'Facebook':
        if device_dict is None:
            scratch_user_dict['session_expires'] = ['3600']
        if 'picture' in scratch_user_dict:
            scratch_user_dict['picture'] = scratch_user_dict['picture']['data']['url']
    elif ident_dict['authority'] == 'Viewfinder' and action != 'register':
        # Only use name in registration case.
        scratch_user_dict.pop('name', None)

    # Validate the Identity object.
    expected_ident_dict = deepcopy(ident_dict)
    expected_ident_dict.pop('json_attrs', None)
    if ident_dict['authority'] == 'Viewfinder':
        identity = tester._RunAsync(Identity.Query, tester.validator.client, ident_dict['key'], None)
        expected_ident_dict['access_token'] = identity.access_token
        expected_ident_dict['expires'] = identity.expires

    # Validate the User object: only attributes that were previously unset are
    # expected to be written by the auth operation.
    expected_user_dict = {}
    before_user = validator.GetModelObject(User, actual_user_id, must_exist=False)
    before_user_dict = {} if before_user is None else before_user._asdict()
    for k, v in scratch_user_dict.items():
        user_key = auth.AuthHandler._AUTH_ATTRIBUTE_MAP.get(k, None)
        if user_key is not None:
            if before_user is None or getattr(before_user, user_key) is None:
                expected_user_dict[auth.AuthHandler._AUTH_ATTRIBUTE_MAP[k]] = v

            # Set facebook email if it has not yet been set.
            if user_key == 'email' and ident_dict['authority'] == 'Facebook':
                if before_user is None or getattr(before_user, 'facebook_email') is None:
                    expected_user_dict['facebook_email'] = v
    expected_user_dict['user_id'] = actual_user_id
    expected_user_dict['webapp_dev_id'] = actual_user.webapp_dev_id

    op_dict = {'op_timestamp': util._TEST_TIME,
               'op_id': notification_list[1 if action == 'link' else 0].op_id,
               'user_id': actual_user_id,
               'device_id': actual_device_id}

    if expected_device_dict:
        expected_device_dict.pop('device_uuid', None)
        expected_device_dict.pop('test_udid', None)

    is_prospective = before_user is None or not before_user.IsRegistered()
    validator.ValidateUpdateUser('first register contact' if is_prospective else 'link contact',
                                 op_dict,
                                 expected_user_dict,
                                 expected_ident_dict,
                                 device_dict=expected_device_dict)
    after_user_dict = validator.GetModelObject(User, actual_user_id)._asdict()

    if expected_identity is not None:
        expected_ident_dict['user_id'] = expected_identity.user_id

    if action == 'link':
        ignored_keys = ['user_id', 'webapp_dev_id']
        if 'user_id' not in expected_ident_dict and all(k in ignored_keys for k in expected_user_dict.keys()):
            # Only notify self if it hasn't been done through Friends.
            validator.ValidateUserNotification('register friend self', actual_user_id, op_dict)

        # Validate fetch_contacts notification.
        op_dict['op_id'] = notification_list[0].op_id
        invalidate = {'contacts': {'start_key': Contact.CreateSortKey(None, util._TEST_TIME)}}
        validator.ValidateNotification('fetch_contacts', actual_user_id, op_dict, invalidate)

    # NOTE: the conditional expression binds to the second element only, so this
    # returns (actual_user, actual_device_id) when a device dict was supplied
    # and (actual_user, None) otherwise.
    return actual_user, actual_device_id if device_dict is not None else None
| 2.078125 | 2 |
brian2/tests/test_morphology.py | moritzaugustin/brian2 | 0 | 12772985 | from nose.plugins.attrib import attr
from numpy.testing.utils import assert_equal, assert_allclose, assert_raises
import numpy as np
from brian2.spatialneuron import *
from brian2.units import um, second
@attr('codegen-independent')
def test_basicshapes():
    """Build a small morphology tree and check compartment counts/distances."""
    morpho = Soma(diameter=30*um)
    morpho.L = Cylinder(length=10*um, diameter=1*um, n=10)
    morpho.LL = Cylinder(length=5*um, diameter=2*um, n=5)
    morpho.right = Cylinder(length=3*um, diameter=1*um, n=7)
    # Children can also be attached via key access.
    morpho.right['nextone'] = Cylinder(length=2*um, diameter=1*um, n=3)
    # Check total number of compartments
    assert_equal(len(morpho),26)
    assert_equal(len(morpho.L.main),10)
    # Check that end point is at distance 15 um from soma
    assert_allclose(morpho.LL.distance[-1],15*um)
@attr('codegen-independent')
def test_subgroup():
    """Check compartment selection by index/position and index resolution."""
    morpho = Soma(diameter=30*um)
    morpho.L = Cylinder(length=10*um, diameter=1*um, n=10)
    morpho.LL = Cylinder(length=5*um, diameter=2*um, n=5)
    morpho.right = Cylinder(length=3*um, diameter=1*um, n=7)
    # Getting a single compartment by index
    assert_allclose(morpho.L[2].distance,3*um)
    # Getting a single compartment by position
    assert_allclose(morpho.LL[0*um].distance,11*um)
    assert_allclose(morpho.LL[1*um].distance,11*um)
    assert_allclose(morpho.LL[1.5*um].distance,12*um)
    assert_allclose(morpho.LL[5*um].distance,15*um)
    # Getting a segment
    assert_allclose(morpho.L[3*um:5.1*um].distance, [3, 4, 5]*um)
    # Indices cannot be obtained at this stage
    assert_raises(AttributeError,lambda :morpho.L.indices[:])
    # Compress the morphology and get absolute compartment indices
    N = len(morpho)
    morpho.compress(MorphologyData(N))
    assert_equal(morpho.LL.indices[:], [11, 12, 13, 14, 15])
    assert_equal(morpho.L.indices[3*um:5.1*um], [3, 4, 5])
    assert_equal(morpho.L.indices[3*um:5.1*um],
                 morpho.L[3*um:5.1*um].indices[:])
    assert_equal(morpho.L.indices[:5.1*um], [1, 2, 3, 4, 5])
    assert_equal(morpho.L.indices[3*um:], [3, 4, 5, 6, 7, 8, 9, 10])
    assert_equal(morpho.L.indices[3.5*um], 4)
    assert_equal(morpho.L.indices[3], 4)
    assert_equal(morpho.L.indices[-1], 10)
    assert_equal(morpho.L.indices[3:5], [4, 5])
    assert_equal(morpho.L.indices[3:], [4, 5, 6, 7, 8, 9, 10])
    assert_equal(morpho.L.indices[:5], [1, 2, 3, 4, 5])

    # Main branch
    assert_equal(len(morpho.L.main), 10)

    # Non-existing branch
    assert_raises(AttributeError, lambda: morpho.axon)

    # Incorrect indexing
    #  wrong units or mixing units
    assert_raises(TypeError, lambda: morpho.indices[3*second:5*second])
    assert_raises(TypeError, lambda: morpho.indices[3.4:5.3])
    assert_raises(TypeError, lambda: morpho.indices[3:5*um])
    assert_raises(TypeError, lambda: morpho.indices[3*um:5])
    #  providing a step
    assert_raises(TypeError, lambda: morpho.indices[3*um:5*um:2*um])
    assert_raises(TypeError, lambda: morpho.indices[3:5:2])
    #  incorrect type
    assert_raises(TypeError, lambda: morpho.indices[object()])
if __name__ == '__main__':
    # Allow running this test module directly, without a test runner.
    test_basicshapes()
    test_subgroup()
| 2.15625 | 2 |
authlib/__init__.py | jmrafael/Streamlit-Authentication | 0 | 12772986 | <reponame>jmrafael/Streamlit-Authentication
from .common import const, trace_activity, AppError, DatabaseError
from .common.dt_helpers import tnow_iso , tnow_iso_str, dt_from_str, dt_from_ts, dt_to_str
from .common.crypto import aes256cbcExtended
from .common.cookie_manager import CookieManager
| 1.101563 | 1 |
tests/test_utils.py | ishtiaque06/vh-fst | 1 | 12772987 | <filename>tests/test_utils.py
from utils import spe_to_fst
def run_test_on_strings(input_list, output_list, object):
    """Feed each input string through the FST and assert the expected output.

    `object` must expose step(list_of_chars) -> iterable of output symbols.
    (NOTE(review): the parameter name shadows the builtin `object`; kept for
    backward compatibility with existing callers.)
    """
    # Fail loudly on mismatched fixture lengths instead of raising IndexError.
    assert len(input_list) == len(output_list)
    # zip + direct iteration instead of indexing via range(len(...)).
    for input_str, expected in zip(input_list, output_list):
        assert "".join(object.step(list(input_str))) == expected
def test_spe_to_fst_a_b_c_d():
    """Rule "a -> b / c _ d": states track how much left context was seen."""
    alphabet = {'a', 'b', 'c', 'd'}
    states = {0: '', 1: 'c', 2: 'ca'}
    transitions = {
        (0, '?'): ('?', 0),
        (0, 'a'): ('a', 0),
        (0, 'b'): ('b', 0),
        (0, 'c'): ('c', 1),
        (0, 'd'): ('d', 0),
        (1, '?'): ('?', 0),
        (1, 'a'): ('', 2),
        (1, 'b'): ('b', 0),
        (1, 'c'): ('c', 1),
        (1, 'd'): ('d', 0),
        (2, '?'): ('a?', 0),
        (2, 'a'): ('aa', 0),
        (2, 'b'): ('ab', 0),
        (2, 'c'): ('ac', 1),
        (2, 'd'): ('bd', 0),
    }
    object = spe_to_fst('a', 'b', 'c', 'd')
    assert object.states == states
    assert object.transitions == transitions
    assert object.alphabet == alphabet
    # End-to-end: "cad" should rewrite to "cbd", everything else untouched.
    input_list = ["","wqcdpo", "qpcadoi", "cadcadcadcad", "cacacacad"]
    output_list = ["", "wqcdpo", "qpcbdoi", "cbdcbdcbdcbd", "cacacacbd"]
    run_test_on_strings(input_list, output_list, object)
def test_spe_to_fst_a_b_cd_ef():
    """Rule "a -> b / cd _ ef": multi-symbol left and right contexts."""
    alphabet = {'a', 'b', 'c', 'd', 'e', 'f'}
    states = {0: '', 1: 'c', 2: 'cd', 3: 'cda', 4: 'cdae'}
    transitions = {
        (0, '?'): ('?', 0),
        (0, 'a'): ('a', 0),
        (0, 'b'): ('b', 0),
        (0, 'c'): ('c', 1),
        (0, 'd'): ('d', 0),
        (0, 'e'): ('e', 0),
        (0, 'f'): ('f', 0),
        (1, '?'): ('?', 0),
        (1, 'a'): ('a', 0),
        (1, 'b'): ('b', 0),
        (1, 'c'): ('c', 1),
        (1, 'd'): ('d', 2),
        (1, 'e'): ('e', 0),
        (1, 'f'): ('f', 0),
        (2, '?'): ('?', 0),
        (2, 'a'): ('', 3),
        (2, 'b'): ('b', 0),
        (2, 'c'): ('c', 1),
        (2, 'd'): ('d', 0),
        (2, 'e'): ('e', 0),
        (2, 'f'): ('f', 0),
        (3, '?'): ('a?', 0),
        (3, 'a'): ('aa', 0),
        (3, 'b'): ('ab', 0),
        (3, 'c'): ('ac', 1),
        (3, 'd'): ('ad', 0),
        (3, 'e'): ('', 4),
        (3, 'f'): ('af', 0),
        (4, '?'): ('ae?', 0),
        (4, 'a'): ('aea', 0),
        (4, 'b'): ('aeb', 0),
        (4, 'c'): ('aec', 1),
        (4, 'd'): ('aed', 0),
        (4, 'e'): ('aee', 0),
        (4, 'f'): ('bef', 0),
    }
    object = spe_to_fst('a', 'b', 'cd', 'ef')
    assert object.states == states
    assert object.transitions == transitions
    assert object.alphabet == alphabet
    input_list = [ "",
        "pcqwe",
        "pcdqwe",
        "pcdaqwe",
        "pcdaeqwe",
        "pcdaeqqwe",
        "pcdaefqwe",
        "cdaefcdaef",
    ]
    output_list = [ "",
        "pcqwe",
        "pcdqwe",
        "pcdaqwe",
        "pcdaeqwe",
        "pcdaeqqwe",
        "pcdbefqwe",
        "cdbefcdbef",
    ]
    # BUG FIX: the input/output fixtures were defined but never executed —
    # the string-level assertions silently never ran. Run them now, as every
    # sibling test in this module does.
    run_test_on_strings(input_list, output_list, object)
def test_spe_to_fst_a_b_empty_cd():
    """Rule "a -> b / _ cd": empty left context, two-symbol right context."""
    alphabet = {'a', 'b', 'c', 'd'}
    states = {0: '', 1: 'a', 2: 'ac'}
    transitions = {
        (0, '?'): ('?', 0),
        (0, 'a'): ('', 1),
        (0, 'b'): ('b', 0),
        (0, 'c'): ('c', 0),
        (0, 'd'): ('d', 0),
        (1, '?'): ('a?', 0),
        (1, 'a'): ('aa', 1),
        (1, 'b'): ('ab', 0),
        (1, 'c'): ('', 2),
        (1, 'd'): ('ad', 0),
        (2, '?'): ('ac?', 0),
        (2, 'a'): ('aca', 1),
        (2, 'b'): ('acb', 0),
        (2, 'c'): ('acc', 0),
        (2, 'd'): ('bcd', 0),
    }
    object = spe_to_fst("a", "b", "", "cd")
    assert object.states == states
    assert object.transitions == transitions
    assert object.alphabet == alphabet
    # End-to-end: every "acd" should rewrite to "bcd".
    input_list = [
        "",
        "acd",
        "bcd",
        "qwertyuiopacd",
        "awe",
        "acipiqp",
        "qwertyuiop",
        "acdqwertyuiop",
        "acdb",
        "acdqwertyuiop",
    ]
    output_list = [
        "",
        "bcd",
        "bcd",
        "qwertyuiopbcd",
        "awe",
        "acipiqp",
        "qwertyuiop",
        "bcdqwertyuiop",
        "bcdb",
        "bcdqwertyuiop",
    ]
    run_test_on_strings(input_list, output_list, object)
# Added by Travis
def test_spe_to_fst_a_b_empty_d():
    """Rule "a -> b / _ d": empty left context, single-symbol right context."""
    alphabet = {'a', 'b', 'd'}
    states = {0: '', 1: 'a'}
    transitions = {
        (0, 'b'): ('b', 0),
        (0, 'd'): ('d', 0),
        (0, '?'): ('?', 0),
        (0, 'a'): ('', 1),
        (1, 'a'): ('aa', 1),
        (1, 'd'): ('bd', 0),
        (1, 'b'): ('ab', 0),
        (1, '?'): ('a?', 0)
    }
    object = spe_to_fst("a", "b", "", "d")
    assert object.states == states
    assert object.transitions == transitions
    assert object.alphabet == alphabet
    # End-to-end: every "ad" should rewrite to "bd".
    input_list = [
        "",
        "ad",
        "abad",
        "adcadc",
        "qwer",
        "qweraddabc",
    ]
    output_list = [
        "",
        "bd",
        "abbd",
        "bdcbdc",
        "qwer",
        "qwerbddabc",
    ]
    run_test_on_strings(input_list, output_list, object)
# Added by Travis
def test_spe_to_fst_a_b_c_empty():
    """Rule "a -> b / c _": single-symbol left context, empty right context."""
    alphabet = {'a', 'b', 'c'}
    states = {0: '', 1: 'c'}
    transitions = {
        (0, 'a'): ('a', 0),
        (0, 'b'): ('b', 0),
        (0, '?'): ('?', 0),
        (0, 'c'): ('c', 1),
        (1, 'c'): ('c', 1),
        (1, 'a'): ('b', 0),
        (1, 'b'): ('b', 0),
        (1, '?'): ('?', 0)
    }
    object = spe_to_fst("a", "b", "c", "")
    assert object.states == states
    assert object.transitions == transitions
    assert object.alphabet == alphabet
    # NOTE(review): unlike sibling tests, no end-to-end string fixtures are run
    # here — consider adding a run_test_on_strings() check for completeness.
def test_spe_to_fst_a_b_cde_empty():
    """Rule "a -> b / cde _": three-symbol left context, empty right context."""
    alphabet = {'a', 'b', 'c', 'd', 'e'}
    states = {0: '', 1: 'c', 2: 'cd', 3: 'cde'}
    transitions = {
        (0, '?'): ('?', 0),
        (0, 'a'): ('a', 0),
        (0, 'b'): ('b', 0),
        (0, 'c'): ('c', 1),
        (0, 'd'): ('d', 0),
        (0, 'e'): ('e', 0),
        (1, '?'): ('?', 0),
        (1, 'a'): ('a', 0),
        (1, 'b'): ('b', 0),
        (1, 'c'): ('c', 1),
        (1, 'd'): ('d', 2),
        (1, 'e'): ('e', 0),
        (2, '?'): ('?', 0),
        (2, 'a'): ('a', 0),
        (2, 'b'): ('b', 0),
        (2, 'c'): ('c', 1),
        (2, 'd'): ('d', 0),
        (2, 'e'): ('e', 3),
        (3, '?'): ('?', 0),
        (3, 'a'): ('b', 0),
        (3, 'b'): ('b', 0),
        (3, 'c'): ('c', 1),
        (3, 'd'): ('d', 0),
        (3, 'e'): ('e', 0),
    }
    object = spe_to_fst("a", "b", "cde", "")
    assert object.states == states
    assert object.transitions == transitions
    assert object.alphabet == alphabet
    # End-to-end: "cdea" should rewrite to "cdeb".
    input_list = [
        "",
        "caq",
        "cdaq",
        "cdeaq",
        "deaq",
        "eaq",
        "cdacdeacdacac",
    ]
    output_list = [
        "",
        "caq",
        "cdaq",
        "cdebq",
        "deaq",
        "eaq",
        "cdacdebcdacac",
    ]
    run_test_on_strings(input_list, output_list, object)
# Revisiting challenge 51: print the first 10 terms of an arithmetic
# progression (PA) from a user-supplied first term and common difference.
print('GERADOR DE PA')
a1 = int(input('Digite o primeiro termo da PA: '))
r = int(input('Digite a razão da PA: '))
print('Dez primeiros termos da PA:')
# Compute each term directly from its position instead of accumulating.
for position in range(10):
    print('{} '.format(a1 + position * r), end='')
print('\nFIM')
capitulo-09/ex35.py | bryan-lima/exercicios-livro-introd-prog-python-3ed | 3 | 12772989 | # Utilizando a função os.walk, crie uma página HTML com o nome e tamanho de cada arquivo de um diretório passado e de
# seus subdiretórios
# Programa 9.10 do livro, página 219
# Programa 9.10 - Árvore de diretórios sendo percorrida
#
# import os
# import sys
#
# for raiz, diretorios, arquivos in os.walk(sys.argv[1]):
# print(f'\nCaminho:', raiz)
# for d in diretorios:
# print(f' {d}/')
# for f in arquivos:
# print(f' {f}/')
# print(f'{len(diretorios)} diretório(s), {len(arquivos)} arquivo(s)')
import sys
import os
import os.path
import urllib.request
def generate_listing(page, directory):
    """Write one HTML <p> line per file under `directory` (recursively).

    Each line links to the file (path percent-encoded via pathname2url) and
    shows its size in bytes. `page` is any writable text-file-like object.
    """
    import html  # local import so this function stays self-contained

    for root, directories, files in os.walk(directory):
        for file in files:
            full_path = os.path.join(root, file)
            size = os.path.getsize(full_path)
            link = urllib.request.pathname2url(full_path)
            # Escape the displayed name so characters like & or < in file
            # names cannot break (or inject into) the generated HTML.
            page.write(f"<p><a href='{link}'>{html.escape(file)}</a> ({size} bytes)</p>")
# Validate command-line usage: exactly one directory argument is required.
if len(sys.argv) != 2:
    print('\n\nDigite o nome do diretório para coletar os arquivos!')
    print('Uso: ex35.py diretório\n\n')
    sys.exit(1)

directory = sys.argv[1]

# Use a context manager so the report file is always closed, even if
# generate_listing raises (replaces the manual open()/close() pair).
with open('diretorios-e-arquivos.html', 'w', encoding='utf-8') as page:
    page.write('''
<!DOCTYPE html>
<html lang="pt-BR">
<head>
<meta charset="utf-8">
<title>Diretórios e Arquivos</title>
</head>
<body>
''')
    page.write(f'<h1>Arquivos encontrados a partir do diretório: {directory}</h1>')
    generate_listing(page, directory)
    page.write('''
</body>
</html>
''')
| 3.5 | 4 |
project/views/project_views.py | chaitphani/company-emp-proj-auth | 0 | 12772990 | <reponame>chaitphani/company-emp-proj-auth<gh_stars>0
from project.models import Project
from project.serializers import ProjectSerializer
from company.models import Company
from rest_framework import viewsets,status
from rest_framework.response import Response
class ProjectAPI(viewsets.ViewSet):
    """CRUD endpoints for Project records (delete is a soft delete via is_active)."""

    serializer_class = ProjectSerializer
    http_method_names = ["post", "get", "put", "delete", "head", "options"]

    def create(self, request, *args, **kwargs):
        """Create a new project; rejects duplicate project titles."""
        try:
            try:
                Project.objects.get(project_title=request.data["project_title"])
                # BUG FIX: the message was passed as a set literal, which DRF
                # cannot serialize to JSON; wrap it in a dict instead.
                # NOTE(review): 500 is an odd status for a duplicate title
                # (409/400 would fit better) — kept to preserve the API.
                return Response({"detail": "Project title already exist"},
                                status=status.HTTP_500_INTERNAL_SERVER_ERROR)
            except Project.DoesNotExist:
                pass
            project = Project(
                project_title=request.data["project_title"],
                project_description=request.data["project_description"],
                project_deadline_date=request.data["project_deadline_date"],
                comments=request.data["comments"],
                is_active=True,
            )
            project.save()
            return Response(
                self.serializer_class(project).data,
                status=status.HTTP_201_CREATED
            )
        except Exception as err:
            print(f"ProjectAPI create: {err}")
            # str(err): an exception instance itself is not JSON-serializable.
            return Response(str(err), status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    def list(self, request, *args, **kwargs):
        """List all active projects."""
        try:
            # BUG FIX: this endpoint queried Company.objects, returning Company
            # rows serialized with ProjectSerializer; query Project instead.
            project_list = Project.objects.filter(is_active=True)
            return Response(
                self.serializer_class(project_list, many=True).data,
                status=status.HTTP_200_OK,
            )
        except Exception as err:
            return Response(str(err), status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    def update(self, request, *args, **kwargs):
        """Partially update an active project."""
        try:
            project = Project.objects.get(id=kwargs["pk"], is_active=True)
            serializer = self.serializer_class(project, data=request.data, partial=True)
            if serializer.is_valid():
                serializer.save()
                return Response(serializer.data, status=status.HTTP_200_OK)
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        except Exception as err:
            return Response(str(err), status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    # here doing soft delete
    def destroy(self, request, *args, **kwargs):
        """Soft-delete: set is_active from the request body instead of deleting the row."""
        try:
            project = Project.objects.get(id=kwargs["pk"])
            project.is_active = request.data["is_active"]
            project.save()
            return Response(status=status.HTTP_200_OK)
        except Exception as err:
            return Response(str(err), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
| 2.375 | 2 |
app/sources_main/migrations/0027_auto_20190403_0759.py | DOSSIER-dev/DOSSIER-Sources | 7 | 12772991 | <reponame>DOSSIER-dev/DOSSIER-Sources
# Generated by Django 2.1 on 2019-04-03 07:59
from django.db import migrations
class Migration(migrations.Migration):
    """Renames Source.sourceURI to Source.externalServiceId (schema rename only)."""

    dependencies = [
        ('sources_main', '0026_auto_20190326_1544'),
    ]

    operations = [
        migrations.RenameField(
            model_name='source',
            old_name='sourceURI',
            new_name='externalServiceId',
        ),
    ]
| 1.34375 | 1 |
def rotate(cur_direction, rotation_command):
    """Return the compass heading after a turn command like "R90" or "L270".

    Generalized from the original R/L 90/180/270 table lookup: any rotation
    that is a multiple of 90 degrees (including 0 and values above 270) is
    handled by modular arithmetic over the clockwise compass order. Results
    for the originally supported commands are unchanged.
    """
    directions = "NESW"  # clockwise order, so +1 step == one right turn
    quarter_turns = int(rotation_command[1:]) // 90
    if rotation_command[0] == "L":
        quarter_turns = -quarter_turns
    return directions[(directions.index(cur_direction) + quarter_turns) % 4]
def rotate_waypoint(x, y, rotation_command):
    """Rotate the waypoint (x, y) about the ship per "R<deg>"/"L<deg>".

    Generalized from the original R/L 90/180/270 special cases: any multiple
    of 90 degrees is supported by applying the clockwise quarter-turn map
    (x, y) -> (y, -x) the required number of times. Invalid commands keep the
    original behavior of printing an error and exiting.
    """
    degrees = int(rotation_command[1:])
    if rotation_command[0] not in ("L", "R") or degrees % 90 != 0:
        print(f"Invalid command: {rotation_command}")
        exit(-1)
    quarter_turns = degrees // 90 % 4
    if rotation_command[0] == "L":
        # A left turn of k quarters equals a right turn of (4 - k) quarters.
        quarter_turns = (4 - quarter_turns) % 4
    for _ in range(quarter_turns):
        x, y = y, -x
    return x, y
def shift_coords(curx, cury, command):
    """Return the position reached from (curx, cury) by a move like "N3"."""
    direction = command[0]
    amount = int(command[1:])
    # Unit vector for each compass direction; unknown letters raise KeyError.
    unit_vectors = {
        "N": (0, 1),
        "S": (0, -1),
        "W": (-1, 0),
        "E": (1, 0),
    }
    dx, dy = unit_vectors[direction]
    return curx + amount * dx, cury + amount * dy
class Ship:
    """Ship for part 1: turn commands change the heading, F moves forward."""

    def __init__(self):
        # Starts at the origin facing East.
        self.direction = "E"
        self.x = 0
        self.y = 0

    def run_command(self, command):
        """Apply one instruction such as "F10", "R90" or "N3"."""
        action = command[0]
        if action in ("L", "R"):
            self.direction = rotate(self.direction, command)
            return
        if action == "F":
            # Forward means: move in the current heading.
            command = self.direction + command[1:]
        self.x, self.y = shift_coords(self.x, self.y, command)

    def manhattan_dist(self):
        """Manhattan distance of the ship from its start position."""
        return abs(self.x) + abs(self.y)
class ShipWithWaypoint:
    """Ship for part 2: commands steer a waypoint; F moves the ship toward it."""

    def __init__(self):
        # Ship at the origin; waypoint starts 10 east, 1 north of the ship.
        self.x = 0
        self.y = 0
        self.waypoint_x = 10
        self.waypoint_y = 1

    def run_command(self, command):
        """Apply one instruction: rotations/shifts affect the waypoint, F the ship."""
        action = command[0]
        if action in ("L", "R"):
            self.waypoint_x, self.waypoint_y = rotate_waypoint(
                self.waypoint_x, self.waypoint_y, command)
        elif action == "F":
            # Jump toward the waypoint the given number of times.
            times = int(command[1:])
            self.x += times * self.waypoint_x
            self.y += times * self.waypoint_y
        else:
            self.waypoint_x, self.waypoint_y = shift_coords(
                self.waypoint_x, self.waypoint_y, command)

    def manhattan_dist(self):
        """Manhattan distance of the ship from its start position."""
        return abs(self.x) + abs(self.y)
| 3.359375 | 3 |
tests/test_banner_module.py | tate138/Martindale_Automation | 0 | 12772993 | <gh_stars>0
from library.page_models.martindale.martindale_module_setup import MartindaleModuleSetup
from library.page_models.martindale.martindale_login import MartindaleLogin
from library.page_models.martindale.martindale_navigation import MartindaleNavigation
from library.selenium_actions import *
from library.tools import Tools
from library.page_data.martindale_app.banner_module_data import BannerModuleData
from library.page_data.martindale_app.martindale_data import MartindalePageData
from tests.test_setup.common_setup import CommonSetup, TestRailStatus
import pytest
import sys
####
# Banner Module Test
####
'''
python -m pytest "Documents/IB/Martindale Automation/tests/test_banner_module.py" -s
'''
class TestBannerModule(CommonSetup):
####
# Suite Setup and Teardown
####
@pytest.fixture(autouse=True, scope='module')
def set_up_class(self):
    '''
    Runs once before any test in this module: logs into the Martindale
    app, selects the Banner module and opens it.
    '''
    web_driver = self.fetch_webdriver()
    Tools.log("Executing Banner module tests...")
    MartindaleNavigation.navigate_to_home_page(web_driver)
    MartindaleLogin.login_to_app(web_driver)
    # Tell the shared module-setup helper which module to open.
    CommonSetup.selected_module = "banner"
    MartindaleModuleSetup.module_open(web_driver)
# will run at end of all tests
def teardown_class(self):
    '''
    Runs once after all tests in this module are complete.
    '''
    Tools.log("Banner module test run complete.")
####
# Case Setup and Teardown
####
def case_setup(self):
    '''
    Per-case setup hook; invoke from an individual test as
    self.case_setup(). Currently a no-op placeholder.
    '''
def case_teardown(self):
    '''
    Per-case cleanup hook; invoke from an individual test as
    self.case_teardown(). Delegates to the shared teardown.
    '''
    self.common_test_teardown()
####
# Test Case Setup and Teardown
####
def test_c18513007(self):
test_case_number = "18513007"
web_driver = self.fetch_webdriver()
try:
click_banner_module = SeleniumActions.fetch_web_element(web_driver, MartindalePageData.MODULE)
SeleniumActions.click_element(web_driver, click_banner_module)
Tools.sleep(2)
click_banner_name = \
SeleniumActions.fetch_web_element(web_driver, BannerModuleData.BANNER_MODULE_HEADER)
SeleniumActions.click_element(web_driver, click_banner_name)
Tools.sleep(2)
click_edit_button = \
SeleniumActions.fetch_web_element(web_driver, MartindalePageData.IFRAME_EDIT_BUTTON)
SeleniumActions.click_element(web_driver, click_edit_button)
Tools.sleep(5)
web_driver.switch_to.default_content()
element_path = BannerModuleData.GALLERY_BANNER_TITLE
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
banner_text = SeleniumActions.read_web_element_text(web_element)
try:
assert(banner_text == "BANNER MODULE")
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().PASS, '')
except:
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().FAIL, '')
except:
print("C18513007 Not Tested")
e = sys.exc_info()[0]
print("<p>Error: %s</p>" % e)
def test_c18513008(self):
test_case_number = "18513008"
web_driver = self.fetch_webdriver()
try:
click_settings_tab = \
SeleniumActions.fetch_web_element(web_driver, BannerModuleData.BANNER_MODULE_SETTINGS_TAB)
SeleniumActions.click_element(web_driver, click_settings_tab)
element_path = BannerModuleData.BANNER_MODULE_SETTINGS_ELEMENT_VISIBILITY
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
element_visibility_text = SeleniumActions.read_web_element_text(web_element)
click_layout_tab = \
SeleniumActions.fetch_web_element(web_driver, BannerModuleData.BANNER_MODULE_LAYOUT_TAB)
SeleniumActions.click_element(web_driver, click_layout_tab)
element_path = BannerModuleData.BANNER_MODULE_LAYOUT_TYPE
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
layout_type_text = SeleniumActions.read_web_element_text(web_element)
click_content_tab = \
SeleniumActions.fetch_web_element(web_driver, BannerModuleData.BANNER_MODULE_CONTENT_TAB)
SeleniumActions.click_element(web_driver, click_content_tab)
element_path = BannerModuleData.BANNER_MODULE_CONENT_MANAGE_CONTENT
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
manage_content_text = SeleniumActions.read_web_element_text(web_element)
try:
assert(element_visibility_text == "ELEMENT VISIBILITY"
and layout_type_text == "LAYOUT TYPE"
and manage_content_text == "Manage Content")
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().PASS, '')
except:
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().FAIL, '')
except:
print("C18513008 Not Tested")
e = sys.exc_info()[0]
print("<p>Error: %s</p>" % e)
def test_c18521018(self):
test_case_number = "18521018"
web_driver = self.fetch_webdriver()
try:
click_add_banner = \
SeleniumActions.fetch_web_element(web_driver, BannerModuleData.BANNER_MODULE_CONTENT_ADD_BANNER_BUTTON)
SeleniumActions.click_element(web_driver, click_add_banner)
Tools.sleep(5)
element_path = BannerModuleData.BANNER_MODULE_CONTENT_ADD_BANNER
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
add_banner_text = SeleniumActions.read_web_element_text(web_element)
try:
assert(add_banner_text == "ADD BANNER")
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().PASS, '')
except:
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().FAIL, '')
except:
print("C18521018 Not Tested")
e = sys.exc_info()[0]
print("<p>Error: %s</p>" % e)
def test_c18521037(self):
test_case_number = "18521037"
web_driver = self.fetch_webdriver()
try:
# CTA Button 1
element_path = BannerModuleData.BANNER_MODULE_CONTENT_ADD_LINK_APPEARANCE
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
SeleniumActions.select_by_index(web_element, "0")
link_text = SeleniumActions.read_select_text(web_element)
SeleniumActions.select_by_index(web_element, "1")
button_text = SeleniumActions.read_select_text(web_element)
SeleniumActions.select_by_index(web_element, "2")
icon_only = SeleniumActions.read_select_text(web_element)
SeleniumActions.select_by_index(web_element, "3")
icon_and_button = SeleniumActions.read_select_text(web_element)
# CTA Button 2
element_path = BannerModuleData.BANNER_MODULE_CONTENT_ADD_LINK_APPEARANCE_TWO
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
Tools.sleep(2)
SeleniumActions.select_by_index(web_element, "0")
link_text_two = SeleniumActions.read_select_text(web_element)
SeleniumActions.select_by_index(web_element, "1")
button_text_two = SeleniumActions.read_select_text(web_element)
SeleniumActions.select_by_index(web_element, "2")
icon_only_two = SeleniumActions.read_select_text(web_element)
SeleniumActions.select_by_index(web_element, "3")
icon_and_button_two = SeleniumActions.read_select_text(web_element)
try:
assert(link_text == "Text Link" and button_text == "Text Button" and icon_only == "Icon Only"
and icon_and_button == "Icon & Text Button" and link_text_two == "Text Link"
and button_text_two == "Text Button" and icon_only_two == "Icon Only"
and icon_and_button_two == "Icon & Text Button")
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().PASS, '')
except:
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().FAIL, '')
except:
print("C18521037 Not Tested")
e = sys.exc_info()[0]
print("<p>Error: %s</p>" % e)
def test_c18521024(self):
test_case_number = "18521024"
web_driver = self.fetch_webdriver()
try:
# CTA Button 1
element_path = BannerModuleData.BANNER_MODULE_CONTENT_ADD_LINK_TYPE
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
Tools.sleep(2)
SeleniumActions.select_by_index(web_element, "0")
my_pages = SeleniumActions.read_select_text(web_element)
SeleniumActions.select_by_index(web_element, "1")
external_link = SeleniumActions.read_select_text(web_element)
SeleniumActions.select_by_index(web_element, "2")
no_link = SeleniumActions.read_select_text(web_element)
# CTA Button 2
element_path = BannerModuleData.BANNER_MODULE_CONTENT_ADD_LINK_TYPE_TWO
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
Tools.sleep(2)
SeleniumActions.select_by_index(web_element, "0")
my_pages_two = SeleniumActions.read_select_text(web_element)
SeleniumActions.select_by_index(web_element, "1")
external_link_two = SeleniumActions.read_select_text(web_element)
SeleniumActions.select_by_index(web_element, "2")
no_link_two = SeleniumActions.read_select_text(web_element)
try:
assert(my_pages == "My Pages" and external_link == "External Link" and no_link == "No Link"
and my_pages_two == "My Pages" and external_link_two == "External Link"
and no_link_two == "No Link")
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().PASS, '')
except:
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().FAIL, '')
except:
print("C18521024 Not Tested")
e = sys.exc_info()[0]
print("<p>Error: %s</p>" % e)
def test_c18521040(self):
test_case_number = "18521040"
web_driver = self.fetch_webdriver()
try:
# CTA Button 1
element_path = BannerModuleData.BANNER_MODULE_CONTENT_ADD_LINK_TYPE
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
SeleniumActions.select_by_index(web_element, "0")
Tools.sleep(2)
element_path = BannerModuleData.BANNER_MODULE_CONTENT_ADD_LINK_PAGE_LABEL
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
link_page_element = SeleniumActions.read_web_element_text(web_element)
# CTA Button 2
element_path = BannerModuleData.BANNER_MODULE_CONTENT_ADD_LINK_TYPE_TWO
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
SeleniumActions.select_by_index(web_element, "0")
Tools.sleep(2)
element_path = BannerModuleData.BANNER_MODULE_CONTENT_ADD_LINK_PAGE_LABEL_TWO
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
link_page_element_two = SeleniumActions.read_web_element_text(web_element)
try:
assert(link_page_element == "Link Page *" and link_page_element_two == "Link Page *")
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().PASS, '')
except:
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().FAIL, '')
except:
print("C18521040 Not Tested")
e = sys.exc_info()[0]
print("<p>Error: %s</p>" % e)
def test_c18521041(self):
test_case_number = "18521041"
web_driver = self.fetch_webdriver()
try:
# CTA Button 1
element_path = BannerModuleData.BANNER_MODULE_CONTENT_ADD_LINK_TYPE
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
SeleniumActions.select_by_index(web_element, "1")
Tools.sleep(2)
element_path = BannerModuleData.BANNER_MODULE_CONTENT_ADD_LINK_PAGE_URL_LABEL
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
link_page_element = SeleniumActions.read_web_element_text(web_element)
# CTA Button 2
element_path = BannerModuleData.BANNER_MODULE_CONTENT_ADD_LINK_TYPE_TWO
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
SeleniumActions.select_by_index(web_element, "1")
Tools.sleep(2)
element_path = BannerModuleData.BANNER_MODULE_CONTENT_ADD_LINK_PAGE_URL_LABEL_TWO
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
link_page_element_two = SeleniumActions.read_web_element_text(web_element)
try:
assert(link_page_element == "URL *" and link_page_element_two == "URL *")
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().PASS, '')
except:
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().FAIL, '')
except:
print("C18521041 Not Tested")
e = sys.exc_info()[0]
print("<p>Error: %s</p>" % e)
def test_c18521042(self):
test_case_number = "18521042"
web_driver = self.fetch_webdriver()
try:
click_save_button = \
SeleniumActions.fetch_web_element(web_driver, BannerModuleData.ADD_BANNER_SAVE_BUTTON)
SeleniumActions.click_element(web_driver, click_save_button)
element_path = BannerModuleData.BANNER_TITLE_ERROR
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
banner_title_error = SeleniumActions.read_web_element_text(web_element)
element_path = BannerModuleData.BANNER_CAPTION_ERROR
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
banner_caption_error = SeleniumActions.read_web_element_text(web_element)
banner_title = SeleniumActions.fetch_web_element(web_driver, BannerModuleData.BANNER_TITLE)
SeleniumActions.write_to_element \
(web_driver, banner_title, "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012")
banner_caption = SeleniumActions.fetch_web_element(web_driver, BannerModuleData.BANNER_CAPTION)
SeleniumActions.write_to_element \
(web_driver, banner_caption, "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012")
click_save_button = \
SeleniumActions.fetch_web_element(web_driver, BannerModuleData.ADD_BANNER_SAVE_BUTTON)
SeleniumActions.click_element(web_driver, click_save_button)
element_path = BannerModuleData.BANNER_TITLE_ERROR
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
banner_title_error_two = SeleniumActions.read_web_element_text(web_element)
element_path = BannerModuleData.BANNER_CAPTION_ERROR
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
banner_caption_error_two = SeleniumActions.read_web_element_text(web_element)
try:
assert(banner_title_error == "This field is required."
and banner_caption_error == "This field is required."
and banner_title_error_two == "Please enter no more than 100 characters."
and banner_caption_error_two == "Please enter no more than 200 characters.")
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().PASS, '')
except:
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().FAIL, '')
except:
print("C18521042 Not Tested")
e = sys.exc_info()[0]
print("<p>Error: %s</p>" % e)
def test_c18521043(self):
test_case_number = "18521043"
web_driver = self.fetch_webdriver()
try:
banner_title = SeleniumActions.fetch_web_element(web_driver, BannerModuleData.BANNER_TITLE)
SeleniumActions.clearTextField(web_driver, banner_title)
SeleniumActions.write_to_element(web_driver, banner_title, "Automated")
banner_caption = SeleniumActions.fetch_web_element(web_driver, BannerModuleData.BANNER_CAPTION)
SeleniumActions.clearTextField(web_driver, banner_caption)
SeleniumActions.write_to_element(web_driver, banner_caption, "Test")
click_save_button = \
SeleniumActions.fetch_web_element(web_driver, BannerModuleData.ADD_BANNER_SAVE_BUTTON)
SeleniumActions.click_element(web_driver, click_save_button)
Tools.sleep(4)
web_element = SeleniumActions.find_by_xpath(web_driver, "//*[contains(@id, 'item_item-')]")
new_banner_test = SeleniumActions.read_web_element_text(web_element)
try:
assert(new_banner_test == "Title: Automated\nTest")
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().PASS, '')
except:
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().FAIL, '')
except:
print("C18521043 Not Tested")
e = sys.exc_info()[0]
print("<p>Error: %s</p>" % e)
def test_c18521045(self):
test_case_number = "18521045"
web_driver = self.fetch_webdriver()
try:
web_element = SeleniumActions.find_by_xpath(web_driver, "//*[contains(@id, 'edit_item-')]")
SeleniumActions.click_element(web_driver, web_element)
Tools.sleep(3)
banner_title = SeleniumActions.fetch_web_element(web_driver, BannerModuleData.BANNER_TITLE)
try:
assert(SeleniumActions.element_is_visible(web_driver, banner_title))
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().PASS, '')
except:
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().FAIL, '')
except:
print("C18521045 Not Tested")
e = sys.exc_info()[0]
print("<p>Error: %s</p>" % e)
def test_c18521044(self):
test_case_number = "18521044"
web_driver = self.fetch_webdriver()
try:
web_element = SeleniumActions.find_by_xpath(web_driver, BannerModuleData.ADD_BANNER_CANCEL_BUTTON)
SeleniumActions.click_element(web_driver, web_element)
Tools.sleep(3)
element_path = BannerModuleData.GALLERY_BANNER_TITLE
web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
try:
assert(SeleniumActions.element_is_visible(web_driver, web_element))
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().PASS, '')
except:
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().FAIL, '')
except:
print("C18521044 Not Tested")
e = sys.exc_info()[0]
print("<p>Error: %s</p>" % e)
def test_c18521046(self):
test_case_number = "18521046"
web_driver = self.fetch_webdriver()
try:
web_element = SeleniumActions.find_by_xpath(web_driver, "//*[contains(@id, 'delete_item-')]")
SeleniumActions.click_element(web_driver, web_element)
Tools.sleep(2)
web_element = SeleniumActions.find_by_xpath(web_driver, BannerModuleData.DELETE_BANNER_CONFIRMATION)
SeleniumActions.element_is_visible(web_driver, web_element)
web_element = SeleniumActions.find_by_xpath(web_driver, BannerModuleData.DELETE_BANNER_CANCEL_BUTTON)
SeleniumActions.click_element(web_driver, web_element)
Tools.sleep(2)
web_element = SeleniumActions.find_by_xpath(web_driver, "//*[contains(@id, 'item_item-')]")
SeleniumActions.element_is_visible(web_driver, web_element)
web_element = SeleniumActions.find_by_xpath(web_driver, "//*[contains(@id, 'delete_item-')]")
SeleniumActions.click_element(web_driver, web_element)
Tools.sleep(2)
web_element = SeleniumActions.find_by_xpath(web_driver, BannerModuleData.DELETE_BANNER_OK_BUTTON)
SeleniumActions.click_element(web_driver, web_element)
Tools.sleep(4)
web_element = SeleniumActions.find_by_xpath(web_driver, "//*[contains(@id, 'item_item-')]")
new_banner_test = SeleniumActions.read_web_element_text(web_element)
try:
assert(new_banner_test == "Title: Title\nCaption")
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().PASS, '')
except:
CommonSetup.report_test_rail(self, test_case_number, TestRailStatus().FAIL, '')
self.case_teardown()
except:
print("C18521230 Not Tested")
e = sys.exc_info()[0]
print("<p>Error: %s</p>" % e)
| 2.1875 | 2 |
Lists.py | shyed2001/Python_Programming | 2 | 12772994 | <gh_stars>1-10
#-------------------------------------------------------------------------------
# Name: module2
# Purpose:
#
# Author: user
#
# Created: 28/03/2019
# Copyright: (c) user 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
# Tutorial script: demonstrates list indexing, slicing, mutation and the
# built-in list methods.  Each snippet is echoed as a triple-quoted string
# before it runs, so the printed "code" and the executed code must stay in sync.
print("LISTS")
print("Working with LISTS")
print('''
lists=[], this is an epty list
''')
# NOTE(review): 'list' shadows the builtin list type; left as-is because the
# echoed tutorial text above/below quotes this exact code.
list=["kevin", 5, 7.9, True]
# list values are indexed
friend=["kevin", "KAren", "Jim", "Toddy", "Frog"]
# 0 1 2 3 4
print('''
list=["kevin", 5, 7.9, True]
# list values are indexed
friend=["kevin", "KAren", "Jim", "Toddy", "Frog"]
# 0 1 2 3 4
''')
print(''' print(friend[0])
=''')
print(friend[0])
print('''
print(friend[-1])
''')
print(friend[-1])
print('''
print(friend[-3])
''')
print(friend[-3])
print('''
print(friend[1:2])
''')
print(friend[1:2])
print('''
print(friend[1:3])
''')
print(friend[1:3])
print('''
print(friend[1:4])
=''')
print(friend[1:4])
print('''
update or modify list values
friend[0]= "000"
print(friend[0])
=''')
friend[0]= "000"
print(friend[0])
print(''' update or modify list values
friend[0:1]= ["000", "ttt"]
print(friend[0:2])
=''')
friend[0:1]= ["000", "ttt"]
print(friend[0:2])
print('''
List Functions
print('
List Functions
')
''')
print('''
Items=["car", "chair", "table", "mat"]
num=[8.8, 7.7,9.9,5.5,25,34,35,43,45,50,52,53,55,61,70,71,77,91,92,93,99,105,115,
122,125,133,151,155,160,170,171,177,181,192,205,250,322,331]
numbers=[4,8,15,16,23,42]
FRINDS=["kevin", "KAren", "Jim", "Toddy", "Frog", "Todd", "Jorge", "Lang"]
# 0 1 2 3 4 5 7 8
''')
Items=["car", "chair", "table", "mat"]
num=[8.8, 7.7,9.9,5.5,25,34,35,43,45,50,52,53,55,61,70,71,77,91,92,93,99,105,115,
122,125,133,151,155,160,170,171,177,181,192,205,250,322,331]
numbers=[4,8,15,16,23,42]
FRINDS=["kevin", "KAren", "Jim", "Toddy", "Frog", "Todd", "Jorge", "Lang"]
# 0 1 2 3 4 5 7 8
print('''
FRINDS.extend(numbers)
print(FRINDS)
=''')
# extend() appends the *elements* of the argument one by one
FRINDS.extend(numbers)
print(FRINDS)
print('''
FRINDS.append(num)
print(FRINDS)
=''')
# append() adds the argument as a single (possibly nested) element
FRINDS.append(num)
print(FRINDS)
print('''
FRINDS.extend(Items)
FRINDS.extend("77777777777")
print(FRINDS)
=''')
# extending with a string adds each character individually
FRINDS.extend(Items)
FRINDS.extend("77777777777")
print(FRINDS)
print('''
FRINDS.append(Items)
FRINDS.append("55555555555")
print(FRINDS)
=''')
FRINDS.append(Items)
FRINDS.append("55555555555")
print(FRINDS)
print('''
FRINDS.insert(1,"00000000000000")
# index 1, replace with
print(FRINDS)
=''')
FRINDS.insert(1,"00000000000000")
# index 1, replace with this
print(FRINDS)
print('''
=''')
print('''
FRINDS.insert(7,Items*3)
print(FRINDS)
=''')
FRINDS.insert(7,Items*3)
print(FRINDS)
print(''' remove one item
FRINDS.remove("00000000000000")
print(FRINDS)
=''')
FRINDS.remove("00000000000000" )
print(FRINDS)
print(''' clear or remove last element\item of the list
FRINDS.pop()
print(FRINDS)
=''')
FRINDS.pop()
print(FRINDS)
print(''' find element\item of the list
FRINDS.index('Toddy')
FRINDS.index("car")
print(FRINDS.index("car"))
=''')
print(''' count number of a value
print(FRINDS.count("car"))
print(FRINDS.count("7"))
=''')
print(FRINDS.index("car"))
print(FRINDS.index('Toddy'))
print(FRINDS.count("car"))
print(FRINDS.count("7"))
print(''' clear all items
FRINDS.clear()
print(FRINDS)
=''')
FRINDS.clear()
print(FRINDS)
FRINDS=["You","HE", "he", "WE", "we","We", "wE"]
math=[3, 33, 4.4, 9.0, 9, .1, 0.1, 0, 0.0, .0, .00, 00, 00.00, 00.000, 0000]
print(''' sort/arrange all items alphabetically in assending order
FRINDS=["You","HE", "he", "WE", "we","We", "wE"]
math=[3, 33, 4.4, 9.0, 9, .1, 0.1, 0, 0.0, .0, .00, 00, 00.00, 00.000, 0000]
FRINDS.sort()
math.sort()
print(FRINDS)
print(math)
=''')
FRINDS.sort()
math.sort()
print(FRINDS)
print(math)
print(""" reverse the list items
FRINDS.reverse()
math.reverse()
print(FRINDS)
print(math)
""")
FRINDS.reverse()
math.reverse()
print(FRINDS)
print(math)
print(""" copy the list items to another list
FRINDS3=math
FRINDS2=math.copy()
FRINDS3=math
print(FRINDS2)
print(FRINDS3)
""")
# copy() makes an independent shallow copy; plain assignment only aliases
FRINDS3=math
FRINDS2=math.copy()
FRINDS3=math
print(FRINDS2)
print(FRINDS3)
#-------------------------------------------------------------------------------
# Name: Print Book pages module1
# Purpose: Get printed missing pages / numbers
#
# Author: user/shyed
#
# Created: 27/03/2019
# Copyright: (c) user 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
# Build the interleaved page sequence 225,226, 229,230, ... 597,598
# (a pair of consecutive pages every 4 numbers), then print it sorted.
a=225 # start at 225
b=226 # start at 226
l=599 # stop at 599
list1=[] # initialize an empty list
for i in range(a,l+1,4): list1.append(i)
for j in range(b,l, 4): list1.append(j)
list1.sort()
print(list1)
print('''
a=225
b=226
l=599
list1=[]
for i in range(a,l+1,4): list1.append(i)
for j in range(b,l, 4): list1.append(j)
list1.sort()
print(list1)
''')
print(""" That code will result this list,
[225, 226, 229, 230, 233, 234, 237, 238, 241, 242, 245, 246,
249, 250, 253, 254, 257, 258, 261, 262, 265, 266, 269, 270, 273, 274,
277, 278, 281, 282, 285, 286, 289, 290, 293, 294, 297, 298, 301, 302,
305, 306, 309, 310, 313, 314, 317, 318, 321, 322, 325, 326, 329,
330, 333, 334, 337, 338, 341, 342, 345, 346, 349, 350, 353, 354,
357, 358, 361, 362, 365, 366, 369, 370, 373, 374, 377, 378, 381,
382, 385, 386, 389, 390, 393, 394, 397, 398, 401, 402, 405, 406,
409, 410, 413, 414, 417, 418, 421, 422, 425, 426, 429, 430, 433,
434, 437, 438, 441, 442, 445, 446, 449, 450, 453, 454, 457, 458,
461, 462, 465, 466, 469, 470, 473, 474, 477, 478, 481, 482, 485,
486, 489, 490, 493, 494, 497, 498, 501, 502, 505, 506, 509, 510,
513, 514, 517, 518, 521, 522, 525, 526, 529, 530, 533, 534,
537, 538, 541, 542, 545, 546, 549, 550, 553, 554, 557, 558,
561, 562, 565, 566, 569, 570, 573, 574, 577, 578, 581, 582,
585, 586, 589, 590, 593, 594, 597, 598]""")
setup.py | gabrielbdornas/gdlib | 0 | 12772995 | <reponame>gabrielbdornas/gdlib
from setuptools import setup, find_packages
import codecs
import os
import sys
# Make the package importable when building from a subdirectory.
sys.path.insert(0, os.path.abspath('..'))

if __name__ == '__main__':
    # Read the descriptive files once each, with an explicit UTF-8 decode and
    # deterministic closing; the original open(...).read() calls leaked the
    # file handles and relied on the platform default encoding.
    with codecs.open('README.md', encoding='utf-8') as readme:
        long_description = readme.read() + '\n\n'
    with codecs.open('CHANGELOG.md', encoding='utf-8') as changelog:
        long_description += changelog.read()
    with codecs.open('requirements.txt', encoding='utf-8') as requirements:
        install_requires = requirements.read()

    # Setting up
    setup(
        name='gdlib',
        version='0.0.1.9001',
        author='<NAME>',
        author_email='<EMAIL>',
        description="""Breve Pacote Olá Mundo criado como tutorial e futuro template boilerplate para criação de pacotes""",
        long_description_content_type='text/markdown',
        long_description=long_description,
        url='https://github.com/gabrielbdornas/gdlib',
        packages=find_packages(),
        install_requires=install_requires,
        keywords=['python'],
        classifiers=[
            "Development Status :: 1 - Planning",
            "Intended Audience :: Developers",
            "Programming Language :: Python :: 3",
            "Operating System :: Unix",
            "Operating System :: MacOS :: MacOS X",
            "Operating System :: Microsoft :: Windows",
        ],
        entry_points="""
        [console_scripts]
        gdlib=gdlib.cli:cli
        """
    )
| 1.625 | 2 |
tests/data_structures/linked_lists/test_linked_lists.py | maurobaraldi/python-algorithms | 2 | 12772996 | <reponame>maurobaraldi/python-algorithms
#!/usr/bin/env python
import unittest
from data_structures.linked_lists.linked_lists import Node, LinkedList
class TestNode(unittest.TestCase):
    """Unit tests for the Node building block."""

    def test_define_a_node(self):
        """A freshly created node is a Node and stores the data it was given."""
        created = Node(1)
        self.assertIsInstance(created, Node)
        self.assertEqual(created.data, 1)
class TestLinkdeList(unittest.TestCase):
    """Unit tests for the LinkedList container.

    (The class name keeps its historical 'Linkde' typo so existing test
    selection/discovery configuration is unaffected.)
    """

    def test_instantiate_a_linked_list(self):
        """A list with a single head node exposes that node's data."""
        linkedl = LinkedList()
        linkedl.head = Node(1)
        self.assertIsInstance(linkedl, LinkedList)
        self.assertEqual(linkedl.head.data, 1)

    def test_instantiate_a_linked_list_with_three_nodes(self):
        """Manually chained nodes are reachable through their ``next`` links."""
        linkedl = LinkedList()
        first = Node(1)
        second = Node(2)
        third = Node(3)
        linkedl.head = first
        linkedl.head.next = second
        second.next = third
        self.assertEqual(linkedl.head.data, 1)
        self.assertEqual(linkedl.head.next.data, 2)
        self.assertEqual(linkedl.head.next.next.data, 3)

    def test_push_node_to_linked_list(self):
        """push() prepends, making the new value the head."""
        linked_list = LinkedList()
        linked_list.head = Node(1)
        linked_list.push(2)
        self.assertEqual(linked_list.head.data, 2)
        self.assertEqual(linked_list.head.next.data, 1)

    def test_insert_after_node_in_linked_list(self):
        """insert_after() places the new node directly after the given node."""
        linked_list = LinkedList()
        linked_list.head = Node(1)
        linked_list.head.next = Node(2)
        linked_list.insert_after(linked_list.head, 3)
        # fix: removed the stray debug call linked_list._print() that
        # polluted the test runner's output
        self.assertEqual(linked_list.head.data, 1)
        self.assertEqual(linked_list.head.next.data, 3)
        self.assertEqual(linked_list.head.next.next.data, 2)

    def test_append_node_to_linked_list(self):
        """append() adds values at the tail in order."""
        linked_list = LinkedList()
        linked_list.append(1)
        linked_list.append(3)
        linked_list.append(6)
        self.assertEqual(linked_list.head.data, 1)
        self.assertEqual(linked_list.head.next.data, 3)
        self.assertEqual(linked_list.head.next.next.data, 6)

    def test_delete_node_from_linked_list(self):
        """delete() removes the node holding the given value and relinks."""
        linked_list = LinkedList()
        linked_list.append(1)
        linked_list.append(2)
        linked_list.append(3)
        linked_list.delete(2)
        self.assertEqual(linked_list.head.data, 1)
        self.assertEqual(linked_list.head.next.data, 3)

    def test_reverse_a_linked_list(self):
        """reverse() inverts the order of the nodes in place."""
        linked_list = LinkedList()
        linked_list.append(1)
        linked_list.append(2)
        linked_list.append(3)
        linked_list.reverse()
        self.assertEqual(linked_list.head.data, 3)
        self.assertEqual(linked_list.head.next.next.data, 1)
| 4.15625 | 4 |
0_ACMSGURU/100 A+B.py | zielman/Codeforces-solutions | 0 | 12772997 | <gh_stars>0
# https://codeforces.com/problemsets/acmsguru/problem/99999/100
# Read two whitespace-separated integers from stdin and print their sum.
first, second = (int(token) for token in input().split())
print(first + second)
| 2.859375 | 3 |
decoder.py | craigderington/python-socket-server | 1 | 12772998 | # .env/bin/python
# coding: utf-8
from collections import namedtuple
from datetime import datetime
def pad_reading(_reading, width=32):
    """
    Left-pad a binary digit string with zeros to a full *width*-bit string.

    Fixes two defects in the original: ``prefix`` started as the int 0 so
    ``prefix += '0'`` raised TypeError, and only the zero prefix (of length
    len-1, not the amount needed for 32 bits) was returned even though the
    caller assigns the result back as the full reading.

    :param _reading: binary digit string (no '0b' prefix)
    :param width: target bit width (default 32, matching the 4-byte readings)
    :return: *_reading* left-padded with '0' to at least *width* characters
    """
    return _reading.zfill(width)
def hex_to_dec(hex_str):
    """Convert a hexadecimal string (or hex-like value) to a decimal int."""
    as_text = str(hex_str)
    return int(as_text, 16)
def bin_to_dec(binary_str):
    """Convert a binary digit string to its decimal value ('' yields 0)."""
    total = 0
    for power, digit in enumerate(reversed(binary_str)):
        total += int(digit) * (2 ** power)
    return total
def inspect_header(h, n):
    """Split *h* into consecutive chunks of length *n* (last may be shorter)."""
    chunks = []
    start = 0
    while start < len(h):
        chunks.append(h[start:start + n])
        start += n
    return chunks
def decode_header(header):
    """
    Decode the payload header and return a named tuple as an OrderedDict
    :param header: bytes representation of the first 32 bytes of data
    :return: OrderedDict
    """
    # define named tuple
    # NOTE(review): the field string mixes ',' and ' ' separators
    # ('imei gsm_rssi' has no comma); namedtuple splits on both, so the
    # ten fields still parse as intended.
    DecodedHeader = namedtuple('DecodedHeader', 'product_type, hardware_rev, firmware_rev, contact_reason, '
                                                'alarm_status, imei gsm_rssi, battery_status, message_type, '
                                                'payload_len')
    # start conversions: each byte has a different conversion method, so try this...
    # optionally, use data.decode('utf-8') in hex_to_dec function
    # NOTE(review): the locals below are bound only for the indexes handled, so
    # a header shorter than 17 entries raises NameError at the prints further
    # down — confirm callers always pass a full header.
    for idx, data in enumerate(header):
        # print(idx, data)
        if idx == 0:
            product_type = int(str(data), 16)
        elif idx == 1:
            # NOTE(review): parsed base-2 here while every other byte is parsed
            # base-16 — looks inconsistent; verify against the device protocol.
            hardware_rev = int(data, 2)
        elif idx == 2:
            # firmware byte -> binary string; major lives in the low nibble,
            # minor in the high bits ('0b' prefix stripped first)
            firmware = bin(int(data, 16)).replace('0b', '')
            # NOTE(review): slices [0:3] and [4:8] skip bit 3 entirely — confirm.
            firmware_rev_minor = bin_to_dec(firmware[0:3])
            firmware_rev_major = bin_to_dec(firmware[4:8])
            firmware_rev = str(firmware_rev_major) + '.' + str(firmware_rev_minor)
        elif idx == 3:
            contact_reason = bin(int(data, 16))
        elif idx == 4:
            alarm_status = bin(int(data, 16))
        elif idx == 5:
            gsm_rssi = int(str(data), 16)
        elif idx == 6:
            battery_status = int(str(data), 16)
        elif idx == 15:
            message_type = int(str(data), 16)
        elif idx == 16:
            payload_len = int(str(data), 16)
    # create imei from the middle of the string
    imei_list = header[7:15]
    # the list elements are bytes, re-encode to create string
    imei = ''.join(str(i) for i in imei_list)
    # print vars
    print('Product Type: {}'.format(product_type))
    print('Hardware Rev: {}'.format(hardware_rev))
    print('Firmware Rev: {}'.format(firmware_rev))
    print('Contact Reason: {}'.format(contact_reason))
    print('Alarm Status: {}'.format(alarm_status))
    print('RSSI: {}'.format(gsm_rssi))
    print('Battery Status: {}'.format(battery_status))
    print('IMEI: {}'.format(imei))
    print('Message Type: {}'.format(message_type))
    print('Payload Length: {}'.format(payload_len))
    # set the variable to the decoded values
    hdr = DecodedHeader(product_type=product_type, hardware_rev=hardware_rev, firmware_rev=firmware_rev,
                        contact_reason=contact_reason, alarm_status=alarm_status, gsm_rssi=gsm_rssi,
                        battery_status=battery_status, imei=imei, message_type=message_type, payload_len=payload_len)
    # return hdr as an ordered dict
    return hdr._asdict()
def decode_readings(reading):
    """
    Decode a single 4-byte transmission reading from the payload.

    Fixes the original, which raised TypeError on entry at
    ``distance, temperature, src, rssi = 0`` (unpacking an int) and depended
    on the broken ``pad_reading`` helper; the padding is now done inline with
    ``str.zfill`` and the unused initial values/timestamp were removed.

    Bit layout (sample reading '0A5B2877'):
      byte 1 -> RSSI
      byte 2 -> temperature (raw / 2 * 30)
      byte 3 -> two MSBs of the distance + 4-bit source id
      byte 4 -> low 8 bits of the distance

    :param reading: hex string of one 32-bit reading, e.g. '0A5B2877'
    :return: dict with 'distance', 'temperature', 'src' and 'rssi'
    """
    # binary representation, left-padded to the full 32 bits
    bits = bin(int(reading, 16))[2:].zfill(32)

    byte1 = bits[0:8]
    byte2 = bits[8:16]
    byte3 = bits[16:24]
    byte4 = bits[24:32]

    # distance: the two MSBs of byte3 concatenated with byte4 (10 bits)
    distance = int(byte3[:2] + byte4, 2)
    # source id: bits 2-5 of byte3
    src = int(byte3[2:6], 2)
    temperature = float(int(byte2, 2)) / 2 * 30
    rssi = float(int(byte1, 2))

    return {
        'distance': distance,
        'temperature': temperature,
        'src': src,
        'rssi': rssi,
    }
| 3.125 | 3 |
projects/tests/analysis-weibel.py | Krissmedt/imprunko | 5 | 12772999 | <gh_stars>1-10
import numpy as np
import matplotlib.pyplot as plt
import h5py
import sys, os
import matplotlib.ticker as ticker
from scipy.stats import mstats
from scipy.optimize import curve_fit
from combine_files import combine_files, combine_tiles
from configSetup import Configuration
#--------------------------------------------------
# combine multiple files
fdir = 'weibel/out/'
conf = Configuration('config-weibel.ini')

print("files...")
ex = combine_files(fdir, "field", "ex", conf)
ey = combine_files(fdir, "field", "ey", conf)
ez = combine_files(fdir, "field", "ez", conf)
bx = combine_files(fdir, "field", "bx", conf)
by = combine_files(fdir, "field", "by", conf)
bz = combine_files(fdir, "field", "bz", conf)
rho = combine_files(fdir, "field", "rho", conf)
ekin = combine_files(fdir, "analysis", "edens", conf, isp=0)

#--------------------------------------------------
# read simulation data from file
# fix: the two remaining Python 2 print statements were converted to print()
# calls so the script parses under Python 3 (the rest of the file already
# used the function form).
print("Ex shape:", np.shape(ex))

# Read simulation values
dt = conf.dt*conf.interval
dx = conf.dx
print(dt, dx)

nt, nx, ny, nz = np.shape(ex)

maxi = -1
time = np.arange(nt)*dt
maxtime = time[maxi]

#--------------------------------------------------
# set up figure
plt.rc('font', family='serif')
plt.rc('xtick', labelsize=7)
plt.rc('ytick', labelsize=7)
plt.rc('axes', labelsize=7)

fig = plt.figure(figsize=(3.54, 6.0))  # single column fig
#fig = plt.figure(figsize=(7.48, 4.0)) # two column figure
gs = plt.GridSpec(5, 1, wspace=0.0)

axs = []
axs.append( plt.subplot(gs[0,0]) )
axs.append( plt.subplot(gs[1,0]) )
axs.append( plt.subplot(gs[2,0]) )
axs.append( plt.subplot(gs[3,0]) )
axs.append( plt.subplot(gs[4,0]) )

for ax in axs:
    ax.minorticks_on()
    ax.set_xlabel(r'time $t\omega_{\mathrm{p}}$ ')
    ax.set_xlim((0.0, maxtime))

axs[0].set_ylabel(r'$\ln \delta E_x$')
axs[1].set_ylabel(r'Energy $\epsilon$')
axs[2].set_ylabel(r'$\Delta m$')
axs[3].set_ylabel(r'$\epsilon_K$')
axs[4].set_ylabel(r'$E_T$')

def flatten_spatial(arr):
    """Collapse the three spatial axes into one, keeping leading (time) axes."""
    return arr.reshape( arr.shape[:-3] + (-1,) )

#--------------------------------------------------
# max{| X |} of the E_x field over space, per snapshot
axs[0].set_yscale('log')
ex_max = np.max( np.abs( flatten_spatial(ex) ),1 )
axs[0].plot(time, ex_max, 'b.-')

#ey_max = np.max( np.abs( flatten_spatial(ey) ),1 )
#axs[0].plot(time, ey_max, 'g-')
#
#bz_max = np.max( np.abs( flatten_spatial(bz) ),1 )
#axs[0].plot(time, bz_max, 'r-')
#axs[0].set_ylim(-20.0, 1.0)

# TODO: analytic growth-rate reference line
Gm = 0.040
Gms = -22.7 + time*Gm
#axs[0].plot(time, Gms, 'r--')

##################################################
# energy densities, component by component
axs[1].set_yscale('log')
ex_edens = np.sum( flatten_spatial(ex*ex), 1 )
axs[1].plot(time, ex_edens, linestyle="dashed", color="blue")

ey_edens = np.sum( flatten_spatial(ey*ey), 1 )
axs[1].plot(time, ey_edens, linestyle="dotted", color="blue")

bz_edens = np.sum( flatten_spatial(bz*bz), 1 )
axs[1].plot(time, ex_edens, linestyle="-.", color="red")

# intermediate save so a partial figure exists even if later panels fail
plt.subplots_adjust(left=0.18, bottom=0.12, right=0.98, top=0.85, wspace=0.0, hspace=0.0)
plt.savefig('weibel/weibel.pdf')

#ey_edens = np.sum( flatten_spatial(ey*ey), 1 )
#axs[1].plot(time, ey_edens)
#
#bz_edens = np.sum( flatten_spatial(bz*bz), 1 )
#axs[1].plot(time, bz_edens)

# total electric / magnetic energy
edens = np.sum( flatten_spatial( ex*ex + ey*ey + ez*ez ), 1 )
#edens = np.sum( flatten_spatial( ex*ex ), 1 )
bdens = np.sum( flatten_spatial( bx*bx + by*by + bz*bz ), 1 )
#bdens = np.sum( flatten_spatial( bz*bz ), 1 )
axs[1].plot(time, edens, "b-")
axs[1].plot(time, bdens, "r-")

# TODO: model prediction
#Gms = -16.0 + time*Gm # 1/2 comes from compensation of E_x^2
#axs[1].plot(time, Gms, 'r--')
#axs[1].set_ylim(-10.0, 4.0)

##################################################
# relative particle-number (charge) conservation
prtcls = np.sum( flatten_spatial(rho), 1)  # integrate particle density
#prtcls /= prtcls[0]
prtcls = np.abs(prtcls - prtcls[0] )/prtcls[0]
#prtcls = np.clip(prtcls, 1.0e-8, 1.0e2)
axs[2].plot(time, np.log10(prtcls))
#axs[2].plot(time, prtcls)

##################################################
# total kinetic energy
ekintot = np.sum( flatten_spatial(ekin), 1)
axs[3].plot(time, ekintot)

##################################################
print("ekin max:", np.max(ekintot))
print("efield max:", np.max(edens))
print("bfield max:", np.max(bdens))
print("ratio: ekin/e", np.mean(ekintot)/np.mean(edens))
print("ratio: ekin/b", np.mean(ekintot)/np.mean(bdens))

etot = ekintot + edens + bdens
#axs[4].plot(time, etot, "k-" )
#axs[4].plot(time, ekintot, "b--")
axs[4].plot(time, edens, "b--")
axs[4].plot(time, bdens, "r--")
#axs[4].plot(time, ex_edens, "r--")
#axs[4].plot(time, ey_edens, "r--")
#axs[4].plot(time, bz_edens, "r--")

plt.subplots_adjust(left=0.18, bottom=0.12, right=0.98, top=0.85, wspace=0.0, hspace=0.0)
plt.savefig('weibel/weibel.pdf')
| 1.859375 | 2 |
app/article/migrations/0007_auto_20200419_1210.py | nabechin/article | 0 | 12773000 | <filename>app/article/migrations/0007_auto_20200419_1210.py
# Generated by Django 3.0.5 on 2020-04-19 12:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: make Comment.article a nullable FK with
    related_name='comment' and cascading delete."""

    dependencies = [
        ('article', '0006_auto_20200419_1015'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comment',
            name='article',
            # Nullable FK back to Article; deleting an Article cascades to its
            # comments. Reverse accessor is article.comment (singular name).
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comment', to='article.Article'),
        ),
    ]
| 1.257813 | 1 |
flask_SQLAlchemy_app/db_airline2/create_db.py | pk8742/CS50_flask_tutorials | 0 | 12773001 | # app.py
# python file to create database
from flask import Flask, render_template, request
from models import * # our file defined above to define the classes/tables
app = Flask(__name__)
# NOTE(review): database credentials are hard-coded in this URI; consider
# loading them from an environment variable instead.
app.config["SQLALCHEMY_DATABASE_URI"] = 'postgresql://postgres:pk8742@localhost:5432/airline2'
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.init_app(app) # tie this database with this flask application
def main():
    # Create all tables defined by the imported models (no-op for existing ones).
    db.create_all()
if __name__ == "__main__":
    with app.app_context(): # we need this to properly interact with flask-application
        main()
| 2.953125 | 3 |
scrapyd/db/pgdbadapter.py | c9dong/scrapyd-heroku-postgres | 0 | 12773002 | import psycopg2 as pg
import urlparse
class PgDbAdapter:
  """Thin psycopg2 wrapper that connects from a database URL string."""

  def __init__(self, db_url):
    url = urlparse.urlparse(db_url)

    # Remove query strings.
    path = url.path[1:]
    path = path.split('?', 2)[0]

    args = {
      'dbname': path,
      'user': url.username,
      # BUGFIX: the password field held a broken placeholder; take it from
      # the parsed URL like the other connection parameters.
      'password': url.password,
      'host': url.hostname,
      'port': url.port,
    }
    conn_string = ' '.join('%s=%s' % item for item in args.items())
    self.conn = pg.connect(conn_string)

  def execute(self, query, args=None):
    """Run `query`; return the rows for SELECTs, [] for statements with no
    result set. Raises Exception on driver connection/operational errors."""
    try:
      cursor = self.conn.cursor()
      # Debug tracing kept, but as print() calls so the module parses under
      # Python 3 as well as Python 2.
      print(query)
      print(args)
      cursor.execute(query, args)
    except (pg.InterfaceError, pg.OperationalError) as err:
      # NOTE(review): the original driver error is discarded here -- consider
      # chaining it once the codebase moves to Python 3.
      raise Exception('bad query: ' + query)

    try:
      results = list(cursor)
    except pg.ProgrammingError:
      # Statement produced no result set (e.g. INSERT/UPDATE).
      results = []

    return results

  def commit(self):
    self.conn.commit()
| 2.9375 | 3 |
network-config-analyzer/MethodSet.py | np-guard/network-config-analyzer | 15 | 12773003 | #
# Copyright 2020- IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
#
import copy
from CanonicalIntervalSet import CanonicalIntervalSet
class MethodSet(CanonicalIntervalSet):
    """
    A class for holding a set of HTTP methods

    Each method is represented by its index in `all_methods_list`; the set is
    stored as canonical integer intervals over those indices.
    """
    # Index positions in this list are the integer values kept in the intervals.
    all_methods_list = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'CONNECT', 'OPTIONS', 'TRACE', 'PATCH']

    def __init__(self, all_methods=False):
        """
        :param bool all_methods: whether to create the object holding all methods
        """
        super().__init__()
        if all_methods: # the whole range
            self.add_interval(self._whole_range_interval())

    @staticmethod
    def _whole_range_interval():
        """
        :return: the interval representing the whole range (all methods)
        """
        return CanonicalIntervalSet.Interval(0, len(MethodSet.all_methods_list) - 1)

    @staticmethod
    def _whole_range_interval_set():
        """
        :return: the interval set representing the whole range (all methods)
        """
        interval = MethodSet._whole_range_interval()
        return CanonicalIntervalSet.get_interval_set(interval.start, interval.end)

    def is_whole_range(self):
        """
        :return: True if the MethodSet contains all methods, False otherwise
        """
        return self == self._whole_range_interval_set()

    @staticmethod
    def _get_method_names_from_interval_set(interval_set):
        """
        Returns names of methods represented by a given interval set
        :param CanonicalIntervalSet interval_set: the interval set
        :return: the list of method names
        """
        res = []
        for interval in interval_set:
            # intervals must stay inside the index range of all_methods_list
            assert interval.start >= 0 and interval.end < len(MethodSet.all_methods_list)
            for index in range(interval.start, interval.end + 1):
                res.append(MethodSet.all_methods_list[index])
        return res

    @staticmethod
    def _get_compl_method_names_from_interval_set(interval_set):
        """
        Returns names of methods not included in a given interval set
        :param CanonicalIntervalSet interval_set: the interval set
        :return: the list of complement method names
        """
        res = MethodSet.all_methods_list.copy()
        for method in MethodSet._get_method_names_from_interval_set(interval_set):
            res.remove(method)
        return res

    def __str__(self):
        """
        :return: Compact string representation of the MethodSet
        """
        if self.is_whole_range():
            return '*'
        if not self:
            return 'Empty'
        method_names = self._get_method_names_from_interval_set(self)
        compl_method_names = self._get_compl_method_names_from_interval_set(self)
        # pick whichever listing (included vs. "all but" excluded) is shorter
        if len(method_names) <= len(compl_method_names):
            values_list = ', '.join(method for method in method_names)
        else:
            values_list = 'all but ' + ', '.join(method for method in compl_method_names)
        return values_list

    def copy(self):
        # NOTE(review): copy.copy is shallow -- presumably the interval storage
        # of CanonicalIntervalSet makes this safe; confirm intervals are not
        # mutated in place anywhere.
        new_copy = copy.copy(self)
        return new_copy
| 2.640625 | 3 |
channels.py | tunelipt/boundarylayer | 0 | 12773004 | import math
def num_string(x, n=3):
    """Render x as an n-digit zero-padded string (via a 10**n offset trick)."""
    shifted = x + 10 ** n
    return str(shifted)[1:]
class ChannelConfig(object):
    """Bookkeeping for a bank of named channels with per-channel selection.

    Channel names are either auto-generated ("CHAN00", ...) or taken from
    `chans`; an optional "REF" connection is appended when `addref` is True.
    """

    def __init__(self, nchans, chans=None, addref=True, istart=0):
        self.addref = addref
        self.istart = istart
        if chans is None:
            # width of the zero-padded numeric suffix
            width = math.ceil(math.log10(nchans + istart))
            self.chans = ["CHAN{}".format(num_string(idx + istart, width))
                          for idx in range(nchans)]
        else:
            self.chans = [chans[idx] for idx in range(nchans)]
        if addref:
            self.chans.append("REF")
        self.nconn = len(self.chans)
        self.nch = nchans
        # one selection flag per connection (including REF when present)
        self.selected = [False] * self.nconn

    def isavailable(self, chidx):
        """True when the channel at chidx has not been selected."""
        return not self.selected[chidx]

    def check(self, chidx):
        """Mark the channel at chidx as selected."""
        self.selected[chidx] = True

    def uncheck(self, chidx):
        """Mark the channel at chidx as available again."""
        self.selected[chidx] = False

    def findfirst(self):
        """Select and return the first available data channel, or -1.

        Only the first `nch` entries are scanned, so REF is never claimed.
        """
        for idx in range(self.nch):
            if self.selected[idx]:
                continue
            self.selected[idx] = True
            return idx
        return -1

    def names(self):
        """Return the channel-name list (including REF when present)."""
        return self.chans

    def nchans(self):
        """Return the number of data channels (excluding REF)."""
        return self.nch

    def save_config(self):
        """Serialize the configuration to a plain dict."""
        return {'kind': 'channel', 'nchans': self.nch,
                'chans': self.chans, 'use': self.selected}
| 3 | 3 |
selfdrive/controls/lib/drive_helpers.py | UVA-DSA/OpenPilot0.3.5 | 1 | 12773005 | from common.numpy_fast import clip
def rate_limit(new_value, last_value, dw_step, up_step):
  """Clamp new_value so it differs from last_value by at most one step.

  The result is clipped into [last_value + dw_step, last_value + up_step];
  dw_step is the per-call downward increment (presumably <= 0) and up_step
  the upward one (presumably >= 0) -- TODO confirm the sign convention
  with callers.
  """
  return clip(new_value, last_value + dw_step, last_value + up_step)
def learn_angle_offset(lateral_control, v_ego, angle_offset, d_poly, y_des, steer_override):
  # simple integral controller that learns how much steering offset to put to have the car going straight
  # Returns the updated angle_offset (degrees), clipped to [min_offset, max_offset].
  min_offset = -1. # deg
  max_offset = 1. # deg
  alpha = 1./36000. # correct by 1 deg in 2 mins, at 30m/s, with 50cm of error, at 20Hz
  min_learn_speed = 1.
  # learn less at low speed or when turning
  # (gain scales with speed above min_learn_speed and shrinks with |y_des|)
  alpha_v = alpha*(max(v_ego - min_learn_speed, 0.))/(1. + 0.5*abs(y_des))
  # only learn if lateral control is active and if driver is not overriding:
  if lateral_control and not steer_override:
    # d_poly[3] is the integrated error term fed into the offset estimate
    # -- presumably the path polynomial's constant coefficient; confirm.
    angle_offset += d_poly[3] * alpha_v
    angle_offset = clip(angle_offset, min_offset, max_offset)
  return angle_offset
| 2.734375 | 3 |
seebuoy/ndbc/_request.py | nickc1/seebuoy | 5 | 12773006 | <filename>seebuoy/ndbc/_request.py
import requests
def make_request(url):
    """GET `url` and return the response body text.

    Returns None (after printing a notice) when the server answers 404;
    raises ValueError for any other non-200 status.
    """
    resp = requests.get(url)
    status = resp.status_code
    if status == 200:
        return resp.text
    if status == 404:
        print(f"Dataset not available (404 Error) for url: \n {url}")
        return None
    raise ValueError(f"Error code {resp.status_code} for url: \n {url}")
| 2.875 | 3 |
methods and decorators/optimization.py | Zoki92/Snippets | 0 | 12773007 | <reponame>Zoki92/Snippets
"""
Check if items are in list
"""
def has_invalid_fields(fields):
    """Loop version: True if any entry of fields is not 'foo' or 'bar'."""
    for entry in fields:
        if entry != 'foo' and entry != 'bar':
            return True
    return False
def has_invalid_fields2(fields):
    """Set version: True if fields holds anything beyond 'foo'/'bar'."""
    allowed = {'foo', 'bar'}
    return bool(set(fields) - allowed)
""" set eliminates duplicates """
""" Code is valid but it will a variation of the example
many times """
def add_animal_in_family(species, animal, family):
    """Add animal to species[family], creating the family's set on first use."""
    species.setdefault(family, set()).add(animal)
species = {}
add_animal_in_family(species, 'cat', 'felidea')  # species -> {'felidea': {'cat'}}
# import collections
def add_animal_in_family2(species, animal, family):
    """defaultdict version: a missing family set is created automatically."""
    members = species[family]
    members.add(animal)
# NOTE(review): requires `import collections` (only hinted at in the comment above).
species = collections.defaultdict(set)
add_animal_in_family2(species, 'cat', 'felidea')
""" Each time you try try to access a nonexistent item from
your dict the defaultdict will use the function that was
passed as argument to its constructor to build a new
value instead of raising a KeyError. In this case the
set() function is used to build a new set each time we
need it.
"""
# Ordered list and bisect
""" Sorted lists use a bisecting algorithm for lookup
to achieve a retrieve time of O(logn). The idea is to split
the list in half and look on which side, left or right, the
item must appear in and so which side should be searched next.
We need to import bisect
"""
farm = sorted(['haystack', 'needle', 'cow', 'pig'])
# NOTE(review): requires `import bisect`; returns the insertion point that
# keeps the list sorted.
bisect.bisect(farm, 'needle')
""" bisect.bisect() returns the position where an element
should be inserted to keep the list sorted. Only works if
the list is properly sorted to begin with.
Using bisect module you could also create a special SortedList
class inheriting from list to create a list that is always
sorted,"""
class SortedList(list):
    """A list that is always kept sorted; lookups use binary search."""

    def __init__(self, iterable):
        super(SortedList, self).__init__(sorted(iterable))

    def insort(self, item):
        """Insert item while keeping the list sorted."""
        bisect.insort(self, item)

    def extend(self, other):
        """Insert every item of other at its sorted position."""
        for item in other:
            self.insort(item)

    @staticmethod
    def append(o):
        # Appending would break the sorted invariant; force insort instead.
        raise RuntimeError("Cannot append to a sorted list")

    def index(self, value, start=None, stop=None):
        """Binary-search for value in self[start:stop]; raise ValueError if absent."""
        place = bisect.bisect_left(self[start:stop], value)
        if start:
            place += start
        # BUGFIX: `stop or len(self)` treated stop=0 as "no limit", so
        # index(v, 0, 0) could falsely find v; only None means open-ended.
        end = len(self) if stop is None else stop
        if place < end and self[place] == value:
            return place
        raise ValueError(f'{value} is not in list')
""" In python regular objects store all of their attributes inside
a dictionary, and this dictionary is itself stored in the __dict__
attribute """
class Point(object):
    """Plain two-attribute class, used to demonstrate per-instance __dict__
    storage (contrast with the __slots__ version below)."""

    def __init__(self, x, y):
        # both attributes land in the instance __dict__
        self.x = x
        self.y = y
p = Point(1, 2)
p.__dict__  # the attribute-storage dict, e.g. {'x': 1, 'y': 2}
""" {'y': 2, 'x': 1}
Instead we can use __slots__ that will turn this dictionary into
list, the idea is that dictionaries are expensive for memory but
lists are less, only works for classes that inherit from object,
or when we have large number of simple objects
"""
class Foobar(object):
    """__slots__ version of the demo: the single attribute lives in a fixed
    slot, so instances have no per-object __dict__."""

    __slots__ = ('x',)

    def __init__(self, x):
        self.x = x
""" However this limits us to them being immutable. Rather than
having to reference them by index, namedtuple provides the ability to
retrieve tuple elements by referencing a named attribute.
import collections
Foobar = collections.namedtuple('Foodbar', [x])
Foobar(42)
Foobar(42).x
42
It's a little bit less efficient than __slots__ but gives
the ability to index by name
"""
# Memoization
""" Its an optimization technique used to speed up function
calls by caching their results. The results of a function
can be cached only if the function is pure.
functools module provides a least recently used LRU cache
decorator. This provides the same functionality as memoization
but with the benefit that it limits the number of entries in the cache
removing the least recently used one when the cache reaches its
maximum size. Also provides statistics.>>> import functools
>>> import math
>>> @functools.lru_cache(maxsize=2)
... def memoized_sin(x):
... return math.sin(x)
...
>>> memoized_sin(2)
0.9092974268256817
>>> memoized_sin.cache_info()
CacheInfo(hits=0, misses=1, maxsize=2, currsize=1)
>>> memoized_sin(2)
0.9092974268256817
>>> memoized_sin.cache_info()
CacheInfo(hits=1, misses=1, maxsize=2, currsize=1)
>>> memoized_sin(3)
0.1411200080598672
>>> memoized_sin.cache_info()
CacheInfo(hits=1, misses=2, maxsize=2, currsize=2)
>>> memoized_sin(4)
-0.7568024953079282
>>> memoized_sin.cache_info()
"""
| 4.21875 | 4 |
python_scripts/criaCaracteristicas.py | LeoHSRodrigues/backendTCC | 1 | 12773008 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
PyFingerprint
Copyright (C) 2015 <NAME> <<EMAIL>>
All rights reserved.
"""
import time
from pyfingerprint import PyFingerprint
# Initialize the fingerprint reader.
try:
    f = PyFingerprint('COM4', 115200, 0xFFFFFFFF, 0x00000000)
    if (f.verifyPassword() == False):
        raise ValueError('The given fingerprint sensor password is wrong!')
except Exception as e:
    print('The fingerprint sensor could not be initialized!')
    print('Exception message: ' + str(e))
    exit(1)
f.setSecurityLevel(5)
# Try to capture a fingerprint and extract its characteristics.
try:
    print('Aproxime o dedo...')
    # Wait until a finger is detected on the reader.
    while (f.readImage() == False):
        pass
    # Convert the captured image to characteristics and store them in buffer 1.
    f.convertImage(0x01)
    f.createTemplate()
    # Download the characteristics payload from the sensor.
    positionNumber = f.downloadCharacteristics()
    print(positionNumber)
except Exception as e:
    print('Operation failed!')
    print('Exception message: ' + str(e))
    exit(1)
| 2.875 | 3 |
pythia/seo/sunspotter.py | dpshelio/pythia | 0 | 12773009 | import warnings
from pathlib import Path
import astropy.units as u
import matplotlib.pyplot as plt
import pandas as pd
from astropy.coordinates import SkyCoord
from sunpy.map import Map, MapSequence
from sunpy.net import Fido
from sunpy.net import attrs as a
from sunpy.net import hek
from sunpy.util import SunpyUserWarning
__all__ = ['Sunspotter']
# Default data directory: <repository root>/data/all_clear.
path = Path(__file__).parent.parent.parent / "data/all_clear"
class Sunspotter:
    def __init__(self, *, timesfits: str = path / "lookup_timesfits.csv", get_all_timesfits_columns: bool = True,
                 properties: str = path / "lookup_properties.csv", get_all_properties_columns: bool = True,
                 timesfits_columns: list = ['#id'], properties_columns: list = ['#id'],
                 classifications=None, classifications_columns=None,
                 delimiter: str = ';', datetime_fmt: str = '%Y-%m-%d %H:%M:%S'):
        """Load the Sunspotter lookup tables.

        Parameters
        ----------
        timesfits : str
            Path to ``lookup_timesfits.csv`` (All Clear copy under
            ``~pythia/data/all_clear`` by default).
        get_all_timesfits_columns : bool, optional
            Load every Timesfits column; overrides `timesfits_columns`.
        properties : str
            Path to ``lookup_properties.csv`` (All Clear copy by default).
        get_all_properties_columns : bool, optional
            Load every Properties column; overrides `properties_columns`.
        timesfits_columns : list, optional
            Columns to read from the Timesfits CSV, by default ``['#id']``.
        properties_columns : list, optional
            Columns to read from the Properties CSV, by default ``['#id']``.
        classifications : str, optional
            Path to ``classifications.csv``; not loaded when None.
        classifications_columns : list, optional
            Columns to read from ``classifications.csv``; must be given
            when `classifications` is given.
        delimiter : str, optional
            CSV field delimiter, by default ``';'``.
        datetime_fmt : str, optional
            strptime format of the observation timestamps,
            by default ``'%Y-%m-%d %H:%M:%S'``.
        """
        # File locations and loading switches.
        self.timesfits = timesfits
        self.properties = properties
        self.classifications = classifications
        self.get_all_timesfits_columns = get_all_timesfits_columns
        self.get_all_properties_columns = get_all_properties_columns
        # Column selections (sets allow subset checks while parsing).
        self.timesfits_columns = set(timesfits_columns)
        self.properties_columns = set(properties_columns)
        self.classifications_columns = classifications_columns
        self.datetime_fmt = datetime_fmt
        # Parse the CSV files immediately.
        self._get_data(delimiter)
    def _get_data(self, delimiter: str):
        """Parse the Timesfits, Properties and (optional) Classifications CSVs
        into DataFrames, validating the requested columns.

        NOTE(review): parse failures raise SunpyUserWarning (a Warning
        subclass) as an exception rather than emitting a warning -- confirm
        this is intentional before changing callers' except clauses.
        """
        # Reading the Timesfits file
        try:
            if self.get_all_timesfits_columns:
                self.timesfits = pd.read_csv(self.timesfits,
                                             delimiter=delimiter)
            else:
                self.timesfits = pd.read_csv(self.timesfits,
                                             delimiter=delimiter,
                                             usecols=self.timesfits_columns)
        except ValueError:
            raise SunpyUserWarning("Sunspotter Object cannot be created."
                                   " Either the Timesfits columns do not match, or the file is corrupted")

        # All requested columns must actually be present in the CSV.
        if not self.timesfits_columns.issubset(self.timesfits.columns):
            missing_columns = self.timesfits_columns - self.timesfits_columns.intersection(self.timesfits.columns)
            missing_columns = ", ".join(missing_columns)
            raise SunpyUserWarning("Sunspotter Object cannot be created."
                                   " The Timesfits CSV is missing the following columns: " +
                                   missing_columns)

        # Use parsed observation datetimes as the Timesfits index when available.
        if 'obs_date' in self.timesfits.columns:
            self.timesfits.obs_date = pd.to_datetime(self.timesfits.obs_date,
                                                     format=self.datetime_fmt)
            self.timesfits.set_index("obs_date", inplace=True)

        # Reading the Properties file
        try:
            if self.get_all_properties_columns:
                self.properties = pd.read_csv(self.properties,
                                              delimiter=delimiter)
            else:
                self.properties = pd.read_csv(self.properties,
                                              delimiter=delimiter,
                                              usecols=self.properties_columns)
        except ValueError:
            raise SunpyUserWarning("Sunspotter Object cannot be created."
                                   " Either the Properties columns do not match, or the file is corrupted")

        if not self.properties_columns.issubset(self.properties.columns):
            missing_columns = self.properties_columns - self.properties_columns.intersection(self.properties.columns)
            missing_columns = ", ".join(missing_columns)
            raise SunpyUserWarning("Sunspotter Object cannot be created."
                                   " The Properties CSV is missing the following columns: " +
                                   missing_columns)

        # Properties are keyed by the Sunspotter observation id.
        if '#id' in self.properties.columns:
            self.properties.set_index("#id", inplace=True)

        # Reading the Classification file (optional)
        if self.classifications is not None:

            if self.classifications_columns is None:
                raise SunpyUserWarning("Classifications columns cannot be None"
                                       " when classifications.csv is to be loaded.")
            try:
                self.classifications = pd.read_csv(self.classifications,
                                                   delimiter=delimiter,
                                                   usecols=self.classifications_columns)
            except ValueError:
                raise SunpyUserWarning("Sunspotter Object cannot be created."
                                       " Either the Classifications columns do not match, or the file is corrupted")

            self.classifications_columns = set(self.classifications_columns)

            if not self.classifications_columns.issubset(self.classifications.columns):
                missing_columns = self.classifications_columns - self.classifications_columns.intersection(self.classifications.columns)
                missing_columns = ", ".join(missing_columns)
                raise SunpyUserWarning("Sunspotter Object cannot be created."
                                       " The Classifications CSV is missing the following columns: " +
                                       missing_columns)
def get_timesfits_id(self, obsdate: str):
"""
Returns the Sunspotter observation id for the
first observation a given observation date and time.
Parameters
----------
obsdate : str
The observation time and date.
Returns
-------
id : int
The Sunspotter observation id for the first observation
for the given observation date and time.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.get_timesfits_id(obsdate)
1
"""
obsdate = self.get_nearest_observation(obsdate)
return self.timesfits.loc[obsdate].get(key='#id').iloc[0]
def get_all_ids_for_observation(self, obsdate: str):
"""
Returns all the Sunspotter observation ids for the
given observation date and time.
Parameters
----------
obsdate : str
The observation time and date.
Returns
-------
ids : pandas.Series
All the Sunspotter observation ids for the
given observation date and time.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.get_all_ids_for_observation(obsdate)
array([1, 2, 3, 4, 5])
"""
obsdate = self.get_nearest_observation(obsdate)
return self.timesfits.loc[obsdate].get(key='#id').values
def get_properties(self, idx: int):
"""
Returns the observed properties for a given Sunspotter id.
Parameters
----------
idx : int
The Sunspotter observation id for a particualar observation.
Returns
-------
properties : pandas.Series
The observed properties for the given Sunspotter id.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> idx = 0
>>> sunspotter.get_properties(idx)
filename 530be1183ae74079c300000d.jpg
zooniverse_id ASZ000090y
angle 37.8021
area 34400
areafrac 0.12
areathesh 2890
bipolesep 3.72
c1flr24hr 0
id_filename 1
flux 2.18e+22
fluxfrac 0.01
hale beta
hcpos_x 452.27
hcpos_y 443.93
m1flr12hr 0
m5flr12hr 0
n_nar 1
noaa 8809
pxpos_x 229.193
pxpos_y 166.877
sszn 1
zurich bxo
Name: 1, dtype: object
"""
return self.properties.loc[idx]
def get_properties_from_obsdate(self, obsdate: str):
"""
Returns the observed properties for a given observation time and date.
Parameters
----------
obsdate : str
The observation time and date.
Returns
-------
properties : pandas.DataFrame
The observed properties for the given observation time and date.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.get_properties_from_obsdate(obsdate)
filename 530be1183ae74079c300000d.jpg
zooniverse_id ASZ000090y
angle 37.8021
area 34400
areafrac 0.12
areathesh 2890
bipolesep 3.72
c1flr24hr 0
id_filename 1
flux 2.18e+22
fluxfrac 0.01
hale beta
hcpos_x 452.27
hcpos_y 443.93
m1flr12hr 0
m5flr12hr 0
n_nar 1
noaa 8809
pxpos_x 229.193
pxpos_y 166.877
sszn 1
zurich bxo
Name: 1, dtype: object
[1 rows x 23 columns]
"""
return self.get_properties(self.get_timesfits_id(obsdate))
    def number_of_observations(self, obsdate: str):
        """
        Returns number of Sunspotter observations for the
        given observation date and time.

        Parameters
        ----------
        obsdate : str
            The observation time and date.

        Returns
        -------
        number_of_observations : int
            Number of Sunspotter observations
            for the given observation date and time.

        Examples
        --------
        >>> from pythia.seo import Sunspotter
        >>> sunspotter = Sunspotter()
        >>> obsdate = '2000-01-01 12:47:02'
        >>> sunspotter.number_of_observations(obsdate)
        5
        """
        # NOTE(review): unlike the other lookup helpers, obsdate is NOT
        # normalised through get_nearest_observation here, so an exact
        # timestamp from the Timesfits index is required -- confirm callers.
        return self.timesfits.loc[obsdate].shape[0]
    def get_nearest_observation(self, obsdate: str):
        """
        Returns the observation time and date in the Timesfits that is
        closest to the given observation time and date.

        Parameters
        ----------
        obsdate : str
            The observation time and date.

        Returns
        -------
        closest_observation : str
            Observation time and date in the Timesfits that is
            closest to the given observation time and date.

        Examples
        --------
        >>> from pythia.seo import Sunspotter
        >>> sunspotter = Sunspotter()
        >>> obsdate = '2000-01-01 22:47:02'
        >>> sunspotter.get_nearest_observation(obsdate)
        '2000-01-01 12:47:02'
        """
        unique_dates = self.timesfits.index.unique()
        # NOTE(review): Index.get_loc(..., method='nearest') was deprecated in
        # pandas 1.4 and removed in 2.0; newer pandas needs
        # get_indexer([obsdate], method='nearest') -- confirm the pinned version.
        index = unique_dates.get_loc(obsdate, method='nearest')
        nearest_date = str(unique_dates[index])

        if nearest_date != str(obsdate):  # casting to str because obsdate can be a pandas.Timestamp
            warnings.warn(SunpyUserWarning("The given observation date isn't in the Timesfits file.\n"
                                           "Using the observation nearest to the given obsdate instead."))

        return nearest_date
def get_all_observations_ids_in_range(self, start: str, end: str):
"""
Returns all the observations ids in the given timerange.
The nearest start and end time in the Timesfits are used
to form the time range.
Parameters
----------
start : str
The starting observation time and date.
end : str
The ending observation time and date.
Returns
-------
ids : numpy.array
All the Sunspotter observation ids for the
given observation time range.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> start = '2000-01-02 12:51:02'
>>> end = '2000-01-03 12:51:02'
>>> sunspotter.get_all_observations_ids_in_range(start, end)
array([ 6, 7, 8, 9, 10, 11, 12, 13])
"""
start = self.get_nearest_observation(start)
end = self.get_nearest_observation(end)
return self.timesfits[start:end]['#id'].values
def get_fits_filenames_from_range(self, start: str, end: str):
"""
Returns all the FITS filenames for observations in the given timerange.
The nearest start and end time in the Timesfits are used to form the
time range.
Parameters
----------
start : str
The starting observation time and date.
end : str
The ending observation time and date.
Returns
-------
filenames : pandas.Series
all the FITS filenames for observations in the given timerange.
Notes
-----
If start time is equal to end time, all the filenames corresponding to
that particular observation will be returned.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> start = '2000-01-02 12:51:02'
>>> end = '2000-01-03 12:51:02'
>>> sunspotter.get_fits_filenames_from_range(start, end)
obs_date
2000-01-02 12:51:02 20000102_1251_mdiB_1_8810.fits
2000-01-02 12:51:02 20000102_1251_mdiB_1_8813.fits
2000-01-02 12:51:02 20000102_1251_mdiB_1_8814.fits
2000-01-02 12:51:02 20000102_1251_mdiB_1_8815.fits
2000-01-03 12:51:02 20000103_1251_mdiB_1_8810.fits
2000-01-03 12:51:02 20000103_1251_mdiB_1_8813.fits
2000-01-03 12:51:02 20000103_1251_mdiB_1_8814.fits
2000-01-03 12:51:02 20000103_1251_mdiB_1_8815.fits
Name: filename, dtype: object
"""
ids_in_range = self.get_all_observations_ids_in_range(start, end)
return self.timesfits[self.timesfits['#id'].isin(ids_in_range)]['filename']
    def get_mdi_fulldisk_fits_file(self, obsdate: str, filepath: str = str(path) + "/fulldisk/"):
        """
        Downloads the MDI Fulldisk FITS file corresponding to a particular observation.

        Parameters
        ----------
        obsdate : str
            The observation time and date.
        filepath : str, optional
            Download directory.
            By default downloaded files are stored in `~pythia/data/fulldisk`

        Returns
        -------
        filepath : str
            Filepath to the downloaded FITS file.

        Examples
        --------
        >>> from pythia.seo import Sunspotter
        >>> sunspotter = Sunspotter()
        >>> obsdate = '2000-01-01 12:47:02'
        >>> sunspotter.get_mdi_fulldisk_fits_file(obsdate)
        '~pythia/data/all_clear/fulldisk/fd_m_96m_01d_2556_0008.fits'
        """
        # TODO: Figure out a way to test the downloaded file.
        # Snap to the nearest recorded observation, then fetch via Fido (network I/O).
        obsdate = self.get_nearest_observation(obsdate)
        search_results = Fido.search(a.Time(obsdate, obsdate), a.Instrument.mdi)
        downloaded_file = Fido.fetch(search_results, path=filepath)
        return downloaded_file[0]
def get_mdi_fulldisk_map(self, obsdate: str, filepath: str = str(path) + "/fulldisk/"):
"""
Downloads the MDI Fulldisk FITS file corresponding to a particular observation.
And returns a SunPy Map corresponding to the downloaded file.
Parameters
----------
obsdate : str
The observation time and date.
filepath : mdi_mapsequence : sunpy.map.MapSequence,
By default downloaded files are stored in `~pythia/data/fulldisk`
Returns
-------
filepath : str
Filepath to the downloaded FITS file.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.get_mdi_fulldisk_map(obsdate)
<sunpy.map.sources.soho.MDIMap object at 0x7f6ca7aedc88>
SunPy Map
---------
Observatory: SOHO
Instrument: MDI
Detector: MDI
Measurement: magnetogram
Wavelength: 0.0 Angstrom
Observation Date: 2000-01-01 12:47:02
Exposure Time: 0.000000 s
Dimension: [1024. 1024.] pix
Coordinate System: helioprojective
Scale: [1.98083342 1.98083342] arcsec / pix
Reference Pixel: [511.36929067 511.76453018] pix
Reference Coord: [0. 0.] arcsec
array([[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan],
...,
[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan]], dtype=float32)
"""
# TODO: Figure out the file naming convention to check if the file has been downloaded already.
# TODO: Test this!
obsdate = self.get_nearest_observation(obsdate)
search_results = Fido.search(a.Time(obsdate, obsdate), a.Instrument.mdi)
downloaded_file = Fido.fetch(search_results, path=filepath)
return Map(downloaded_file[0])
def get_available_obsdatetime_range(self, start: str, end: str):
"""
Returns all the observations datetimes in the given timerange.
The nearest start and end time in the Timesfits are used
to form the time range.
Parameters
----------
start : str
The starting observation time and date.
end : str
The ending observation time and date.
Returns
-------
obs_list : pandas.DatetimeIndex
All the Sunspotter observation datetimes for the
given observation time range.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> start = '2000-01-01 12:47:02'
>>> end = '2000-01-15 12:47:02'
>>> sunspotter.get_available_obsdatetime_range(start, end)
DatetimeIndex(['2000-01-01 12:47:02', '2000-01-02 12:51:02',
'2000-01-03 12:51:02', '2000-01-04 12:51:02',
'2000-01-05 12:51:02', '2000-01-06 12:51:02',
'2000-01-11 12:51:02', '2000-01-12 12:51:02',
'2000-01-13 12:51:02', '2000-01-14 12:47:02',
'2000-01-15 12:47:02'],
dtype='datetime64[ns]', name='obs_date', freq=None)
"""
start = self.get_nearest_observation(start)
end = self.get_nearest_observation(end)
return self.timesfits[start: end].index.unique()
def get_mdi_map_sequence(self, start: str, end: str, filepath: str = str(path) + "/fulldisk/"):
"""
Get MDI Map Sequence for observations from given range.
Parameters
----------
start : str
The starting observation time and date.
end : str
The ending observation time and date.
filepath : str, optional
[description], by default str(path)+"/fulldisk/"
Returns
-------
mdi_mapsequence : sunpy.map.MapSequence
Map Sequece of the MDI maps in the given range.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> start = '2000-01-01 12:47:02'
>>> end = '2000-01-05 12:51:02'
>>> sunspotter.get_mdi_map_sequence(start, end)
<sunpy.map.mapsequence.MapSequence object at 0x7f2c7b85cda0>
MapSequence of 5 elements, with maps from MDIMap
"""
# TODO: Test this!
obsrange = self.get_available_obsdatetime_range(start, end)
maplist = []
for obsdate in obsrange:
maplist.append(self.get_mdi_fulldisk_map(obsdate, filepath))
return MapSequence(maplist)
def get_observations_from_hek(self, obsdate: str, event_type: str = 'AR',
observatory: str = 'SOHO'):
"""
Gets the observation metadata from HEK for the given obsdate.
By default gets Active Region data recieved from SOHO.
Parameters
----------
obsdate : str
The observation time and date.
event_type : str, optional
The type of Event, by default 'AR'
observatory : str, optional
Observatory that observed the Event, by default 'SOHO'
Returns
-------
results = sunpy.hek.HEKTable
The table of results recieved from HEK.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.get_observations_from_hek(obsdate)
<HEKTable length=5>
SOL_standard absnetcurrenthelicity ... unsignedvertcurrent
str30 object ... object
------------------------------ --------------------- ... -------------------
SOL2000-01-01T09:35:02L054C117 None ... None
SOL2000-01-01T09:35:02L058C100 None ... None
SOL2000-01-01T09:35:02L333C106 None ... None
SOL2000-01-01T09:35:02L033C066 None ... None
SOL2000-01-01T09:35:02L012C054 None ... None
"""
obsdate = self.get_nearest_observation(obsdate)
client = hek.HEKClient()
result = client.search(hek.attrs.Time(obsdate, obsdate), hek.attrs.EventType(event_type))
obsdate = "T".join(str(obsdate).split())
result = result[result['obs_observatory'] == 'SOHO']
result = result[result['event_starttime'] <= obsdate]
result = result[result['event_endtime'] > obsdate]
return result
def plot_observations(self, obsdate: str, mdi_map: Map = None):
"""
Plots the Active Regions for a given observation on the
MDI map corresponding to that observation.
Parameters
----------
obsdate : str
The observation time and date.
mdi_map : Map, optional
The MDI map corresponding to the given observation,
If None, the Map will be downloaded first.
By default None.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.plot_observations(obsdate)
"""
obsdate = self.get_nearest_observation(obsdate)
if mdi_map is None:
mdi_map = self.get_mdi_fulldisk_map(obsdate)
hek_result = self.get_observations_from_hek(obsdate)
bottom_left_x = hek_result['boundbox_c1ll']
bottom_left_y = hek_result['boundbox_c2ll']
top_right_x = hek_result['boundbox_c1ur']
top_right_y = hek_result['boundbox_c2ur']
number_of_observations = len(hek_result)
bottom_left_coords = SkyCoord([(bottom_left_x[i], bottom_left_y[i]) * u.arcsec
for i in range(number_of_observations)],
frame=mdi_map.coordinate_frame)
top_right_coords = SkyCoord([(top_right_x[i], top_right_y[i]) * u.arcsec
for i in range(number_of_observations)],
frame=mdi_map.coordinate_frame)
fig = plt.figure(figsize=(12, 10), dpi=100)
mdi_map.plot()
for i in range(number_of_observations):
mdi_map.draw_rectangle(bottom_left_coords[i],
top_right=top_right_coords[i],
color='b', label="Active Regions")
hek_legend, = plt.plot([], color='b', label="Active Regions")
plt.legend(handles=[hek_legend])
plt.show()
| 2.265625 | 2 |
hanjatagger/tagger.py | kaniblu/hanja-tagger | 13 | 12773010 | <gh_stars>10-100
import difflib
from . import hanjaro
from . import compat2unified
from . import zh2hans
def is_chinese_char(c):
    """Return True if the codepoint ``c`` (an int) is a CJK ideograph.

    Covers the CJK Unified Ideograph blocks (plus Extensions A-E) and the
    CJK Compatibility Ideograph blocks of the Unicode standard.
    """
    # Bug fix: Extension A runs up to U+4DBF (previously 0x4d8f, which
    # wrongly excluded U+4D90..U+4DBF).
    return (0x4e00 <= c <= 0x9fff or  # CJK Unified Ideographs
            0x3400 <= c <= 0x4dbf or  # CJK Unified Ideographs Extension A
            0x20000 <= c <= 0x2a6df or  # CJK Unified Ideographs Extension B
            0x2a700 <= c <= 0x2b73f or  # CJK Unified Ideographs Extension C
            0x2b740 <= c <= 0x2b81f or  # CJK Unified Ideographs Extension D
            0x2b820 <= c <= 0x2ceaf or  # CJK Unified Ideographs Extension E
            0xf900 <= c <= 0xfaff or  # CJK Compatibility Ideographs
            0x2f800 <= c <= 0x2fa1f)  # CJK Compatibility Ideographs Supplement
class ChunkedNdiff(object):
    """Group the character-level output of difflib.ndiff into runs.

    ``diff()`` returns a list of ``(op, text)`` pairs, where ``op`` is the
    ndiff operator (' ', '-' or '+') and ``text`` is the concatenation of
    consecutive characters sharing that operator.
    """

    def __init__(self, a, b):
        self.a = a
        self.b = b
        self.ret = []
        self.buffer = []
        self.last = None

    def flush(self):
        """Emit the currently buffered run (if any) and reset the accumulator."""
        if self.buffer and self.last is not None:
            self.ret.append((self.last, "".join(self.buffer)))
        self.last = None
        self.buffer = []

    def diff(self):
        """Run ndiff over the two sequences and return the chunked result."""
        for line in difflib.ndiff(self.a, self.b):
            # Each ndiff line is "<op> <char>"; position 1 is a space.
            op, char = line[0], line[2]
            if op != self.last:
                self.flush()
                self.last = op
            self.buffer.append(char)
        self.flush()
        return self.ret
def chunked_ndiff(a, b):
    """Return the ndiff of ``a`` vs ``b`` grouped into (op, text) runs."""
    return ChunkedNdiff(a, b).diff()
class HanjaroTagger(object):
    """Tag a Korean string with the hanja (Chinese characters) behind it.

    A Hanjaro backend annotates the input with hanja in parentheses; the
    annotations are then aligned back onto the original text via a
    character-level diff.
    """

    def __init__(self, hanjaro: hanjaro.Hanjaro,
                 unified_cjk: bool = False,
                 simplified_han: bool = False):
        # Backend that turns hangul text into text with "(hanja)" insertions.
        self.hj = hanjaro
        # Optional post-processing steps applied to the produced tag line.
        self.unified_cjk = unified_cjk
        self.simplified_han = simplified_han
        self.compat2unified = None
        self.zh2hans = None
        if self.unified_cjk:
            self.compat2unified = compat2unified.Compat2Unified()
        if self.simplified_han:
            self.zh2hans = zh2hans.Zh2Hans()

    def stratify(self, ko, hj):
        """Align the hanja annotations in ``hj`` under the source string ``ko``.

        ``hj`` is expected to be ``ko`` with hanja inserted in parentheses
        right after the hangul they annotate.  Returns a string of the same
        length as ``ko`` whose characters are the hanja, right-aligned under
        each annotated chunk (spaces elsewhere).
        """
        ret = []
        chunks = chunked_ndiff(ko, hj)
        # Walk adjacent chunk pairs: an unchanged chunk optionally followed
        # by a '+' chunk carrying its "(hanja)" annotation.
        for (d1, chunk1), (d2, chunk2) in zip(chunks, chunks[1:]):
            assert d1 != "-" and d2 != "-", \
                f"unexpected operator from korean to hanja tag: '{d1}/{d2}'"
            if d1 == "+":
                continue
            assert d1 != d2, \
                f"two adjacent chunks cannot be both additions: '{chunk1}/{chunk2}"
            assert chunk2[0] == "(" and chunk2[-1] == ")", \
                f"additive chunk is not properly surrounded in parentheses: {chunk2}"
            cn = chunk2[1:-1]
            assert len(cn) <= len(chunk1), \
                f"length of hanja must be shorter than "\
                f"the Korean chunk: {len(cn)} > {len(chunk1)}"
            # Pad on the left so the hanja sit under the end of the chunk.
            ret.append(" " * (len(chunk1) - len(cn)) + cn)
        assert chunks[-1][0] == " ", \
            f"unexpected operator from the last chunk: '{chunks[-1][0]}'"
        # The trailing unchanged chunk carries no annotation: emit spaces.
        ret.append(" " * len(chunks[-1][1]))
        return "".join(ret)

    def tag(self, s):
        """Return the aligned hanja tag line for the string ``s``."""
        cn = self.hj.query(s)
        tags = self.stratify(s, cn)
        if self.unified_cjk:
            tags = self.compat2unified.convert(tags)
        if self.simplified_han:
            tags = self.zh2hans.convert(tags)
        return tags
| 2.3125 | 2 |
lib/graph.py | bendmorris/beaver | 2 | 12773011 | <reponame>bendmorris/beaver
from types import BeaverException, Variable, Uri, Value, updated_context
from statement import Statement
from command import Command, PrefixCommand
from parser import parse_string, parse_file, parse_stream
import sys
import urllib2
# Commands executed whenever a Graph is (re)initialized: every graph starts
# with the standard rdf: prefix defined.
default = [
    PrefixCommand('rdf', Uri('http://www.w3.org/1999/02/22-rdf-syntax-ns#')),
]
class Graph(object):
    '''A collection of triples, indexed as subject -> predicate -> objects.

    Note: this module is Python 2 code (print statements, urllib2).
    '''
    def __init__(self, verbose=False):
        # verbose: echo every added/removed statement to stdout.
        self.verbose = verbose
        self.reinit()
    def reinit(self):
        '''Reset the graph to an empty state and re-run the default commands.'''
        self.statements = {}  # subject -> {predicate -> set(objects)}
        self.prefixes = {}    # prefix name -> base URI string
        self.defs = {}        # user-defined macros
        self.base_uri = None
        self.execute(default)
    def add_stmt(self, stmt):
        '''Add every (subject, predicate, object) triple of a Statement.'''
        if self.verbose: print str(stmt)
        subj = stmt.subject
        #if isinstance(subj, Value): raise BeaverException('Literals are not allowed as RDF subjects.')
        for verb, objects in stmt.verb_objects:
            #if isinstance(verb, Value): raise BeaverException('Literals are not allowed as RDF predicates.')
            for obj in objects:
                # Every term must be fully resolved before it is stored.
                for x in (subj, verb, obj):
                    if isinstance(x, Variable):
                        raise BeaverException('Unresolved variable: %s' % x)
                if not subj in self.statements:
                    self.statements[subj] = {}
                if not verb in self.statements[subj]: self.statements[subj][verb] = set()
                self.statements[subj][verb].add(obj)
    def remove_stmt(self, stmt):
        '''Remove a Statement's triples, pruning index entries left empty.'''
        if self.verbose: print '@del %s' % str(stmt)
        subj = stmt.subject
        for verb, objects in stmt.verb_objects:
            for obj in objects:
                if not subj in self.statements: return
                if not verb in self.statements[subj]: return
                try: self.statements[subj][verb].remove(obj)
                except KeyError: pass
                if len(self.statements[subj][verb]) == 0: del self.statements[subj][verb]
                if len(self.statements[subj]) == 0: del self.statements[subj]
    def execute(self, stmt, context={}):
        '''Execute a Statement, a Command, or an iterable of either.

        NOTE(review): ``context`` is a mutable default argument; this is
        only safe because it is rebound (via updated_context) and never
        mutated in place below -- verify when changing this method.
        '''
        if isinstance(stmt, Statement):
            # Macro/variable substitution may yield a new statement to run.
            replace = stmt.replace(context, self.defs)
            if replace:
                new_stmt, new_context = replace
                context = updated_context(context, new_context)
                return self.execute(new_stmt, context)
            self.add_stmt(stmt)
        elif isinstance(stmt, Command):
            replace = stmt.replace(context, self.defs)
            if replace:
                new_stmt, new_context = replace
                context = updated_context(context, new_context)
                return self.execute(new_stmt, context)
            stmt.execute(self, context)
        elif hasattr(stmt, '__iter__'):
            for substmt in stmt:
                self.execute(substmt, context)
            return
        else:
            raise BeaverException('Unrecognized statement: %s' % stmt)
    def uri(self, uri):
        '''Expand a (possibly prefixed) URI against the defined prefixes.'''
        if hasattr(uri, 'prefix'):
            try:
                base = self.prefixes[uri.prefix]
            except KeyError:
                raise BeaverException('Prefix %s is not defined.' % uri.prefix)
        else: base = ''
        return Uri(base + uri.url)
    def parse(self, filename=None, text=None, stream=None):
        '''Parse exactly one input source and execute every statement.

        Returns the number of statements executed.
        '''
        if filename:
            parsed = parse_file(filename)
        elif text:
            parsed = parse_string(text)
        elif stream:
            parsed = parse_stream(stream)
        else:
            raise BeaverException('Must specify filename, text, or stream to parse.')
        stmts = 0
        for stmt in parsed:
            stmts += 1
            self.execute(stmt)
        return stmts
    def draw(self, filename, use='pydot'):
        '''Render the graph to an image file via pygraphviz or pydot.'''
        if use=='pygraphviz':
            try: import pygraphviz as pgv
            except ImportError: raise BeaverException('pygraphviz is required to draw graphs.')
            graph = pgv.AGraph(overlap=False, strict=False)
            def format_label(s):
                # Strip the surrounding <>, "" or '' from a term's string form.
                if isinstance(s, Uri):
                    s = s.apply_prefix(self)
                s = str(s)
                for lchar, rchar in ["<>", '""', "''"]:
                    if s.startswith(lchar) and s.endswith(rchar): s = s[1:-1]
                return s
            nodes = set()
            def new_node(s):
                # Add each distinct node exactly once.
                node = format_label(s)
                if not node in nodes:
                    graph.add_node(node)
                    nodes.add(node)
            for s in self.statements:
                new_node(s)
                for v, objs in self.statements[s].items():
                    for o in objs:
                        new_node(o)
                        graph.add_edge(format_label(s), format_label(o), label=format_label(v), dir='forward')
            graph.layout(prog='dot')
            graph.draw(filename)
        elif use=='pydot':
            try: import pydot
            except ImportError: raise BeaverException('pydot is required to draw graphs.')
            graph = pydot.Dot(graph_type='digraph')
            def format_node_name(s):
                # DOT node identifiers may not contain quoting characters.
                s = str(s)
                for lchar, rchar in ["<>", '""', "''"]:
                    if s.startswith(lchar) and s.endswith(rchar): s = s[1:-1]
                bad_chars = "<>'\":"
                for char in bad_chars:
                    s = s.replace(char, '')
                return 'x%s' % s
            def format_label(s):
                # Human-readable label; literal strings keep escaped quotes.
                if isinstance(s, Uri):
                    s = s.apply_prefix(self)
                s = str(s)
                for lchar, rchar in ["<>", "''"]:
                    if s.startswith(lchar) and s.endswith(rchar): s = s[1:-1]
                if (s.startswith('"') and s.endswith('"')):
                    s = '"\\"%s\\""' % s[1:-1]
                else:
                    s = '"%s"' % s
                return s
            nodes = set()
            def new_node(s):
                node = format_node_name(s)
                label = format_label(s)
                if not node in nodes:
                    graph.add_node(pydot.Node(node, label=label))
                    nodes.add(node)
            for s in self.statements:
                new_node(s)
                for v, objs in self.statements[s].items():
                    for o in objs:
                        new_node(o)
                        graph.add_edge(pydot.Edge(format_node_name(s), format_node_name(o), label=format_label(v)))
            # Infer the output format from the file extension.
            img_format = filename.split('.')[-1]
            graph.write(filename, format=img_format)
    def write(self, filename=None):
        '''Serialize the graph in Turtle-like syntax to stdout or a file.

        NOTE(review): urllib2.urlopen takes (url, data) -- the 'w' here is
        sent as POST data, not an open mode; for plain local paths a
        ValueError falls through to open(). Verify the URL branch's intent.
        '''
        if filename is None:
            handle = sys.stdout
        else:
            try:
                handle = urllib2.urlopen(filename, 'w')
            except ValueError:
                handle = open(filename, 'w')
        if self.prefixes:
            handle.write('\n'.join(['@prefix %s: %s' % (key, value) for key, value in self.prefixes.items()]))
            handle.write('\n\n')
        newline = False
        for subj in self.statements:
            # Blank line between subjects (but not before the first one).
            if newline: handle.write('\n')
            else: newline = True
            semicolon = False
            for verb in self.statements[subj]:
                # First predicate is preceded by the subject; later ones by ';'.
                if semicolon: s = ' ;\n    '
                else: s = subj.apply_prefix(self); semicolon = True
                v = verb.apply_prefix(self)
                if v == 'rdf:type': v = 'a'
                objs = self.statements[subj][verb]
                o = ', '.join([str(obj.apply_prefix(self)) for obj in objs])
                handle.write('%s %s %s' % (s, v, o))
            handle.write(' .\n')
| 2.328125 | 2 |
processes/playout.py | Julian-Theis/AVATAR | 7 | 12773012 | <gh_stars>1-10
import os
import numpy as np
import argparse
from pm4py.objects.petri.importer import pnml as pnml_importer
from pm4py.algo.simulation.playout.versions import basic_playout as playout
from util.playout import readVariantFile, getMaxVariantLength
from conf.settings import DATA_PATH
WORK_PATH = os.path.abspath(os.getcwd())
def intersection(lst1, lst2):
    """Return the common elements of ``lst1`` and ``lst2``, compared by their
    string representation.

    Elements (typically traces, i.e. lists of event tokens) are stringified
    before comparison so that unhashable items such as lists can still be
    intersected.  The order of the returned list is unspecified (set
    semantics), matching the original behavior.
    """
    # Set comprehensions replace the original append loops and avoid
    # building intermediate lists before converting to sets.
    ls1 = {str(item) for item in lst1}
    ls2 = {str(item) for item in lst2}
    return list(ls1 & ls2)
def writeToFile(file, lst):
    """Write the unique traces found in ``lst`` to ``file``, one per line.

    Each entry of ``lst`` is a sequence of events (dicts with a
    'concept:name' key).  The trailing character of every event name is
    dropped and the remaining tokens are joined with single spaces.
    Duplicate traces are written only once, in arbitrary order.
    """
    unique_traces = {
        " ".join(str(event['concept:name'])[:-1] for event in entry)
        for entry in lst
    }
    with open(file, 'w') as outfile:
        for trace in unique_traces:
            outfile.write(trace)
            outfile.write("\n")
def str2bool(v):
    """Parse a command-line flag into a bool (usable as an argparse ``type=``).

    Accepts real booleans unchanged; otherwise recognizes the usual
    yes/no spellings case-insensitively and raises ArgumentTypeError for
    anything else.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == "__main__":
    # Command-line entry point: optionally replay a Petri net to sample
    # variants, then score the generated variants against the population,
    # train and test variant sets.
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--system', help='Which system? pb_system_5_3 for example', required=True)
    parser.add_argument('-e', '--eval_only', help='Evaluation only (Boolean)', required=True)
    parser.add_argument('-pn', '--pn', help='Petri net filename (for example fodina_pb_system_5_3_train.txt_0.4_1.0_0.4_true.pnml)', required=True)
    parser.add_argument('-traces', '--traces', help='Max number of traces to generate (default: 1,000,000)', default=1000000)
    args = parser.parse_args()

    system = args.system
    pn = args.pn
    n_traces = int(args.traces)
    eval_only = str2bool(args.eval_only)

    # Resolve input/output paths relative to either the working directory
    # or the configured DATA_PATH.
    if DATA_PATH is None:
        f_pop = os.path.join(WORK_PATH, "data", "variants", str(system) + "_pop.txt")
        f_train = os.path.join(WORK_PATH, "data", "variants", str(system) + "_train.txt")
        f_test = os.path.join(WORK_PATH, "data", "variants", str(system) + "_test.txt")
        f_out = os.path.join(WORK_PATH, "data", "variants", pn + ".txt")
        f_pn = os.path.join(WORK_PATH, "data", "pns", system, pn)
    else:
        f_pop = os.path.join(DATA_PATH, "variants", str(system) + "_pop.txt")
        f_train = os.path.join(DATA_PATH, "variants", str(system) + "_train.txt")
        f_test = os.path.join(DATA_PATH, "variants", str(system) + "_test.txt")
        f_out = os.path.join(DATA_PATH, "variants", pn + ".txt")
        f_pn = os.path.join(DATA_PATH, "pns", system, pn)

    seq_len = getMaxVariantLength(f_pop)
    n_decimal = 8

    if eval_only:
        print("*** Variant Evaluation of " + system + " using " + pn + " ***")
    else:
        print("*** Playout Variants of " + system + " and Evaluation using " + pn + " ***")
    print("Maximum Variant Length is:", str(seq_len))

    if not eval_only:
        # Replay the Petri net to sample up to n_traces variants, bounded by
        # the longest variant observed in the population.
        net, initial_marking, final_marking = pnml_importer.import_net(f_pn)
        out = playout.apply(net, initial_marking, parameters={"noTraces": n_traces, "maxTraceLength" : seq_len-1})
        writeToFile(f_out, out)

    train = readVariantFile(f_train, unique=True)
    test = readVariantFile(f_test, unique=True)
    pop = readVariantFile(f_pop, unique=True)
    gen = readVariantFile(f_out, unique=True)

    # Normalize every variant to lower case before comparing sets.
    new_train = []
    for i in train:
        new_train.append([x.lower() for x in i])
    train = new_train
    new_test = []
    for i in test:
        new_test.append([x.lower() for x in i])
    test = new_test
    new_pop = []
    for i in pop:
        new_pop.append([x.lower() for x in i])
    pop = new_pop
    new_gen = []
    for i in gen:
        new_gen.append([x.lower() for x in i])
    gen = new_gen

    total_gen_samples = len(gen)
    cnt_true = 0
    # Label each generated variant 1 if it exists in the true population.
    # NOTE(review): ``labeled`` is built but never used afterwards.
    labeled = []
    for sample in gen:
        label = 0
        if sample in pop:
            cnt_true = cnt_true + 1
            label = 1
        string = ""
        for i in sample:
            string = string + " " + i
        string = string + "," + str(label)
        labeled.append(string)

    print("** EVALUATION **")
    print("# System Variants:", len(pop))
    print("Approximated # System Variants:", total_gen_samples)
    print("TP:", np.round(cnt_true / total_gen_samples, n_decimal))
    print("TP_system:",
          np.round(len(intersection(gen, pop)) / len(pop), n_decimal))
    print("TP_observed:",
          np.round(len(intersection(gen, train)) / len(train), n_decimal))
    print("TP_unobserved:",
          np.round(len(intersection(gen, test)) / len(test), n_decimal))
clases/duodecimasegunda.py | PythonCisco/clase | 0 | 12773013 |
"""
"""
import turtle
class Pluma():
def __init__(self, color, grosor):
self.color = color
self.grosor = grosor
self.posicion = [0, 0]
self.previa = self.posicion
def mueve(self, x, y):
self.previa = self.posicion
self.posicion = [x, y]
turtle.goto(*self.posicion)
def regresa(self):
self.posicion, self.previa = self.previa, self.posicion
turtle.goto(*self.posicion)
# añade a la clase Pluma una función llamada 'regresa' que mueva la pluma a su previa posición
| 3.734375 | 4 |
src/airfly/_vendor/airflow/providers/google/cloud/transfers/bigquery_to_bigquery.py | ryanchao2012/airfly | 7 | 12773014 | <gh_stars>1-10
# Auto generated by 'inv collect-airflow'
from airfly._vendor.airflow.models.baseoperator import BaseOperator
class BigQueryToBigQueryOperator(BaseOperator):
    """Auto-generated stub (see the 'inv collect-airflow' header above).

    Mirrors the constructor arguments of Airflow's
    BigQueryToBigQueryOperator as class-level annotations only; it carries
    no runtime behavior of its own.
    """
    source_project_dataset_tables: "typing.Union[typing.List[str], str]"
    destination_project_dataset_table: "str"
    write_disposition: "str"
    create_disposition: "str"
    gcp_conn_id: "str"
    bigquery_conn_id: "typing.Union[str, NoneType]"
    delegate_to: "typing.Union[str, NoneType]"
    labels: "typing.Union[typing.Dict, NoneType]"
    encryption_configuration: "typing.Union[typing.Dict, NoneType]"
    location: "typing.Union[str, NoneType]"
    impersonation_chain: "typing.Union[str, typing.Sequence[str], NoneType]"
| 1.929688 | 2 |
labs/queryset/aggregation.py | DhiaTN/djangolab | 7 | 12773015 | from django.db.models import Max, Avg, Sum, F
from labs.common.models import Registration
def highest_discount(event_id=1):
    """
    >>> SELECT MAX(registration.discount) AS max_discount FROM registration
        WHERE registration.event_id = event_id
    """
    result = Registration.objects.filter(
        event_id=event_id).aggregate(max_discount=Max('discount'))
    print(result)
def total_sold_tickets(event_id=1):
    """
    >>> SELECT SUM(registration.ticket) AS total_ticket FROM registration
        WHERE registration.event_id = event_id
    """
    result = Registration.objects.filter(
        event_id=event_id).aggregate(total_ticket=Sum('ticket'))
    print(result)
def average_tickets_per_registration():
    """
    >>> SELECT AVG(registration.ticket) AS avg_ticket FROM registration
    """
    print(Registration.objects.aggregate(avg_ticket=Avg('ticket')))
def registration_tickets_price():
    """
    >>> SELECT ..., (registration.ticket * ((100 - registration.discount) * event.ticket_price) / 100) AS price
        FROM registration INNER JOIN event ON (registration.event_id = event.id)
        INNER JOIN member ON (registration.member_id = member.id)
    """
    # Computed per-registration price: tickets * discounted unit price.
    price_expr = F('ticket') * (100 - F('discount')) * F('event__ticket_price') / 100
    for registration in Registration.objects.annotate(price=price_expr).select_related():
        print("{0} pays {1} for {2} tickt(s) with {3}% discount".format(
            registration.member, registration.price, registration.ticket, registration.discount))
def event_income():
    """
    >>> SELECT event.name, SUM((((registration.ticket * (100 - registration.discount)) * event.ticket_price) / 100)) AS price
        FROM registration INNER JOIN event ON (registration.event_id = event.id) GROUP BY event.name
    """
    price_expr = F('ticket') * (100 - F('discount')) * F('event__ticket_price') / 100
    # values() before annotate() makes this a GROUP BY event name.
    grouped = Registration.objects.values('event__name')
    for entry in grouped.annotate(income=Sum(price_expr)):
        print("{event__name} reaches {income}$ as an income".format(**entry))
| 2.578125 | 3 |
day19.py | alexrudy/AoC2018 | 0 | 12773016 | <filename>day19.py
# Hand decompiled day 19 part 2
#
# The elfcode program builds a target number ``c`` and then sets ``a`` to the
# sum of all divisors of ``c``.  Summing divisors in complementary pairs up
# to sqrt(c) replaces the original O(c) scan with an O(sqrt(c)) one, which
# matters because c exceeds ten million when a starts at 1.


def sum_of_divisors(n):
    """Return the sum of all positive divisors of n (pair-sum, O(sqrt n))."""
    total = 0
    d = 1
    while d * d <= n:
        if n % d == 0:
            total += d
            partner = n // d
            if partner != d:  # don't count a perfect-square root twice
                total += partner
        d += 1
    return total


a, b, c, d, e, f = 1, 0, 0, 0, 0, 0

# Build the small target (part 1 setup).
c += 2
c *= c
c *= 209
b += 2
b *= 22
b += 7
c += b

# Part 2: register a == 1 adds the large constant to the target.
if a == 1:
    b = 27
    b *= 28
    b += 29
    b *= 30
    b *= 14
    b *= 32
    c += b
    a = 0

a = sum_of_divisors(c)
print(a)
SEResNet/Res_block.py | Yfyangd/TaiwanTree | 0 | 12773017 | <gh_stars>0
from keras.models import Model
from keras.layers import *
from keras.regularizers import l2
from keras import backend as K
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.utils import to_categorical
from keras import layers, Input, models
import tensorflow as tf
def resnet_block(input, filters, k=1, strides=(1, 1)):
    """Pre-activation residual block (BN -> ReLU -> Conv x2) with a
    squeeze-and-excitation stage before the residual addition.

    ``k`` multiplies the channel count (``filters * k`` output channels);
    the shortcut is projected with a 1x1 convolution whenever stride or
    channel count would otherwise make the shapes mismatch.
    """
    init = input
    # Channel axis is the last tensor axis.
    channel_axis = -1
    x = BatchNormalization(axis=channel_axis)(input)
    x = Activation('relu')(x)
    # Project the shortcut when the block changes spatial or channel shape.
    if strides != (1, 1) or init._keras_shape[channel_axis] != filters * k:
        init = Conv2D(filters * k, (1, 1), padding='same', kernel_initializer='he_normal',
                      use_bias=False, strides=strides)(x)
    x = Conv2D(filters * k, (3, 3), padding='same', kernel_initializer='he_normal',
               use_bias=False, strides=strides)(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    x = Conv2D(filters * k, (3, 3), padding='same', kernel_initializer='he_normal',
               use_bias=False)(x)
    # squeeze and excite block
    # NOTE(review): squeeze_excite_block is neither defined nor imported in
    # this file -- presumably provided elsewhere in the project; confirm.
    x = squeeze_excite_block(x)
    m = add([x, init])
    return m
playlist_compare/app.py | devjeffersonsilveira/playlist-compare | 0 | 12773018 | from flask import Flask, request, json
from playlist_compare import playlistService, helloService, searchService
app = Flask(__name__)
@app.route("/")
def helloRoute():
data = helloService.hello()
return json.jsonify(data)
@app.route("/list")
def listAll():
token = request.args.get("token")
username = request.args.get("username")
data = playlistService.getAll(token, username)
return json.jsonify(data)
@app.route("/listOne")
def listOne():
token = request.args.get("token")
username = request.args.get("username")
playlist = request.args.get("playlist")
data = playlistService.getTracks(token, username, playlist)
return json.jsonify(data)
@app.route("/listDuplicates")
def listDuplicates():
token = request.args.get("token")
username = request.args.get("username")
data = playlistService.getDuplicates(token, username)
return json.jsonify(data)
@app.route("/search")
def searchRoute():
data = searchService.search()
return json.jsonify(data)
| 2.890625 | 3 |
profiles_project/profiles_api/views.py | sanskar1991/local-comms | 0 | 12773019 | <gh_stars>0
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
from .serializers import UserProfileSerializer, ProfileFeedItemSerializer
from .models import UserProfile, ProfileFeedItem
from .permissions import UpdateOwnProfile, UpdateOwnProfile
class UserProfileViewSets(viewsets.ModelViewSet):
    """Handle creating and updating of user profiles."""
    serializer_class = UserProfileSerializer
    queryset = UserProfile.objects.all()
    # Token-based authentication; users may only modify their own profile.
    authentication_classes = (TokenAuthentication,)
    permission_classes = (UpdateOwnProfile,)
    # Enable ?search=... filtering on name and email.
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', 'email',)
class UserLoginApiView(ObtainAuthToken):
    """Handle creating user authentication tokens."""
    # ObtainAuthToken defines no renderers by default; reuse the project's.
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ProfileFeedItemViewSet(viewsets.ModelViewSet):
    """Handling profile feed items."""
    serializer_class = ProfileFeedItemSerializer
    authentication_classes = (TokenAuthentication,)
    queryset = ProfileFeedItem.objects.all()
    # NOTE(review): the import line above pulls in UpdateOwnProfile twice;
    # a feed-item-specific permission (e.g. UpdateOwnStatus) is probably
    # intended here -- confirm against permissions.py.
    permission_classes = (UpdateOwnProfile, IsAuthenticated)

    def perform_create(self, serializer):
        """Sets the user profile of a new feed item to the logged-in user."""
        serializer.save(user_profile=self.request.user)
Project/ConcurrencyControl/RWLock.py | CDDSCLab/DMCL-2018 | 56 | 12773020 | <reponame>CDDSCLab/DMCL-2018
#-*- coding=utf8 -*-
# @Time :2018-11-27
# @Author :ehds
import threading
import queue
class RWLock:
    """
    A simple reader-writer lock.  Several readers can hold the lock
    simultaneously, XOR one writer.  Write locks have priority over reads
    to prevent writer starvation; waiting writers are woken in FIFO order.
    """
    def __init__(self):
        # FIFO queue of per-writer Condition objects, one per blocked writer.
        self.wait_writers_q = queue.Queue()
        # Lock state: 0 = free, >0 = number of active readers, -1 = writer held.
        self.rwlock = 0
        self.writers_waiting = 0
        self.monitor = threading.RLock()
        # All readers share a single condition on the monitor.
        self.readers_ok = threading.Condition(self.monitor)

    def acquire_read(self):
        """Acquire a read lock. Several threads can hold this type of lock.
        It is exclusive with write locks."""
        self.monitor.acquire()
        # Block while a writer holds the lock OR any writer is queued
        # (writer priority).
        while self.rwlock < 0 or self.writers_waiting:
            self.readers_ok.wait()
        self.rwlock += 1
        self.monitor.release()

    def acquire_write(self):
        """Acquire a write lock. Only one thread can hold this lock, and
        only when no read locks are also held."""
        self.monitor.acquire()
        while self.rwlock != 0:
            # Queue a dedicated condition so release() can wake writers one
            # at a time, in arrival order.
            self.writers_waiting += 1
            writers_ok = threading.Condition(self.monitor)
            self.wait_writers_q.put(writers_ok)
            writers_ok.wait()
            self.writers_waiting -= 1
        self.rwlock = -1
        self.monitor.release()

    def release(self):
        """Release a lock, whether read or write."""
        self.monitor.acquire()
        if self.rwlock < 0:
            # A writer held the lock.
            self.rwlock = 0
        else:
            self.rwlock -= 1
        wake_writers = self.writers_waiting and self.rwlock == 0
        wake_readers = self.writers_waiting == 0
        self.monitor.release()
        if wake_writers:
            # Wake exactly the oldest waiting writer (FIFO).
            writers_ok = self.wait_writers_q.get_nowait()
            writers_ok.acquire()
            writers_ok.notify()
            writers_ok.release()
        elif wake_readers:
            self.readers_ok.acquire()
            self.readers_ok.notifyAll()
            self.readers_ok.release()
DialogXL/utils.py | ahsanMuh/DialogXL | 33 | 12773021 | import numpy as np
import torch
def person_embed(speaker_ids, person_vec):
'''
:param speaker_ids: torch.Tensor ( T, B)
:param person_vec: numpy array (num_speakers, 100)
:return:
speaker_vec: torch.Tensor (T, B, D)
'''
speaker_vec = []
for t in speaker_ids:
speaker_vec.append([person_vec[int(i)].tolist() if i != -1 else [0] * 100 for i in t])
speaker_vec = torch.FloatTensor(speaker_vec)
return speaker_vec | 2.703125 | 3 |
seimas/migrations/0031_auto_20190103_2313.py | zinaukarenku/zkr-platform | 2 | 12773022 | # Generated by Django 2.1.4 on 2019-01-03 23:13
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see header): alters the `slug` field on
    # both Committee and Fraction to a unique SlugField.

    dependencies = [
        ('seimas', '0030_auto_20190103_2234'),
    ]

    operations = [
        migrations.AlterField(
            model_name='committee',
            name='slug',
            field=models.SlugField(unique=True),
        ),
        migrations.AlterField(
            model_name='fraction',
            name='slug',
            field=models.SlugField(unique=True),
        ),
    ]
| 1.375 | 1 |
vhserverutils/serverutils.py | CermitDFrog/valheimserverutilities | 0 | 12773023 | <filename>vhserverutils/serverutils.py
#!python3
import shutil
from datetime import datetime
from os import path as ospath
from pathlib import Path
class backup():
    """Utilities for backing up the server world and pruning old backups."""

    def __init__(self):
        pass

    def checkDirSize(self, dirpath, size):
        """Return True if the files under ``dirpath`` total more than ``size`` bytes."""
        basepath = Path(dirpath)
        return sum(ospath.getsize(f) for f in basepath.glob('**/*') if f.is_file()) > size

    def getoldest(self, dirpath):
        """Return the path of the oldest entry (by ctime) under ``dirpath``.

        Raises ValueError if the directory is empty.
        """
        # min() consumes the glob generator directly; no need to build a list.
        return min(Path(dirpath).glob('**/*'), key=ospath.getctime)

    def archive(self, worldPath, backupPath):
        """Zip ``worldPath`` into a timestamped archive inside ``backupPath``."""
        zipName = ospath.join(backupPath, f"VHworlds{datetime.now().strftime('%Y%m%d%H%M%S')}")
        shutil.make_archive(zipName, 'zip', base_dir=worldPath)

    def deleteold(self, backupPath, maxsize):
        """Delete the oldest files in ``backupPath`` until it totals at most
        ``maxsize`` MiB."""
        maxarch = maxsize * 1048576  # MiB -> bytes
        # Fixed: removed the dead assignment (oldestfile = backupPath) and
        # the redundant ``if`` wrapping the loop -- the ``while`` condition
        # alone is sufficient.
        while self.checkDirSize(backupPath, maxarch):
            Path(self.getoldest(backupPath)).unlink()
| 2.9375 | 3 |
__init__.py | mattyx17/LenasPostmodernTeam | 0 | 12773024 | from GiftTheCode2018.camp_quality_dashboard import create_camper_history_charts
| 1.09375 | 1 |
generate_regex.py | WilliamBurdett/grim_dawn_regex | 2 | 12773025 | <gh_stars>1-10
from utils.character_management import add_skills_from_classes, extend_skills
from utils.damage_type_parsers import (
check_damage_source,
calculate_damage_types,
add_elemental,
)
from utils.output_messages import retaliation_messages, player_messages, pet_messages, return_char, \
add_both_skills_message, add_convert_types_message
def main():
    """Build and print the damage-log regexes for a hard-coded character build."""
    # Build description: base damage types, conversion sources, classes and
    # manually-selected skills.
    damage_types_list = ["physical"]
    convert_from_types_list = []
    classes_list = [
        "soldier",
        "oathkeeper"
    ]
    skills_list = [
        "blitz",
        "shattering smash",
        "oleron's rage",
    ]

    damage_source = check_damage_source("player")
    damage_types_list = calculate_damage_types(damage_types_list)
    add_elemental(convert_from_types_list)
    add_skills_from_classes(skills_list, classes_list)
    extend_skills(skills_list)
    skills_list.extend(classes_list)

    # Join each list into a regex alternation group.
    skills = "|".join(skills_list)
    damage_types = "|".join(damage_types_list)
    convert_from_types = "|".join(convert_from_types_list)

    # Pick the message builder matching the damage source.
    messages = {"retaliation": retaliation_messages, "player": player_messages, "pet": pet_messages}
    output = messages[damage_source](skills, damage_types)
    add_both_skills_message(output, classes_list)
    add_convert_types_message(output, convert_from_types, damage_types)

    for item in output:
        message_type = item["message_type"]
        message = item["message"]
        print(f"\t{message_type}")
        print(message)


if __name__ == "__main__":
    main()
| 2.421875 | 2 |
spider/python/word.py | ferryhang/spider_job | 322 | 12773026 | import jieba #分词库
import jieba.analyse
import pymongo
import redis
import os
import re
import json
# Count case-insensitive occurrences of purely-alphabetic (English) tokens in
# all job descriptions stored in MongoDB, dump the counts to word.json, and
# extract keywords from the full text.
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client['job']
collection = db['position']
data = collection.find({})
text = ""
for item in data:
    text += item['body']
pwd = os.path.split(os.path.realpath(__file__))[0]
stopWord = pwd + '/stop.txt'
jieba.analyse.set_stop_words(stopWord)
# Bug fix: jieba.cut returns a one-shot generator; the original code
# exhausted it while counting words, so the later "/".join(cut_text)
# produced an empty string and extract_tags received no input.
# Materializing into a list lets the tokens be iterated twice.
cut_text = list(jieba.cut(text))
it_text = dict({})
for x in cut_text:
    G = re.match('[a-zA-Z]+', x)
    if G:
        key = G.group().lower()
        # Keys are always stored lowercase, so a direct dict lookup replaces
        # the original O(n) scan over a re-lowered key list.
        it_text[key] = it_text.get(key, 0) + 1
with open("word.json", "w+", encoding="utf-8") as file:
    file.write(json.dumps(it_text))
result = "/".join(cut_text)  # 必须给个符号分隔开分词结果来形成字符串,否则不能绘制词云
data = jieba.analyse.extract_tags(result.replace('/', ''), withWeight=False, allowPOS=())
#print(",".join(data))
| 2.640625 | 3 |
src/xml-python/Converting_XML_to_HTML.py | sudeep0901/python | 0 | 12773027 | from urllib.parse import urlencode, urldefrag, quote, unquote
import requests
# from requests.urllib3 import urlretrieve
import urllib
from urllib.request import urlretrieve
mydict = {
    "Name": "<NAME>",
    "address": "test address",
    "fav char": "<NAME>"
}

# urlencode turns a dict into an application/x-www-form-urlencoded string.
strUrl = urlencode(mydict)
print(strUrl)

string = urlencode({"v": "what is your favroute editor, VS Code, atom , sublime"})
print(string)

# NOTE(review): urldefrag splits off a '#fragment'; it does not decode the
# query string, so the name 'sdecode' is misleading -- verify intent.
sdecode = urldefrag(string)
print(sdecode)

# quote/unquote percent-encode and decode a single string.
qt = quote('Famous Quote:"I think, there I am')
print(qt)
print(unquote(qt))

# Fetch a directory listing over FTP and print it.
fd = urllib.request.urlopen("ftp://ftp.oreilly.com")
print(fd.read())
fd.close()

# Download the FTP listing to a local file; returns (filename, headers).
ob = urlretrieve("ftp://ftp.oreilly.com", "menu.txt")
print(ob)

"""
retrieve.py example
"""


def callback(blocknum, blocksize, totalsize):
    # Progress hook for urlretrieve: called once per transferred block.
    print("Downloaded " + str((blocknum * blocksize)))
    print(" of ", totalsize)


urlretrieve("http://www.example.com/pyxml.xml", "px.xml", callback)
print("Download Complete")
Passphrase Gen Py/pw.py | NicolasFlandrois/My-Mini-Py-Scripts-Training | 0 | 12773028 | <filename>Passphrase Gen Py/pw.py<gh_stars>0
#!/usr/bin/python3
#################################################################################
# MIT License
#
# Copyright (c) 2019 - <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#################################################################################
#Thu 10 Jan 2019 12:42:24 PM CET - Stradate 96627.31 (STO)
#Author: <NAME>
#Description: Generatin a list of random words, according to length chosen, from an import of words in a text.
#Version: v1.0
import random

# Build the candidate word list: every unique, lowercased whitespace-separated
# token found in text.txt.
with open('text.txt') as f:
    liste = list({word.lower() for line in f for word in line.split()})

# SystemRandom draws from the OS entropy source -- appropriate for a
# passphrase (the default `random` generator is not cryptographically secure).
secure_random = random.SystemRandom()

n = int(input("n? "))
# List comprehension replaces the original append loop; the dead initial
# `liste = []` assignment has been removed.
pw = [secure_random.choice(liste) for _ in range(n)]
print("pw: ", pw)
#Still to do: Filter out non-ASCII letters and digits from the word list imported
#from the .txt file... Or should we keep them, so that generated passwords can
#include special characters? We could also join the list back into a single
#string for easier reading.
services/anilist/src/player/api/controllers.py | C4T-BuT-S4D/training-17-11-19 | 1 | 12773029 | <filename>services/anilist/src/player/api/controllers.py
import secrets
import ujson
from sanic.exceptions import abort
import storage
async def get_current_user(request):
if 'user' in request:
return request['user']
sess = request.cookies.get('session')
if not sess:
abort(401)
redis = await storage.get_async_redis_pool()
tr = redis.multi_exec()
tr.exists(sess)
tr.get(sess)
user_exists, user = await tr.execute()
if not user_exists:
abort(401)
try:
user = ujson.loads(user.decode())
except (UnicodeDecodeError, ValueError):
abort(401)
request['user'] = user
return user
async def add_user_upload(request):
    """Create a new upload record for the current user.

    Persists the upload in the database, mirrors the inserted row into
    Redis keyed by a freshly generated upload token, and returns the token.
    """
    user = await get_current_user(request)
    token = secrets.token_hex(16)
    upload_name = request.json.get('name', '')

    conn = await storage.get_db_conn()
    await conn.execute(
        '''INSERT INTO anime_uploads (user_id, name, token) VALUES ($1, $2, $3)''',
        user['id'], upload_name, token)
    row = await conn.fetchrow(
        '''SELECT * FROM anime_uploads WHERE token=$1''', token)

    # Cache the full row so later lookups can avoid hitting the database.
    redis = await storage.get_async_redis_pool()
    await redis.set(token, ujson.dumps(dict(row)))
    return token
async def get_upload(upload_token):
    """Fetch a cached upload record from Redis; None when the token is unknown."""
    redis = await storage.get_async_redis_pool()
    raw = await redis.get(upload_token)
    return ujson.loads(raw.decode()) if raw else None
async def user_upload_exists_or_404(request, token):
    """Abort with 404 unless *token* names an upload owned by the current user."""
    user = await get_current_user(request)
    upload = await get_upload(token)
    owned = bool(upload) and upload.get('user_id') == user['id']
    if not owned:
        abort(404)
    return True
async def get_upload_or_404(token):
    """Return the upload record for *token*, aborting with 404 when unknown."""
    upload = await get_upload(token)
    if upload:
        return upload
    abort(404)
async def get_user_uploads(request):
    """Return all upload rows belonging to the current user as plain dicts."""
    user = await get_current_user(request)
    conn = await storage.get_db_conn()
    rows = await conn.fetch(
        '''SELECT * FROM anime_uploads WHERE user_id = $1''', user['id'])
    return [dict(row) for row in rows]
| 2.15625 | 2 |
cloud/src/baseline_cloud/core/json.py | MartinMReed/aws-iot-baseline | 1 | 12773030 | import decimal
import json
import typing
from datetime import datetime
import baseline_cloud.core.date
from baseline_cloud import core
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that also handles datetimes and Decimals."""

    def default(self, o: typing.Any) -> typing.Any:
        # Serialize datetimes through the project's UTC formatter.
        if isinstance(o, datetime):
            return core.date.format_utc(o)
        # Decimals are emitted as plain floats.
        if isinstance(o, decimal.Decimal):
            return float(o)
        # Defer to the base class, which raises TypeError for unknown types.
        return super().default(o)
| 2.484375 | 2 |
pyziabm/trader2017_r3.py | blakelucey/pyziabm | 35 | 12773031 | <reponame>blakelucey/pyziabm
import random
import numpy as np
class ZITrader(object):
    '''
    ZITrader generates quotes (dicts) based on mechanical probabilities.
    Base class shared by the specific trader types.
    Public attributes: quote_collector
    Public methods: none
    '''

    def __init__(self, name, maxq):
        '''Store the trader id and maximum order size.
        quote_collector is the public container that carries quotes to the
        exchange.
        '''
        self._trader_id = name
        self._max_quantity = maxq
        self.quote_collector = []
        # Monotonic counter used to build unique order ids for this trader.
        self._quote_sequence = 0

    def __repr__(self):
        return 'Trader({0}, {1})'.format(self._trader_id, self._max_quantity)

    def _make_add_quote(self, time, quantity, side, price):
        '''Build a single "add" order (dict) with a fresh, unique order id.'''
        self._quote_sequence += 1
        return {
            'order_id': '%s_%d' % (self._trader_id, self._quote_sequence),
            'timestamp': time,
            'type': 'add',
            'quantity': quantity,
            'side': side,
            'price': price,
        }
class PennyJumper(ZITrader):
    '''
    PennyJumper jumps in front of best quotes when possible.
    Subclass of ZITrader
    Public attributes: trader_type, quote_collector (from ZITrader), cancel_collector
    Public methods: confirm_trade_local (from ZITrader)
    '''
    def __init__(self, name, maxq, mpi):
        '''
        Initialize PennyJumper.
        cancel_collector is a public container for carrying cancel messages to the exchange.
        PennyJumper tracks private _ask_quote and _bid_quote to determine whether it is alone
        at the inside or not; each holds at most one outstanding order (dict) or None.
        '''
        ZITrader.__init__(self, name, maxq)
        self.trader_type = 'PennyJumper'
        self._mpi = mpi  # minimum price increment (tick size)
        self.cancel_collector = []
        self._ask_quote = None
        self._bid_quote = None
    def __repr__(self):
        return 'Trader({0}, {1}, {2}, {3})'.format(self._trader_id, self._max_quantity, self._mpi, self.trader_type)
    def _make_cancel_quote(self, q, time):
        # Build a cancel message (dict) for outstanding order q.
        return {'type': 'cancel', 'timestamp': time, 'order_id': q['order_id'], 'quantity': q['quantity'],
                'side': q['side'], 'price': q['price']}
    def confirm_trade_local(self, confirm):
        '''PJ has at most one bid and one ask outstanding - if one executes, clear the stored quote'''
        if confirm['side'] == 'buy':
            self._bid_quote = None
        else:
            self._ask_quote = None
    def process_signal(self, time, qsignal, q_taker):
        '''PJ determines if it is alone at the inside, cancels if not and replaces if there is an available price
        point inside the current quotes.
        '''
        self.quote_collector.clear()
        self.cancel_collector.clear()
        # Only jump when the spread leaves room for a price point inside it.
        if qsignal['best_ask'] - qsignal['best_bid'] > self._mpi:
            # q_taker > 0.5 implies greater probability of a buy order; PJ jumps the bid
            if random.uniform(0,1) < q_taker:
                if self._bid_quote: # check if not alone at the bid
                    # Our quote is stale if the book's bid moved past it or its size
                    # no longer covers the displayed bid size (we are not alone).
                    if self._bid_quote['price'] < qsignal['best_bid'] or self._bid_quote['quantity'] < qsignal['bid_size']:
                        self.cancel_collector.append(self._make_cancel_quote(self._bid_quote, time))
                        self._bid_quote = None
                if not self._bid_quote:
                    # Jump the current best bid by one tick.
                    price = qsignal['best_bid'] + self._mpi
                    side = 'buy'
                    q = self._make_add_quote(time, self._max_quantity, side, price)
                    self.quote_collector.append(q)
                    self._bid_quote = q
            else:
                if self._ask_quote: # check if not alone at the ask
                    if self._ask_quote['price'] > qsignal['best_ask'] or self._ask_quote['quantity'] < qsignal['ask_size']:
                        self.cancel_collector.append(self._make_cancel_quote(self._ask_quote, time))
                        self._ask_quote = None
                if not self._ask_quote:
                    # Undercut the current best ask by one tick.
                    price = qsignal['best_ask'] - self._mpi
                    side = 'sell'
                    q = self._make_add_quote(time, self._max_quantity, side, price)
                    self.quote_collector.append(q)
                    self._ask_quote = q
        else: # spread = mpi
            # No room to jump: withdraw any quote that is no longer alone at the inside.
            if self._bid_quote: # check if not alone at the bid
                if self._bid_quote['price'] < qsignal['best_bid'] or self._bid_quote['quantity'] < qsignal['bid_size']:
                    self.cancel_collector.append(self._make_cancel_quote(self._bid_quote, time))
                    self._bid_quote = None
            if self._ask_quote: # check if not alone at the ask
                if self._ask_quote['price'] > qsignal['best_ask'] or self._ask_quote['quantity'] < qsignal['ask_size']:
                    self.cancel_collector.append(self._make_cancel_quote(self._ask_quote, time))
                    self._ask_quote = None
class Taker(ZITrader):
    '''
    Taker submits marketable orders (dicts) based on the take probability.
    Subclass of ZITrader
    Public attributes: trader_type, quote_collector (from ZITrader)
    Public methods: process_signal
    '''

    def __init__(self, name, maxq):
        ZITrader.__init__(self, name, maxq)
        self.trader_type = 'Taker'

    def __repr__(self):
        return 'Trader({0}, {1}, {2})'.format(self._trader_id, self._max_quantity, self.trader_type)

    def process_signal(self, time, q_taker):
        '''Buy (at the max price) or sell (at the min price) with probability q_taker.'''
        self.quote_collector.clear()
        # q_taker > 0.5 implies a greater probability of a buy order.
        buying = random.uniform(0, 1) < q_taker
        # Buys at max price (or better); sells at min price (or better).
        side, price = ('buy', 2000000) if buying else ('sell', 0)
        self.quote_collector.append(
            self._make_add_quote(time, self._max_quantity, side, price))
class Provider(ZITrader):
    '''
    Provider generates limit orders (dicts) based on the make probability.
    Subclass of ZITrader
    Public attributes: trader_type, quote_collector (from ZITrader), cancel_collector, local_book
    Public methods: confirm_cancel_local, confirm_trade_local, process_signal, bulk_cancel
    '''
    def __init__(self, name, maxq, mpi, delta):
        '''Provider has its own mpi and delta; local_book tracks outstanding
        orders and cancel_collector carries cancel messages to the exchange.
        '''
        ZITrader.__init__(self, name, maxq)
        self.trader_type = 'Provider'
        self._mpi = mpi      # minimum price increment (tick size)
        self._delta = delta  # per-call probability of cancelling each order
        self.local_book = {}
        self.cancel_collector = []
    def __repr__(self):
        return 'Trader({0}, {1}, {2})'.format(self._trader_id, self._max_quantity, self.trader_type)
    def _make_cancel_quote(self, q, time):
        '''Build a cancel message (dict) for outstanding order q.'''
        return {'type': 'cancel', 'timestamp': time, 'order_id': q['order_id'], 'quantity': q['quantity'],
                'side': q['side'], 'price': q['price']}
    def confirm_cancel_local(self, cancel_dict):
        '''Remove a cancelled order from the local book.'''
        del self.local_book[cancel_dict['order_id']]
    def confirm_trade_local(self, confirm):
        '''Shrink or remove the booked order matched by this trade confirm.'''
        # NOTE(review): the "WTF???" string default makes a missing order_id
        # fail later with a TypeError; consider raising KeyError explicitly.
        to_modify = self.local_book.get(confirm['order_id'], "WTF???")
        if confirm['quantity'] == to_modify['quantity']:
            # Fully filled: drop the order from the book.
            self.confirm_cancel_local(to_modify)
        else:
            # Partially filled: reduce the outstanding quantity.
            self.local_book[confirm['order_id']]['quantity'] -= confirm['quantity']
    def bulk_cancel(self, time):
        '''bulk_cancel cancels _delta percent of outstanding orders'''
        self.cancel_collector.clear()
        lob = len(self.local_book)
        if lob > 0:
            order_keys = list(self.local_book.keys())
            # random_sample is the canonical name for the removed np.random.ranf
            # alias (same uniform [0, 1) draw; ranf was removed in NumPy 2.0).
            orders_to_delete = np.random.random_sample(lob)
            for idx in range(lob):
                if orders_to_delete[idx] < self._delta:
                    self.cancel_collector.append(self._make_cancel_quote(self.local_book.get(order_keys[idx]), time))
    def process_signal(self, time, qsignal, q_provider, lambda_t):
        '''Post a bid or an ask with probability related to q_provider.'''
        self.quote_collector.clear()
        if np.random.uniform(0,1) < q_provider:
            price = self._choose_price_from_exp('bid', qsignal['best_ask'], lambda_t)
            side = 'buy'
        else:
            price = self._choose_price_from_exp('ask', qsignal['best_bid'], lambda_t)
            side = 'sell'
        q = self._make_add_quote(time, self._max_quantity, side, price)
        self.local_book[q['order_id']] = q
        self.quote_collector.append(q)
    def _choose_price_from_exp(self, side, inside_price, lambda_t):
        '''Draw a price from an exponential distribution around the inside.'''
        # np.int() was removed in NumPy 1.24; it was an alias of the builtin
        # int(), which truncates toward zero exactly the same way.
        plug = int(lambda_t * np.log(np.random.rand()))
        if side == 'bid':
            price = inside_price - 1 - plug
        else:
            price = inside_price + 1 + plug
        return price
class Provider5(Provider):
    '''
    Provider5 generates quotes (dicts) on a 5-tick price grid.
    Subclass of Provider
    '''
    def __init__(self, name, maxq, mpi, delta):
        '''Same state as Provider; only the price-choice rule differs.'''
        Provider.__init__(self, name, maxq, mpi, delta)
    def _choose_price_from_exp(self, side, inside_price, lambda_t):
        '''Draw a price from an exponential distribution around the inside,
        rounded away from the inside to a multiple of 5 ticks.'''
        # np.int() was removed in NumPy 1.24; it was an alias of the builtin
        # int(), which truncates identically.
        plug = int(lambda_t * np.log(np.random.rand()))
        if side == 'bid':
            # Round bids down (away from the ask) to a multiple of 5.
            price = int(5 * np.floor((inside_price - 1 - plug) / 5))
        else:
            # Round asks up (away from the bid) to a multiple of 5.
            price = int(5 * np.ceil((inside_price + 1 + plug) / 5))
        return price
class MarketMaker(Provider):
    '''
    MarketMaker generates a series of quotes near the inside (dicts) based on make probability.
    Subclass of Provider
    Public attributes: trader_type, quote_collector (from ZITrader), cancel_collector (from Provider),
    cash_flow_collector
    Public methods: confirm_cancel_local (from Provider), confirm_trade_local, process_signal
    '''
    def __init__(self, name, maxq, mpi, delta, num_quotes, quote_range):
        '''_num_quotes and _quote_range determine the depth of MM quoting;
        _position and _cash_flow are the stored MM metrics
        '''
        Provider.__init__(self, name, maxq, mpi, delta)
        self.trader_type = 'MarketMaker'
        self._num_quotes = num_quotes    # quotes posted per signal
        self._quote_range = quote_range  # price span of the quote ladder
        self._position = 0               # net inventory (signed)
        self._cash_flow = 0              # cumulative signed cash flow
        self.cash_flow_collector = []
    def __repr__(self):
        return 'Trader({0}, {1}, {2}, {3})'.format(self._trader_id, self._max_quantity, self.trader_type, self._num_quotes)
    def confirm_trade_local(self, confirm):
        '''Modify _cash_flow and _position; update the local_book'''
        # Buys spend cash and grow inventory; sells do the opposite.
        if confirm['side'] == 'buy':
            self._cash_flow -= confirm['price']*confirm['quantity']
            self._position += confirm['quantity']
        else:
            self._cash_flow += confirm['price']*confirm['quantity']
            self._position -= confirm['quantity']
        # NOTE(review): the "WTF???" string default makes a missing order_id
        # fail later with a TypeError; consider raising KeyError explicitly.
        to_modify = self.local_book.get(confirm['order_id'], "WTF???")
        if confirm['quantity'] == to_modify['quantity']:
            self.confirm_cancel_local(to_modify)
        else:
            self.local_book[confirm['order_id']]['quantity'] -= confirm['quantity']
        self._cumulate_cashflow(confirm['timestamp'])
    def _cumulate_cashflow(self, timestamp):
        # Record a snapshot of cash flow and position after each trade.
        self.cash_flow_collector.append({'mmid': self._trader_id, 'timestamp': timestamp, 'cash_flow': self._cash_flow,
                                         'position': self._position})
    def process_signal(self, time, qsignal, q_provider):
        '''
        MM chooses prices from a grid determined by the best prevailing prices.
        MM never joins the best price if it has size=1.
        '''
        # make pricing explicit for now. Logic scales for other mpi and quote ranges.
        self.quote_collector.clear()
        if random.uniform(0,1) < q_provider:
            # Back off one tick rather than join a best bid of size 1.
            max_bid_price = qsignal['best_bid'] if qsignal['bid_size'] > 1 else qsignal['best_bid']-self._mpi
            prices = np.random.choice(range(max_bid_price-self._quote_range+1, max_bid_price+1, self._mpi), size=self._num_quotes)
            side = 'buy'
        else:
            min_ask_price = qsignal['best_ask'] if qsignal['ask_size'] > 1 else qsignal['best_ask']+self._mpi
            prices = np.random.choice(range(min_ask_price, min_ask_price+self._quote_range, self._mpi), size=self._num_quotes)
            side = 'sell'
        for price in prices:
            q = self._make_add_quote(time, self._max_quantity, side, price)
            self.local_book[q['order_id']] = q
            self.quote_collector.append(q)
class MarketMaker5(MarketMaker):
    '''
    MarketMaker5 places a ladder of quotes near the inside on a 5-tick grid.
    Subclass of MarketMaker
    Public methods: process_signal
    '''

    def __init__(self, name, maxq, mpi, delta, num_quotes, quote_range):
        '''
        In addition to the MarketMaker state, hold the (asymmetric) sampling
        weights applied to the 5-tick bid and ask price grids.
        '''
        MarketMaker.__init__(self, name, maxq, mpi, delta, num_quotes, quote_range)
        self._p5ask = [1/20, 1/12, 1/12, 1/12, 1/12, 1/12, 1/12, 1/12, 1/12, 1/12, 1/12, 1/12, 1/30]
        self._p5bid = [1/30, 1/12, 1/12, 1/12, 1/12, 1/12, 1/12, 1/12, 1/12, 1/12, 1/12, 1/12, 1/20]

    def process_signal(self, time, qsignal, q_provider):
        '''
        Choose quote prices from a weighted 5-tick grid anchored at the best
        prevailing prices; never join a best price of size 1.
        '''
        self.quote_collector.clear()
        if random.uniform(0, 1) < q_provider:
            side = 'buy'
            # Back off one tick rather than join a best bid of size 1.
            anchor = qsignal['best_bid'] if qsignal['bid_size'] > 1 else qsignal['best_bid'] - self._mpi
            grid = range(anchor - self._quote_range, anchor + 1, self._mpi)
            prices = np.random.choice(grid, size=self._num_quotes, p=self._p5bid)
        else:
            side = 'sell'
            anchor = qsignal['best_ask'] if qsignal['ask_size'] > 1 else qsignal['best_ask'] + self._mpi
            grid = range(anchor, anchor + self._quote_range + 1, self._mpi)
            prices = np.random.choice(grid, size=self._num_quotes, p=self._p5ask)
        for price in prices:
            quote = self._make_add_quote(time, self._max_quantity, side, price)
            self.local_book[quote['order_id']] = quote
            self.quote_collector.append(quote)
| 3.03125 | 3 |
dataset/util.py | vinay-swamy/gMVP | 2 | 12773032 | import os, sys
import json
from collections import defaultdict
import numpy as np
import pandas as pd
# Watson-Crick complements for the four DNA bases.
dna_pair = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}

# Amino-acid letter -> integer index. Ambiguous or non-standard letters
# (B, J, O, U, X, Z) all share the "unknown" index 20.
amino_acid_index_table = {
    'A': 0,
    'B': 20,
    'C': 1,
    'D': 2,
    'E': 3,
    'F': 4,
    'G': 5,
    'H': 6,
    'I': 7,
    'J': 20,
    'K': 8,
    'L': 9,
    'M': 10,
    'N': 11,
    'O': 20,
    'P': 12,
    'Q': 13,
    'R': 14,
    'S': 15,
    'T': 16,
    'U': 20,
    'V': 17,
    'W': 18,
    'X': 20,
    'Y': 19,
    'Z': 20,
}

# 3-state secondary-structure codes: helix (H), strand (E), coil (C).
ss3_encode = {'H': 0, 'E': 1, 'C': 2}
# 8-state secondary-structure codes.
ss8_encode = {'H': 0, 'G': 1, 'I': 2, 'B': 3, 'E': 4, 'S': 5, 'T': 6, 'C': 7}
# Collapse the 8-state codes down to the 3-state scheme (helix/strand/coil).
ss8_ss3_encode = {
    'H': 0,
    'G': 0,
    'I': 0,
    'B': 1,
    'E': 1,
    'S': 2,
    'T': 2,
    'C': 2
}
def read_fasta(fasta_path, name_rule=None):
    '''Parse a FASTA file into a mapping of sequence name -> sequence.

    Leftover debug ``print`` calls (which echoed the path and every line of
    the file) have been removed.

    Parameters
    ----------
    fasta_path : str
        Path to the FASTA file
    name_rule : callable or None
        Optional function mapping a raw header line (including '>') to the
        key to use; by default the first whitespace-delimited token after
        '>' becomes the key

    Returns
    -------
    collections.defaultdict
        Maps each sequence name to its concatenated sequence string
    '''
    fasta = defaultdict(str)
    head = None
    seq = ''
    with open(fasta_path) as f:
        for line in f:
            # Skip blank lines anywhere in the file.
            if len(line.strip()) == 0:
                continue
            if line.startswith('>'):
                # Flush the previous record before starting a new one.
                if seq != '':
                    fasta[head] = seq
                    seq = ''
                if name_rule is None:
                    head = line.strip().split()[0][1:]
                else:
                    head = name_rule(line)
            else:
                seq += line.strip()
    # Flush the final record.
    if seq != '':
        fasta[head] = seq
    return fasta
def aa_index(aa):
    '''Return the integer index for amino-acid letter *aa*; unknown letters map to 20.'''
    try:
        return amino_acid_index_table[aa]
    except KeyError:
        return 20
def get_complement_dna(dna):
    '''Return the base-wise complement of *dna*; unknown bases become "-".'''
    complements = (dna_pair.get(base, '-') for base in dna.strip())
    return ''.join(complements)
def get_reverse_dna(dna):
    '''Return the reverse strand (reverse complement) of *dna*.

    Note: unlike get_complement_dna, an unknown base raises KeyError here.
    '''
    complement = [dna_pair[base] for base in dna]
    complement.reverse()
    return ''.join(complement)
def read_vep(input_path, read_id=True):
    '''Load a VEP (Variant Effect Predictor) output table and reduce it to
    canonical missense SNVs with derived columns.

    Parameters
    ----------
    input_path : str
        Path to the tab-delimited VEP output; leading lines beginning with
        "## " are treated as header comments and skipped
    read_id : bool
        If True, parse "label" and "source" out of the pipe-delimited
        "#Uploaded_variation" field; otherwise fill with -1 / "unknown"

    Returns
    -------
    pandas.DataFrame
        One row per unique variant, restricted to the columns listed in
        ``head``

    NOTE(review): this function references the names ``used_tr``,
    ``_get_protein_var`` and ``_get_var``, which are not defined in this
    file as shown -- confirm they are provided elsewhere before use.
    '''
    head = [
        'grch38_chrom', 'gch38_pos', 'ref', 'alt', 'ref_codon', 'alt_codon',
        'frame', 'transcript_stable_id', 'protein_len', 'aa_pos', 'ref_aa',
        'alt_aa'
    ]
    # Count the "## " comment lines at the top so read_csv can skip them.
    skiprows = 0
    with open(input_path) as f:
        for line in f:
            if not line.startswith('## '):
                break
            skiprows += 1
    df = pd.read_csv(input_path, sep='\t', skiprows=skiprows)
    #filters: canonical transcripts, SNVs only, missense consequences
    df = df[df['CANONICAL'] == 'YES']
    if 'VARIANT_CLASS' in df.columns:
        df = df[df['VARIANT_CLASS'] == 'SNV']
    df = df[df['Consequence'].apply(lambda x: 'missense_variant' in x)]
    if read_id:
        # "#Uploaded_variation" is pipe-delimited; last field is the label.
        df['label'] = df['#Uploaded_variation'].apply(
            lambda x: int(x.split('|')[-1]))
        df['source'] = df['#Uploaded_variation'].apply(
            lambda x: x.split('|')[2])
    else:
        df['label'] = -1
        df['source'] = 'unknown'
    # Strip the version suffix from transcript ids (ENSTxxxx.N -> ENSTxxxx).
    df['transcript_stable_id'] = df['Feature'].apply(lambda x: x.split('.')[0])
    # Keep only transcripts in the (externally defined) used_tr set.
    df = df[df['transcript_stable_id'].apply(lambda x: x in used_tr)]
    df['protein_var'] = df.apply(_get_protein_var, axis=1)
    df['var'] = df.apply(_get_var, axis=1)
    def _get_af(x):
        # gnomAD allele frequency: "-" means absent, treated as 0.0.
        if type(x) == str and x == '-':
            return 0.0
        return float(x)
    df['af'] = df['gnomAD_AF'].apply(_get_af)
    def _get_frame(x):
        # Position of the first uppercase letter in the Codons string,
        # i.e. the position of the changed base within the codon.
        r = 0
        for a in x:
            if a.isupper():
                return r
            r += 1
        assert 1 == 2
        return 0
    df['frame'] = df['Codons'].apply(_get_frame)
    df = df.drop_duplicates(['var'])
    df = df[head]
    return df
def parse_uniprot_isoform():
    '''Placeholder; not implemented yet.'''
    pass
| 2.625 | 3 |
LedControl.py | looterwar/LightPy | 0 | 12773033 | <gh_stars>0
import time
from neopixel import *
from random import randint
# LED strip configuration (values consumed by the Adafruit_NeoPixel
# constructor below; targets a 62-pixel WS2811 GRB strip on GPIO 18):
LED_COUNT = 62 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
#LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
LED_STRIP = ws.WS2811_STRIP_GRB # Strip type and colour ordering
if __name__ == '__main__':
# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP) | 3 | 3 |
map_reduce_dataproc.py | originalankur/distributed-data-processing-with-python-intro-talk | 0 | 12773034 | <filename>map_reduce_dataproc.py
import json
from mrjob.job import MRJob
from mrjob.step import MRStep
class MRYelpTopCity(MRJob):
    """
    Find the city with the most restaurants in the Yelp dataset.
    https://www.yelp.com/dataset

    Each line of business.json is a JSON business record containing a
    "city" field; the job counts businesses per city and then reduces to
    the single city with the largest count.
    """

    def mapper_get_city(self, key, line):
        """Emit (city, 1) per business record; malformed lines are skipped."""
        try:
            record = json.loads(line)
            # Records without a usable city are grouped under 'missing_city'.
            if record.get('city'):
                yield record['city'], 1
            else:
                yield 'missing_city', 1
        except:
            # Deliberately swallow unparseable lines.
            pass

    def combiner_count_cities(self, city, occurrences):
        """Pre-aggregate counts on each worker before the shuffle."""
        yield city, sum(occurrences)

    def reducer_count_cities(self, city, counts):
        """Total each city's count; emit under one key for the final max step."""
        yield None, (sum(counts), city)

    def reducer_find_max_city(self, _, city_count_pairs):
        """Pick the (count, city) pair with the largest count."""
        yield max(city_count_pairs)

    def steps(self):
        return [
            MRStep(mapper=self.mapper_get_city,
                   combiner=self.combiner_count_cities,
                   reducer=self.reducer_count_cities),
            MRStep(reducer=self.reducer_find_max_city),
        ]
]
# Entry point: mrjob parses argv and dispatches the configured steps.
if __name__ == '__main__':
    MRYelpTopCity.run()
| 3.375 | 3 |
face_detection/face_train.py | tiago369/opencv-course | 0 | 12773035 | import os
import cv2
from matplotlib.pyplot import gray
import numpy as np
# Class names; a person's index in this list is used as its training label.
people = ['<NAME>', '<NAME>', '<NAME>', 'Madonna', '<NAME>']
# Root folder holding one sub-directory of training images per person.
DIR = r'/home/senai/tiago-projects/opencv-course/Resources/Faces/train'

# Pre-trained Haar cascade used for face detection.
haar_cascade = cv2.CascadeClassifier('/home/senai/tiago-projects/opencv-course/face_detection/haar_face.xml')

# Accumulators filled by create_train(): face crops and matching labels.
features = []
labels = []
def create_train():
    '''Populate the module-level features/labels lists with one grayscale
    face crop (and its person index) per face detected in the training set.
    '''
    # enumerate() gives each person its list position directly; the original
    # people.index(person) repeated a linear search and returned the FIRST
    # matching position, which breaks labeling when names are duplicated.
    for label, person in enumerate(people):
        person_dir = os.path.join(DIR, person)
        for filename in os.listdir(person_dir):
            img_path = os.path.join(person_dir, filename)
            img_array = cv2.imread(img_path)
            gray = cv2.cvtColor(img_array, cv2.COLOR_BGR2GRAY)
            # Haar cascade returns one (x, y, w, h) box per detected face.
            faces_rect = haar_cascade.detectMultiScale(
                gray, scaleFactor=1.1, minNeighbors=4)
            for (x, y, w, h) in faces_rect:
                features.append(gray[y:y+h, x:x+w])
                labels.append(label)
# Build the training set of face crops and their person labels.
create_train()

# Face crops differ in size, so the array must be dtype=object; NumPy >= 1.24
# raises ValueError when implicitly creating a ragged array without it.
features = np.array(features, dtype=object)
labels = np.array(labels)

face_recognizer = cv2.face.LBPHFaceRecognizer_create()
# Train the recognizer on the features list and the labels list
face_recognizer.train(features, labels)

# Persist the trained model and the raw training arrays for reuse.
face_recognizer.save('face_trained.yml')
np.save('features.npy', features)
np.save('labels.npy', labels)
| 3.296875 | 3 |
pyl4c/science.py | arthur-e/pyl4c | 1 | 12773036 | '''
Specialized scientific functions for biogeophysical variables and L4C model
processes.
'''
import numpy as np
from functools import partial
from scipy.ndimage import generic_filter
from scipy.linalg import solve_banded
from scipy.sparse import dia_matrix
from pyl4c import suppress_warnings
from pyl4c.data.fixtures import HDF_PATHS, BPLUT
from pyl4c.utils import get_pft_array, subset
from pyl4c.stats import ols, ols_variance, linear_constraint
def arrhenius(
        tsoil, beta0: float, beta1: float = 66.02, beta2: float = 227.13):
    r'''
    The Arrhenius equation for the response of enzymes to (soil)
    temperature, clipped onto the closed interval [0, 1].

    $$
    f(T_{SOIL}) = \mathrm{exp}\left[\beta_0\left( \frac{1}{\beta_1} -
        \frac{1}{T_{SOIL} - \beta_2} \right) \right]
    $$

    Parameters
    ----------
    tsoil : numpy.ndarray
        Array of soil temperature in degrees K
    beta0 : float
        Multiplier applied to the inverse-temperature difference (deg K)
    beta1 : float
        Reference temperature whose inverse anchors the response (deg K)
    beta2 : float
        Offset subtracted from the soil temperature (deg K)

    Returns
    -------
    numpy.ndarray
        Soil temperature response, on [0, 1]
    '''
    inv_ref = 1.0 / beta1
    inv_tsoil = 1.0 / np.subtract(tsoil, beta2)
    unclipped = np.exp(beta0 * (inv_ref - inv_tsoil))
    # exp() can exceed 1 when tsoil - beta2 > beta1; constrain onto [0, 1]
    return np.clip(unclipped, 0, 1)
def bias_correction_parameters(
        series, npoly: int = 1, cutoff: float = 1, var_cutoff: float = None,
        add_intercept: bool = True):
    '''
    Calculate the bias correction parameters for two overlapping time series,
    nominally the Nature Run and L4C Operational products, of a given
    variable using quantile mapping. For example, can correct the bias in
    Nature Run (2000-2017) against the L4C Ops record (2015-Present) by
    fitting bias correction parameters for the overlap period 2015-2017.
    Model can be specified:

        y = alpha + X beta_0 + X^2 beta_1 + ...

    NOTE: Because Nature Run and L4C Ops compare very well in some locations,
    a degree-1 polynomial (straight line) is fit first (regardless of npoly);
    if this solution produces corrections that are <1 gC m^-2, the degree-1
    solution is used. In some areas, there is a strong linear correspondence
    between most measurements but a small number have a super-linear
    relationship that is poorly fit by a degree-2 polynomial; in these cases
    (where model variance of the degree-2 fit is > var_cutoff), the degree-1
    solution is used. Forcing the line of best fit through the origin (with
    intercept=False) is also not recommended.

    Parameters
    ----------
    series : numpy.ndarray
        A (t x 2) NumPy array where t rows correspond to t time steps and
        each column is a product; the first column is the reference product
        or dependent variable in the linear bias correction.
    npoly : int
        Degree of the polynomial to use in bias correction (Default: 1)
    cutoff : float
        Cutoff for the degree-1 bias correction, in data units (e.g.,
        1 g C m-2 day-1); defaults to 1.0. If this cutoff is exceeded, the
        degree-1 solution is returned.
    var_cutoff : float or None
        Cutoff in variance for higher-order solutions; if the residual model
        variance exceeds this threshold for the degree-N solution, then return
        the degree (N-1) solution (Default: None)
    add_intercept : bool
        True to add a the y-intercept term (Default: True)

    Returns
    -------
    numpy.ndarray
        A vector of length N + 1 where N is the degree of the polynomial
        fit requested
    '''
    def xmat(x, npoly):
        # Creates the design/ model matrix for a polynomial series
        # Add a column for each power of the requested polynomial series
        x = np.repeat(x.reshape((t, 1)), npoly, axis = 1)
        for i in range(1, npoly):
            # Calculate X^n for n up to N powers
            # NOTE(review): exponent is `npoly + 1` for every column i;
            # a per-column power would be `i + 1` -- confirm intent.
            x[:,i] = np.power(x[:,0], npoly + 1)
        return x
    def fit(x, y, npoly):
        # Fits the model using OLS; returns NaNs when no fit is possible
        # If all of the Y values are NaN
        if np.all(np.isnan(y)): return np.ones((npoly + 1,)) * np.nan
        try:
            return ols(xmat(x, npoly), y, add_intercept)
        except np.linalg.linalg.LinAlgError:
            return np.ones((npoly + 1,)) * np.nan
    # Sort the input series from low -> high (quantile mapping)
    t = series.shape[0]
    y = np.sort(series[:,0])
    x = np.sort(series[:,1])
    # For some pixels, the time series has zero variance, and this can produce
    # unstable OLS estimates (e.g., zero slope)
    if np.var(y) == 0 or np.var(x) == 0:
        # Return coefficients: (0, 1, 0, ..., 0)
        return np.hstack(((0, 1), list(0 for i in range(1, npoly))))
    # NOTE(review): this branch is unreachable; the `or` condition above
    # already covers the `and` case.
    if np.var(y) == 0 and np.var(x) == 0:
        # Intercept (mean) is the only necessary predictor
        return np.hstack(((1, 0), list(0 for i in range(1, npoly))))
    # Always fit the degree-1 model first, zero-padded to length npoly + 1
    fit1 = np.hstack(
        (fit(x, y, npoly = 1), list(0 for i in range(1, npoly))))
    if npoly == 1:
        return fit1
    # First, try a degree-1 polynomial (straight-line) fit; if the bias
    # correction slope is such that the correction is < 1 gC/m^-2,
    # which is similar to the average impact of L4SM vs. model-only
    # observations, then use the degree-1 fit parameters
    if x.mean() - (fit1[1] * x.mean()) < cutoff:
        return fit1
    # Second, starting with the simpler model, check if progressively more
    # complicated models (up to a maximum of npoly) really do fit the data
    # better; if not, or if the model variance is above a cutoff, use the
    # next most-complicated model (last_model)
    last_model = fit1 # Starting with the simplest model...
    for p in range(2, npoly + 1):
        model = fit(x, y, npoly = p)
        # Calculates unbiased estimate of model variance
        model_var = ols_variance(xmat(x, p), y, model, add_intercept)
        # Without a cutoff for guidance, if the model variance of the degree-1
        # fit is lower than that of the degree-2 fit...
        if var_cutoff is None:
            # NOTE(review): the comparison always uses the degree-1 design
            # matrix xmat(x, 1) with the first p coefficients of last_model;
            # for p > 2 this may not match the intended degree (p - 1) model.
            if model_var > ols_variance(
                    xmat(x, 1), y, last_model[0:p], add_intercept):
                return last_model
        else:
            if model_var > var_cutoff:
                return last_model
        last_model = model
    # Unless a simpler model was better, return coefficients for the requested
    # polynomial degree
    return model
def climatology365(series, dates):
    '''
    Computes a 365-day climatology for different locations from a time series
    of length T. Ignores leap days. The climatology could then be indexed
    using ordinals generated by `ordinals365()`.

    NOTE(review): leap years are detected with `year % 4 == 0`, which is
    correct for 1901-2099 but misclassifies century years such as 2100.

    Parameters
    ----------
    series : numpy.ndarray
        T x ... array of data
    dates : list or tuple
        Sequence of datetime.datetime or datetime.date instances

    Returns
    -------
    numpy.ndarray
        (365 x ...) array: the mean across years for each calendar day
    '''
    @suppress_warnings
    def calc_climatology(x):
        # Mean over all time steps that share the same calendar day;
        # suppress_warnings hides the all-NaN slice warnings from nanmean.
        return np.array([
            np.nanmean(x[ordinal == day,...], axis = 0)
            for day in range(1, 366)
        ])
    # Map each date to a 1..365 ordinal with Leap Day removed
    ordinal = np.array([
        # Finally, subtract 1 from each day in a leap year after Leap Day
        (doy - 1) if ((dates[i].year % 4 == 0) and doy >= 60) else doy
        for i, doy in enumerate([
            # Next, fill in 0 wherever Leap Day occurs
            0 if (dates[i].year % 4 == 0 and doy == 60) else doy
            for i, doy in enumerate([
                # First, convert datetime.datetime to ordinal day-of-year (DOY)
                int(dt.strftime('%j')) for dt in dates
            ])
        ])
    ])
    return calc_climatology(series)
def daynight_partition(arr_24hr, updown, reducer = 'mean'):
    '''
    Partitions a 24-hour time series array into daytime and nighttime values,
    then calculates the mean in each group. Daytime is defined as when the sun
    is above the horizon; nighttime is the complement.

    Parameters
    ----------
    arr_24hr : numpy.ndarray
        A size (24 x ...) array; the first axis must have 24 elements
        corresponding to the measurement in each hour
    updown: numpy.ndarray
        A size (2 x ...) array, compatible with arr_24hr, where the first axis
        has the hour of sunrise and sunset, in that order, for each element;
        (-1, -1) marks locations where the sun never rises
    reducer : str
        One of "mean" or "sum" indicating whether an average or a total of the
        daytime/ nighttime values should be calculated; e.g., for "mean", the
        hourly values from daytime hours are added up and divided by the
        length of the day (in hours).

    Returns
    -------
    numpy.ndarray
        A size (2 x ...) array where the first axis enumerates the daytime and
        nighttime mean values, respectively
    '''
    assert reducer in ('mean', 'sum'),\
        'Argument "reducer" must be one of: "mean", "sum"'
    # Prepare single-valued output array
    arr_daytime = np.zeros(arr_24hr.shape[1:])
    arr_nighttime = arr_daytime.copy()
    daylight_hrs = arr_daytime.copy().astype(np.int16)
    # Do sunrise and sunset define an interval? (Sunset > Sunrise)?
    inside_interval = np.apply_along_axis(lambda x: x[1] > x[0], 0, updown)
    # Or is the sun never up?
    never_up = np.logical_and(updown[0,...] == -1, updown[1,...] == -1)
    # Iteratively sum daytime VPD and temperature values
    for hr in range(0, 24):
        # Given only hour of sunrise/set on a 24-hour clock...
        #   if sun rises and sets on same day: SUNRISE <= HOUR <= SUNSET;
        #   if sun sets on next day: either SUNRISE <= HOUR or HOUR <= SUNSET;
        sun_is_up = np.logical_or( # Either...
            np.logical_and(inside_interval, # ...Rises and sets same day
                np.logical_and(updown[0,...] <= hr, hr <= updown[1,...])),
            np.logical_and(~inside_interval, # ...Sets on next day
                np.logical_or(updown[0,...] <= hr, hr <= updown[1,...])))
        # For simplicity, compute a 24-hour mean even if the sun never rises;
        #   there's no way to know what the "correct" daytime value is
        mask = np.logical_or(never_up, sun_is_up)
        # Accumulate this hour's value into the day or night total
        np.add(np.where(
            mask, arr_24hr[hr,...], 0), arr_daytime, out = arr_daytime)
        np.add(np.where(
            ~mask, arr_24hr[hr,...], 0), arr_nighttime, out = arr_nighttime)
        # Keep track of the denominator (hours) for calculating the mean;
        #   note that this over-estimates actual daylight hours by 1 hour
        #   but results in the correct denominator for the sums above
        np.add(np.where(mask, 1, 0), daylight_hrs, out = daylight_hrs)
    arr_24hr = None
    # Calculate mean quantities
    if reducer == 'mean':
        arr_daytime = np.divide(arr_daytime, daylight_hrs)
        arr_nighttime = np.divide(arr_nighttime, 24 - daylight_hrs)
    # For sites where the sun is always above/ below the horizon, set missing
    #   nighttime values to zero
    arr_nighttime[~np.isfinite(arr_nighttime)] = 0
    return np.stack((arr_daytime, arr_nighttime))
def e_mult(params, tmin, vpd, smrz, ft):
    '''
    Compute E_mult, the multiplicative environmental constraint on gross
    primary productivity (GPP), from the current model parameters.

    Expected parameter names: "tmin0"/"tmin1" (bounds of the ramp on minimum
    air temperature), "vpd0"/"vpd1" (reversed ramp on vapor pressure
    deficit), "smrz0"/"smrz1" (ramp on root-zone soil moisture), and "ft0"
    (multiplier under frozen-ground conditions).

    Parameters
    ----------
    params : dict
        A dict-like data structure with named model parameters
    tmin : numpy.ndarray
        (T x N) minimum air temperature (deg K), T time steps by N sites
    vpd : numpy.ndarray
        (T x N) vapor pressure deficit (Pa)
    smrz : numpy.ndarray
        (T x N) root-zone soil moisture wetness (%)
    ft : numpy.ndarray
        (T x N) binary freeze-thaw status (Frozen = 0, Thawed = 1)

    Returns
    -------
    numpy.ndarray
    '''
    ramp_tmin = linear_constraint(params['tmin0'], params['tmin1'])
    ramp_vpd = linear_constraint(params['vpd0'], params['vpd1'], 'reversed')
    ramp_smrz = linear_constraint(params['smrz0'], params['smrz1'])
    ramp_ft = linear_constraint(params['ft0'], 1.0, 'binary')
    # E_mult is the product of the four environmental ramp responses.
    result = ramp_tmin(tmin)
    result = result * ramp_vpd(vpd)
    result = result * ramp_smrz(smrz)
    return result * ramp_ft(ft)
def k_mult(params, tsoil, smsf):
    '''
    Compute the environmental-constraint multiplier for soil heterotrophic
    respiration (RH), K_mult. Uses an Arrhenius function of soil temperature
    (parameter "tsoil") and a linear ramp on surface soil wetness
    (parameters "smsf0" and "smsf1").

    Parameters
    ----------
    params : dict
        A dict-like data structure with named model parameters
    tsoil : numpy.ndarray
        (T x N) vector of soil temperature (deg K), where T is the number of
        time steps, N the number of sites
    smsf : numpy.ndarray
        (T x N) vector of surface soil wetness (%)

    Returns
    -------
    numpy.ndarray
    '''
    # Temperature response: Arrhenius curve parameterized by beta0
    tsoil_term = arrhenius(tsoil, beta0 = params['tsoil'])
    # Moisture response: linear ramp between the smsf bounds
    smsf_term = linear_constraint(params['smsf0'], params['smsf1'])(smsf)
    return tsoil_term * smsf_term
def litterfall_casa(lai, years, dt = 1/365):
    '''
    Calculates daily litterfall fraction after the CASA model (Randerson et
    al. 1996). Computes the fraction of evergreen versus deciduous canopy and
    allocates a constant daily fraction (out of the year) for evergreen canopy
    but a varying daily fraction for deciduous, where the fraction varies with
    "leaf loss," a function of leaf area index (LAI). Canopies are assumed to
    be a mix of evergreen and deciduous, so the litterfall fraction is a sum
    of these two approaches.

    <NAME>., <NAME>, <NAME>., <NAME>., &
    <NAME>. (1996). Substrate limitations for heterotrophs: Implications
    for models that estimate the seasonal cycle of atmospheric CO2.
    *Global Biogeochemical Cycles,* 10(4), 585-602.

    The approach here is a bit different from Randerson et al. (1996) because
    we re- calculate the evergreen fraction each year; however, this is a
    reasonable elaboration that, incidentally, accounts for potential changes
    in the evergreen-vs-deciduous mix of the canopy. The result is an array
    of daily litterfall fractions, i.e., the result multiplied by the annual
    NPP sum (for a given site and year) obtains the daily litterfall.

    Parameters
    ----------
    lai : numpy.ndarray
        The (T x N) leaf-area index (LAI) array, for T time steps and N sites
    years : numpy.ndarray
        A length-T 1D array indexing the years, e.g., [2001, 2001, 2001, ...];
        used to identify which of T time steps belong to a year, so that
        litterfall fractions sum to one over a year
    dt : float
        The fraction of a year that each time step represents, e.g., for daily
        time steps, should be close to 1/365 (Default: 1/365)

    Returns
    -------
    numpy.ndarray
        The fraction of available inputs (e.g., annual NPP) that should be
        allocated to litterfall at each time step
    '''
    def leaf_loss(lai):
        # Leaf loss function from CASA, a triangular averaging function
        # centered on the current date, where the right limb of the
        # triangle is subtracted from the left limb (leading minus
        # lagged LAI is equated to leaf loss); "mirror" mode reflects the
        # series at its ends so the 5-point window is defined everywhere
        ll = generic_filter(
            lai, lambda x: (0.5 * x[0] + x[1]) - (x[3] + 0.5 * x[4]),
            size = 5, mode = 'mirror')
        return np.where(ll < 0, 0, ll) # Leaf loss cannot be < 0
    # Get leaf loss at each site (column-wise)
    ll = np.apply_along_axis(leaf_loss, 0, lai)
    ll = np.where(np.isnan(ll), 0, ll) # Fill NaNs with zero leaf loss
    unique_years = np.unique(years).tolist()
    unique_years.sort()
    for each_year in unique_years:
        # For those dates in this year...
        idx = years == each_year
        # Calculate the evergreen fraction (ratio of min LAI to mean LAI over
        # the course of a year)
        efrac = np.apply_along_axis(
            lambda x: np.nanmin(x) / np.nanmean(x), 0, lai[idx,:])
        # Calculate sum of 1/AnnualNPP (Evergreen input) plus daily leaf loss
        # fraction (Deciduous input); Evergreen canopies have constant daily
        # inputs
        # NOTE(review): if a site has zero total leaf loss in a year, the
        # division below produces NaN for the deciduous term -- confirm
        # whether callers expect that or whether it should be filled with 0
        ll[idx,:] = (efrac * dt) + (1 - efrac) * np.divide(
            ll[idx,:], ll[idx,:].sum(axis = 0))
    return ll
def mean_residence_time(
        hdf, units = 'years', subset_id = None, nodata = -9999):
    '''
    Calculates the mean residence time (MRT) of soil organic carbon (SOC)
    pools as the quotient of SOC stock size and heterotrophic respiration
    (RH). Chen et al. (2013, Global and Planetary Change), provide a formal
    equation for mean residence time: (SOC/R_H).

    Parameters
    ----------
    hdf : h5py.File
        The HDF5 file / h5py.File object
    units : str
        Either "years" (default) or "days"
    subset_id : str
        (Optional) Can provide keyword designating the desired subset area
    nodata : float
        (Optional) The NoData or Fill value (Default: -9999)

    Returns
    -------
    tuple
        Tuple of: subset array, xoff, yoff, i.e., (numpy.ndarray, Int, Int)
    '''
    assert units in ('days', 'years'), 'The units argument must be one of: "days" or "years"'
    # BUG FIX: these were previously bound as `soc_field`/`rh_field` but
    # referenced below as `soc_path`/`rh_path`, raising NameError on every call
    soc_path = HDF_PATHS['SPL4CMDL']['4']['SOC']
    rh_path = HDF_PATHS['SPL4CMDL']['4']['RH']
    if subset_id is not None:
        # Get X- and Y-offsets while we're at it
        soc, xoff, yoff = subset(
            hdf, soc_path, None, None, subset_id = subset_id)
        rh, _, _ = subset(
            hdf, rh_path, None, None, subset_id = subset_id)
    else:
        xoff = yoff = 0
        soc = hdf[soc_path][:]
        rh = hdf[rh_path][:]
    # Find those areas of NoData in either array
    mask = np.logical_or(soc == nodata, rh == nodata)
    # MRT = SOC / RH; units of the result follow the units of RH (per day)
    mrt = np.divide(soc, rh)
    if units == 'years':
        # NOTE: No need to guard against NaNs/ NoData here because of mask
        mrt = np.divide(mrt, 365.0)
    np.place(mrt, mask, nodata) # Put NoData values back in
    return (mrt, xoff, yoff)
def npp(
        hdf, use_subgrid = False, subset_id = None, subset_bbox = None,
        nodata = -9999):
    '''
    Calculates net primary productivity (NPP), based on the carbon use
    efficiency (CUE) of each plant functional type (PFT). NPP is derived
    as: `NPP = GPP * CUE`, where `CUE = NPP/GPP`.

    Parameters
    ----------
    hdf : h5py.File
        The HDF5 file / h5py.File object
    use_subgrid : bool
        True to use the 1-km subgrid; currently unsupported and raises
        NotImplementedError
    subset_id : str
        (Optional) Can provide keyword designating the desired subset area
    subset_bbox : list or tuple
        (Optional) Can provide a bounding box to define a desired subset area
    nodata : float
        The NoData value to mask (Default: -9999)

    Returns
    -------
    numpy.ndarray
        NPP values on an EASE-Grid 2.0 array
    '''
    grid = 'M01' if use_subgrid else 'M09'
    # Per-pixel CUE derived from the PFT map on the chosen grid
    cue_map = cue(get_pft_array(grid, subset_id, subset_bbox))
    if use_subgrid:
        raise NotImplementedError('No support for the 1-km subgrid')
    if subset_id is None and subset_bbox is None:
        gpp = hdf['GPP/gpp_mean'][:]
    else:
        gpp, _, _ = subset(
            hdf, 'GPP/gpp_mean', subset_id = subset_id,
            subset_bbox = subset_bbox)
    # Mask fill values so they do not propagate into the product
    gpp[gpp == nodata] = np.nan
    return gpp * cue_map
def ordinals365(dates):
    '''
    Returns a length-T sequence of ordinals on [1,365]. Can be used for
    indexing a 365-day climatology; see `climatology365()`. In leap years,
    ordinals from 29 February onward are shifted down by one so that 1 March
    is always 60 and 31 December is always 365 (29 February shares ordinal
    59 with 28 February).

    Parameters
    ----------
    dates : list or tuple
        Sequence of datetime.datetime or datetime.date instances

    Returns
    -------
    list
    '''
    import calendar  # local import: the module's import block may lack it
    result = []
    for d in dates:
        doy = int(d.strftime('%j'))
        # BUG FIX: previously tested `year % 4 == 0`, which wrongly treats
        # century non-leap years (1900, 2100, ...) as leap years;
        # calendar.isleap applies the full Gregorian rule
        if calendar.isleap(d.year) and doy >= 60:
            doy -= 1
        result.append(doy)
    return result
def rescale_smrz(smrz0, smrz_min, smrz_max = 100):
    '''
    Rescales root-zone soil-moisture (SMRZ); original SMRZ is in percent
    saturation units. NOTE: Although Jones et al. (2017) write "SMRZ_wp is
    the plant wilting point moisture level determined by ancillary soil
    texture data provided by L4SM..." in actuality it is just `smrz_min`.

    Parameters
    ----------
    smrz0 : numpy.ndarray
        (T x N) array of original SMRZ data, in percent (%) saturation units
        for N sites and T time steps
    smrz_min : numpy.ndarray or float
        Site-level long-term minimum SMRZ (percent saturation)
    smrz_max : numpy.ndarray or float
        Site-level long-term maximum SMRZ (percent saturation); can optionally
        provide a fixed upper-limit on SMRZ; useful for calculating SMRZ100.

    Returns
    -------
    numpy.ndarray
    '''
    smrz0 = np.asarray(smrz0)
    # BUG FIX: the docstring promises float bounds, but a plain Python float
    # has no `.ndim`; coerce to arrays so scalars broadcast correctly
    smrz_min = np.asarray(smrz_min)
    smrz_max = np.asarray(smrz_max)
    if smrz_min.ndim == 1:
        # Add a leading time axis so per-site minima broadcast over T
        smrz_min = smrz_min[np.newaxis,:]
    assert smrz0.ndim == 2,\
        'Expected smrz0 to be a 2D array'
    if smrz_min.ndim > 0:
        assert smrz0.shape[1] == smrz_min.shape[-1],\
            'smrz_min should have one value per site'
    # Clip input SMRZ to the lower, upper bounds
    smrz0 = np.where(smrz0 < smrz_min, smrz_min, smrz0)
    smrz0 = np.where(smrz0 > smrz_max, smrz_max, smrz0)
    # Normalize onto (1, 101]: value 1 at smrz_min, 101 at smrz_max
    smrz_norm = np.add(np.multiply(100, np.divide(
        np.subtract(smrz0, smrz_min),
        np.subtract(smrz_max, smrz_min))), 1)
    # Log-transform normalized data and rescale to range between
    # 5.0 and 100 (% saturation)
    return np.add(
        np.multiply(95, np.divide(np.log(smrz_norm), np.log(101))), 5)
def soc_analytical_spinup(litterfall, k_mult, fmet, fstr, decay_rates):
    r'''
    Using the solution to the differential equations governing change in the
    soil organic carbon (SOC) pools, calculates the steady-state size of each
    SOC pool.

    Steady states:
    $$
    C_{met} = \frac{f_{met} \sum NPP}{R_{opt} \sum K_{mult}} \qquad
    C_{str} = \frac{(1 - f_{met})\sum NPP}{R_{opt}\, k_{str} \sum K_{mult}} \qquad
    C_{rec} = \frac{f_{str}\, k_{str}\, C_{str}}{k_{rec}}
    $$

    Parameters
    ----------
    litterfall : numpy.ndarray
        Average daily litterfall
    k_mult : numpy.ndarray
        The K_mult climatology, i.e., a (365 x N x 81) array of the long-term
        average K_mult value at each of N sites (with 81 1-km subgrid sites)
    fmet : numpy.ndarray
        The f_metabolic model parameter, as an (N x 81) array
    fstr : numpy.ndarray
        The f_structural model parameter, as an (N x 81) array
    decay_rates : numpy.ndarray
        The optimal decay rates for each SOC pool, as a (3 x N x 81) array

    Returns
    -------
    tuple
        A 3-element tuple, each element the steady-state values for that pool,
        i.e., `(metabolic, structural, recalcitrant)`
    '''
    # litterfall is an average *daily* value; multiply by 365 for the annual sum
    annual_input = litterfall * 365
    k_sum = np.sum(k_mult, axis = 0)
    metabolic = (fmet * annual_input) / (decay_rates[0, ...] * k_sum)
    structural = ((1 - fmet) * annual_input) / (decay_rates[1, ...] * k_sum)
    recalcitrant = (fstr * decay_rates[1, ...] * structural) / decay_rates[2, ...]
    pools = (metabolic, structural, recalcitrant)
    # Replace undefined (NaN) steady states with empty pools
    for pool in pools:
        pool[np.isnan(pool)] = 0
    return pools
def tridiag_solver(tri, r, kl = 1, ku = 1, banded = None):
    '''
    Solves the tridiagonal system Ax = r in banded (sparse diagonal) form.
    The banded representation stacks the diagonals row-wise, lowest diagonal
    first, e.g., for matrix:

        A = [[10., 2., 0., 0.],
             [ 3., 10., 4., 0.],
             [ 0., 1., 7., 5.],
             [ 0., 0., 3., 4.]]

        banded = [[ 3., 1., 3., 0.],
                  [10., 10., 7., 4.],
                  [ 0., 2., 4., 5.]]

    Pass this representation via the optional `banded` argument when you can
    build it faster than `scipy.sparse.dia_matrix()` does.

    Parameters
    ----------
    tri : numpy.ndarray
        A tridiagonal matrix (N x N)
    r : numpy.ndarray
        Vector of solutions to the system, Ax = r, where A is the tridiagonal
        matrix
    kl : int
        Lower bandwidth (number of lower diagonals) (Default: 1)
    ku : int
        Upper bandwidth (number of upper diagonals) (Default: 1)
    banded : numpy.ndarray
        (Optional) The banded matrix with diagonals along the rows

    Returns
    -------
    numpy.ndarray
    '''
    assert tri.ndim == 2 and (tri.shape[0] == tri.shape[1]),\
        'Only supports 2-dimensional square matrices'
    if banded is None:
        banded = dia_matrix(tri).data
    # dia_matrix stores diagonals bottom-up, but solve_banded expects the
    # uppermost diagonal in the first row -- hence the vertical flip
    ab = np.flipud(banded)
    return solve_banded((kl, ku), ab, r)
def vpd(qv2m, ps, temp_k):
    r'''
    Calculates vapor pressure deficit (VPD); unfortunately, the provenance
    of this formula cannot be properly attributed. It is taken from the
    SMAP L4C Science code base, so it is exactly how L4C calculates VPD.

    $$
    \mathrm{VPD} = 610.7 \times \mathrm{exp}\left(
        \frac{17.38 \times T_C}{239 + T_C}
    \right) - \frac{(P \times [\mathrm{QV2M}]}{0.622 + (0.378 \times [\mathrm{QV2M}])}
    $$

    Where P is the surface pressure (Pa), QV2M is the water vapor mixing
    ratio at 2-meter height, and T is the temperature in degrees C (though
    this function requires units of Kelvin when called).

    NOTE: A variation on this formula can be found in the text:
    Principles of Environmental Physics, 2nd. Ed. Edward Arnold Publisher.
    See also: https://glossary.ametsoc.org/wiki/Mixing_ratio

    Parameters
    ----------
    qv2m : numpy.ndarray or float
        QV2M, the water vapor mixing ratio at 2-m height
    ps : numpy.ndarray or float
        The surface pressure, in Pascals
    temp_k : numpy.ndarray or float
        The temperature at 2-m height in degrees Kelvin

    Returns
    -------
    numpy.ndarray or float
        VPD in Pascals
    '''
    temp_c = temp_k - 273.15 # Convert temperature to degrees C
    # Actual (ambient) vapor pressure from the mixing ratio and pressure
    e_actual = (qv2m * ps) / (0.622 + (0.378 * qv2m))
    # Saturation vapor pressure (Magnus-type fit over degrees C)
    e_saturation = 610.7 * np.exp((17.38 * temp_c) / (239 + temp_c))
    return e_saturation - e_actual
| 2.453125 | 2 |
src/scipp/plotting/view2d.py | mlund/scipp | 0 | 12773037 | # SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022 Scipp contributors (https://github.com/scipp)
# @author <NAME>
from .view import PlotView
from ..core import zeros, scalar
import numpy as np
from matplotlib.collections import PathCollection
class PlotView2d(PlotView):
    """
    View object for 2 dimensional plots. Contains a `PlotFigure2d`.

    The difference between `PlotView2d` and `PlotFigure2d` is that
    `PlotView2d` also handles the communications with the `PlotController` that
    are to do with the `PlotProfile` plot displayed below the `PlotFigure2d`.

    In addition, `PlotView2d` provides a dynamic image resampling for large
    input data.
    """
    def __init__(self, figure, formatters):
        """
        Connect to the figure's axis-limit change events so the image can be
        resampled when the user zooms or pans.
        """
        super().__init__(figure=figure, formatters=formatters)
        # Display order of the two dimensions: rows (y) first, columns (x) second
        self._axes = ['y', 'x']
        # Data indices of the scatter markers added by profile picking
        self._marker_index = []
        self._marks_scatter = None
        # Lock used by _lims_changed to coalesce the paired x/y limit events
        self._lim_updated = False
        self.current_lims = {}
        self.global_lims = {}
        for event in ['xlim_changed', 'ylim_changed']:
            self.figure.ax.callbacks.connect(event, self._lims_changed)

    def _make_data(self, new_values, mask_info):
        """
        Build the dict of image values, extent, and (optionally) a combined
        mask array that the figure consumes. Also records per-dimension
        coordinate limits: global limits are captured only on first sight of
        a dimension; current limits track the latest data.
        """
        dims = new_values.dims
        for dim in dims:
            # Coordinate range spanned by this slice of data
            xmin = new_values.coords[dim].values[0]
            xmax = new_values.coords[dim].values[-1]
            if dim not in self.global_lims:
                self.global_lims[dim] = [xmin, xmax]
            self.current_lims[dim] = [xmin, xmax]
        values = new_values.values
        slice_values = {
            "values":
            values,
            # Matplotlib imshow extent: [left, right, bottom, top], hence
            # x-limits (dims[1]) before y-limits (dims[0])
            "extent":
            np.array([self.current_lims[dims[1]],
                      self.current_lims[dims[0]]]).flatten()
        }
        mask_info = next(iter(mask_info.values()))
        if len(mask_info) > 0:
            # Use automatic broadcasting in Scipp variables: sum all enabled
            # masks into a single int32 array (non-zero where any mask is set)
            msk = zeros(sizes=new_values.sizes, dtype='int32', unit=None)
            for m, val in mask_info.items():
                if val:
                    msk += new_values.masks[m].astype(msk.dtype)
            slice_values["masks"] = msk.values
        return slice_values

    def _lims_changed(self, *args):
        """
        Update limits and resample the image according to new viewport.

        When we use the zoom tool, the event listener on the displayed axes
        limits detects two separate events: one for the x axis and another for
        the y axis. We use a small locking mechanism here to trigger only a
        single resampling update by waiting for the y limits to also change.
        """
        for dim in self.dims:
            if dim not in self.global_lims:
                return
        # First of the event pair: arm the lock and wait for the second
        if not self._lim_updated:
            self._lim_updated = True
            return
        self._lim_updated = False

        # Make sure we don't overrun the original array bounds
        dimx = self.dims[1]
        dimy = self.dims[0]
        xylims = {
            dimx: np.clip(self.figure.ax.get_xlim(), *sorted(self.global_lims[dimx])),
            dimy: np.clip(self.figure.ax.get_ylim(), *sorted(self.global_lims[dimy]))
        }
        # Relative change of the viewport compared to the current limits
        dx = np.abs(self.current_lims[dimx][1] - self.current_lims[dimx][0])
        dy = np.abs(self.current_lims[dimy][1] - self.current_lims[dimy][0])
        diffx = np.abs(self.current_lims[dimx] - xylims[dimx]) / dx
        diffy = np.abs(self.current_lims[dimy] - xylims[dimy]) / dy
        diff = diffx.sum() + diffy.sum()

        # Only resample image if the changes in axes limits are large enough to
        # avoid too many updates while panning.
        if diff > 0.1:
            self.current_lims.update(xylims)
            self.controller.update_data(slices=self.current_limits)

        # If we are zooming, rescale to data?
        # TODO This will trigger a second call to view.refresh and thus
        # self.update_data. Why does the controller have to call refresh
        # to make view.rescale_to_data take effect?
        if self.figure.rescale_on_zoom():
            self.controller.rescale_to_data()

    @property
    def current_limits(self):
        """
        Current viewport limits as scipp scalars, keyed by dimension.
        """
        limits = {}
        for dim in self.dims:
            low, high = self.current_lims[dim]
            unit = self._data.coords[dim].unit
            limits[dim] = [scalar(low, unit=unit), scalar(high, unit=unit)]
        return limits

    @property
    def global_limits(self):
        """
        Full-data (outermost) limits as scipp scalars, keyed by dimension.
        """
        limits = {}
        for dim in self.dims:
            low, high = self.global_lims[dim]
            unit = self._data.coords[dim].unit
            limits[dim] = [scalar(low, unit=unit), scalar(high, unit=unit)]
        return limits

    def _update_axes(self):
        """
        Update the current and global axes limits, before updating the figure
        axes.
        """
        super()._update_axes()
        self.clear_marks()

    def clear_marks(self):
        """
        Reset all scatter markers when a profile is reset.
        """
        if self._marks_scatter is not None:
            self._marks_scatter = None
            # NOTE(review): assigning to ax.collections is rejected by newer
            # matplotlib releases (the attribute became read-only) -- confirm
            # the pinned matplotlib version supports this
            self.figure.ax.collections = []
            self.figure.draw()

    def _do_handle_pick(self, event):
        """
        Return the index of the picked scatter point, None if something else
        is picked.
        """
        if isinstance(event.artist, PathCollection):
            return self._marker_index[event.ind[0]]

    def _do_mark(self, index, color, x, y):
        """
        Add a marker (colored scatter point).
        """
        if self._marks_scatter is None:
            self._marks_scatter = self.figure.ax.scatter([x], [y],
                                                         c=[color],
                                                         edgecolors="w",
                                                         picker=5,
                                                         zorder=10)
        else:
            # Append the new point/color to the existing scatter collection
            new_offsets = np.concatenate((self._marks_scatter.get_offsets(), [[x, y]]),
                                         axis=0)
            new_colors = np.concatenate((self._marks_scatter.get_facecolors(), [color]),
                                        axis=0)
            self._marks_scatter.set_offsets(new_offsets)
            self._marks_scatter.set_facecolors(new_colors)
        self._marker_index.append(index)
        self.figure.draw()

    def remove_mark(self, index):
        """
        Remove a marker (scatter point).
        """
        i = self._marker_index.index(index)
        xy = np.delete(self._marks_scatter.get_offsets(), i, axis=0)
        c = np.delete(self._marks_scatter.get_facecolors(), i, axis=0)
        self._marks_scatter.set_offsets(xy)
        self._marks_scatter.set_facecolors(c)
        self._marker_index.remove(index)
        self.figure.draw()
| 2.484375 | 2 |
osxnotification.py | ianribas/extrator-an | 0 | 12773038 | import Foundation
import objc
import AppKit
import sys
NSUserNotificationCenter = objc.lookUpClass('NSUserNotificationCenter')
NSUserNotification = objc.lookUpClass('NSUserNotification')
def notify(title, subtitle, info_text, delay=0, sound=False, userInfo=None):
    """Schedule a macOS user notification via NSUserNotificationCenter.

    Parameters
    ----------
    title, subtitle, info_text : str
        Text fields of the notification.
    delay : int or float
        Seconds from now at which the notification is delivered.
    sound : bool
        If True, play the default notification sound.
    userInfo : dict or None
        Optional payload attached to the notification.
    """
    notification = NSUserNotification.alloc().init()
    notification.setTitle_(title)
    notification.setSubtitle_(subtitle)
    notification.setInformativeText_(info_text)
    # BUG FIX: `userInfo={}` was a mutable default argument; use a None
    # sentinel and substitute a fresh dict per call
    notification.setUserInfo_(userInfo if userInfo is not None else {})
    notification.setHasActionButton_(True)
    notification.setActionButtonTitle_('Action!!')
    notification.setHasReplyButton_(True)
    if sound:
        notification.setSoundName_("NSUserNotificationDefaultSoundName")
    # Delivery date is `delay` seconds after "now"
    notification.setDeliveryDate_(Foundation.NSDate.dateWithTimeInterval_sinceDate_(delay, Foundation.NSDate.date()))
    NSUserNotificationCenter.defaultUserNotificationCenter().scheduleNotification_(notification)
| 2.3125 | 2 |
tasks/lgutil/pre_processing_wv/create_bible_ldg.py | HimmelStein/lg-flask | 0 | 12773039 | <gh_stars>0
# -*- coding: utf-8 -*-
import sys
import time
import random
import os
import codecs
import re
import csv
from pprint import pprint
from nltk.parse.stanford import StanfordDependencyParser
from subprocess import Popen, PIPE
# Machine-specific absolute paths: this module assumes a developer checkout
# at these locations (NOTE(review): consider moving these into configuration)
util_loc = "/Users/tdong/git/lg-flask/tasks/lgutil"
model_en_path = os.path.join(util_loc,"edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz")
# English parser instance is created eagerly at import time
dep_parser_en = StanfordDependencyParser(model_path = model_en_path)
parzu_loc = '/Users/tdong/components/ParZu/parzu'
try:
    # Package-relative imports when loaded as part of the package
    from . import config_wv
    from . import pre_util
except:
    # Fallback for running this file directly as a script
    sys.path.append("/Users/tdong/git/lg-flask/tasks/lgutil/pre_processing_wv/config_wv")
    import config_wv
    import pre_util
sys.path.append('/usr/local/bin/')
pprint(sys.path)
def get_all_ldgs_of_sentences(csvfname, encoding='utf-8', parser='hit', format='conll'):
    """Parse every sentence in a CSV file and persist its dependency graph.

    Resumes from an existing pickle (keyed by the first three CSV columns)
    so already-parsed sentences are skipped. When the remote parser signals
    rate limiting ('need_long_wait'), sleeps and retries recursively.

    Parameters
    ----------
    csvfname : str
        Path to the CSV file; column 3 holds the sentence text.
    encoding : str
        CSV text encoding.
    parser : str
        One of 'hit', 'stanford', 'parzu' (see get_ldg).
    format : str
        Output format passed to the parser (e.g. 'conll').
    """
    picklefname = csvfname[:-3]+'pickle'
    dic = pre_util.load_pickle(picklefname)
    # Set membership is O(1); the original list scan was O(n) per row
    existing_keys = set(dic.keys())
    for rowLst in pre_util.get_all_rows_from_csv(csvfname, encoding=encoding):
        key = rowLst[0]+"_"+"_"+rowLst[1]+"_"+rowLst[2]
        snt = rowLst[3]
        if len(snt.strip())==0:
            continue
        if key not in existing_keys:
            print("get ldg for ", key, snt)
            ldg = get_ldg(snt, parser=parser, format=format)
            if ldg == 'need_long_wait':
                print('beginning long waiting...')
                time.sleep(25)
                print('try again...')
                get_all_ldgs_of_sentences(csvfname, encoding=encoding, parser=parser, format=format)
                # BUG FIX: previously execution fell through here and stored
                # the literal 'need_long_wait' sentinel as this key's parse;
                # the recursive call above already processed all rows
                return
            dic[key] = ldg
            pre_util.dump_pickle(picklefname, dic)
def get_ldg(snt, parser='none', format = 'conll'):
    """Dispatch a sentence to the language-specific dependency parser.

    Returns the parser output, or None when `parser` is not one of
    'hit' (Chinese), 'stanford' (English), or 'parzu' (German).
    """
    handlers = {
        'hit': lambda: get_ch_ldg(snt, ch_parser=parser, format=format),
        'stanford': lambda: get_en_ldg(snt, format=format),
        'parzu': lambda: get_de_ldg(snt, de_parser=parser, format=format),
    }
    handler = handlers.get(parser)
    if handler is not None:
        return handler()
def get_de_ldg(snt, de_parser='parzu', format='conll'):
    """
    Parse a German sentence with the external ParZu binary.

    :param snt: the sentence to parse
    :param de_parser: only 'parzu' is supported; anything else prints a
        message and returns None
    :param format: currently unused; ParZu is invoked with its defaults
    :return: the ParZu output decoded as UTF-8, or None for an unknown parser
    """
    if de_parser == 'parzu':
        # Pipe the sentence into ParZu: `echo <snt> | parzu`
        echo_process = Popen(['echo', snt], stdout=PIPE)
        parzu_process = Popen([parzu_loc],
                              stdin=echo_process.stdout, stdout=PIPE)
        # Close our copy of echo's stdout so ParZu sees EOF / SIGPIPE properly
        echo_process.stdout.close()
        out, err = parzu_process.communicate()
        pprint(out.decode("utf-8"))
        return out.decode("utf-8")
    else:
        print('de parser is not recognized')
def get_en_ldg(snt, en_parser='stanford', format='conll'):
    """Parse an English sentence with the module-level Stanford parser.

    Prints and returns the CoNLL-10 serialization when `format` is 'conll';
    returns None for any other format. `en_parser` is accepted for interface
    symmetry but not consulted.
    """
    graph = next(dep_parser_en.raw_parse(snt))
    if format == 'conll':
        conll = graph.to_conll(10)
        print(conll)
        return conll
def get_ch_ldg(snt, ch_parser='hit', format='conll'):
    """
    Parse a Chinese sentence via the HIT LTP cloud REST API.

    :param snt: one sentence
    :param ch_parser: only 'hit' is supported; otherwise returns None
    :param format: output format requested from the API
    :return: the API response body, or the sentinel string 'need_long_wait'
        when the request fails (treated upstream as rate limiting)
    """
    if ch_parser == 'hit':
        # Crude client-side rate limiting before each request
        time.sleep(2)
        import urllib.request
        import urllib.parse
        from urllib.parse import quote
        url_get_base = "http://api.ltp-cloud.com/analysis/?"
        # Rotate between two API keys to spread the request quota
        api_key = random.choice(["<KEY>",
                                 "<KEY>"])
        pattern = "dp"
        url = url_get_base + 'api_key=' + api_key + '&text=' + quote(
            snt) + '&format=' + format + '&pattern=' + pattern
        try:
            req = urllib.request.Request(url)
            with urllib.request.urlopen(req, timeout=30) as response:
                content = ''
                for line in response:
                    line = line.decode('utf8') # Decoding the binary data to text.
                    content = content + line
        except:
            # NOTE(review): the bare except treats every failure (DNS error,
            # timeout, HTTP error) as rate limiting -- confirm that is intended
            return 'need_long_wait'
        return content
def create_ldg_from_text(fname, encoding = 'ISO-8859-1'):
    """Run the ParZu parser over a text file, writing CoNLL output alongside it.

    The output file has the same base name with an `.ldg` extension. The
    `encoding` parameter is currently unused (ParZu reads the raw bytes).
    """
    from subprocess import run  # local import; module only imports Popen/PIPE
    fullname, ext = os.path.splitext(fname)
    output = fullname + '.ldg'
    print(output)
    # SECURITY/robustness fix: the previous os.system call built a shell
    # command by string concatenation, which breaks on spaces and shell
    # metacharacters in `fname`. Use explicit file redirection instead.
    with open(fname, 'rb') as infile, open(output, 'wb') as outfile:
        run([parzu_loc, '-l', '-o', 'conll'], stdin=infile, stdout=outfile)
def create_ldg_pickle_from_ldg_text(fnamecsv, fnameldgtxt, encoding = 'ISO-8859-1'):
    """
    Pair each non-empty CSV sentence with its pre-computed dependency graph.

    :param fnamecsv: csv file, each line is a snt
    :param fnameldgtxt: text file of graphs separated by blank lines, in the
        same order as the non-empty CSV sentences
    :param encoding: CSV text encoding
    """
    picklefname = fnamecsv[:-3] + 'pickle'
    dic = pre_util.load_pickle(picklefname)
    with open(fnameldgtxt, 'r') as fh:
        # Graphs are blank-line separated blocks
        ldgLst = fh.read().split('\n\n')
    graph_idx = 0
    for rowLst in pre_util.get_all_rows_from_csv(fnamecsv, encoding=encoding):
        snt = rowLst[3]
        if len(snt.strip()) == 0:
            continue
        key = rowLst[0] + "_" + "_" + rowLst[1] + "_" + rowLst[2]
        dic[key] = ldgLst[graph_idx]
        graph_idx += 1
    pre_util.dump_pickle(picklefname, dic)
    print(len(dic.keys()))
if __name__ == '__main__':
    # Earlier one-off runs, kept for reference:
    #get_all_ldgs_of_sentences(config_wv.EnBibleCsv, parser='stanford')
    #get_all_ldgs_of_sentences(config_wv.ChBibleCsv, parser='hit', encoding='gb2312')
    #get_all_ldgs_of_sentences(config_wv.DeBibleCsv, parser='parzu', encoding='ISO-8859-1')
    #dic = pre_util.load_pickle(config_wv.ChBiblePickle)
    #pprint(dic)
    #create_ldg_from_text(config_wv.DeBibleLst)
    # Current entry point: pair German CSV sentences with pre-parsed graphs
    create_ldg_pickle_from_ldg_text(config_wv.DeBibleCsv, config_wv.DeBibleLDG)
tests/test_model_components/test_mc_tab_resnet.py | 5uperpalo/pytorch-widedeep | 0 | 12773040 | <gh_stars>0
import string
import numpy as np
import torch
import pytest
from pytorch_widedeep.models import TabResnet
# Synthetic fixture: 10 rows, 10 columns named 'a'..'j'; the first 5 are
# categorical (values in [0, 5)), the last 5 continuous (uniform [0, 1))
colnames = list(string.ascii_lowercase)[:10]
embed_cols = [np.random.choice(np.arange(5), 10) for _ in range(5)]
cont_cols = [np.random.rand(10) for _ in range(5)]
X_tab = torch.from_numpy(np.vstack(embed_cols + cont_cols).transpose())
X_tab_emb = X_tab[:, :5]   # categorical columns only
X_tab_cont = X_tab[:, 5:]  # continuous columns only
###############################################################################
# Embeddings and no continuous_cols
###############################################################################
# One (name, n_categories, embedding_dim) triple per categorical column
embed_input = [(u, i, j) for u, i, j in zip(colnames[:5], [5] * 5, [16] * 5)]
# Model using only the 5 categorical columns; last block dim (16) sets output
model1 = TabResnet(
    blocks_dims=[32, 16],
    blocks_dropout=0.5,
    mlp_dropout=0.5,
    column_idx={k: v for v, k in enumerate(colnames[:5])},
    embed_input=embed_input,
)
def test_tab_resnet_embed():
    """Embeddings-only forward pass yields 10 rows of 16 features."""
    output = model1(X_tab_emb)
    assert (output.size(0), output.size(1)) == (10, 16)
###############################################################################
# Continous Cols and Embeddings
###############################################################################
continuous_cols = colnames[-5:]
# Model combining categorical embeddings with the 5 continuous columns;
# last block dim (8) determines the output feature size
model2 = TabResnet(
    blocks_dims=[32, 16, 8],
    blocks_dropout=0.5,
    mlp_dropout=0.5,
    column_idx={k: v for v, k in enumerate(colnames)},
    embed_input=embed_input,
    continuous_cols=continuous_cols,
)
def test_tab_resnet_dense():
    """Embeddings + continuous forward pass yields 10 rows of 8 features."""
    output = model2(X_tab)
    assert (output.size(0), output.size(1)) == (10, 8)
###############################################################################
# Continous Cols concatenated with Embeddings or with the output of the
# dense_resnet
###############################################################################
continuous_cols = colnames[-5:]  # NOTE: redundant re-assignment (same value as above)
# NOTE: function name has a typo ("contat" for "concat"); kept because pytest
# collects tests by name
@pytest.mark.parametrize("concat_cont_first", [True, False])
def test_cont_contat(concat_cont_first):
    """Output width equals the model's reported output_dim for both
    continuous-concatenation orders."""
    model3 = TabResnet(
        blocks_dims=[32, 16, 8],
        blocks_dropout=0.5,
        mlp_dropout=0.5,
        column_idx={k: v for v, k in enumerate(colnames)},
        embed_input=embed_input,
        continuous_cols=continuous_cols,
        concat_cont_first=concat_cont_first,
    )
    output = model3(X_tab)
    assert (output.size(0), output.size(1)) == (10, model3.output_dim)
###############################################################################
# Test full set up
###############################################################################
@pytest.mark.parametrize("concat_cont_first", [True, False])
def test_full_setup(concat_cont_first):
    """Exercise every constructor option and check the output width plus the
    MLP's inferred input dimension for both concatenation orders."""
    model4 = TabResnet(
        embed_input=embed_input,
        column_idx={k: v for v, k in enumerate(colnames)},
        blocks_dims=[32, 16, 8],
        blocks_dropout=0.5,
        mlp_dropout=0.5,
        mlp_hidden_dims=[32, 16],
        mlp_batchnorm=True,
        mlp_batchnorm_last=False,
        embed_dropout=0.1,
        continuous_cols=continuous_cols,
        batchnorm_cont=True,
        concat_cont_first=concat_cont_first,
    )
    output = model4(X_tab)
    # The first dense layer's weight matrix reveals the MLP's input width
    mlp_params = list(model4.tab_resnet_mlp.mlp.dense_layer_0.parameters())
    actual_mlp_inp_dim = mlp_params[2].size(1)
    expected_mlp_inp_dim = model4.blocks_dims[-1]
    if not concat_cont_first:
        # Continuous columns are appended after the resnet blocks
        expected_mlp_inp_dim += len(continuous_cols)
    assert output.size(0) == 10
    assert output.size(1) == model4.output_dim
    assert expected_mlp_inp_dim == actual_mlp_inp_dim
| 1.867188 | 2 |
test/persistence_test.py | aquanauts/tellus | 0 | 12773041 | import json
import pathlib
import datetime as dt
from io import StringIO
import jsonpickle
import pytest
from tellus import __version__
from tellus.configuration import TELLUS_GO, TELLUS_INTERNAL
from tellus.persistence import (
PickleFilePersistor,
TELLUS_SAVE_DIR,
PersistenceSetupException,
PERSISTOR_HEADER_KEY,
PERSISTOR_HEADER_VERSION,
PERSISTOR_HEADER_SAVED,
PERSISTOR_HEADER_SAVE_COUNTS,
)
from tellus.persistable import ZAuditInfo, Persistable
# pylint: disable=unused-argument
# pylint gets cranky about the fake file system fixtures
from tellus.tell import Tell
TELLUS_PICKLE_SAVE_FILE_NO_HEADER = """{"_alias": "tellus", "_categories": {"py/reduce": [{"py/type": "sortedcontainers.sortedset.SortedSet"}, {"py/tuple": [{"py/set": ["tellus-go"]}, null]}]}, "_go_url": "/tellus", "_tags": {"py/reduce": [{"py/type": "sortedcontainers.sortedset.SortedSet"}, {"py/tuple": [{"py/set": []}, null]}]}, "py/object": "tellus.tells.Tell"}
{"_alias": "vfh", "_categories": {"py/reduce": [{"py/type": "sortedcontainers.sortedset.SortedSet"}, {"py/tuple": [{"py/set": ["tellus-go"]}, null]}]}, "_go_url": "http://veryfinehat.com", "_tags": {"py/reduce": [{"py/type": "sortedcontainers.sortedset.SortedSet"}, {"py/tuple": [{"py/set": []}, null]}]}, "py/object": "tellus.tells.Tell"}
{"_alias": "a", "_categories": {"py/reduce": [{"py/type": "sortedcontainers.sortedset.SortedSet"}, {"py/tuple": [{"py/set": ["tellus-go"]}, null]}]}, "_go_url": "BORKED", "_tags": {"py/reduce": [{"py/type": "sortedcontainers.sortedset.SortedSet"}, {"py/tuple": [{"py/set": []}, null]}]}, "py/object": "tellus.tells.Tell"}"""
TELLUS_PICKLE_SAVE_FILE_WITH_EARLIER_HEADER = f"""{{"persistor": "PickleFilePersistor","tellus-version": "{__version__}"}}
{TELLUS_PICKLE_SAVE_FILE_NO_HEADER}
"""
PERSISTENCE_DATA = ""  # NOTE(review): unused placeholder -- confirm and remove
PERSISTENCE_TEST_USER = "persistenceTest"  # user recorded in audit info by test persistables
def create_current_save_file():
    """
    :return: a string that looks like a current, valid save file, whatever we
        are using
    """
    persistor = PickleFilePersistor(
        persist_root=None, save_file_name="current_pickle", testing=True,
    )
    tells = [
        Tell("tellus", go_url="/tellus", category=TELLUS_GO),
        Tell("vfh", go_url="http://veryfinehat.com", category=TELLUS_GO),
        Tell("quislet", category=TELLUS_INTERNAL),
    ]
    buffer = StringIO()
    persistor.write_save_file(buffer, tells)
    return buffer.getvalue()
class MiniPersistable(Persistable):
    """Minimal Persistable carrying an arbitrary `values` payload for tests."""

    def __init__(self, values=None):
        super().__init__(PERSISTENCE_TEST_USER)
        self.values = values

    def to_json_pickle(self):
        """Serialize this object (including audit info) via jsonpickle."""
        return jsonpickle.encode(self)

    def __eq__(self, other):
        # Full state comparison; non-instances compare unequal (False, not
        # NotImplemented, matching the original semantics)
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class MiniHolder(object):
    """Collects decoded persistables via its `load_me` callback."""

    def __init__(self):
        self.persistables = []

    def load_me(self, load_string):
        """Decode one pickled line and append the resulting object."""
        decoded = jsonpickle.decode(load_string)
        self.persistables.append(decoded)
def test_persist_no_root(fs):
    """Without a persist root, the save file lands under CWD/TELLUS_SAVE_DIR."""
    persistor = PickleFilePersistor(persist_root=None, save_file_name="test")
    expected = pathlib.Path.cwd() / TELLUS_SAVE_DIR / "test"
    assert persistor.persistence_file() == expected
def test_persistence_file_name(fs):
    """Creating a Persistor without a save-file name must fail."""
    # Idiomatic pytest replacement for the old try/except + pytest.fail pattern
    with pytest.raises(PersistenceSetupException):
        PickleFilePersistor(persist_root=None, save_file_name=None)
def test_verify_save_file(fs):
    """Header parsing for current, earlier-header, and headerless save files."""
    persistor = PickleFilePersistor(
        persist_root=None, save_file_name="current_pickle", testing=True,
    )
    written = StringIO()
    persistor.write_save_file(written, [MiniPersistable()])

    # Current format: full four-field header; file pointer advances past it
    header = PickleFilePersistor.verify_save_file(StringIO(written.getvalue()))
    assert len(header) == 4
    assert header[PERSISTOR_HEADER_KEY] == "PickleFilePersistor"
    assert header[PERSISTOR_HEADER_VERSION] == f"{__version__}"
    assert header[PERSISTOR_HEADER_SAVED] is not None
    assert header[PERSISTOR_HEADER_SAVE_COUNTS] == 1
    assert written.tell() > 0

    # Earlier (two-field) header: parsed as a plain dict, pointer advanced
    fs.create_file(
        "earlier_pickle", contents=TELLUS_PICKLE_SAVE_FILE_WITH_EARLIER_HEADER
    )
    with open("earlier_pickle", "r") as fh:
        header = PickleFilePersistor.verify_save_file(fh)
        assert header == {
            "persistor": "PickleFilePersistor",
            "tellus-version": f"{__version__}",
        }
        assert fh.tell() > 0

    # No header at all: returns None and rewinds so data can still be read
    fs.create_file("old_pickle", contents=TELLUS_PICKLE_SAVE_FILE_NO_HEADER)
    with open("old_pickle", "r") as fh:
        header = PickleFilePersistor.verify_save_file(fh)
        assert header is None
        assert fh.tell() == 0, "This case should reset the file pointer."
def test_pickle_persistence(fs):
    """A persisted item round-trips through save and load unchanged."""
    persistor = PickleFilePersistor(
        persist_root="/test-location", save_file_name="test-file.txt"
    )
    original = MiniPersistable({"test-key": "test-value"})
    persistor.persist([original])

    holder = MiniHolder()
    persistor.load(holder.load_me)

    assert len(holder.persistables) == 1, "Should have loaded our one test value"
    restored = holder.persistables[0]
    assert (
        original.to_json_pickle() == restored.to_json_pickle()
    ), "Our loaded value should equal our existing persistable."
def test_pickle_persistence_cycle(fs):
    """Round-trip several persistables and compare them pairwise.

    Fixes the original loop, whose target variable ``loaded`` shadowed the
    list being iterated — confusing, and a latent bug for any later use.
    """
    persistor = PickleFilePersistor(
        persist_root="/test-location", save_file_name="test-file.txt"
    )
    items_to_persist = [
        MiniPersistable({"test-key1": "test-value1"}),
        MiniPersistable({"test-key2": "test-value2"}),
        MiniPersistable({"test-key3": "test-value3"}),
    ]
    persistor.persist(items_to_persist)
    hodor = MiniHolder()
    persistor.load(hodor.load_me)
    loaded = hodor.persistables
    assert len(loaded) == len(items_to_persist)
    for persisted, restored in zip(items_to_persist, loaded):
        assert (
            persisted == restored
        ), f"{persisted.to_json_pickle()} should equal {restored.to_json_pickle()}"
def test_audit_info():
    """A new ZAuditInfo is stamped with the creator and a roughly-now time."""
    user = "rjbrande"
    now = dt.datetime.now(dt.timezone.utc)
    # Use the `user` variable instead of repeating the literal, so the
    # assertions below can never drift from the constructed value.
    audit_info = ZAuditInfo(user)
    assert audit_info.created_by == user
    assert dt.datetime.fromisoformat(audit_info.created) == audit_info.created_datetime
    assert audit_info.last_modified_by == user
    assert (
        audit_info.last_modified == audit_info.created
    ), "Initially, last_modified should == created"
    assert (
        audit_info.created_datetime - now
    ).seconds < 1, "created_datetime should roughly be 'now'"
def test_audit_to_simple_dict_and_json():
    """Simple-dict and JSON renderings must agree with the audit object."""
    audit_info = ZAuditInfo("saturngirl")
    audit_info.modified("cosmicboy")
    rendered = audit_info.to_simple_data_dict()
    expected = {
        "created_by": "saturngirl",
        "last_modified_by": "cosmicboy",
        "created": audit_info.created,
        "last_modified": audit_info.last_modified,
    }
    for key, value in expected.items():
        assert rendered[key] == value
    assert audit_info.to_simple_json() == json.dumps(rendered)
| 1.882813 | 2 |
src/test/data/pa3/AdditionalTestCase/UnitTest/IfExpr_else.py | Leo-Enrique-Wu/chocopy_compiler_code_generation | 0 | 12773042 | print("then" if (8 > 9) else "else") | 2.8125 | 3 |
rabi_amp.py | rikyborg/presto-measure | 0 | 12773043 | # -*- coding: utf-8 -*-
"""
Measure Rabi oscillation by changing the amplitude of the control pulse.
The control pulse has a sin^2 envelope, while the readout pulse is square.
"""
import ast
import math
import os
import time
import h5py
import numpy as np
from numpy.typing import ArrayLike
from mla_server import set_dc_bias
from presto.hardware import AdcFSample, AdcMode, DacFSample, DacMode
from presto import pulsed
from presto.utils import get_sourcecode, sin2
class RabiAmp:
    """Amplitude-Rabi experiment runner for the Presto platform.

    Sweeps the amplitude of a sin^2-envelope control pulse while keeping a
    square readout pulse fixed, stores the demodulated readout traces, and
    saves/loads/analyzes the result via HDF5.
    """

    def __init__(
        self,
        readout_freq: float,
        control_freq: float,
        readout_port: int,
        control_port: int,
        readout_amp: float,
        readout_duration: float,
        control_duration: float,
        sample_duration: float,
        sample_port: int,
        control_amp_arr: ArrayLike,
        wait_delay: float,
        readout_sample_delay: float,
        num_averages: int,
        jpa_params=None,
    ):
        """Store the experiment parameters; no hardware is touched here."""
        self.readout_freq = readout_freq
        self.control_freq = control_freq
        self.readout_port = readout_port
        self.control_port = control_port
        self.readout_amp = readout_amp
        self.readout_duration = readout_duration
        self.control_duration = control_duration
        self.sample_duration = sample_duration
        self.sample_port = sample_port
        self.control_amp_arr = control_amp_arr
        self.wait_delay = wait_delay
        self.readout_sample_delay = readout_sample_delay
        self.num_averages = num_averages
        self.rabi_n = len(control_amp_arr)
        self.t_arr = None  # replaced by run
        self.store_arr = None  # replaced by run
        self.jpa_params = jpa_params

    def run(
        self,
        presto_address,
        presto_port=None,
        ext_ref_clk=False,
    ):
        """Execute the amplitude sweep on the instrument and save the data.

        Returns the path of the HDF5 file written by :meth:`save`.
        """
        # Instantiate interface class
        with pulsed.Pulsed(
            address=presto_address,
            port=presto_port,
            ext_ref_clk=ext_ref_clk,
            adc_mode=AdcMode.Mixed,
            adc_fsample=AdcFSample.G2,
            dac_mode=[DacMode.Mixed42, DacMode.Mixed02, DacMode.Mixed02, DacMode.Mixed02],
            dac_fsample=[DacFSample.G10, DacFSample.G6, DacFSample.G6, DacFSample.G6],
        ) as pls:
            pls.hardware.set_adc_attenuation(self.sample_port, 0.0)
            pls.hardware.set_dac_current(self.readout_port, 32_000)
            pls.hardware.set_dac_current(self.control_port, 32_000)
            pls.hardware.set_inv_sinc(self.readout_port, 0)
            pls.hardware.set_inv_sinc(self.control_port, 0)
            pls.hardware.configure_mixer(
                freq=self.readout_freq,
                in_ports=self.sample_port,
                out_ports=self.readout_port,
                sync=False,  # sync in next call
            )
            pls.hardware.configure_mixer(
                freq=self.control_freq,
                out_ports=self.control_port,
                sync=True,  # sync here
            )
            if self.jpa_params is not None:
                # Turn on the JPA pump and bias before the sequence runs.
                pls.hardware.set_lmx(self.jpa_params['jpa_pump_freq'], self.jpa_params['jpa_pump_pwr'])
                set_dc_bias(self.jpa_params['jpa_bias_port'], self.jpa_params['jpa_bias'])
                time.sleep(1.0)

            # ************************************
            # *** Setup measurement parameters ***
            # ************************************

            # Setup lookup tables for frequencies
            pls.setup_freq_lut(
                output_ports=self.readout_port,
                group=0,
                frequencies=0.0,
                phases=0.0,
                phases_q=0.0,
            )
            pls.setup_freq_lut(
                output_ports=self.control_port,
                group=0,
                frequencies=0.0,
                phases=0.0,
                phases_q=0.0,
            )

            # Setup lookup tables for amplitudes
            pls.setup_scale_lut(
                output_ports=self.readout_port,
                group=0,
                scales=self.readout_amp,
            )
            pls.setup_scale_lut(
                output_ports=self.control_port,
                group=0,
                scales=self.control_amp_arr,
            )

            # Setup readout and control pulses
            # use setup_long_drive to create a pulse with square envelope
            # setup_long_drive supports smooth rise and fall transitions for the pulse,
            # but we keep it simple here
            readout_pulse = pls.setup_long_drive(
                output_port=self.readout_port,
                group=0,
                duration=self.readout_duration,
                amplitude=1.0,
                amplitude_q=1.0,
                rise_time=0e-9,
                fall_time=0e-9,
            )
            # For the control pulse we create a sine-squared envelope,
            # and use setup_template to use the user-defined envelope
            control_ns = int(round(self.control_duration *
                                   pls.get_fs("dac")))  # number of samples in the control template
            control_envelope = sin2(control_ns)
            control_pulse = pls.setup_template(
                output_port=self.control_port,
                group=0,
                template=control_envelope,
                template_q=control_envelope,
                envelope=True,
            )

            # Setup sampling window
            pls.set_store_ports(self.sample_port)
            pls.set_store_duration(self.sample_duration)

            # ******************************
            # *** Program pulse sequence ***
            # ******************************
            T = 0.0  # s, start at time zero ...
            # Control pulse
            pls.reset_phase(T, self.control_port)
            pls.output_pulse(T, control_pulse)
            # Readout pulse starts right after control pulse
            T += self.control_duration
            pls.reset_phase(T, self.readout_port)
            pls.output_pulse(T, readout_pulse)
            # Sampling window
            pls.store(T + self.readout_sample_delay)
            # Move to next Rabi amplitude
            T += self.readout_duration
            pls.next_scale(T, self.control_port)  # every iteration will have a different amplitude
            # Wait for decay
            T += self.wait_delay

            # **************************
            # *** Run the experiment ***
            # **************************
            # repeat the whole sequence `rabi_n` times
            # then average `num_averages` times
            pls.run(
                period=T,
                repeat_count=self.rabi_n,
                num_averages=self.num_averages,
                print_time=True,
            )
            t_arr, (data_I, data_Q) = pls.get_store_data()

            if self.jpa_params is not None:
                # Turn the JPA pump and bias back off after the measurement.
                pls.hardware.set_lmx(0.0, 0.0)
                set_dc_bias(self.jpa_params['jpa_bias_port'], 0.0)

        self.t_arr = t_arr
        self.store_arr = data_I + 1j * data_Q

        return self.save()

    def save(self, save_filename=None):
        """Write all public attributes plus this script's source to HDF5.

        When no filename is given, a timestamped file is created in the
        ``data`` directory next to the script. Returns the file path.
        """
        # *************************
        # *** Save data to HDF5 ***
        # *************************
        if save_filename is None:
            script_path = os.path.realpath(__file__)  # full path of current script
            current_dir, script_basename = os.path.split(script_path)
            script_filename = os.path.splitext(script_basename)[0]  # name of current script
            timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())  # current date and time
            save_basename = f"{script_filename:s}_{timestamp:s}.h5"  # name of save file
            save_path = os.path.join(current_dir, "data", save_basename)  # full path of save file
        else:
            save_path = os.path.realpath(save_filename)
        source_code = get_sourcecode(__file__)  # save also the sourcecode of the script for future reference
        with h5py.File(save_path, "w") as h5f:
            dt = h5py.string_dtype(encoding='utf-8')
            ds = h5f.create_dataset("source_code", (len(source_code), ), dt)
            for ii, line in enumerate(source_code):
                ds[ii] = line
            for attribute in self.__dict__:
                print(f"{attribute}: {self.__dict__[attribute]}")
                if attribute.startswith("_"):
                    # don't save private attributes
                    continue
                if attribute == "jpa_params":
                    # dicts are not valid HDF5 attributes: store as repr string
                    h5f.attrs[attribute] = str(self.__dict__[attribute])
                elif np.isscalar(self.__dict__[attribute]):
                    h5f.attrs[attribute] = self.__dict__[attribute]
                else:
                    h5f.create_dataset(attribute, data=self.__dict__[attribute])
        print(f"Data saved to: {save_path}")
        return save_path

    @classmethod
    def load(cls, load_filename):
        """Reconstruct a RabiAmp instance from a file written by :meth:`save`.

        Older save files may lack some attributes (ports, jpa_params); those
        fall back to defaults.
        """
        with h5py.File(load_filename, "r") as h5f:
            num_averages = h5f.attrs["num_averages"]
            control_freq = h5f.attrs["control_freq"]
            readout_freq = h5f.attrs["readout_freq"]
            readout_duration = h5f.attrs["readout_duration"]
            control_duration = h5f.attrs["control_duration"]
            readout_amp = h5f.attrs["readout_amp"]
            sample_duration = h5f.attrs["sample_duration"]
            # rabi_n = h5f.attrs["rabi_n"]
            wait_delay = h5f.attrs["wait_delay"]
            readout_sample_delay = h5f.attrs["readout_sample_delay"]
            control_amp_arr = h5f["control_amp_arr"][()]
            t_arr = h5f["t_arr"][()]
            store_arr = h5f["store_arr"][()]
            # source_code = h5f["source_code"][()]

            # these were added later
            try:
                readout_port = h5f.attrs["readout_port"]
            except KeyError:
                readout_port = 0
            try:
                control_port = h5f.attrs["control_port"]
            except KeyError:
                control_port = 0
            try:
                sample_port = h5f.attrs["sample_port"]
            except KeyError:
                sample_port = 0
            try:
                jpa_params = ast.literal_eval(h5f.attrs["jpa_params"])
            except KeyError:
                jpa_params = None

        self = cls(
            readout_freq,
            control_freq,
            readout_port,
            control_port,
            readout_amp,
            readout_duration,
            control_duration,
            sample_duration,
            sample_port,
            control_amp_arr,
            wait_delay,
            readout_sample_delay,
            num_averages,
            jpa_params,
        )
        # NOTE(review): control_amp_arr was already set by __init__ above;
        # this re-assignment looks redundant — confirm before removing.
        self.control_amp_arr = control_amp_arr
        self.t_arr = t_arr
        self.store_arr = store_arr

        return self

    def analyze(self, all_plots=False):
        """Fit the Rabi oscillation vs pulse amplitude and plot the result.

        Requires :attr:`t_arr` and :attr:`store_arr` (set by run/load).
        Prints the fitted tau/pi/pi-2 pulse amplitudes and returns the list
        of matplotlib figures created.
        """
        if self.t_arr is None:
            raise RuntimeError
        if self.store_arr is None:
            raise RuntimeError

        import matplotlib.pyplot as plt
        from presto.utils import rotate_opt

        ret_fig = []
        # Integration window inside the stored trace used for the response.
        t_low = 1500 * 1e-9
        t_high = 2000 * 1e-9
        # t_span = t_high - t_low
        idx_low = np.argmin(np.abs(self.t_arr - t_low))
        idx_high = np.argmin(np.abs(self.t_arr - t_high))
        idx = np.arange(idx_low, idx_high)
        # nr_samples = len(idx)

        if all_plots:
            # Plot raw store data for first iteration as a check
            fig1, ax1 = plt.subplots(2, 1, sharex=True, tight_layout=True)
            ax11, ax12 = ax1
            ax11.axvspan(1e9 * t_low, 1e9 * t_high, facecolor="#dfdfdf")
            ax12.axvspan(1e9 * t_low, 1e9 * t_high, facecolor="#dfdfdf")
            ax11.plot(1e9 * self.t_arr, np.abs(self.store_arr[0, 0, :]))
            ax12.plot(1e9 * self.t_arr, np.angle(self.store_arr[0, 0, :]))
            ax12.set_xlabel("Time [ns]")
            fig1.show()
            ret_fig.append(fig1)

        # Analyze Rabi
        resp_arr = np.mean(self.store_arr[:, 0, idx], axis=-1)
        data = rotate_opt(resp_arr)

        # Fit data
        popt_x, perr_x = _fit_period(self.control_amp_arr, np.real(data))
        period = popt_x[3]
        period_err = perr_x[3]
        # A full oscillation period (tau) corresponds to a 2*pi rotation, so
        # pi and pi/2 pulses are at half and a quarter of the period.
        pi_amp = period / 2
        pi_2_amp = period / 4
        print("Tau pulse amplitude: {} +- {} FS".format(period, period_err))
        print("Pi pulse amplitude: {} +- {} FS".format(pi_amp, period_err / 2))
        print("Pi/2 pulse amplitude: {} +- {} FS".format(pi_2_amp, period_err / 4))

        if all_plots:
            fig2, ax2 = plt.subplots(4, 1, sharex=True, figsize=(6.4, 6.4), tight_layout=True)
            ax21, ax22, ax23, ax24 = ax2
            ax21.plot(self.control_amp_arr, np.abs(data))
            ax22.plot(self.control_amp_arr, np.angle(data))
            ax23.plot(self.control_amp_arr, np.real(data))
            ax23.plot(self.control_amp_arr, _func(self.control_amp_arr, *popt_x), '--')
            ax24.plot(self.control_amp_arr, np.imag(data))
            ax21.set_ylabel("Amplitude [FS]")
            ax22.set_ylabel("Phase [rad]")
            ax23.set_ylabel("I [FS]")
            ax24.set_ylabel("Q [FS]")
            ax2[-1].set_xlabel("Pulse amplitude [FS]")
            fig2.show()
            ret_fig.append(fig2)

        # Pick an SI prefix for the y axis based on the signal magnitude.
        data_max = np.abs(data).max()
        unit = ""
        mult = 1.0
        if data_max < 1e-6:
            unit = "n"
            mult = 1e9
        elif data_max < 1e-3:
            unit = "μ"
            mult = 1e6
        elif data_max < 1e0:
            unit = "m"
            mult = 1e3

        fig3, ax3 = plt.subplots(tight_layout=True)
        ax3.plot(self.control_amp_arr, mult * np.real(data), '.')
        ax3.plot(self.control_amp_arr, mult * _func(self.control_amp_arr, *popt_x), '--')
        ax3.set_ylabel(f"I quadrature [{unit:s}FS]")
        ax3.set_xlabel("Pulse amplitude [FS]")
        fig3.show()
        ret_fig.append(fig3)

        return ret_fig
def _func(t, offset, amplitude, T2, period, phase):
    """Exponentially damped cosine: the Rabi-oscillation fit model."""
    angular_freq = math.tau / period
    decay = np.exp(-t / T2)
    return offset + amplitude * decay * np.cos(angular_freq * t + phase)
def _fit_period(x: list[float], y: list[float]) -> tuple[list[float], list[float]]:
    """Fit the damped-cosine model ``_func`` to the data (x, y).

    Initial guesses: offset/amplitude from the data extrema, T2 as half the
    x span, and the period from the dominant non-DC FFT component.

    Returns:
        (popt, perr): best-fit parameters (offset, amplitude, T2, period,
        phase) and their one-sigma uncertainties from the covariance matrix.
    """
    from scipy.optimize import curve_fit

    pkpk = np.max(y) - np.min(y)
    offset = np.min(y) + pkpk / 2
    amplitude = 0.5 * pkpk
    T2 = 0.5 * (np.max(x) - np.min(x))
    freqs = np.fft.rfftfreq(len(x), x[1] - x[0])
    fft = np.fft.rfft(y)
    # Skip the DC bin (index 0) when picking the dominant frequency.
    frequency = freqs[1 + np.argmax(np.abs(fft[1:]))]
    period = 1 / frequency
    # Clamp the normalized first sample into [-1, 1] before arccos.
    first = min(1.0, max(-1.0, (y[0] - offset) / amplitude))
    phase = np.arccos(first)
    p0 = (
        offset,
        amplitude,
        T2,
        period,
        phase,
    )
    popt, pcov = curve_fit(_func, x, y, p0=p0)
    perr = np.sqrt(np.diag(pcov))
    return popt, perr
if __name__ == "__main__":
    # Experiment configuration. The two qubits share the setup but differ in
    # readout/control frequencies, control port, and JPA operating point.
    WHICH_QUBIT = 2  # 1 (higher resonator) or 2 (lower resonator)
    USE_JPA = False
    WITH_COUPLER = False

    # Presto's IP address or hostname
    # ADDRESS = "172.16.17.32"
    # PORT = 42874
    ADDRESS = "127.0.0.1"
    PORT = 7878
    EXT_REF_CLK = False  # set to True to lock to an external reference clock
    jpa_bias_port = 1

    if WHICH_QUBIT == 1:
        if WITH_COUPLER:
            readout_freq = 6.167_009 * 1e9  # Hz, frequency for resonator readout
            control_freq = 3.556_520 * 1e9  # Hz
        else:
            readout_freq = 6.166_600 * 1e9  # Hz, frequency for resonator readout
            control_freq = 3.557_866 * 1e9  # Hz
        control_port = 3
        jpa_pump_freq = 2 * 6.169e9  # Hz
        jpa_pump_pwr = 11  # lmx units
        jpa_bias = +0.437  # V
    elif WHICH_QUBIT == 2:
        if WITH_COUPLER:
            readout_freq = 6.029_130 * 1e9  # Hz, frequency for resonator readout
            control_freq = 4.093_042 * 1e9  # Hz
        else:
            readout_freq = 6.028_450 * 1e9  # Hz, frequency for resonator readout
            control_freq = 4.093_372 * 1e9  # Hz
        control_port = 4
        jpa_pump_freq = 2 * 6.031e9  # Hz
        jpa_pump_pwr = 9  # lmx units
        jpa_bias = +0.449  # V
    else:
        raise ValueError

    # cavity drive: readout
    readout_amp = 0.4  # FS
    readout_duration = 2e-6  # s, duration of the readout pulse
    readout_port = 1

    # qubit drive: control
    control_duration = 20e-9  # s, duration of the control pulse

    # cavity readout: sample
    sample_duration = 4 * 1e-6  # s, duration of the sampling window
    sample_port = 1

    # Rabi experiment
    num_averages = 1_000
    rabi_n = 128  # number of steps when changing duration of control pulse
    control_amp_arr = np.linspace(0.0, 1.0, rabi_n)  # FS, amplitudes for control pulse
    wait_delay = 200e-6  # s, delay between repetitions to allow the qubit to decay
    readout_sample_delay = 290 * 1e-9  # s, delay between readout pulse and sample window to account for latency

    # JPA pump settings are only passed through when USE_JPA is enabled.
    jpa_params = {
        'jpa_bias': jpa_bias,
        'jpa_bias_port': jpa_bias_port,
        'jpa_pump_freq': jpa_pump_freq,
        'jpa_pump_pwr': jpa_pump_pwr,
    } if USE_JPA else None

    rabi = RabiAmp(
        readout_freq,
        control_freq,
        readout_port,
        control_port,
        readout_amp,
        readout_duration,
        control_duration,
        sample_duration,
        sample_port,
        control_amp_arr,
        wait_delay,
        readout_sample_delay,
        num_averages,
        jpa_params,
    )
    rabi.run(ADDRESS, PORT, EXT_REF_CLK)
| 2.734375 | 3 |
cakechat/utils/env.py | 4R7I5T/cakechat | 1 | 12773044 | <gh_stars>1-10
import os
def _use_gpu_env():
    """Return the GPU id from the USE_GPU environment variable, or None.

    None is returned both when the variable is unset and when it does not
    parse as an integer.
    """
    value = os.environ.get('USE_GPU')
    if value is None:
        return None
    try:
        return int(value)
    except ValueError:
        return None
def is_dev_env():
    """Return True iff the IS_DEV environment variable is a non-zero int."""
    try:
        return bool(int(os.environ['IS_DEV']))
    except (KeyError, ValueError):
        return False
def _init_cuda_env():
    """Make CUDA enumerate GPUs in the same order nvidia-smi reports them."""
    # Set GPU device order the same as in nvidia-smi
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
def init_theano_env(gpu_id=_use_gpu_env(), cnmem=0, float_precision='float32', is_dev=is_dev_env()):
    """
    :param gpu_id: ID of GPU to use, default is None (No GPU support, CPU-only);
    :param cnmem: The value represents the start size (either in MB or the fraction of total GPU memory) of the memory
        pool. Default: 0 (Preallocation of size 0, only cache the allocation)
    :param float_precision: String specifying floating point precision. Can be 'float64', 'float32', or 'float16'
    :param is_dev: Apply just a few graph optimizations and only use Python implementations. Default is False.
        GPU is disabled, CPU only. Drastically speeds up theano graph compilation. Use for development purposes.
    :return:
    """
    # NOTE(review): the gpu_id and is_dev defaults are evaluated once, at
    # import time of this module — changing USE_GPU/IS_DEV afterwards has no
    # effect on calls that rely on the defaults. Confirm this is intended.
    _init_cuda_env()
    theano_flags = 'floatX={}'.format(float_precision)
    if is_dev:
        # Use fast_compile only in dev-env because it doesn't works on GPU with libgpuarray
        theano_flags += ',device=cpu,mode=FAST_COMPILE'
    elif gpu_id is None:
        theano_flags += ',device=cpu'
    else:
        theano_flags += ',device=cuda{},gpuarray.preallocate={:0.2}'.format(gpu_id, float(cnmem))
    # Prepend our flags so that user-supplied THEANO_FLAGS take precedence.
    if 'THEANO_FLAGS' in os.environ:
        os.environ['THEANO_FLAGS'] = theano_flags + ',' + os.environ['THEANO_FLAGS']
    else:
        os.environ['THEANO_FLAGS'] = theano_flags
| 2.40625 | 2 |
ex096.py | igormba/python-exercises | 0 | 12773045 | <reponame>igormba/python-exercises<filename>ex096.py
'''Faça um programa que tenha uma função chamada área(), que receba as dimensões de um terreno retangular (largura e comprimento) e mostre a área do terreno.'''
def linha():
    """Print a 30-character separator line."""
    separador = '=' * 30
    print(separador)
def area(largura=None, comprimento=None):
    """Compute and print the area of a rectangular plot.

    Generalized to accept the dimensions as optional arguments; when they
    are omitted the function falls back to the module-level ``l`` and ``c``
    values read from user input, so the original ``area()`` call still works.
    Also fixes the missing space before "é" in the printed message.
    """
    if largura is None:
        largura = l
    if comprimento is None:
        comprimento = c
    resultado = largura * comprimento
    print('=' * 50)
    print(f'A área total do terreno {largura}X{comprimento} é {resultado} metros.')
# Console UI: read the plot dimensions and report the area.
linha()
print(' CONTROLE DE TERRENOS')
linha()
l = float(input('Largura (M): '))
c = float(input('Comprimento (M): '))
area()
| 4.21875 | 4 |
e_ggp/evolving_gp.py | anonym-4989/e-ggp | 1 | 12773046 | <reponame>anonym-4989/e-ggp
# Copyright 2021 (c) anonymous-4989 - All Rights Reserved
#
from gpytorch import means
from gpytorch.models import ExactGP
from gpytorch.distributions import MultivariateNormal
class eGGP(ExactGP):
    """Evolving-graph Gaussian process: an exact GP whose kernel consumes
    adjacency lists alongside the node features.

    The supplied ``kernel`` must accept ``adj_list_1`` (and optionally
    ``adj_list_2``) keyword arguments — presumably the graph structure for
    the inputs; confirm against the kernel implementation.
    """

    def __init__(self, train_x, train_y, likelihood, kernel):
        super(eGGP, self).__init__(train_x, train_y, likelihood)
        self.mean_module = means.ConstantMean()  # constant prior mean
        self.covar_module = kernel
        # self.k_b = k_b
        # self.k_c = k_c

    def forward(self, x, adj_list_1, adj_list_2=None):
        """Return the MultivariateNormal over ``x`` given the graph inputs."""
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x, adj_list_1=adj_list_1, adj_list_2=adj_list_2)
        return MultivariateNormal(mean_x, covar_x)
| 2.46875 | 2 |
src/application.py | djFooFoo/aws-lambda-add-article | 0 | 12773047 | <gh_stars>0
import os
from images import image_service
from rss import feed_reader
from scraping import scraper
def handler(event, context):
    """AWS Lambda entry point: scrape the latest article and archive it.

    Reads the feed URL from the ARTICLE_RSS_FEED_URL environment variable,
    scrapes the newest article, stores its metadata in the database and its
    image in S3, and returns the new image id in the response body.
    The ``event``/``context`` Lambda arguments are unused.
    """
    latest_article_url = feed_reader.get_latest_article_url(os.environ['ARTICLE_RSS_FEED_URL'])
    article = scraper.get_article(latest_article_url)
    image_id = image_service.store_article_metadata_in_database(article)
    image_service.store_image_in_s3(article, image_id)
    return {"statusCode": 200, "body": image_id}
if __name__ == "__main__":
    # Local smoke test: invoke the Lambda handler without an AWS event.
    print(handler(None, None))
| 2.53125 | 3 |
tests/test_forms.py | RobSpectre/Mobile-App-Distribution-with-SMS | 2 | 12773048 | <filename>tests/test_forms.py
import unittest
from werkzeug.datastructures import MultiDict
from .context import forms
class TestSmsInviteForm(unittest.TestCase):
    """Tests for SMSInviteForm phone-number normalization and validation."""

    def test_e164(self):
        """Common US formats are normalized to +1 E.164."""
        test_formats = ['(555) 555 5555', '555.555.5555', '(555)555.5555',
                        '5555555555']
        for test_format in test_formats:
            test_form = forms.SMSInviteForm(MultiDict([('phone_number',
                                                        test_format)]))
            self.assertTrue(test_form.e164 == "+15555555555",
                            "e164 formatting did work for %s, instead got: %s" %
                            (test_format, test_form.e164))

    def test_e164Negative(self):
        """A number already in E.164 is left unchanged."""
        test_form = forms.SMSInviteForm(MultiDict([('phone_number',
                                                    '+15555555555')]))
        self.assertTrue(test_form.e164 == '+15555555555', 'Form reformatted ' \
                        'a number already in e.164: %s' % test_form.e164)

    def test_characterValidation(self):
        """Formats containing letters or underscores fail validation."""
        test_formats = ['(asd)555-5555', 'asdf555_555-5555', 'asd.555.5555']
        for test_format in test_formats:
            test_form = forms.SMSInviteForm(MultiDict([('phone_number',
                                                        test_format)]))
            test_form.validate()
            self.assertTrue(test_form.errors, "SMSInviteForm validated the " \
                            "following invalid format: %s" % test_format)

    def test_characterValidationNegative(self):
        """A valid E.164 number produces no validation errors."""
        test_form = forms.SMSInviteForm(MultiDict([('phone_number',
                                                    '+15555555555')]))
        self.assertFalse(test_form.errors, "SMSInviteForm invalidated the " \
                         "following valid format: %s" % '+15555555555')
| 3.09375 | 3 |
main.py | Gesporidgers/Logarifmer | 0 | 12773049 | import math
print('Вас приветствует логарифмер.')
print('Выберите тип (1 - Двоичный, 2 - Стандартный десятичный)')
a = int(input())
b = float(input('Введите число: '))
if a == 1 :
print(math.log(b, 2))
elif a == 2 :
print(math.log(b))
else:
print('Ошибка!')
| 3.765625 | 4 |
tests/test_install.py | TarzanZhao/alpa | 0 | 12773050 | """Some basic tests to test installation."""
import os
import unittest
from flax import linen as nn
from flax.training.train_state import TrainState
import jax
import jax.numpy as jnp
import numpy as np
import optax
import ray
from alpa import (init, parallelize, grad, ShardParallel,
automatic_layer_construction, PipeshardParallel)
from alpa.device_mesh import get_global_cluster
from alpa.testing import assert_allclose
def create_train_state_and_batch(batch_size, hidden_size):
    """Build a 6-layer dense Flax model, an SGD TrainState, and a random batch.

    Returns (state, batch) where batch holds "x" and "y" arrays of shape
    (batch_size, hidden_size).
    """

    class Model(nn.Module):

        @nn.compact
        def __call__(self, x):
            x = nn.Dense(hidden_size, use_bias=True)(x)
            x = nn.Dense(hidden_size, use_bias=True)(x)
            x = nn.Dense(hidden_size, use_bias=True)(x)
            x = nn.Dense(hidden_size, use_bias=True)(x)
            x = nn.Dense(hidden_size, use_bias=True)(x)
            x = nn.Dense(hidden_size, use_bias=True)(x)
            return x

    # Fixed seed makes the batch and the initialization deterministic.
    rngkey = jax.random.PRNGKey(0)
    batch = {
        "x":
            jax.random.normal(rngkey, (batch_size, hidden_size),
                              dtype=jnp.float32),
        "y":
            jax.random.normal(rngkey, (batch_size, hidden_size),
                              dtype=jnp.float32)
    }

    # Init model and optimizer
    model = Model()
    rngkey = jax.random.PRNGKey(0)
    params = model.init(rngkey, batch["x"])
    tx = optax.sgd(learning_rate=1e-3)
    state = TrainState.create(apply_fn=model.apply, params=params, tx=tx)

    return state, batch
class InstallationTest(unittest.TestCase):
    """Smoke tests verifying that Alpa's parallelization modes work.

    Each test trains one step serially and one step in parallel and checks
    that the resulting parameters agree.
    """

    def setUp(self):
        # Use the platform allocator so JAX does not preallocate GPU memory.
        os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    def test_1_shard_parallel(self):
        """Shard-parallel training matches serial training."""
        state, batch = create_train_state_and_batch(256, 256)

        def train_step(state, batch):

            def loss_func(params):
                out = state.apply_fn(params, batch['x'])
                return jnp.mean((out - batch['y'])**2)

            grads = grad(loss_func)(state.params)
            new_state = state.apply_gradients(grads=grads)
            return new_state

        # Serial execution
        expected_state = train_step(state, batch)

        # Parallel execution
        p_train_step = parallelize(train_step,
                                   method=ShardParallel(num_micro_batches=2))
        actual_state = p_train_step(state, batch)

        # Check results
        assert_allclose(expected_state.params, actual_state.params)

    def test_2_pipeline_parallel(self):
        """Pipeline-shard-parallel training on a Ray cluster matches serial."""
        init(cluster="ray")

        # At most 2 layers; fewer if the cluster has fewer devices.
        layer_num = min(get_global_cluster().num_devices, 2)
        state, batch = create_train_state_and_batch(256, 256)

        def train_step(state, batch):

            @automatic_layer_construction(layer_num=layer_num)
            def loss_func(params):
                out = state.apply_fn(params, batch['x'])
                return jnp.mean((out - batch['y'])**2)

            grads = grad(loss_func)(state.params)
            new_state = state.apply_gradients(grads=grads)
            return new_state

        # Serial execution
        expected_state = train_step(state, batch)

        # Parallel execution
        p_train_step = parallelize(
            train_step, method=PipeshardParallel(num_micro_batches=2))
        actual_state = p_train_step(state, batch)

        # Check results
        assert_allclose(expected_state.params, actual_state.params)
def suite():
    """Build the installation test suite (shard-parallel runs first).

    The local variable is renamed so it no longer shadows this function's
    own name.
    """
    test_suite = unittest.TestSuite()
    test_suite.addTest(InstallationTest("test_1_shard_parallel"))
    test_suite.addTest(InstallationTest("test_2_pipeline_parallel"))
    return test_suite
if __name__ == "__main__":
    # Run the ordered suite with a plain text runner.
    runner = unittest.TextTestRunner()
    runner.run(suite())
| 2.140625 | 2 |