# <<Instagram hashtag crawler>>
# leejihee950430@gmail.com
#
# Copyright (c) 2018, Jihee Lee
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <ORGANIZATION> nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
class instagramCrawlHashtagPipeline(object):
def __init__(self):
self.count = 1
def open_spider(self, spider):
file_name = 'hashtag_' + str(spider.search_tag) + '.json'
self.file = open(file_name, 'w', encoding='utf-8')
self.file.write('{\n')
def process_item(self, item, spider):
line = '\t"tag' + str(self.count) + '":' + json.dumps(dict(item), ensure_ascii=False) + ",\n"
self.file.write(line)
self.count += 1
return item
def close_spider(self, spider):
        print('###### Crawling finished! ######')
        # Rewind three characters to overwrite the trailing ",\n" left after the last item.
        self.file.seek(self.file.tell() - 3)
        self.file.write("\n}")  # write the closing brace to the file
self.file.close()
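# --- Illustrative usage sketch (not part of the original project) -----------------
# Exercises the pipeline directly with a stand-in spider object so the JSON framing
# written by open_spider/process_item/close_spider can be inspected. The spider's
# `search_tag` attribute and the item dicts below are assumptions for illustration.
if __name__ == '__main__':
    class _FakeSpider(object):
        search_tag = 'seoul'

    spider = _FakeSpider()
    pipeline = instagramCrawlHashtagPipeline()
    pipeline.open_spider(spider)
    pipeline.process_item({'caption': 'first post', 'likes': 3}, spider)
    pipeline.process_item({'caption': 'second post', 'likes': 7}, spider)
    pipeline.close_spider(spider)  # writes hashtag_seoul.json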
|
import math
from sklearn.cluster import Birch
from .generic_clustering import GenericClustering
__author__ = "Konstantin Bogdanoski"
__copyright__ = "Copyright 2020, BlanketClusterer"
__credits__ = ["Konstantin Bogdanoski", "Prof. PhD. Dimitar Trajanov", "MSc. Kostadin Mishev"]
__license__ = "MIT"
__version__ = "2.0.0"
__maintainer__ = "Konstantin Bogdanoski"
__email__ = "konstantin.b@live.com"
__status__ = "Production"
class BirchClustering(GenericClustering):
def clusterize_cluster(self, this_cluster):
"""
Function used to clusterize a cluster
:param this_cluster:
Cluster needed to be clusterized
:return:
Dictionary of new clusters
"""
if (len(this_cluster)) <= self.items_in_cluster:
return this_cluster
if (len(this_cluster)) > (self.items_in_cluster * self.items_in_cluster):
birch = Birch(n_clusters=self.n_clusters, branching_factor=self.items_in_cluster)
else:
birch = Birch(n_clusters=(math.ceil(len(this_cluster) / self.items_in_cluster)),
branching_factor=self.items_in_cluster)
        # fit_predict both fits the model and returns cluster labels in one pass
        labels = birch.fit_predict(this_cluster)
this_clusters = {}
n = 0
for item in labels:
if item in this_clusters:
this_clusters[item].append(this_cluster[n])
else:
this_clusters[item] = [this_cluster[n]]
n += 1
return this_clusters
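# --- Illustrative sketch (toy data; BirchClustering's constructor is not assumed) ---
# clusterize_cluster() splits an over-full cluster with BIRCH and regroups the
# original vectors by predicted label; the snippet below reproduces that regrouping
# step on random toy vectors so the label -> members mapping is easy to inspect.
if __name__ == "__main__":
    import numpy as np

    toy_vectors = np.random.RandomState(0).rand(30, 4).tolist()
    items_in_cluster = 5
    birch = Birch(n_clusters=math.ceil(len(toy_vectors) / items_in_cluster),
                  branching_factor=items_in_cluster)
    labels = birch.fit_predict(toy_vectors)

    grouped = {}
    for vector, label in zip(toy_vectors, labels):
        grouped.setdefault(label, []).append(vector)
    print({label: len(members) for label, members in grouped.items()})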
|
class Knight:
def __init__(self, name):
self.__name = " ".join(["Sir", name])
self.__bruises = 0
self.__curr_pos = 0
self.__tactical_card = -1
self.__accept_heavy_blows = True
self.__points = 0
self.__fail_start = False
self.__fail_start_count = 0
self.__won_rps = False
self.__strike_modifier = 0
self.__unhorsed = False
def get_name(self):
return self.__name
def move(self, spaces):
self.__curr_pos += spaces
def get_current_position(self):
return self.__curr_pos
def add_bruise(self):
self.__bruises += 1
def get_bruises(self):
return self.__bruises
def set_tactical_card(self, card):
self.__tactical_card = card
def get_tactical_card(self):
return self.__tactical_card
def set_accept_heavy_blows(self, accept):
self.__accept_heavy_blows = accept
def get_accept_heavy_blows(self):
return self.__accept_heavy_blows
def add_fail_start(self):
self.__fail_start_count += 1
def determine_failed_to_start(self):
if self.get_current_position() < 7:
self.__fail_start = True
def get_failed_to_start(self):
return self.__fail_start
def add_points(self, points):
self.__points += points
def get_points(self):
return self.__points
def get_disqualified(self):
return self.__fail_start_count >= 2
def set_won_rps(self, won_rps):
self.__won_rps = won_rps
def get_won_rps(self):
return self.__won_rps
def set_strike_modifier(self, modifier):
self.__strike_modifier = modifier
def get_strike_modifier(self):
return self.__strike_modifier
def set_unhorsed(self, unhorsed):
self.__unhorsed = unhorsed
def get_unhorsed(self):
return self.__unhorsed
def reset_for_round(self):
self.__curr_pos = 0
self.__fail_start = False
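# --- Illustrative sketch (movement numbers are made up) -----------------------------
# A single simulated pass using the Knight state container above; the 7-space
# "failed to start" threshold comes from determine_failed_to_start() itself.
if __name__ == "__main__":
    knight = Knight("Robin")
    knight.move(5)                      # moved fewer than 7 spaces...
    knight.determine_failed_to_start()  # ...so the failed-start flag is set
    knight.add_fail_start()
    knight.add_points(2)
    print(knight.get_name(), knight.get_current_position(),
          knight.get_failed_to_start(), knight.get_disqualified())
    knight.reset_for_round()            # position and failed-start flag cleared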
|
#B
I,PA=input().split()
if (int(I)+int(PA))%2==0:
print('even')
else:
print('odd')
|
from django.contrib.gis.db import models
class EventType(models.Model):
name = models.CharField(max_length=120)
description = models.TextField(blank=True)
class Event(models.Model):
name = models.CharField(max_length=120)
location = models.PointField(blank=True, null=True)
eventType = models.ForeignKey(EventType, on_delete=models.SET_DEFAULT, default = 1)
date = models.IntegerField()
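# --- Illustrative sketch (not part of the original app; values are placeholders) ----
# Creating an Event with a GeoDjango point. Point takes (x, y) = (longitude,
# latitude); the integer passed as `date` is an assumption about how that field
# is meant to be used.
def create_example_event():
    from django.contrib.gis.geos import Point

    event_type, _ = EventType.objects.get_or_create(
        name="Concert", defaults={"description": "Live music"})
    return Event.objects.create(
        name="Riverside Festival",
        location=Point(13.4050, 52.5200),
        eventType=event_type,
        date=2021,
    )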
|
import click
import pika
from pika.exceptions import AMQPConnectionError
from rabbitmq_util import RABBITMQ_DEFAULT_HOST
from rabbitmq_util import RABBITMQ_DEFAULT_PORT
@click.command()
@click.option('--mode', type=str,
prompt='"send" or "recv" from server',
help='Specify if send-ing or recv-ing messages from server.')
@click.option('--host', type=str,
default=RABBITMQ_DEFAULT_HOST,
prompt='Enter hostname',
help='Server address for rabbitmq.')
@click.option('--port', type=int,
default=RABBITMQ_DEFAULT_PORT,
prompt='Server port number to use.',
help='The port to use for host server.')
@click.option('-m', '--message',
default="Hello, RabbitMQ!",
prompt='Enter a message to send (ignore if recv)',
help='Message to send to the queue.')
def send_or_recv(mode, host, port, message):
if mode == 'send':
send(host, port, message)
elif mode == 'recv':
recv(host, port)
else:
click.echo('Invalid mode "%s", exiting.' % mode)
def send(host, port, message):
connection = pika.BlockingConnection(pika.ConnectionParameters(
host=str(host),
port=port))
channel = connection.channel()
channel.queue_declare(queue='hello')
channel.basic_publish(exchange='',
routing_key='hello',
body=message)
print(" [x] Sent '%s'" % message)
connection.close()
def recv(host, port):
try:
connection = pika.BlockingConnection(pika.ConnectionParameters(
host=str(host),
port=port))
channel = connection.channel()
channel.queue_declare(queue='hello')
        def callback(ch, method, properties, body):
print(" [x] Received %r" % body)
try:
channel.basic_consume(callback, queue='hello', no_ack=True)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
except KeyboardInterrupt:
exit('KeyboardInterrupt detected! Exiting.')
    except AMQPConnectionError as e:
exit(str(e))
if __name__ == '__main__':
send_or_recv()
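# --- Illustrative sketch (assumes only that click is installed) ---------------------
# Exercising the command in-process with click's CliRunner; the bogus mode hits the
# error branch, so no RabbitMQ broker is needed. Sending for real would require a
# broker reachable at the given host/port.
def _demo_cli():
    from click.testing import CliRunner

    runner = CliRunner()
    result = runner.invoke(
        send_or_recv,
        ['--mode', 'bogus', '--host', RABBITMQ_DEFAULT_HOST,
         '--port', str(RABBITMQ_DEFAULT_PORT), '-m', 'Hello, RabbitMQ!'])
    print(result.exit_code, result.output.strip())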
|
import unittest
from katas.kyu_7.the_most_amicable_of_numbers import amicable_numbers
class AmicableNumbersTestCase(unittest.TestCase):
def test_true(self):
self.assertTrue(amicable_numbers(220, 284))
def test_true_2(self):
self.assertTrue(amicable_numbers(1184, 1210))
def test_true_3(self):
self.assertTrue(amicable_numbers(10744, 10856))
def test_true_4(self):
self.assertTrue(amicable_numbers(122265, 139815))
def test_true_5(self):
self.assertTrue(amicable_numbers(220, 284))
def test_true_6(self):
self.assertTrue(amicable_numbers(220, 284))
def test_false(self):
self.assertFalse(amicable_numbers(220, 280))
def test_false_2(self):
self.assertFalse(amicable_numbers(220221, 282224))
def test_false_3(self):
self.assertFalse(amicable_numbers(299920, 9284))
def test_false_4(self):
self.assertFalse(amicable_numbers(999220, 2849))
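# --- Illustrative sketch (not the kata's actual solution) ---------------------------
# A minimal amicable_numbers() that these tests could exercise, assuming the usual
# definition: the sum of proper divisors of each number equals the other number.
def _sum_proper_divisors(n):
    if n <= 1:
        return 0
    total, d = 1, 2
    while d * d <= n:
        if n % d == 0:
            total += d
            if d != n // d:
                total += n // d
        d += 1
    return total


def amicable_numbers_sketch(num1, num2):
    return (num1 != num2
            and _sum_proper_divisors(num1) == num2
            and _sum_proper_divisors(num2) == num1)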
|
import numpy as np
import subprocess
import argparse
import h5py
import tempfile
import os
def fetch_tomtom_args():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--modisco_h5py", required=True, type=str, help="path to the output .h5py file generated by the run_modisco.py script")
parser.add_argument("-o", "--output_prefix", required=True, type=str, help="Path and name of the TSV file to store the tomtom output")
parser.add_argument("-d", "--meme_motif_db", required=True, type=str, help="path to motif database")
parser.add_argument("-n", "--top_n_matches", type=int, default=3, help="Max number of matches to return from TomTom")
parser.add_argument("-tt", "--tomtom_exec", type=str, default='tomtom', help="Command to use to execute tomtom")
parser.add_argument("-th", "--trim_threshold", type=float, default=0.3, help="Trim threshold for trimming long motif, trim to those with at least prob trim_threshold on both ends")
parser.add_argument("-tm", "--trim_min_length", type=int, default=3, help="Minimum acceptable length of motif after trimming")
args = parser.parse_args()
return args
def write_meme_file(ppm, bg, fname):
f = open(fname, 'w')
f.write('MEME version 4\n\n')
f.write('ALPHABET= ACGT\n\n')
f.write('strands: + -\n\n')
f.write('Background letter frequencies (from unknown source):\n')
f.write('A %.3f C %.3f G %.3f T %.3f\n\n' % tuple(list(bg)))
f.write('MOTIF 1 TEMP\n\n')
f.write('letter-probability matrix: alength= 4 w= %d nsites= 1 E= 0e+0\n' % ppm.shape[0])
for s in ppm:
f.write('%.5f %.5f %.5f %.5f\n' % tuple(s))
f.close()
def fetch_tomtom_matches(ppm, cwm, background=[0.25, 0.25, 0.25, 0.25], tomtom_exec_path='tomtom', motifs_db='HOCOMOCOv11_core_HUMAN_mono_meme_format.meme', n=5, trim_threshold=0.3, trim_min_length=3):
"""Fetches top matches from a motifs database using TomTom.
Args:
        ppm: position probability matrix, numpy array of shape (N, 4)
        cwm: contribution weight matrix of the same shape; its per-position scores
            decide where the ppm is trimmed
        background: list with ACGT background probabilities
        tomtom_exec_path: path to the TomTom executable
        motifs_db: path to a motif database in MEME format
        n: number of top matches to return, ordered by p-value
        trim_threshold: the ppm is trimmed from the left up to the first position whose
            score is >= trim_threshold * max score, and similarly from the right
Returns:
list: a list of up to n results returned by tomtom, each entry is a
dictionary with keys 'Target ID', 'p-value', 'E-value', 'q-value'
"""
_, fname = tempfile.mkstemp()
score = np.sum(np.abs(cwm), axis=1)
trim_thresh = np.max(score) * trim_threshold # Cut off anything less than 30% of max score
pass_inds = np.where(score >= trim_thresh)[0]
trimmed = ppm[np.min(pass_inds): np.max(pass_inds) + 1]
    # trimming can leave an empty matrix if no base passes the threshold
    if len(trimmed) == 0:
        return []
# trim and prepare meme file
write_meme_file(trimmed, background, fname)
# run tomtom
cmd = '%s -no-ssc -oc . -verbosity 1 -text -min-overlap 5 -mi 1 -dist pearson -evalue -thresh 10.0 %s %s' % (tomtom_exec_path, fname, motifs_db)
#print(cmd)
out = subprocess.check_output(cmd, shell=True)
# prepare output
    # tomtom's text output comes back as bytes; decode it and split into rows/columns
    dat = [x.split('\t') for x in out.decode().split('\n')]
schema = dat[0]
# meme v4 vs v5:
if 'Target ID' in schema:
tget_idx = schema.index('Target ID')
else:
tget_idx = schema.index('Target_ID')
    pval_idx, eval_idx, qval_idx = schema.index('p-value'), schema.index('E-value'), schema.index('q-value')
r = []
for t in dat[1:min(1+n, len(dat)-1)]:
if t[0]=='':
break
mtf = {}
mtf['Target_ID'] = t[tget_idx]
mtf['p-value'] = float(t[pval_idx])
mtf['E-value'] = float(t[eval_idx])
mtf['q-value'] = float(t[qval_idx])
r.append(mtf)
    os.remove(fname)  # clean up the temporary MEME file
return r
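# --- Illustrative sketch (placeholder paths; requires the tomtom binary) ------------
# Calling fetch_tomtom_matches() on a toy 8 bp motif. This only runs end-to-end if
# `tomtom` and a MEME-format motif database are actually available; the database
# filename below is just the function's default placeholder.
def _demo_fetch_tomtom_matches():
    rng = np.random.RandomState(0)
    toy_ppm = rng.dirichlet([1, 1, 1, 1], size=8)  # (8, 4) position probabilities
    toy_cwm = toy_ppm - 0.25                       # toy contribution scores
    return fetch_tomtom_matches(
        toy_ppm, toy_cwm,
        tomtom_exec_path='tomtom',
        motifs_db='HOCOMOCOv11_core_HUMAN_mono_meme_format.meme',
        n=3)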
def main():
args = fetch_tomtom_args()
modisco_results = h5py.File(args.modisco_h5py, 'r')
# get pfms
ppms = []
cwms = []
seqlet_tally = []
names = []
for metacluster_name in modisco_results['metacluster_idx_to_submetacluster_results']:
metacluster = modisco_results['metacluster_idx_to_submetacluster_results'][metacluster_name]
all_pattern_names = [x.decode("utf-8") for x in list(metacluster["seqlets_to_patterns_result"]["patterns"]["all_pattern_names"][:])]
for pattern_name in all_pattern_names:
ppm = np.array(metacluster['seqlets_to_patterns_result']['patterns'][pattern_name]['sequence']['fwd'])
num_seqlets = len(metacluster['seqlets_to_patterns_result']['patterns'][pattern_name]['seqlets_and_alnmts']['seqlets'])
cwm = np.array(metacluster['seqlets_to_patterns_result']['patterns'][pattern_name]["task0_contrib_scores"]['fwd'])
ppms.append(ppm)
seqlet_tally.append(num_seqlets)
cwms.append(cwm)
names.append(metacluster_name + '.' + pattern_name)
modisco_results.close()
res = []
for i,x in enumerate(ppms):
res.append(fetch_tomtom_matches(x, cwms[i], tomtom_exec_path=args.tomtom_exec, motifs_db=args.meme_motif_db,
n=args.top_n_matches, trim_threshold=args.trim_threshold, trim_min_length=args.trim_min_length))
# write output. Skipping those patterns which disappear after trimming or have no matches
with open(args.output_prefix, 'w') as f:
# write header
f.write("Pattern")
f.write("\tNum_Seqlets")
for i in range(args.top_n_matches):
f.write("\tMatch_{}\tq-value".format(i+1))
f.write("\n")
assert len(res) == len(names)
for i,r in enumerate(res):
f.write(names[i])
f.write("\t{}".format(seqlet_tally[i]))
for match in r:
f.write("\t{}\t{}".format(match['Target_ID'], match['q-value']))
# when fewer than n matches are found
if len(r) != args.top_n_matches:
f.write("\t\t"*(args.top_n_matches-len(r)))
f.write("\n")
if __name__=="__main__":
main()
|
# Generated by Django 3.1.6 on 2021-02-11 12:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('spareparts', '0003_auto_20210208_1217'),
]
operations = [
migrations.AlterField(
model_name='local_comparison_sparepart',
name='part_number',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comparison_sparepart', to='spareparts.sparepart', unique=True),
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: cylisery@outlook.com
# depth first search
# return True if value is found, else False
def dfs(node, val):
stack = [node]
visited = []
while len(stack) != 0:
item = stack.pop()
print "%s -> " % item.val
if item.val == val:
return True
visited.append(item)
for subitem in item.connections:
if subitem not in visited and subitem not in stack:
stack.append(subitem)
return False
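# --- Illustrative sketch (the node type below is an assumption) ---------------------
# dfs() only relies on each node exposing `.val` and `.connections`; a minimal node
# class and a tiny graph are enough to exercise it.
class _Node(object):
    def __init__(self, val):
        self.val = val
        self.connections = []


if __name__ == "__main__":
    a, b, c = _Node("a"), _Node("b"), _Node("c")
    a.connections = [b, c]
    b.connections = [a, c]
    print(dfs(a, "c"))  # True, printing nodes in depth-first order along the way
    print(dfs(a, "z"))  # False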
|
import pickle
import os
from datetime import datetime
import getopt
import sys
def get_pickle_file_content(full_path_pickle_file):
pickle_file = open(full_path_pickle_file,'rb')
pickle_list = pickle.load(pickle_file, encoding='latin1')
pickle_file.close()
return pickle_list
def get_all_pickle_filenames(pickle_file_dir):
files = os.listdir(pickle_file_dir)
tar_files = list()
for f in files:
if f.endswith(".pickle"):
tar_files.append(f)
return tar_files
def save_vocab_size(full_path_vocab_file, vocab_size):
file = open(full_path_vocab_file,'w+')
file.write(str(vocab_size))
file.close()
def save_sequence_length(full_path_seq_file, biggest):
file = open(full_path_seq_file,'w+')
file.write(str(biggest))
file.close()
def build_vocab_dict_from_set(vocab_set):
vocab_dict = dict()
c = 1
for w in vocab_set:
vocab_dict[w] = c
c += 1
return vocab_dict
def save_vocab(full_path_vocabfile, unique_vocab):
pickle_file = open(full_path_vocabfile,'wb+')
pickle.dump(unique_vocab, pickle_file)
pickle_file.close()
def parseArgs():
short_opts = 'hp:'
long_opts = ['pickle-dir=']
config = dict()
config['pickle_dir'] = '/tmp/save_dir'
try:
args, rest = getopt.getopt(sys.argv[1:], short_opts, long_opts)
except getopt.GetoptError as msg:
print(msg)
print(f'Call with argument -h to see help')
exit()
for option_key, option_value in args:
        if option_key in ('-p', '--pickle-dir'):
            print('found p')
            # drop the first character of the value (e.g. a leading '=' when invoked as -p=/path)
            config['pickle_dir'] = option_value[1:]
        elif option_key == '-h':
            print('<optional> -p or --pickle-dir The directory with disassemblies, etc. Default: /tmp/save_dir')
return config
def main():
start=datetime.now()
config = parseArgs()
bag_styled_file_dir = config['pickle_dir']
full_path_vocab_file = "/tmp/vocab_size.txt"
full_path_seq_file = "/tmp/sequence_length.txt"
full_path_vocabfile = "/tmp/vocab.pickle"
unique_vocab = set()
print(f'Read out all tokenized pickle files in >{bag_styled_file_dir}<')
all_files = get_all_pickle_filenames(bag_styled_file_dir)
if len(all_files) == 0:
print(f'Error: No tokenized files in dir >{bag_styled_file_dir}<')
exit()
counter = 0
biggest = 0
longest_disas = 30000
shortest_disas = 50
len_all_files = len(all_files)
len_all_files_counter = 1
for file in all_files:
content = get_pickle_file_content(bag_styled_file_dir + '/' + file)
print(f'Building vocab from file >{file}< nr >{len_all_files_counter}/{len_all_files}<', end='\r')
len_all_files_counter += 1
for disas,ret_type in content:
#print(f'len disas >{len(disas)}<')
### we filter out some
#if (len(disas) <= longest_disas) and ( len(disas) >= shortest_disas):
if (len(disas) >= shortest_disas):
for disas_item in disas.split():
unique_vocab.add(disas_item)
if len(disas) > biggest:
biggest = len(disas)
#break
stop = datetime.now()
#vocab_dict = build_vocab_dict_from_set(unique_vocab)
print(f'Run took:{stop-start} Hour:Min:Sec')
print(f'We save Vocabulary in file >{full_path_vocab_file}<')
print(f'Vocab size is >{len(unique_vocab)}<')
print(f'Biggest sequence length is >{biggest}<')
save_vocab(full_path_vocabfile, unique_vocab)
save_vocab_size(full_path_vocab_file, len(unique_vocab))
save_sequence_length(full_path_seq_file, biggest)
#print(unique_vocab)
if __name__ == "__main__":
main()
|
# Generated by Django 3.0.6 on 2020-06-04 13:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('musicRun', '0004_auto_20200604_1401'),
]
operations = [
migrations.AlterField(
model_name='song',
name='artists',
field=models.CharField(max_length=64),
),
migrations.AlterField(
model_name='song',
name='bpm',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='song',
name='danceability',
field=models.FloatField(),
),
migrations.AlterField(
model_name='song',
name='energy',
field=models.FloatField(),
),
migrations.AlterField(
model_name='song',
name='name',
field=models.CharField(max_length=64),
),
migrations.AlterField(
model_name='song',
name='valence',
field=models.FloatField(),
),
]
|
# -*- coding: utf-8 -*-
from sys import hexversion
import random
from .context import sortedcontainers
from sortedcontainers import SortedListWithKey
from nose.tools import raises
if hexversion < 0x03000000:
from itertools import izip as zip
range = xrange
def modulo(val):
return val % 10
def test_init():
slt = SortedListWithKey(key=modulo)
slt._check()
slt = SortedListWithKey(load=10000, key=modulo)
assert slt._load == 10000
assert slt._twice == 20000
assert slt._half == 5000
slt._check()
slt = SortedListWithKey(range(10000), key=modulo)
assert all(tup[0] == tup[1] for tup in zip(slt, sorted(range(10000), key=modulo)))
slt.clear()
assert slt._len == 0
assert slt._maxes == []
assert slt._lists == []
slt._check()
def test_key():
slt = SortedListWithKey(range(10000), key=lambda val: val % 10)
slt._check()
values = sorted(range(10000), key=lambda val: (val % 10, val))
assert slt == values
assert all(val in slt for val in range(10000))
def test_key2():
class Incomparable:
pass
a = Incomparable()
b = Incomparable()
slt = SortedListWithKey(key=lambda val: 1)
slt.add(a)
slt.add(b)
assert slt == [a, b]
def test_add():
random.seed(0)
slt = SortedListWithKey(key=modulo)
for val in range(1000):
slt.add(val)
slt._check()
slt = SortedListWithKey(key=modulo)
for val in range(1000, 0, -1):
slt.add(val)
slt._check()
slt = SortedListWithKey(key=modulo)
for val in range(1000):
slt.add(random.random())
slt._check()
def test_update():
slt = SortedListWithKey(key=modulo)
slt.update(range(1000))
assert all(tup[0] == tup[1] for tup in zip(slt, sorted(range(1000), key=modulo)))
assert len(slt) == 1000
slt._check()
slt.update(range(10000))
assert len(slt) == 11000
slt._check()
def test_contains():
slt = SortedListWithKey(key=modulo, load=7)
assert 0 not in slt
slt.update(range(100))
for val in range(100):
assert val in slt
assert 100 not in slt
slt._check()
slt = SortedListWithKey(range(100), key=modulo, load=4)
assert all(val not in slt for val in range(100, 200))
def test_discard():
slt = SortedListWithKey(key=modulo)
assert slt.discard(0) == None
assert len(slt) == 0
slt._check()
slt = SortedListWithKey([1, 2, 2, 2, 3, 3, 5], load=4, key=modulo)
slt.discard(6)
slt._check()
slt.discard(4)
slt._check()
slt.discard(2)
slt._check()
slt.discard(11)
slt.discard(12)
slt.discard(13)
slt.discard(15)
assert all(tup[0] == tup[1] for tup in zip(slt, [1, 2, 2, 3, 3, 5]))
def test_remove():
slt = SortedListWithKey(key=modulo)
assert slt.discard(0) == None
assert len(slt) == 0
slt._check()
slt = SortedListWithKey([1, 2, 2, 2, 3, 3, 5], load=4, key=modulo)
slt.remove(2)
slt._check()
assert all(tup[0] == tup[1] for tup in zip(slt, [1, 2, 2, 3, 3, 5]))
@raises(ValueError)
def test_remove_valueerror1():
slt = SortedListWithKey(key=modulo)
slt.remove(0)
@raises(ValueError)
def test_remove_valueerror2():
slt = SortedListWithKey(range(100), load=10, key=modulo)
slt.remove(100)
@raises(ValueError)
def test_remove_valueerror3():
slt = SortedListWithKey([1, 2, 2, 2, 3, 3, 5], key=modulo)
slt.remove(4)
@raises(ValueError)
def test_remove_valueerror4():
slt = SortedListWithKey([1, 1, 1, 2, 2, 2], key=modulo)
slt.remove(13)
@raises(ValueError)
def test_remove_valueerror5():
slt = SortedListWithKey([1, 1, 1, 2, 2, 2], key=modulo)
slt.remove(12)
def test_delete():
slt = SortedListWithKey(range(20), load=4, key=modulo)
slt._check()
for val in range(20):
slt.remove(val)
slt._check()
assert len(slt) == 0
assert slt._maxes == []
assert slt._lists == []
def test_getitem():
random.seed(0)
slt = SortedListWithKey(load=17, key=modulo)
slt.append(5)
slt._build_index()
slt._check()
slt.clear()
lst = list(random.random() for rpt in range(100))
slt.update(lst)
lst.sort(key=modulo)
assert all(slt[idx] == lst[idx] for idx in range(100))
assert all(slt[idx - 99] == lst[idx - 99] for idx in range(100))
def test_getitem_slice():
random.seed(0)
slt = SortedListWithKey(load=17, key=modulo)
lst = list()
for rpt in range(100):
val = random.random()
slt.add(val)
lst.append(val)
lst.sort(key=modulo)
assert all(slt[start:] == lst[start:]
for start in [-75, -25, 0, 25, 75])
assert all(slt[:stop] == lst[:stop]
for stop in [-75, -25, 0, 25, 75])
assert all(slt[::step] == lst[::step]
for step in [-5, -1, 1, 5])
assert all(slt[start:stop] == lst[start:stop]
for start in [-75, -25, 0, 25, 75]
for stop in [-75, -25, 0, 25, 75])
assert all(slt[:stop:step] == lst[:stop:step]
for stop in [-75, -25, 0, 25, 75]
for step in [-5, -1, 1, 5])
assert all(slt[start::step] == lst[start::step]
for start in [-75, -25, 0, 25, 75]
for step in [-5, -1, 1, 5])
assert all(slt[start:stop:step] == lst[start:stop:step]
for start in [-75, -25, 0, 25, 75]
for stop in [-75, -25, 0, 25, 75]
for step in [-5, -1, 1, 5])
def test_getitem_slice_big():
slt = SortedListWithKey(range(4), key=modulo)
lst = sorted(range(4), key=modulo)
itr = ((start, stop, step)
for start in [-6, -4, -2, 0, 2, 4, 6]
for stop in [-6, -4, -2, 0, 2, 4, 6]
for step in [-3, -2, -1, 1, 2, 3])
for start, stop, step in itr:
assert slt[start:stop:step] == lst[start:stop:step]
@raises(ValueError)
def test_getitem_slicezero():
slt = SortedListWithKey(range(100), load=17, key=modulo)
slt[::0]
@raises(IndexError)
def test_getitem_indexerror1():
slt = SortedListWithKey(key=modulo)
slt[5]
@raises(IndexError)
def test_getitem_indexerror2():
slt = SortedListWithKey(range(100), key=modulo)
slt[200]
@raises(IndexError)
def test_getitem_indexerror3():
slt = SortedListWithKey(range(100), key=modulo)
slt[-101]
def test_delitem():
random.seed(0)
slt = SortedListWithKey(range(100), load=17, key=modulo)
while len(slt) > 0:
del slt[random.randrange(len(slt))]
slt._check()
def test_delitem_slice():
slt = SortedListWithKey(range(100), load=17, key=modulo)
del slt[10:40:1]
del slt[10:40:-1]
del slt[10:40:2]
del slt[10:40:-2]
def test_setitem():
random.seed(0)
slt = SortedListWithKey(range(0, 100), load=17, key=modulo)
slt[0] = 100
slt[99] = 99
slt[55] = 45
def test_setitem_slice():
slt = SortedListWithKey(range(100), load=17, key=modulo)
slt[:10] = [90, 80, 70, 60, 50, 40, 30, 20, 10, 0]
slt[:10:2] = [0, 10, 20, 30, 40]
slt[:] = sorted(range(100), key=modulo)
slt[90:] = []
slt[:10] = []
assert len(slt) == 80
@raises(ValueError)
def test_setitem_slice_bad():
slt = SortedListWithKey(range(100), load=17, key=modulo)
slt[:10] = list(reversed(range(10)))
@raises(ValueError)
def test_setitem_slice_bad1():
slt = SortedListWithKey(range(100), load=17, key=modulo)
slt[10:20] = range(20, 30)
@raises(ValueError)
def test_setitem_slice_bad2():
slt = SortedListWithKey(range(100), load=17, key=modulo)
slt[20:30] = range(10, 20)
@raises(ValueError)
def test_setitem_extended_slice_bad1():
slt = SortedListWithKey(range(100), load=17, key=modulo)
slt[20:80:3] = list(range(10))
@raises(ValueError)
def test_setitem_extended_slice_bad2():
slt = SortedListWithKey(range(100), load=17, key=modulo)
slt[40:90:5] = list(range(10))
@raises(ValueError)
def test_setitem_valueerror1():
slt = SortedListWithKey(range(10), key=modulo)
slt[9] = 10
@raises(ValueError)
def test_setitem_valueerror2():
slt = SortedListWithKey(range(10), key=modulo)
slt[0] = 9
def test_iter():
slt = SortedListWithKey(range(10000), key=modulo)
itr = iter(slt)
assert all(tup[0] == tup[1] for tup in zip(sorted(range(10000), key=modulo), itr))
def test_reversed():
slt = SortedListWithKey(range(10000), key=modulo)
rev = reversed(slt)
assert all(tup[0] == tup[1] for tup in zip(reversed(sorted(range(10000), key=modulo)), rev))
def test_len():
slt = SortedListWithKey(key=modulo)
for val in range(10000):
slt.add(val)
assert len(slt) == (val + 1)
def test_bisect_left():
slt = SortedListWithKey(key=modulo)
assert slt.bisect_left(0) == 0
slt = SortedListWithKey(range(100), load=17, key=modulo)
slt.update(range(100))
slt._check()
assert slt.bisect_left(50) == 0
assert slt.bisect_left(0) == 0
def test_bisect():
slt = SortedListWithKey(key=modulo)
assert slt.bisect(10) == 0
slt = SortedListWithKey(range(100), load=17, key=modulo)
slt.update(range(100))
slt._check()
assert slt.bisect(10) == 20
assert slt.bisect(0) == 20
def test_bisect_right():
slt = SortedListWithKey(key=modulo)
assert slt.bisect_right(10) == 0
slt = SortedListWithKey(range(100), load=17, key=modulo)
slt.update(range(100))
slt._check()
assert slt.bisect_right(10) == 20
assert slt.bisect_right(0) == 20
def test_bisect_key_left():
slt = SortedListWithKey(key=modulo)
assert slt.bisect_key_left(10) == 0
slt = SortedListWithKey(range(100), load=17, key=modulo)
slt.update(range(100))
slt._check()
assert slt.bisect_key_left(0) == 0
assert slt.bisect_key_left(5) == 100
assert slt.bisect_key_left(10) == 200
def test_bisect_key_right():
slt = SortedListWithKey(key=modulo)
assert slt.bisect_key_right(0) == 0
slt = SortedListWithKey(range(100), load=17, key=modulo)
slt.update(range(100))
slt._check()
assert slt.bisect_key_right(0) == 20
assert slt.bisect_key_right(5) == 120
assert slt.bisect_key_right(10) == 200
def test_bisect_key():
slt = SortedListWithKey(key=modulo)
assert slt.bisect_key(0) == 0
slt = SortedListWithKey(range(100), load=17, key=modulo)
slt.update(range(100))
slt._check()
assert slt.bisect_key(0) == 20
assert slt.bisect_key(5) == 120
assert slt.bisect_key(10) == 200
def test_copy():
slt = SortedListWithKey(range(100), load=7, key=modulo)
two = slt.copy()
slt.add(100)
assert len(slt) == 101
assert len(two) == 100
def test_copy_copy():
import copy
slt = SortedListWithKey(range(100), load=7, key=modulo)
two = copy.copy(slt)
slt.add(100)
assert len(slt) == 101
assert len(two) == 100
def test_count():
slt = SortedListWithKey(load=7, key=modulo)
assert slt.count(0) == 0
for iii in range(100):
for jjj in range(iii):
slt.add(iii)
slt._check()
for iii in range(100):
assert slt.count(iii) == iii
slt = SortedListWithKey(range(8), key=modulo)
assert slt.count(9) == 0
def test_append():
slt = SortedListWithKey(load=4, key=modulo)
slt.append(0)
for val in range(1, 10):
slt.append(val)
slt._check()
@raises(ValueError)
def test_append_valueerror():
slt = SortedListWithKey(range(100), key=modulo)
slt.append(5)
def test_extend():
slt = SortedListWithKey(load=4, key=modulo)
slt.extend(range(5))
slt._check()
slt.extend(range(6, 10))
slt._check()
@raises(ValueError)
def test_extend_valueerror1():
slt = SortedListWithKey(key=modulo)
slt.extend([1, 2, 3, 5, 4, 6])
@raises(ValueError)
def test_extend_valueerror2():
slt = SortedListWithKey(range(20), load=4, key=modulo)
slt.extend([17, 18, 19, 20, 21, 22, 23])
def test_insert():
slt = SortedListWithKey(range(10), load=4, key=modulo)
slt.insert(-100, 0)
slt._check()
slt.insert(-1, 9)
slt._check()
slt.insert(0, 10)
slt._check()
slt = SortedListWithKey(load=4, key=modulo)
slt.insert(0, 5)
slt._check()
slt = SortedListWithKey(range(5, 15), load=4, key=modulo)
for rpt in range(8):
slt.insert(0, 10)
slt._check()
slt = SortedListWithKey(range(10), load=4, key=modulo)
slt.insert(8, 8)
slt._check()
@raises(ValueError)
def test_insert_valueerror1():
slt = SortedListWithKey(range(10), load=4, key=modulo)
slt.insert(10, 5)
@raises(ValueError)
def test_insert_valueerror2():
slt = SortedListWithKey(range(10), load=4, key=modulo)
slt.insert(0, 9)
@raises(ValueError)
def test_insert_valueerror3():
slt = SortedListWithKey(range(10), load=4, key=modulo)
slt.insert(5, 3)
@raises(ValueError)
def test_insert_valueerror4():
slt = SortedListWithKey(range(10), load=4, key=modulo)
slt.insert(5, 7)
def test_pop():
slt = SortedListWithKey(range(10), load=4, key=modulo)
slt._check()
assert slt.pop() == 9
slt._check()
assert slt.pop(0) == 0
slt._check()
assert slt.pop(-2) == 7
slt._check()
assert slt.pop(4) == 5
slt._check()
@raises(IndexError)
def test_pop_indexerror1():
slt = SortedListWithKey(range(10), load=4, key=modulo)
slt.pop(-11)
@raises(IndexError)
def test_pop_indexerror2():
slt = SortedListWithKey(range(10), load=4, key=modulo)
slt.pop(10)
def test_index():
slt = SortedListWithKey(range(100), load=7, key=modulo)
for pos, val in enumerate(sorted(range(100), key=modulo)):
assert val == slt.index(pos)
assert slt.index(9, 0, 1000) == 90
slt = SortedListWithKey((0 for rpt in range(100)), load=7, key=modulo)
for start in range(100):
for stop in range(start, 100):
assert slt.index(0, start, stop + 1) == start
for start in range(100):
assert slt.index(0, -(100 - start)) == start
assert slt.index(0, -1000) == 0
@raises(ValueError)
def test_index_valueerror1():
slt = SortedListWithKey([0] * 10, load=4, key=modulo)
slt.index(0, 10)
@raises(ValueError)
def test_index_valueerror2():
slt = SortedListWithKey([0] * 10, load=4, key=modulo)
slt.index(0, 0, -10)
@raises(ValueError)
def test_index_valueerror3():
slt = SortedListWithKey([0] * 10, load=4, key=modulo)
slt.index(0, 7, 3)
@raises(ValueError)
def test_index_valueerror4():
slt = SortedListWithKey([0] * 10, load=4, key=modulo)
slt.index(1)
@raises(ValueError)
def test_index_valueerror5():
slt = SortedListWithKey(key=modulo)
slt.index(1)
@raises(ValueError)
def test_index_valueerror6():
slt = SortedListWithKey(range(100), load=4, key=modulo)
slt.index(91, 0, 15)
@raises(ValueError)
def test_index_valueerror7():
slt = SortedListWithKey([0] * 10 + [1] * 10 + [2] * 10, load=4, key=modulo)
slt.index(1, 0, 10)
@raises(ValueError)
def test_index_valueerror8():
slt = SortedListWithKey(range(10), load=4, key=modulo)
slt.index(4, 5)
@raises(ValueError)
def test_index_valueerror9():
slt = SortedListWithKey(load=4, key=modulo)
slt.index(5)
@raises(ValueError)
def test_index_valueerror10():
slt = SortedListWithKey(range(10), load=4, key=modulo)
slt.index(19)
def test_mul():
this = SortedListWithKey(range(10), load=4, key=modulo)
that = this * 5
this._check()
that._check()
assert this == sorted(range(10), key=modulo)
assert that == sorted(list(range(10)) * 5, key=modulo)
assert this != that
def test_imul():
this = SortedListWithKey(range(10), load=4, key=modulo)
this *= 5
this._check()
assert this == sorted(list(range(10)) * 5, key=modulo)
def test_op_add():
this = SortedListWithKey(range(10), load=4, key=modulo)
assert (this + this + this) == (this * 3)
that = SortedListWithKey(range(10), load=4, key=modulo)
that += that
that += that
assert that == (this * 4)
def test_eq():
this = SortedListWithKey(range(10), load=4, key=modulo)
that = SortedListWithKey(range(20), load=4, key=modulo)
assert not (this == that)
that.clear()
that.update(range(10))
assert this == that
def test_lt():
this = SortedListWithKey(range(10), load=4, key=modulo)
that = SortedListWithKey(range(10, 20), load=5, key=modulo)
assert this < that
assert not (that < this)
that = SortedListWithKey(range(1, 10), load=4, key=modulo)
assert not (this < that)
def test_lte():
this = SortedListWithKey(range(10), load=4, key=modulo)
that = SortedListWithKey(range(10), load=5, key=modulo)
assert this <= that
assert that <= this
del this[-1]
assert this <= that
assert not (that <= this)
def test_gt():
this = SortedListWithKey(range(10), load=4, key=modulo)
that = SortedListWithKey(range(10, 20), load=5, key=modulo)
assert that > this
assert not (this > that)
that = SortedListWithKey(range(1, 10), load=4, key=modulo)
assert not (that > this)
def test_gte():
this = SortedListWithKey(range(10), load=4, key=modulo)
that = SortedListWithKey(range(10), load=5, key=modulo)
assert this >= that
assert that >= this
del this[-1]
assert that >= this
assert not (this >= that)
def test_repr():
this = SortedListWithKey(range(10), load=4, key=modulo)
assert repr(this).startswith('SortedListWithKey([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], key=<function modulo at ')
def test_repr_subclass():
class CustomSortedListWithKey(SortedListWithKey):
pass
this = CustomSortedListWithKey(range(10), load=4, key=modulo)
assert repr(this).startswith('CustomSortedListWithKey([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], key=<function modulo at ')
@raises(AssertionError)
def test_check():
slt = SortedListWithKey(range(10), load=4, key=modulo)
slt._len = 5
slt._check()
if __name__ == '__main__':
import nose
nose.main()
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import threading
import time
from datetime import datetime
from pytz import timezone, UTC
import requests
import pysolr
from shapely import wkt
SOLR_CON_LOCK = threading.Lock()
thread_local = threading.local()
EPOCH = timezone('UTC').localize(datetime(1970, 1, 1))
SOLR_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
ISO_8601 = '%Y-%m-%dT%H:%M:%S%z'
class SolrProxy(object):
def __init__(self, config):
self.solrUrl = config.get("solr", "host")
self.solrCore = config.get("solr", "core")
solr_kargs = {}
if config.has_option("solr", "time_out"):
solr_kargs["timeout"] = config.get("solr", "time_out")
self.logger = logging.getLogger('nexus')
with SOLR_CON_LOCK:
solrcon = getattr(thread_local, 'solrcon', None)
if solrcon is None:
solr_url = '%s/solr/%s' % (self.solrUrl, self.solrCore)
self.logger.info("connect to solr, url {} with option(s) = {}".format(solr_url, solr_kargs))
solrcon = pysolr.Solr(solr_url, **solr_kargs)
thread_local.solrcon = solrcon
self.solrcon = solrcon
def find_tile_by_id(self, tile_id):
search = 'id:%s' % tile_id
params = {
'rows': 1
}
results, start, found = self.do_query(*(search, None, None, True, None), **params)
assert len(results) == 1, "Found %s results, expected exactly 1" % len(results)
return [results[0]]
def find_tiles_by_id(self, tile_ids, ds=None, **kwargs):
if ds is not None:
search = 'dataset_s:%s' % ds
else:
search = '*:*'
additionalparams = {
'fq': [
"{!terms f=id}%s" % ','.join(tile_ids)
]
}
self._merge_kwargs(additionalparams, **kwargs)
results = self.do_query_all(*(search, None, None, False, None), **additionalparams)
assert len(results) == len(tile_ids), "Found %s results, expected exactly %s" % (len(results), len(tile_ids))
return results
def find_min_date_from_tiles(self, tile_ids, ds=None, **kwargs):
if ds is not None:
search = 'dataset_s:%s' % ds
else:
search = '*:*'
kwargs['rows'] = 1
kwargs['fl'] = 'tile_min_time_dt'
kwargs['sort'] = ['tile_min_time_dt asc']
additionalparams = {
'fq': [
"{!terms f=id}%s" % ','.join(tile_ids) if len(tile_ids) > 0 else ''
]
}
self._merge_kwargs(additionalparams, **kwargs)
results, start, found = self.do_query(*(search, None, None, True, None), **additionalparams)
return self.convert_iso_to_datetime(results[0]['tile_min_time_dt'])
def find_max_date_from_tiles(self, tile_ids, ds=None, **kwargs):
if ds is not None:
search = 'dataset_s:%s' % ds
else:
search = '*:*'
kwargs['rows'] = 1
kwargs['fl'] = 'tile_max_time_dt'
kwargs['sort'] = ['tile_max_time_dt desc']
additionalparams = {
'fq': [
"{!terms f=id}%s" % ','.join(tile_ids) if len(tile_ids) > 0 else ''
]
}
self._merge_kwargs(additionalparams, **kwargs)
results, start, found = self.do_query(*(search, None, None, True, None), **additionalparams)
return self.convert_iso_to_datetime(results[0]['tile_max_time_dt'])
def find_min_max_date_from_granule(self, ds, granule_name, **kwargs):
search = 'dataset_s:%s' % ds
kwargs['rows'] = 1
kwargs['fl'] = 'tile_min_time_dt'
kwargs['sort'] = ['tile_min_time_dt asc']
additionalparams = {
'fq': [
"granule_s:%s" % granule_name
]
}
self._merge_kwargs(additionalparams, **kwargs)
results, start, found = self.do_query(*(search, None, None, False, None), **additionalparams)
start_time = self.convert_iso_to_datetime(results[0]['tile_min_time_dt'])
kwargs['fl'] = 'tile_max_time_dt'
kwargs['sort'] = ['tile_max_time_dt desc']
additionalparams = {
'fq': [
"granule_s:%s" % granule_name
]
}
self._merge_kwargs(additionalparams, **kwargs)
results, start, found = self.do_query(*(search, None, None, False, None), **additionalparams)
end_time = self.convert_iso_to_datetime(results[0]['tile_max_time_dt'])
return start_time, end_time
def get_data_series_list(self):
datasets = self.get_data_series_list_simple()
for dataset in datasets:
min_date = self.find_min_date_from_tiles([], ds=dataset['title'])
max_date = self.find_max_date_from_tiles([], ds=dataset['title'])
dataset['start'] = (min_date - EPOCH).total_seconds()
dataset['end'] = (max_date - EPOCH).total_seconds()
dataset['iso_start'] = min_date.strftime(ISO_8601)
dataset['iso_end'] = max_date.strftime(ISO_8601)
return datasets
def get_data_series_list_simple(self):
search = "*:*"
params = {
'rows': 0,
"facet": "true",
"facet.field": "dataset_s",
"facet.mincount": "1",
"facet.limit": "-1"
}
response = self.do_query_raw(*(search, None, None, False, None), **params)
l = []
for g, v in zip(*[iter(response.facets["facet_fields"]["dataset_s"])]*2):
l.append({
"shortName": g,
"title": g,
"tileCount": v
})
l = sorted(l, key=lambda entry: entry["title"])
return l
def get_data_series_stats(self, ds):
search = "dataset_s:%s" % ds
params = {
"facet": "true",
"facet.field": ["dataset_s", "tile_max_time_dt"],
"facet.limit": "-1",
"facet.mincount": "1",
"facet.pivot": "{!stats=piv1}dataset_s",
"stats": "on",
"stats.field": ["{!tag=piv1 min=true max=true sum=false}tile_max_time_dt","{!tag=piv1 min=true max=false sum=false}tile_min_val_d","{!tag=piv1 min=false max=true sum=false}tile_max_val_d"]
}
response = self.do_query_raw(*(search, None, None, False, None), **params)
stats = {}
for g in response.facets["facet_pivot"]["dataset_s"]:
if g["value"] == ds:
stats["start"] = self.convert_iso_to_timestamp(g["stats"]["stats_fields"]["tile_max_time_dt"]["min"])
stats["end"] = self.convert_iso_to_timestamp(g["stats"]["stats_fields"]["tile_max_time_dt"]["max"])
stats["minValue"] = g["stats"]["stats_fields"]["tile_min_val_d"]["min"]
stats["maxValue"] = g["stats"]["stats_fields"]["tile_max_val_d"]["max"]
stats["availableDates"] = []
for dt in response.facets["facet_fields"]["tile_max_time_dt"][::2]:
stats["availableDates"].append(self.convert_iso_to_timestamp(dt))
stats["availableDates"] = sorted(stats["availableDates"])
return stats
def find_tile_by_polygon_and_most_recent_day_of_year(self, bounding_polygon, ds, day_of_year):
search = 'dataset_s:%s' % ds
params = {
'fq': [
"{!field f=geo}Intersects(%s)" % bounding_polygon.wkt,
"tile_count_i:[1 TO *]",
"day_of_year_i:[* TO %s]" % day_of_year
],
'rows': 1
}
results, start, found = self.do_query(
*(search, None, None, True, ('day_of_year_i desc',)), **params)
return [results[0]]
def find_days_in_range_asc(self, min_lat, max_lat, min_lon, max_lon, ds, start_time, end_time, **kwargs):
search = 'dataset_s:%s' % ds
search_start_s = datetime.utcfromtimestamp(start_time).strftime(SOLR_FORMAT)
search_end_s = datetime.utcfromtimestamp(end_time).strftime(SOLR_FORMAT)
additionalparams = {
'fq': [
"geo:[%s,%s TO %s,%s]" % (min_lat, min_lon, max_lat, max_lon),
"{!frange l=0 u=0}ms(tile_min_time_dt,tile_max_time_dt)",
"tile_count_i:[1 TO *]",
"tile_min_time_dt:[%s TO %s] " % (search_start_s, search_end_s)
],
'rows': 0,
'facet': 'true',
'facet.field': 'tile_min_time_dt',
'facet.mincount': '1',
'facet.limit': '-1'
}
self._merge_kwargs(additionalparams, **kwargs)
response = self.do_query_raw(*(search, None, None, False, None), **additionalparams)
daysinrangeasc = sorted(
[(datetime.strptime(a_date, SOLR_FORMAT) - datetime.utcfromtimestamp(0)).total_seconds() for a_date
in response.facets['facet_fields']['tile_min_time_dt'][::2]])
return daysinrangeasc
def find_all_tiles_in_box_sorttimeasc(self, min_lat, max_lat, min_lon, max_lon, ds, start_time=0,
end_time=-1, **kwargs):
search = 'dataset_s:%s' % ds
additionalparams = {
'fq': [
"geo:[%s,%s TO %s,%s]" % (min_lat, min_lon, max_lat, max_lon),
"tile_count_i:[1 TO *]"
]
}
if 0 <= start_time <= end_time:
search_start_s = datetime.utcfromtimestamp(start_time).strftime(SOLR_FORMAT)
search_end_s = datetime.utcfromtimestamp(end_time).strftime(SOLR_FORMAT)
time_clause = "(" \
"tile_min_time_dt:[%s TO %s] " \
"OR tile_max_time_dt:[%s TO %s] " \
"OR (tile_min_time_dt:[* TO %s] AND tile_max_time_dt:[%s TO *])" \
")" % (
search_start_s, search_end_s,
search_start_s, search_end_s,
search_start_s, search_end_s
)
additionalparams['fq'].append(time_clause)
self._merge_kwargs(additionalparams, **kwargs)
return self.do_query_all(
*(search, None, None, False, 'tile_min_time_dt asc, tile_max_time_dt asc'),
**additionalparams)
def find_all_tiles_in_polygon_sorttimeasc(self, bounding_polygon, ds, start_time=0, end_time=-1, **kwargs):
search = 'dataset_s:%s' % ds
additionalparams = {
'fq': [
"{!field f=geo}Intersects(%s)" % bounding_polygon.wkt,
"tile_count_i:[1 TO *]"
]
}
if 0 <= start_time <= end_time:
search_start_s = datetime.utcfromtimestamp(start_time).strftime(SOLR_FORMAT)
search_end_s = datetime.utcfromtimestamp(end_time).strftime(SOLR_FORMAT)
time_clause = "(" \
"tile_min_time_dt:[%s TO %s] " \
"OR tile_max_time_dt:[%s TO %s] " \
"OR (tile_min_time_dt:[* TO %s] AND tile_max_time_dt:[%s TO *])" \
")" % (
search_start_s, search_end_s,
search_start_s, search_end_s,
search_start_s, search_end_s
)
additionalparams['fq'].append(time_clause)
self._merge_kwargs(additionalparams, **kwargs)
return self.do_query_all(
*(search, None, None, False, 'tile_min_time_dt asc, tile_max_time_dt asc'),
**additionalparams)
def find_all_tiles_in_polygon(self, bounding_polygon, ds, start_time=0, end_time=-1, **kwargs):
search = 'dataset_s:%s' % ds
additionalparams = {
'fq': [
"{!field f=geo}Intersects(%s)" % bounding_polygon.wkt,
"tile_count_i:[1 TO *]"
]
}
if 0 <= start_time <= end_time:
search_start_s = datetime.utcfromtimestamp(start_time).strftime(SOLR_FORMAT)
search_end_s = datetime.utcfromtimestamp(end_time).strftime(SOLR_FORMAT)
time_clause = "(" \
"tile_min_time_dt:[%s TO %s] " \
"OR tile_max_time_dt:[%s TO %s] " \
"OR (tile_min_time_dt:[* TO %s] AND tile_max_time_dt:[%s TO *])" \
")" % (
search_start_s, search_end_s,
search_start_s, search_end_s,
search_start_s, search_end_s
)
additionalparams['fq'].append(time_clause)
self._merge_kwargs(additionalparams, **kwargs)
return self.do_query_all(
*(search, None, None, False, None),
**additionalparams)
def find_distinct_bounding_boxes_in_polygon(self, bounding_polygon, ds, start_time=0, end_time=-1, **kwargs):
search = 'dataset_s:%s' % ds
additionalparams = {
'fq': [
"{!field f=geo}Intersects(%s)" % bounding_polygon.wkt,
"tile_count_i:[1 TO *]"
],
'rows': 0,
'facet': 'true',
'facet.field': 'geo_s',
'facet.limit': -1,
'facet.mincount': 1
}
if 0 <= start_time <= end_time:
search_start_s = datetime.utcfromtimestamp(start_time).strftime(SOLR_FORMAT)
search_end_s = datetime.utcfromtimestamp(end_time).strftime(SOLR_FORMAT)
time_clause = "(" \
"tile_min_time_dt:[%s TO %s] " \
"OR tile_max_time_dt:[%s TO %s] " \
"OR (tile_min_time_dt:[* TO %s] AND tile_max_time_dt:[%s TO *])" \
")" % (
search_start_s, search_end_s,
search_start_s, search_end_s,
search_start_s, search_end_s
)
additionalparams['fq'].append(time_clause)
self._merge_kwargs(additionalparams, **kwargs)
response = self.do_query_raw(*(search, None, None, False, None), **additionalparams)
distinct_bounds = [wkt.loads(key).bounds for key in response.facets["facet_fields"]["geo_s"][::2]]
return distinct_bounds
def find_tiles_by_exact_bounds(self, minx, miny, maxx, maxy, ds, start_time=0, end_time=-1, **kwargs):
search = 'dataset_s:%s' % ds
additionalparams = {
'fq': [
"tile_min_lon:\"%s\"" % minx,
"tile_min_lat:\"%s\"" % miny,
"tile_max_lon:\"%s\"" % maxx,
"tile_max_lat:\"%s\"" % maxy,
"tile_count_i:[1 TO *]"
]
}
if 0 <= start_time <= end_time:
search_start_s = datetime.utcfromtimestamp(start_time).strftime(SOLR_FORMAT)
search_end_s = datetime.utcfromtimestamp(end_time).strftime(SOLR_FORMAT)
time_clause = "(" \
"tile_min_time_dt:[%s TO %s] " \
"OR tile_max_time_dt:[%s TO %s] " \
"OR (tile_min_time_dt:[* TO %s] AND tile_max_time_dt:[%s TO *])" \
")" % (
search_start_s, search_end_s,
search_start_s, search_end_s,
search_start_s, search_end_s
)
additionalparams['fq'].append(time_clause)
self._merge_kwargs(additionalparams, **kwargs)
return self.do_query_all(
*(search, None, None, False, None),
**additionalparams)
def find_all_tiles_in_box_at_time(self, min_lat, max_lat, min_lon, max_lon, ds, search_time, **kwargs):
search = 'dataset_s:%s' % ds
the_time = datetime.utcfromtimestamp(search_time).strftime(SOLR_FORMAT)
time_clause = "(" \
"tile_min_time_dt:[* TO %s] " \
"AND tile_max_time_dt:[%s TO *] " \
")" % (
the_time, the_time
)
additionalparams = {
'fq': [
"geo:[%s,%s TO %s,%s]" % (min_lat, min_lon, max_lat, max_lon),
"tile_count_i:[1 TO *]",
time_clause
]
}
self._merge_kwargs(additionalparams, **kwargs)
return self.do_query_all(*(search, None, None, False, None), **additionalparams)
def find_all_tiles_in_polygon_at_time(self, bounding_polygon, ds, search_time, **kwargs):
search = 'dataset_s:%s' % ds
the_time = datetime.utcfromtimestamp(search_time).strftime(SOLR_FORMAT)
time_clause = "(" \
"tile_min_time_dt:[* TO %s] " \
"AND tile_max_time_dt:[%s TO *] " \
")" % (
the_time, the_time
)
additionalparams = {
'fq': [
"{!field f=geo}Intersects(%s)" % bounding_polygon.wkt,
"tile_count_i:[1 TO *]",
time_clause
]
}
self._merge_kwargs(additionalparams, **kwargs)
return self.do_query_all(*(search, None, None, False, None), **additionalparams)
def find_all_tiles_within_box_at_time(self, min_lat, max_lat, min_lon, max_lon, ds, time, **kwargs):
search = 'dataset_s:%s' % ds
the_time = datetime.utcfromtimestamp(time).strftime(SOLR_FORMAT)
time_clause = "(" \
"tile_min_time_dt:[* TO %s] " \
"AND tile_max_time_dt:[%s TO *] " \
")" % (
the_time, the_time
)
additionalparams = {
'fq': [
"geo:\"Within(ENVELOPE(%s,%s,%s,%s))\"" % (min_lon, max_lon, max_lat, min_lat),
"tile_count_i:[1 TO *]",
time_clause
]
}
self._merge_kwargs(additionalparams, **kwargs)
return self.do_query_all(*(search, "product(tile_avg_val_d, tile_count_i),*", None, False, None),
**additionalparams)
def find_all_boundary_tiles_at_time(self, min_lat, max_lat, min_lon, max_lon, ds, time, **kwargs):
search = 'dataset_s:%s' % ds
the_time = datetime.utcfromtimestamp(time).strftime(SOLR_FORMAT)
time_clause = "(" \
"tile_min_time_dt:[* TO %s] " \
"AND tile_max_time_dt:[%s TO *] " \
")" % (
the_time, the_time
)
additionalparams = {
'fq': [
"geo:\"Intersects(MultiLineString((%s %s, %s %s),(%s %s, %s %s),(%s %s, %s %s),(%s %s, %s %s)))\"" % (
min_lon, max_lat, max_lon, max_lat, min_lon, max_lat, min_lon, min_lat, max_lon, max_lat, max_lon,
min_lat, min_lon, min_lat, max_lon, min_lat),
"-geo:\"Within(ENVELOPE(%s,%s,%s,%s))\"" % (min_lon, max_lon, max_lat, min_lat),
"tile_count_i:[1 TO *]",
time_clause
]
}
self._merge_kwargs(additionalparams, **kwargs)
return self.do_query_all(*(search, None, None, False, None), **additionalparams)
def find_all_tiles_by_metadata(self, metadata, ds, start_time=0, end_time=-1, **kwargs):
"""
Get a list of tile metadata that matches the specified metadata, start_time, end_time.
:param metadata: List of metadata values to search for tiles e.g ["river_id_i:1", "granule_s:granule_name"]
:param ds: The dataset name to search
:param start_time: The start time to search for tiles
:param end_time: The end time to search for tiles
:return: A list of tile metadata
"""
search = 'dataset_s:%s' % ds
additionalparams = {
'fq': metadata
}
if 0 <= start_time <= end_time:
additionalparams['fq'].append(self.get_formatted_time_clause(start_time, end_time))
self._merge_kwargs(additionalparams, **kwargs)
return self.do_query_all(
*(search, None, None, False, None),
**additionalparams)
def get_formatted_time_clause(self, start_time, end_time):
search_start_s = datetime.utcfromtimestamp(start_time).strftime(SOLR_FORMAT)
search_end_s = datetime.utcfromtimestamp(end_time).strftime(SOLR_FORMAT)
time_clause = "(" \
"tile_min_time_dt:[%s TO %s] " \
"OR tile_max_time_dt:[%s TO %s] " \
"OR (tile_min_time_dt:[* TO %s] AND tile_max_time_dt:[%s TO *])" \
")" % (
search_start_s, search_end_s,
search_start_s, search_end_s,
search_start_s, search_end_s
)
return time_clause
def get_tile_count(self, ds, bounding_polygon=None, start_time=0, end_time=-1, metadata=None, **kwargs):
"""
Return number of tiles that match search criteria.
:param ds: The dataset name to search
:param bounding_polygon: The polygon to search for tiles
:param start_time: The start time to search for tiles
:param end_time: The end time to search for tiles
:param metadata: List of metadata values to search for tiles e.g ["river_id_i:1", "granule_s:granule_name"]
:return: number of tiles that match search criteria
"""
search = 'dataset_s:%s' % ds
additionalparams = {
'fq': [
"tile_count_i:[1 TO *]"
],
'rows': 0
}
if bounding_polygon:
min_lon, min_lat, max_lon, max_lat = bounding_polygon.bounds
additionalparams['fq'].append("geo:[%s,%s TO %s,%s]" % (min_lat, min_lon, max_lat, max_lon))
if 0 <= start_time <= end_time:
additionalparams['fq'].append(self.get_formatted_time_clause(start_time, end_time))
if metadata:
additionalparams['fq'].extend(metadata)
self._merge_kwargs(additionalparams, **kwargs)
results, start, found = self.do_query(*(search, None, None, True, None), **additionalparams)
return found
def do_query(self, *args, **params):
response = self.do_query_raw(*args, **params)
return response.docs, response.raw_response['response']['start'], response.hits
def do_query_raw(self, *args, **params):
if 'fl' not in list(params.keys()) and args[1]:
params['fl'] = args[1]
if 'sort' not in list(params.keys()) and args[4]:
params['sort'] = args[4]
# If dataset_s is specified as the search term,
# add the _route_ parameter to limit the search to the correct shard
if 'dataset_s:' in args[0]:
ds = args[0].split(':')[-1]
params['shard_keys'] = ds + '!'
with SOLR_CON_LOCK:
response = self.solrcon.search(args[0], **params)
return response
def do_query_all(self, *args, **params):
results = []
response = self.do_query_raw(*args, **params)
results.extend(response.docs)
limit = min(params.get('limit', float('inf')), response.hits)
while len(results) < limit:
params['start'] = len(results)
response = self.do_query_raw(*args, **params)
results.extend(response.docs)
assert len(results) == limit
return results
def convert_iso_to_datetime(self, date):
return datetime.strptime(date, "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=UTC)
def convert_iso_to_timestamp(self, date):
return (self.convert_iso_to_datetime(date) - EPOCH).total_seconds()
def ping(self):
solrAdminPing = '%s/solr/%s/admin/ping' % (self.solrUrl, self.solrCore)
try:
r = requests.get(solrAdminPing, params={'wt': 'json'})
results = json.loads(r.text)
return results
except:
return None
@staticmethod
def _merge_kwargs(additionalparams, **kwargs):
# Only Solr-specific kwargs are parsed
# And the special 'limit'
try:
additionalparams['limit'] = kwargs['limit']
except KeyError:
pass
try:
additionalparams['_route_'] = kwargs['_route_']
except KeyError:
pass
try:
additionalparams['rows'] = kwargs['rows']
except KeyError:
pass
try:
additionalparams['start'] = kwargs['start']
except KeyError:
pass
try:
            kwfq = kwargs['fq'] if isinstance(kwargs['fq'], list) else [kwargs['fq']]
except KeyError:
kwfq = []
try:
additionalparams['fq'].extend(kwfq)
except KeyError:
additionalparams['fq'] = kwfq
try:
kwfl = kwargs['fl'] if isinstance(kwargs['fl'], list) else [kwargs['fl']]
except KeyError:
kwfl = []
try:
additionalparams['fl'].extend(kwfl)
except KeyError:
additionalparams['fl'] = kwfl
try:
s = kwargs['sort'] if isinstance(kwargs['sort'], list) else [kwargs['sort']]
except KeyError:
s = None
try:
additionalparams['sort'].extend(s)
except KeyError:
if s is not None:
additionalparams['sort'] = s
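# --- Illustrative sketch (host and core values are placeholders) --------------------
# SolrProxy only needs a config object exposing get()/has_option(); a plain
# configparser.ConfigParser with a [solr] section is enough to construct one.
def _demo_build_solr_proxy():
    import configparser

    config = configparser.ConfigParser()
    config['solr'] = {
        'host': 'http://localhost:8983',  # placeholder Solr URL
        'core': 'nexustiles',             # placeholder core name
    }
    proxy = SolrProxy(config)
    return proxy.ping()  # parsed JSON on success, None if Solr is unreachable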
|
def hot_singles(arr1, arr2):
output = []
for x in arr1:
if x not in output and x not in arr2:
output.append(x)
for x in arr2:
if x not in output and x not in arr1:
output.append(x)
return output
'''
Write a function that takes two arguments, and returns a new array populated
with the elements that only appear once, in either one array or the other,
taken only once; display order should follow what appears in arr1 first, then arr2:
hot_singles([1, 2, 3, 3], [3, 2, 1, 4, 5]) # [4, 5]
hot_singles(["tartar", "blanket", "cinnamon"], ["cinnamon", "blanket", "domino"]) # ["tartar", "domino"]
hot_singles([77, "ciao"], [78, 42, "ciao"]) # [77, 78, 42]
hot_singles([1, 2, 3, 3], [3, 2, 1, 4, 5, 4]) # [4,5]
'''
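# --- Illustrative alternative (a sketch, not the kata's required solution) ----------
# The same symmetric-difference-with-dedup idea using sets for O(1) membership tests
# (this assumes hashable elements); ordering still follows arr1 first, then arr2.
def hot_singles_with_sets(arr1, arr2):
    set1, set2 = set(arr1), set(arr2)
    output, seen = [], set()
    for x in list(arr1) + list(arr2):
        if (x in set1) != (x in set2) and x not in seen:
            seen.add(x)
            output.append(x)
    return output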
|
#!/usr/bin/env python
# encoding: utf-8
'''
@author: songyunlong
@license: (C) Copyright 2018-2021, Node Supply Chain Manager Corporation Limited.
@contact: 1243049371@qq.com
@software: pycharm
@file: ceshi
@time: 2019/3/3 11:13
@desc:
'''
import tensorflow as tf
import numpy as np
from tensorflow_own.Routine_operation import SaveFile, LoadFile
from tensorflow_own.TFrecord_operation import FileoOperation
if __name__ == '__main__':
for i in range(0, 10, 2):
print(i)
|
import pickle as pkl
import json
import random
import numpy as np
import torch
from sklearn import preprocessing
from sklearn.cluster import KMeans
def read_pickle(file_path):
with open(file_path, 'rb') as f:
vec = pkl.load(f)
return vec
def dump_pickle(file_path, obj):
with open(file_path, 'wb') as f:
pkl.dump(obj, f)
def read_json(file_path):
with open(file_path, 'r') as f:
return json.load(f)
def dump_json(file_path, obj):
with open(file_path, 'w') as f:
json.dump(obj, f)
def remove_unseen_relation(data, seen_relations, dataset='fewrel'):
cleaned_data = []
    for sample in data:  # use a distinct loop variable instead of shadowing `data`
        neg_cands = [cand for cand in sample[1] if cand in seen_relations]
        if len(neg_cands) > 0:
            if dataset == 'fewrel':
                cleaned_data.append([sample[0], neg_cands, sample[2], sample[3], sample[4], sample[5]])
            else:
                cleaned_data.append([sample[0], neg_cands, sample[2]])
        else:
            if dataset == 'fewrel':
                cleaned_data.append([sample[0], sample[1][-2:], sample[2], sample[3], sample[4], sample[5]])
            else:
                cleaned_data.append([sample[0], sample[1][-2:], sample[2]])
return cleaned_data
def ranking_sequence(sequence):
word_lengths = torch.tensor([len(sentence) for sentence in sequence])
    ranked_word_lengths, indexs = word_lengths.sort(descending=True)
ranked_indexs, inverse_indexs = indexs.sort()
#print(indexs)
sequence = [sequence[i] for i in indexs]
return sequence, inverse_indexs
def get_que_embed(model, sample_list, all_relations, batch_size, device,
before_alignment=False):
ret_que_embeds = []
for i in range((len(sample_list)-1)//batch_size+1):
samples = sample_list[i*batch_size:(i+1)*batch_size]
questions = []
for item in samples:
this_question = torch.tensor(item[2], dtype=torch.long).to(device)
questions.append(this_question)
#print(len(questions))
model.init_hidden(device, len(questions))
ranked_questions, alignment_question_indexs = \
ranking_sequence(questions)
question_lengths = [len(question) for question in ranked_questions]
#print(ranked_questions)
pad_questions = torch.nn.utils.rnn.pad_sequence(ranked_questions)
que_embeds = model.compute_que_embed(pad_questions, question_lengths,
alignment_question_indexs, None, before_alignment)
ret_que_embeds.append(que_embeds.detach().cpu().numpy())
return np.concatenate(ret_que_embeds)
# get the embedding of relations. If before_alignment is False, then the
# embedding after the alignment model will be returned. Otherwise, the embedding
# before the alignment model will be returned
def get_rel_embed(model, sample_list, all_relations, alignment_model, batch_size, device,
before_alignment=False):
ret_rel_embeds = []
for i in range((len(sample_list)-1)//batch_size+1):
samples = sample_list[i*batch_size:(i+1)*batch_size]
relations = []
for item in samples:
this_relation = torch.tensor(all_relations[item[0]],
dtype=torch.long).to(device)
relations.append(this_relation)
#print(len(relations))
model.init_hidden(device, len(relations))
ranked_relations, alignment_relation_indexs = \
ranking_sequence(relations)
relation_lengths = [len(relation) for relation in ranked_relations]
#print(ranked_relations)
pad_relations = torch.nn.utils.rnn.pad_sequence(ranked_relations)
rel_embeds = model.compute_rel_embed(pad_relations, relation_lengths,
alignment_relation_indexs,
alignment_model, before_alignment)
ret_rel_embeds.append(rel_embeds.detach().cpu().numpy())
return np.concatenate(ret_rel_embeds)
def select_data(model, samples, num_sel_data, all_relations, batch_size, device):
que_embeds = get_que_embed(model, samples, all_relations, batch_size, device) # sentence embedding,400d
que_embeds = preprocessing.normalize(que_embeds) # sklearn normalize
#print(que_embeds[:5])
    num_clusters = min(num_sel_data, len(samples)) # cluster samples into min(num_sel_data, len(samples)) clusters, keep one per cluster as memory
distances = KMeans(n_clusters=num_clusters,
random_state=0).fit_transform(que_embeds)
selected_samples = []
for i in range(num_clusters):
sel_index = np.argmin(distances[:,i])
selected_samples.append(samples[sel_index])
return selected_samples
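# A minimal, self-contained sketch (not part of the original training pipeline) of the
# memory-selection idea used in select_data above: cluster the embeddings with KMeans
# and keep, for each cluster, the index of the sample closest to that cluster's centroid.
def _demo_select_nearest_to_centroids(embeds, num_sel_data):
    num_clusters = min(num_sel_data, len(embeds))
    # fit_transform returns an (n_samples, n_clusters) matrix of distances to the centroids
    distances = KMeans(n_clusters=num_clusters, random_state=0).fit_transform(embeds)
    # for each cluster, the row with the smallest distance is its most central sample
    return [int(np.argmin(distances[:, i])) for i in range(num_clusters)]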
def random_select_data(current_train_data, task_memory_size):
return random.sample(current_train_data, task_memory_size)
# process the data by adding questions
def process_testing_samples(sample_list, all_relations, device):
questions = []
relations = []
gold_relation_indexs = []
relation_set_lengths = []
for sample in sample_list:
question = torch.tensor(sample[2], dtype=torch.long).to(device)
#print(relations[sample[0]])
#print(sample)
gold_relation_indexs.append(sample[0])
neg_relations = [torch.tensor(all_relations[index - 1],
dtype=torch.long).to(device)
for index in sample[1]]
relation_set_lengths.append(len(neg_relations))
relations += neg_relations
#questions += [question for i in range(relation_set_lengths[-1])]
questions += [question] * relation_set_lengths[-1]
return gold_relation_indexs, questions, relations, relation_set_lengths
def process_samples(sample_list, all_relations, device):
questions = []
relations = []
relation_set_lengths = []
for sample in sample_list:
question = torch.tensor(sample[2], dtype=torch.long).to(device)
#print(relations[sample[0]])
#print(sample)
pos_relation = torch.tensor(all_relations[sample[0] - 1],
dtype=torch.long).to(device) # pos tensor
neg_relations = [torch.tensor(all_relations[index - 1],
dtype=torch.long).to(device)
for index in sample[1]] # candidate neg tensor
relation_set_lengths.append(len(neg_relations)+1)
relations += [pos_relation] + neg_relations # merge
#questions += [question for i in range(relation_set_lengths[-1])]
questions += [question] * relation_set_lengths[-1]
return questions, relations, relation_set_lengths
def ranking_sequence(sequence):
word_lengths = torch.tensor([len(sentence) for sentence in sequence])
ranked_word, indexs = word_lengths.sort(descending = True)
ranked_indexs, inverse_indexs = indexs.sort()
#print(indexs)
sequence = [sequence[i] for i in indexs]
return sequence, inverse_indexs
def append_log(file_name, line):
with open(file_name, 'a+') as f:
f.writelines(line + '\n')
f.flush()
|
import Horcner as H
import numpy as np
import matplotlib.pyplot as plt
import math
def diff(y):
n = len(y)
delta = np.zeros((n, n))
delta[:, 0] = y
for i in range(1, n):
for j in range(0, n - i):
delta[j, i] = delta[j+1, i-1] - delta[j, i-1]
return delta
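# Illustrative check (not in the original script): diff() builds a forward-difference
# table column by column, so for y = [1, 4, 9, 16] the first row holds the leading value
# followed by its successive forward differences.
if __name__ == '__main__':
    table = diff(np.array([1.0, 4.0, 9.0, 16.0]))
    print(table[0])  # expected: [1. 3. 2. 0.]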
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 16 15:38:32 2018
@author: ppxee
"""
from astropy.io import fits #for handling fits
sem05B = fits.open('SE_outputs_yearstacks/05B_output.fits')
|
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By
class WeatherUi:
def __init__(self, logger):
self.logger = logger
self.driver = webdriver.Chrome('C:\\Users\\mahes\\Downloads\\chromedriver')
self.url = 'https://weather.com/'
self.temp = dict()
def get_weather_details(self, cities):
self.logger.info('Starting to get weather details from web url {}'.format(self.url))
try:
self.driver.get(self.url)
self.driver.implicitly_wait(10)
for ct in cities:
WebDriverWait(self.driver, 10).until(ec.element_to_be_clickable((By.XPATH,
"//input[@id='LocationSearch_input']"
))).click()
search_elem = self.driver.find_element_by_id('LocationSearch_input')
search_elem.clear()
search_elem.send_keys(ct)
WebDriverWait(self.driver, 10).until(ec.visibility_of_all_elements_located((By.ID,
"LocationSearch_listbox")))
time.sleep(5)
search_elem.send_keys(Keys.RETURN)
temp_elem = self.driver.find_element_by_xpath('//span[@data-testid="TemperatureValue"]')
temperature = temp_elem.text[:-1]
self.temp[ct] = temperature
self.logger.info(self.temp)
return self.temp
except Exception:
self.logger.error('Failed to access web url')
raise Exception("Failed to access web url")
finally:
self.logger.info('Going to close browser')
self.driver.quit()
|
import re
import nltk
import math
from pickle import load
from random import randint
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.models import load_model
from keras.callbacks import ModelCheckpoint, EarlyStopping
import matplotlib.pyplot as plt
from pickle import dump
from nltk.corpus import gutenberg
import numpy as np
np.set_printoptions(threshold=np.inf)
def generate_seq(model, tokenizer, max_length, seed_text, n_words):
in_text = seed_text
for _ in range(n_words):
encoded = tokenizer.texts_to_sequences([in_text])[0]
encoded = pad_sequences([encoded], maxlen=max_length, padding='pre')
yhat = model.predict_classes(encoded, verbose=0)
out_word = ''
for word, index in tokenizer.word_index.items():
if index == yhat:
out_word = word
break
in_text += ' ' + out_word
return in_text
model = load_model('weights_word.best.hdf5')
mapping = load(open('mapping_words.pkl', 'rb'))
print(generate_seq(model, mapping, 179, 'she did', 10))
|
#!/usr/bin/python
# Import the required modules
import cv2, os
import numpy as np
from PIL import Image
import Train_Common as tc
import sys, getopt
help_message = '''
USAGE: Train_Recognizer_LBP.py [--path <Path>] [--model-name <ModelName>] [--label-name <LabelName>]
'''
if __name__ == "__main__":
print (help_message)
args, opts = getopt.getopt(sys.argv[1:], '', ['path=', 'model-name=', 'label-name='])
    # For face recognition we will use the LBPH Face Recognizer
recognizer = cv2.createLBPHFaceRecognizer()
print "-----------------------------"
args = dict(args)
path = args.get('--path', "./FaceRecData_LBPH_Test")
ModelName = args.get('--model-name', "Model_default.yml")
LabelName = args.get('--label-name', "Label_default.txt")
format = ".png"
print "Gen list of all sample image.....\n"
tc.Gen_List_All_Sample_Image(path, format)
print "Gen list of label.....\n"
tc.Gen_List_Label(path, LabelName)
print "Assign image and label list.....\n"
images, labels = tc.get_images_and_labels_Assign_Format(path, format)
print "Train face reconition model.....\n"
recognizer = tc.TrainModel(recognizer, images, labels, ModelName)
|
'''
Usage:
test longestSubstrLen_3.py by using pytest
'''
import pytest
import os
import sys
# append parent path
sys.path.append(os.path.pardir)
from longestSubstrLen_3 import Solution
from longestSubstrLen_3 import Solution2
test_data = [
("abcabcbb", 3),
('bbbbbb', 1),
('c', 1),
('aab', 2),
('pwwkew', 3)
]
@pytest.mark.parametrize("strings,expected", test_data)
def test_solution(strings, expected):
cs = Solution()
assert cs.lengthOfLongestSubstring(strings) == expected
cs2 = Solution2()
assert cs2.lengthOfLongestSubstring(strings) == expected
|
#!/usr/bin/env python3
import numpy as np
import matplotlib
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from mpl_toolkits.mplot3d.art3d import Line3DCollection
def plot(graph, **kwargs):
"""
Plots a 2d or 3d graph given an FEA LatticeGraph object (the output of the fea function)
Arguments:
graph: FEA Lattice Graph to Plot
Named Arguments:
figure: The figure to plot on
cmap: color map (defaults to inferno)
x: specify variable name for X-axis
y: specify variable name for Y-axis
z: specify variable name for Z-axis
c: specify variable name for color-axis
vertices: Default True. Whether or not to plot vertices
edges: Default True. Whether or not to plot edges
Returns:
Tuple of matplotlib figure and plot
Example:
solution = fea(model, [x,y,z])
solfigure, solplot = plot(solution, x='x', y='y', z='z')
    solfigure.savefig('solution_figure.svg')
"""
figure=kwargs.get('figure',plt.figure())
cm=kwargs.get('cmap',matplotlib.cm.get_cmap('inferno'))
if kwargs.get('z',None) is not None:
plot=kwargs.get('plot',figure.add_subplot(111,projection='3d'))
plot.set_xlabel(str(kwargs.get('x','')))
plot.set_ylabel(str(kwargs.get('y','')))
plot.set_zlabel(str(kwargs.get('z','')))
dname=[str(kwargs['x']),str(kwargs['y']),str(kwargs['z'])]
else:
plot=kwargs.get('plot',figure.add_subplot(111))
plot.set_xlabel(str(kwargs.get('x','')))
plot.set_ylabel(str(kwargs.get('y','')))
dname=[str(kwargs.get('x','')),str(kwargs.get('y',''))]
dmat=np.zeros([len(dname),graph.N])
vnames=[str(v) for v in graph._variables]
if kwargs.get('c',None) in vnames:
c=None
cmat=np.zeros(graph.N)
cmat[vnames.index(kwargs['c'])]=1
else:
c=kwargs.get('c','k')
cmat=None
for i,n in enumerate(dname):
dmat[i,vnames.index(str(n))]=1
# Plot vertices (if desired)
if kwargs.get('vertices',True):
verts=list(graph.get_vertices(real=kwargs.get('real',True),complete=kwargs.get('complete',True)))
if c is None:
ccurr=[np.dot(cmat,v.point) for v in verts]
else:
ccurr=c
sc=plot.scatter(*np.transpose([np.dot(dmat,v.point) for v in verts]),c=ccurr,cmap=cm)
# Plot Edges (if desired)
if kwargs.get('edges',True):
edges=list(graph.get_nodes_of_level(1,real=kwargs.get('real',True),complete=kwargs.get('complete',True)))
edge_collection=[]
if c is None:
ccurr=[]
else:
ccurr=c
for e in edges:
verts=list(graph.get_vertices(e,real=kwargs.get('real',True),complete=kwargs.get('complete',True)))
if len(verts)==2:
if c is None:
dat_range=np.array([np.dot(dmat,v.point) for v in verts])
c_range=np.array([np.dot(cmat,v.point) for v in verts])
steps=50
points=np.array([np.linspace(r[0],r[1],steps) for r in dat_range.T]).T.reshape(-1,1,len(dname))
segs=np.concatenate([points[:-1],points[1:]], axis=1)
edge_collection.extend(list(segs))
ccurr.extend(np.linspace(c_range[0],c_range[1],steps-1))
else:
edge_collection.append([np.dot(dmat,v.point) for v in verts])
ax = plot.axes
if c is None:
if len(dname)==3:
lc=Line3DCollection(edge_collection, array=np.array(ccurr),cmap=cm)
ax.add_collection3d(lc)
else:
lc=LineCollection(np.array(edge_collection), array=np.array(ccurr),cmap=cm)
ax.add_collection(lc)
else:
if len(dname)==3:
lc=Line3DCollection(np.array(edge_collection))
ax.add_collection3d(lc)
else:
lc=LineCollection(np.array(edge_collection))
ax.add_collection(lc)
return figure,plot
# def fix_clims(f,p,bounds=None,mul=[1,1]):
# f.show()
# if bounds is None:
# print([c.get_clim() for c in p.collections])
# lims=np.array([c.get_clim() for c in p.collections])
# bounds=[min((i for i in lims.T[0] if i is not None)),max((i for i in lims.T[1] if i is not None))]
# bounds=np.multiply(bounds,mul)
# for c in p.collections:
# c.set_clim(*bounds)
# f.colorbar(p.collections[0])
# return bounds
__exports__ = [plot]
|
from django import template
register = template.Library()
@register.simple_tag(takes_context=True)
def set_global_context(context, key, value):
context.dicts[0][key] = value
return ''
|
__author__ = "Narwhale"
import string
# def func(name):
# return name.title()
#
# assert func("lilei") #title
# assert func("hanmeimei")
# assert func("Hanmeimei")
# def func(name,callback=None):
# if callback == None:
# return name.title()
# else:
# return callback(name)
#
# def ff(name):
# return name.lower()
# def f(name):
# return name.upper()
#
#
# print(func("LILEI",callback=ff) == "lilei")
# assert func("lilei") == "Lilei"
# assert func("LILEI",callback=ff) == "lilei"
# assert func("lilei",callback=f)== "LILEI"
def getitem(*kargs):
return kargs
l = getitem(5,3,4,5,6)
print(l)
|
from panda3d.core import Vec3
from .ObjectProperty import ObjectProperty
class TransformProperty(ObjectProperty):
def __init__(self, mapObject):
ObjectProperty.__init__(self, mapObject)
self.valueType = "vec3"
self.defaultValue = Vec3(0)
self.value = self.defaultValue
self.group = "Transform"
class OriginProperty(TransformProperty):
def __init__(self, mapObject):
TransformProperty.__init__(self, mapObject)
self.name = "origin"
def clone(self, mapObject):
prop = OriginProperty(mapObject)
self.copyBase(prop)
return prop
def getDisplayName(self):
return "Origin"
def getDescription(self):
return "Translational origin of the object."
def setValue(self, value):
TransformProperty.setValue(self, value)
self.mapObject.setOrigin(value)
def getValue(self):
return self.mapObject.getOrigin()
class AnglesProperty(TransformProperty):
def __init__(self, mapObject):
TransformProperty.__init__(self, mapObject)
self.name = "angles"
def clone(self, mapObject):
prop = AnglesProperty(mapObject)
self.copyBase(prop)
return prop
def getDisplayName(self):
return "Angles (Yaw Pitch Roll)"
def getDescription(self):
return "Orientation of the object, expressed in yaw/pitch/roll Euler angles."
def setValue(self, value):
TransformProperty.setValue(self, value)
self.mapObject.setAngles(value)
def getValue(self):
return self.mapObject.getAngles()
class ScaleProperty(TransformProperty):
def __init__(self, mapObject):
TransformProperty.__init__(self, mapObject)
self.defaultValue = Vec3(1)
self.name = "scale"
def clone(self, mapObject):
prop = ScaleProperty(mapObject)
self.copyBase(prop)
return prop
def getDisplayName(self):
return "Scale"
def getDescription(self):
return "Scale of the object. 0 is invalid"
def getMinValue(self):
# We can't have a scale of 0
return 0.00001
def testMinValue(self, value):
minVal = self.getMinValue()
return value.x >= minVal and value.y >= minVal and value.z >= minVal
def setValue(self, value):
TransformProperty.setValue(self, value)
self.mapObject.setScale(value)
def getValue(self):
return self.mapObject.getScale()
class ShearProperty(TransformProperty):
def __init__(self, mapObject):
TransformProperty.__init__(self, mapObject)
self.name = "shear"
def clone(self, mapObject):
prop = ShearProperty(mapObject)
self.copyBase(prop)
return prop
def getDisplayName(self):
return "Shear"
def getDescription(self):
return "Shear/skew of the object."
def setValue(self, value):
TransformProperty.setValue(self, value)
self.mapObject.setShear(value)
def getValue(self):
return self.mapObject.getShear()
|
'''
Created on Jan 24, 2016
@author: Andrei Padnevici
@note: This is an exercise: 9.1
'''
file = open("romeo.txt")
wordsDict = dict()
for line in file:
words = line.split()
for word in words:
wordsDict[word] = wordsDict.get(word, 0)
print("window: ","window"in wordsDict)
print("wdsfsindow: ","wdsfsindow"in wordsDict)
print(wordsDict)
|
# Submitter: loganw1(Wang, Logan)
# Defined below is a special exception for use with the Graph class methods
# Use it like any exception: e.g., raise GraphError('Graph.method" ...error indication...')
class GraphError(Exception):
pass # Inherit all methods, including __init__
class Graph:
# HELPER METHODS: used for checking legal arguments to methods below
def legal_tuple2(self, t):
return type(t) is tuple and len(t) == 2 and \
type(t[0]) is str and type(t[1]) is str
def legal_tuple3(self, t):
return type(t) is tuple and len(t) == 3 and \
type(t[0]) is str and type(t[1]) is str and self.is_legal_edge_value(t[2])
# __str__ and many bsc tests use the name self.edges for the outer/inner-dict.
# So __init__ should use self.edges for the name for this dictionary
# self should store NO other attributes: compute all results from self.edges ONLY
# Each value in an edges tuple represents either a
# (a) str : origin node, or
# (b) 3-tuple: (origin node, destination node, edge value)
def __init__(self, legal_edge_value_predicate, *edges):
self.__setattr__.__dict__['initialization_done'] = False
self.is_legal_edge_value = legal_edge_value_predicate
d = dict()
# constructs pieces for all keys in outer dictionary, does not include outlier edges
for edge in edges:
if self.legal_tuple3(edge) or type(edge) == str:
if type(edge) == str:
if edge not in d:
d[edge] = dict()
else:
raise GraphError('outlier edge already has existing dictionary')
            else:  # assumes that if it is not an outlier edge, it must be a valid 3-tuple
if edge[0] not in d:
try:
if self.is_legal_edge_value(edge[2]):
d[edge[0]] = dict()
d[edge[0]][edge[1]] = edge[2]
if edge[1] not in d.keys():
d[edge[1]] = dict()
else:
raise GraphError('Graph.__init__: value is not a valid value: ' + str(edge[2]))
except:
raise GraphError('Graph.__init__: value is not a valid value: ' + str(edge[2]))
else:
if edge[1] in d[edge[0]]:
raise GraphError('Start/Destination pair already exists')
else:
try:
if self.is_legal_edge_value(edge[2]):
d[edge[0]][edge[1]] = edge[2]
if edge[1] not in d.keys():
d[edge[1]] = dict()
else:
raise GraphError('Graph.__init__: value is not a valid value: ' + str(edge[2]))
except:
raise GraphError('Graph.__init__: value is not a valid value: ' + str(edge[2]))
else:
raise GraphError('graph.py __init__: invalid edge type or invalid tuple: ' + str(type(edge)))
# lots of complexity but catches any edges involved in tuples that did not act as a starting point
edges_missed = []
for outer_val in d.values():
for inner_val in outer_val.keys():
if inner_val not in d.keys():
edges_missed.append(inner_val)
for i in edges_missed:
d[i] = dict()
self.edges = d
self.__setattr__.__dict__['initialization_done'] = True
# Put all other methods here
def __str__(self):
s = '\nGraph:\n'
for edge, endpoints in sorted([(x, y) for x, y in self.edges.items()]):
s += ' ' + str(edge) + ': '
for key, val in sorted([(u, z) for u, z in endpoints.items()]):
s += str(key) + '(' + str(val) + '), '
if len(endpoints) > 0:
s = s[:-2]
else:
s = s[:-1]
s += '\n'
s = s[:-1]
return s
def __getitem__(self, item):
        if item is None:
raise GraphError('Graph.__getitem__: item is not valid type to look up:' + str(type(item)))
if type(item) == str or self.legal_tuple2(item):
if type(item) == str:
if item in self.edges:
return self.edges[item]
else:
raise GraphError('Graph.__getitem__: ' + item + 'cannot be found in self.edges:' + str(self.edges))
else:
if item[0] in self.edges:
if item[1] in self.edges[item[0]]:
return self.edges[item[0]][item[1]]
else:
raise GraphError(
'Graph.__getitem__: ' + str(item[1]) + 'cannot be found in self.edges[' + str(
item[0]) + ']: ' + str(self.edges[item[0]]))
else:
raise GraphError(
'Graph.__getitem__: ' + str(item[0]) + 'cannot be found in self.edges:' + str(self.edges))
else:
raise GraphError('Graph.__getitem__: item is not valid type to look up:' + str(type(item)))
def __setitem__(self, key, value):
if self.legal_tuple2(key):
if key[0] in self.edges.keys():
if key[1] in self.edges.keys():
if self.is_legal_edge_value(value):
self.edges[key[0]][key[1]] = value
else:
raise GraphError('Graph.__setitem__: value is not a valid value: ' + str(value))
else:
if self.is_legal_edge_value(value):
self.edges[key[0]][key[1]] = value
self.edges[key[1]] = dict() # updating edges to include new point
else:
raise GraphError('Graph.__setitem__: value is not a valid value: ' + str(value))
else:
if self.is_legal_edge_value(value):
self.edges[key[0]] = dict()
self.edges[key[0]][key[1]] = value
if key[1] not in self.edges.keys():
self.edges[key[1]] = dict()
else:
raise GraphError('Graph.__setitem__: value is not a valid value: ' + str(value))
else:
raise GraphError('Graph.__setitem__: key is not a valid 2 tuple: ' + str(key))
def node_count(self):
return len(self.edges)
def __len__(self):
sum = 0
for edge in self.edges.values():
sum += len(edge)
return sum
def out_degree(self, node):
        if node is None:
raise GraphError('Graph.out_degree(): argument is of type None')
if type(node) == str and node in self.edges.keys():
return len(self.edges[node])
else:
raise GraphError('Graph.out_degree(): argument(' + str(
node) + ') is either not of type String or cannot be found in self.edges: ' + str(self.edges))
def in_degree(self, node):
        if node is None:
raise GraphError('Graph.in_degree(): argument is of type None')
if type(node) == str and node in self.edges.keys():
sum = 0
for edge in self.edges.values():
if node in edge:
sum += 1
return sum
else:
raise GraphError('Graph.in_degree(): argument(' + str(
node) + ') is either not of type String or cannot be found in self.edges: ' + str(self.edges))
def __contains__(self, item):
if type(item) not in (str, tuple):
raise GraphError(
'Graph.__contains__(): item(' + str(item) + ') is of invalid type, must be String or Tuple')
if type(item) == str:
if item in self.edges.keys():
return True
else:
return False
elif self.legal_tuple2(item):
if item[0] in self.edges.keys():
if item[1] in self.edges[item[0]].keys():
return True
else:
return False
else:
return False
elif self.legal_tuple3(item):
if item[0] in self.edges.keys():
if item[1] in self.edges[item[0]].keys():
if item[2] == self.edges[item[0]][item[1]]:
return True
else:
return False
else:
return False
else:
return False
else:
raise GraphError(
'Graph.__contains__(): item(' + str(item) + ') is of invalid type, must be String or Tuple')
def __delitem__(self, item):
        if type(item) not in (str, tuple):
            raise GraphError(
                'Graph.__delitem__(): item(' + str(item) + ') is of invalid type, must be String or Tuple')
if type(item) == str:
if item in self.edges:
                del (self.edges[item])  # deletes the outer node
for node, edges in self.edges.items():
d = dict()
for dest, val in edges.items():
if item != dest:
d[dest] = val
self.edges[node] = d
elif self.legal_tuple2(item):
if item[0] in self.edges.keys():
if item[1] in self.edges[item[0]].keys():
del (self.edges[item[0]][item[1]])
                else:
                    raise GraphError(
                        'Graph.__delitem__(): edge (' + str(item) + ') cannot be found in the graph')
def __call__(self, d):
if type(d) != str:
raise GraphError(
'Graph.__call__(): d(' + str(d) + ') is of invalid type, must be String')
if d in self.edges.keys():
temp_d = dict()
for node, value in self.edges.items():
if d in value.keys():
temp_d[node] = value[d]
return temp_d
else:
raise GraphError(
'Graph.__call__(): d(' + str(d) + ') is not a node in self.edges: ' + str(self.edges))
def clear(self):
l = [str(x) for x in self.edges.keys()]
for i in l:
del (self.edges[i])
# self.edges = dict()
def dump(self, open_file, sep=':', fnctn=str):
l = []
for edge, endpoints in sorted([(x, y) for x, y in self.edges.items()]):
s = fnctn(edge) + str(sep)
for key, val in sorted([(u, z) for u, z in endpoints.items()]):
s += fnctn(key) + str(sep) + fnctn(val) + str(sep)
s = s[:-1] + '\n'
l.append(s)
open_file.writelines(l)
open_file.close()
def load(self, open_file, sep=':', fnctn=int):
l = []
d = dict()
for text in open_file:
point_edge_split_text = text.strip().split(sep, 1)
if len(point_edge_split_text) > 1:
point, edges = point_edge_split_text
l.append((point, edges))
d[point] = dict()
else:
d[text.strip()] = dict()
for point, edges in l:
i = 0
s = edges.split(sep)
for p in range(0, len(s), 2):
d[point][s[i]] = fnctn(s[i + 1])
i += 2
self.clear()
for point, edges in d.items():
self.edges[point] = edges
open_file.close()
def reverse(self):
reverse_g = Graph(self.is_legal_edge_value)
for point, values in self.edges.items():
if len(values.items()) == 0:
if point not in reverse_g.edges.keys():
reverse_g.edges[point] = dict()
for dest, value in values.items():
reverse_g.__setitem__((dest, point), value)
return reverse_g
def natural_subgraph(self, *nodes):
g = Graph(self.is_legal_edge_value)
l=[]
for node in nodes:
if type(node) != str:
raise GraphError('Graph.natural_subgraph(): argument: ' + str(node) + ' is not of type String')
for key in nodes:
if key in self.edges.keys():
g.edges[key] = dict()
for point,edges in self.edges.items():
if point in nodes:
for edge,value in edges.items():
if edge in nodes:
g[point][edge] = value
return g
def __iter__(self):
def gen(l):
for item in l:
yield item
l = []
for edge, endpoints in sorted([(x, y) for x, y in self.edges.items()]):
if len(endpoints.items()) == 0:
r = False
for p, v in self.edges.items():
if edge in v.keys():
r = True
if r == False:
l.append(edge)
for key, val in sorted([(u, z) for u, z in endpoints.items()]):
l.append((edge, key, val))
return gen(l)
def __eq__(self, compareObj: 'Graph') -> bool:
if type(compareObj) == Graph:
if len(self.edges.keys()) == len(compareObj.edges.keys()):
for point, edges in self.edges.items():
if edges != compareObj.edges[point]:
return False
return True
else:
return False
else:
return False
def __le__(self, compareObj: 'Graph') -> bool:
if type(compareObj) == Graph:
for point, edges in self.edges.items():
for dest, val in edges.items():
if val != compareObj.edges[point][dest]:
return False
if len(edges.items()) == 0:
if point not in compareObj.edges.keys():
return False
return True
else:
return False # maybe raise Graph exception instead
def __setattr__(self, name, value):
if self.__setattr__.initialization_done:
raise AssertionError('Graph.__setattr__: Cannot assign or rebind attributes after __init__')
else:
self.__dict__[name] = value
if name == 'edges':
self.__setattr__.__dict__['initialization_done'] = True
def make_graph_copy(self):
d = dict()
for point,edges in self.edges.items():
d[point] = dict()
for edge,value in edges.items():
d[point][edge]=value
g = Graph(self.is_legal_edge_value)
for point, edges in d.items():
g.edges[point] = edges
return g #returns new instance of exact replica of current Graph
def __add__(self, other):
if type(other) not in (Graph,str,tuple):
raise GraphError('Graph.__add__: item being added not of type Graph,str, or tuple')
if type(other) == Graph:
g1 = self.make_graph_copy()
g2 = other.make_graph_copy()
for point,edges in g1.edges.items():
if point not in g2.edges.keys():
g2.edges[point] = dict()
for edge,value in edges.items():
g2.edges[point][edge] = value
return g2
elif type(other) == str:
g = self.make_graph_copy()
if other not in self.edges.keys():
g.edges[other] = dict()
return g
elif self.legal_tuple3(other):
g = self.make_graph_copy()
if other[0] in self.edges.keys():
if self.is_legal_edge_value(other[2]):
g.edges[other[0]][other[1]] = other[2]
if other[1] not in g.edges.keys():
g.edges[other[1]] = dict()
return g
else:
raise GraphError('Graph.__add__: value being added/updated: ' + str(other[2]) +' is not valid')
else:
g.edges[other[0]] = dict()
if self.is_legal_edge_value(other[2]):
g.edges[other[0]][other[1]] = other[2]
if other[1] not in g.edges.keys():
g.edges[other[1]] = dict()
return g
else:
raise GraphError('Graph.__add__: value being added/updated: ' + str(other[2]) + ' is not valid')
else:
raise GraphError('Graph.__add__: item being added not of type Graph,str, or legal 3 tuple')
def __radd__(self, other):
if type(other) not in (str,tuple):
raise GraphError('Graph.__add__: item being added not of type Graph,str, or tuple')
if type(other) == str:
g = self.make_graph_copy()
if other not in self.edges.keys():
g.edges[other] = dict()
return g
elif self.legal_tuple3(other):
g = self.make_graph_copy()
if other[0] in self.edges.keys():
if self.is_legal_edge_value(other[2]):
g.edges[other[0]][other[1]] = other[2]
if other[1] not in g.edges.keys():
g.edges[other[1]] = dict()
return g
else:
raise GraphError('Graph.__add__: value being added/updated: ' + str(other[2]) +' is not valid')
else:
g.edges[other[0]] = dict()
if self.is_legal_edge_value(other[2]):
g.edges[other[0]][other[1]] = other[2]
if other[1] not in g.edges.keys():
g.edges[other[1]] = dict()
return g
else:
raise GraphError('Graph.__add__: value being added/updated: ' + str(other[2]) + ' is not valid')
else:
raise GraphError('Graph.__add__: item being added not of type Graph,str, or legal 3 tuple')
def __iadd__(self, other):
if type(other) not in (Graph,str,tuple):
raise GraphError('Graph.__add__: item being added not of type Graph,str, or tuple')
if type(other) == Graph:
g1 = self.make_graph_copy()
g2 = other.make_graph_copy()
for point,edges in g1.edges.items():
if point not in g2.edges.keys():
g2.edges[point] = dict()
for edge,value in edges.items():
g2.edges[point][edge] = value
self.clear()
for point, edges in g2.edges.items():
self.edges[point] = edges
return self
elif type(other) == str:
if other not in self.edges.keys():
self.edges[other] = dict()
return self
elif self.legal_tuple3(other):
if other[0] in self.edges.keys():
if self.is_legal_edge_value(other[2]):
self.edges[other[0]][other[1]] = other[2]
if other[1] not in self.edges.keys():
self.edges[other[1]] = dict()
return self
else:
raise GraphError('Graph.__add__: value being added/updated: ' + str(other[2]) +' is not valid')
else:
self.edges[other[0]] = dict()
if self.is_legal_edge_value(other[2]):
self.edges[other[0]][other[1]] = other[2]
if other[1] not in self.edges.keys():
self.edges[other[1]] = dict()
return self
else:
raise GraphError('Graph.__add__: value being added/updated: ' + str(other[2]) + ' is not valid')
else:
raise GraphError('Graph.__add__: item being added not of type Graph,str, or legal 3 tuple')
if __name__ == '__main__':
# Simple tests before running driver
# Put your own test code here to test DictList before doing bsc tests
print('Start simple testing')
g = Graph((lambda x: type(x) is int), ('a', 'b', 1), ('a', 'c', 3), ('b', 'a', 2), ('d', 'b', 2), ('d', 'c', 1),
'e')
print(g)
print(g['a'])
print(g['a', 'b'])
print(g.node_count())
print(len(g))
print(g.out_degree('c'))
print(g.in_degree('a'))
print('c' in g)
print(('a', 'b') in g)
print(('a', 'b', 1) in g)
print(g('c'))
print(g.reverse())
print(g.natural_subgraph('a','b','c'))
print()
import driver
driver.default_file_name = 'bscp22W21.txt'
# driver.default_show_exception = True
# driver.default_show_exception_message = True
# driver.default_show_traceback = True
driver.driver()
|
from .midi_csv import midicsv, csvmidi
|
from django.apps import AppConfig
class GamerConfig(AppConfig):
name = 'Gamer'
|
import graphene
from copy import deepcopy
from graphene import relay
from graphene_django.filter import DjangoFilterConnectionField
from graphene_django.types import DjangoObjectType
from graphql import GraphQLError
from graphql_relay import from_global_id
from django.db import IntegrityError
from api.models import Interest, Category, AndelaUserProfile
class InterestNode(DjangoObjectType):
class Meta:
model = Interest
filter_fields = {}
interfaces = (relay.Node,)
class JoinCategory(relay.ClientIDMutation):
"""Join a category"""
class Input:
categories = graphene.List(graphene.ID)
joined_category_list = graphene.List(InterestNode)
@classmethod
def mutate_and_get_payload(cls, root, info, **input):
"""
create bulk category and add category for user
Args:
root(dict): root query field data
info(dict): authentication and user information
input(dict): the request input sent by the user
Returns:
return bulk category created
"""
category_id_list = [category for category in input.pop('categories')]
user = AndelaUserProfile.objects.get(user=info.context.user)
user_category_list = [Category.objects.get(pk=from_global_id(category_id)[1])
for category_id in category_id_list]
try:
joined_category_list = []
for user_category in user_category_list:
joined_category = Interest(follower=user, follower_category=user_category)
joined_category_list.append(joined_category)
Interest.objects.bulk_create(joined_category_list)
except IntegrityError:
raise GraphQLError(
'You have previously added an interest. Please try again'
)
return JoinCategory(joined_category_list=joined_category_list)
class UnJoinCategory(relay.ClientIDMutation):
"""Unsubscribe from a category"""
class Input:
categories = graphene.List(graphene.ID)
unjoined_categories = graphene.List(InterestNode)
@classmethod
def mutate_and_get_payload(cls, root, info, **input):
"""
        remove a user category/interest
Args:
root(dict): root query field data
info(dict): authentication and user information
input(dict): the request input sent by the user
Returns: UnJoinCategory method
"""
categories = input.get('categories')
user = AndelaUserProfile.objects.get(user=info.context.user)
categories = list(map(lambda category_id: from_global_id(category_id)[1], categories))
unjoined_categories_qs = Interest.objects.filter(
follower_category_id__in=categories,
follower_id=user.id
)
unjoined_categories = deepcopy(unjoined_categories_qs)
if not unjoined_categories:
raise GraphQLError(
"Oops. We were not able to find some of the interests you are trying to remove")
unjoined_categories_qs.delete()
return UnJoinCategory(unjoined_categories=unjoined_categories)
class InterestQuery(object):
"""
Handle interest queries
"""
interest = relay.Node.Field(InterestNode)
interests_list = DjangoFilterConnectionField(InterestNode)
joined_categories = graphene.List(InterestNode)
def resolve_joined_categories(self, info):
"""
resolve user interest/categories
Args:
info(dict): authentication and user information
root(dict): root query field data
input(dict): the request input sent by the user
Returns: return user interests
"""
        user = info.context.user
        andela_user_profile = AndelaUserProfile.objects.get(user_id=user.id)
return Interest.objects.filter(
follower_id=andela_user_profile.id).all()
class InterestMutation(graphene.ObjectType):
""" Handles user mutation"""
join_category = JoinCategory.Field()
unjoin_category = UnJoinCategory.Field()
|
import random
print("WELCOME TO THE GAME OF ROCK , PAPER AND SCISSORS..............")
print("THE RULES ARE AS FOLLOWS::")
print("ROCK BLUNTS SCISSORS,PAPER COVERS ROCKS,SCISSORS CUTS PAPER")
print("WOULD YOU LIKE TO PLAY rock , paper , scissor?(y/n)")
reply=input()
while(reply=="y"):
player_score = 0
computer_score = 0
print("HOW MANY TIMES YOU WANT TO PLAY")
n = int(input())
for i in range (0,n):
player_choice = input("ENTER YOUR CHOICE OF rock,paper or scissor ")
print ("YOUR CHOICE = ", player_choice)
computer_choice = random.randint(1,3)
if (computer_choice == 1):
computer = "rock"
elif (computer_choice == 2):
computer = "paper"
elif(computer_choice == 3):
computer = "scissors"
print("COMPUTER CHOICE = ",computer)
if(player_choice == computer):
print("tie")
print("PLAYER SCORE = ",player_score)
print("COMPUTER SCORE = ",computer_score)
elif(player_choice == "rock"):
if (computer == "paper"):
print("COMPUTER WINS")
computer_score +=1
else:
print("YOU WIN")
player_score +=1
print("PLAYER SCORE = ",player_score)
print("COMPUTER SCORE = ",computer_score)
elif(player_choice == "paper"):
if (computer == "rock"):
print("YOU WIN")
player_score +=1
else:
print("COMPUTER WINS")
computer_score +=1
print("PLAYER SCORE = ",player_score)
print("COMPUTER SCORE = ",computer_score)
elif(player_choice == "scissor"):
if (computer == "rock"):
print("COMPUTER WINS")
computer_score +=1
else:
print("YOU WIN")
player_score +=1
print("PLAYER SCORE = ",player_score)
print("COMPUTER SCORE = ",computer_score)
if (player_score>computer_score):
print(" ######################## FINALLY YOU WON ##################### ")
elif(player_score==computer_score):
print(" ###################### THE MATCH IS FINALLY TIE ##################### ")
else:
print(" ###################### THE COMPUTER WON ####################### ")
print("WOULD YOU LIKE TO PLAY rock, paper, scissors? y/n")
reply=input()
if(reply=="n"):
print("****************************** THANKS FOR PLAYING *****************************")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 3 04:58:22 2019
@author: kanchana
"""
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,confusion_matrix, classification_report
import warnings
warnings.filterwarnings("ignore")
dataR2 = pd.read_csv('dataR2.csv')
X = dataR2.iloc[:,0:-1]
y = dataR2.iloc[:,-1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=123)
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2,weights='distance')
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
print("Confusion Matrix")
print(confusion_matrix(y_test, y_pred))
print("Accuracy score: %f" %(accuracy_score(y_test, y_pred)))
print('-------------------------------------------------------')
target_names = ['Controls', 'Patients']
print(classification_report(y_test, y_pred, target_names=target_names))
|
# Dependencies
import json
import requests as req
# Save config information
api_key = "25bc90a1196e6f153eece0bc0b0fc9eb"
url = "http://api.openweathermap.org/data/2.5/weather?"
city = "London"
# Build query URL
query_url = url + "appid=" + api_key + "&q=" + city
# Get weather data
weather_response = req.get(query_url)
weather_json = weather_response.json()
# Get the temperature from the response
print("The weather API responded with: " + str(weather_json) + ".")
|
import os
import urllib2
import cookielib
import re
import htmlentitydefs
import codecs
import time
from BeautifulSoup import BeautifulSoup
print 'a'
URL_REQUEST_DELAY = 1
BASE = 'http://www.nytimes.com'
TXDATA = None
TXHEADERS = {'User-agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
OUTPUT_FILE = 'nyt_articles.txt'
def request_url(url, txdata, txheaders):
"""Gets a webpage's HTML."""
req = Request(url, txdata, txheaders)
handle = urlopen(req)
html = handle.read()
return html
def remove_html_tags(data):
"""Removes HTML tags"""
    p = re.compile(r'<.*?>')
return p.sub('', data)
def unescape(text):
"""
Converts HTML character codes to Unicode code points.
@param text the HTML (or XML) source text in any encoding.
@return The plain text, as a Unicode string, if necessary.
"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text
return re.sub("&#?\w+;", fixup, text)
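# Illustrative example (not executed here): in this Python 2 module,
#   unescape(u'&copy; 2013 &#8212; caf&eacute;')
# would return u'\xa9 2013 \u2014 caf\xe9', i.e. the named and numeric HTML
# entities resolved to their Unicode characters.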
urlopen = urllib2.urlopen
Request = urllib2.Request
# Install cookie jar in opener for fetching URL
cookiejar = cookielib.LWPCookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))
urllib2.install_opener(opener)
html = request_url('http://global.nytimes.com/', TXDATA, TXHEADERS)
# Use BeautifulSoup to easily navigate HTML tree
soup = BeautifulSoup(html)
# Retrieves html from each url on NYT Global homepage under "story" divs
# with h2, h3, or h5 headlines
urls = []
for story in soup.findAll('article', {'class': 'story'}):
# print story
for hTag in story.findAll({'h2': True, 'h3': True, 'h5': True}, recursive=False):
if hTag.find('a'):
urls.append(hTag.find('a')['href'])
# print "urls: " + str(urls)
# Removes URLs that aren't news articles.
# Create a copy of list b/c
# you can't modify a list while iterating over it.
for url in urls[:]:
if not url.startswith(BASE):
urls.remove(url)
# print "urls: " + str(urls)
# Extracts headline, byline, dateline and content; outputs to file
if os.path.exists(OUTPUT_FILE):
os.remove(OUTPUT_FILE)
output = codecs.open(OUTPUT_FILE, 'a', 'utf-8')
counter = 0
for url in urls:
print url
content = ''
html = request_url(url, TXDATA, TXHEADERS)
html = unicode(html, 'utf-8')
if (counter == 0):
output.write(html)
counter += 1
soup = BeautifulSoup(html)
# Gets HTML from single page link if article is > 1 page
# if soup.find('li', {'class': 'singlePage'}):
# single = soup.find('li', {'class': 'singlePage'})
# html = request_url(BASE + single.find('a')['href'], TXDATA, TXHEADERS)
# html = unicode(html, 'utf-8')
# soup = BeautifulSoup(html)
# # print html
if not soup.find('header'):
continue
    headline = soup.find(attrs={'class': 'story-header'}).renderContents()
print headline
output.write(unicode(headline + "\n", 'utf-8'))
byline = soup.find('byline').find('h6').renderContents()
byline = remove_html_tags(byline)
print byline
output.write(unicode(byline + "\n", 'utf-8'))
dateline = soup.find('h6', {'class': 'dateline'}).renderContents()
output.write(unicode(dateline, 'utf-8'))
for p in soup.findAll('p', {'class': None, 'style': None}):
# Removes potential ad at the bottom of the page.
if p.findParents('div', {'class': 'singleAd'}):
continue
# Prevents contents of nested <p> tags from being printed twice.
if p.findParents('div', {'class': 'authorIdentification'}):
continue
content = content + "\n\n" + p.renderContents().strip()
content = remove_html_tags(content)
content = re.sub(" +", " ", content)
content = unescape(unicode(content, 'utf-8'))
content = content + "\n\n\n\n"
output.write(content)
time.sleep(URL_REQUEST_DELAY)
output.close()
|
import unittest
from katas.kyu_8.find_maximum_and_minimum_values_of_a_list import min, max
class MinMaxOfListTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(min([-52, 56, 30, 29, -54, 0, -110]), -110)
def test_equal_2(self):
self.assertEqual(min([42, 54, 65, 87, 0]), 0)
def test_equal_3(self):
self.assertEqual(min([1, 2, 3, 4, 5, 10]), 1)
def test_equal_4(self):
self.assertEqual(min([-1, -2, -3, -4, -5, -10]), -10)
def test_equal_5(self):
self.assertEqual(min([9]), 9)
def test_equal_6(self):
self.assertEqual(max([-52, 56, 30, 29, -54, 0, -110]), 56)
def test_equal_7(self):
self.assertEqual(max([4, 6, 2, 1, 9, 63, -134, 566]), 566)
def test_equal_8(self):
self.assertEqual(max([5]), 5)
def test_equal_9(self):
self.assertEqual(
max([534, 43, 2, 1, 3, 4, 5, 5, 443, 443, 555, 555]), 555
)
def test_equal_10(self):
self.assertEqual(max([9]), 9)
|
from django.shortcuts import render  # render: renders a template with a context
from django.http import HttpResponse
from . import models
# Create your views here.
def index(request):
articles = models.Article.objects.all()
return render(request,'blog/index.html',{'articles':articles}) #HttpResponse('hello world!')
    #{'hello':'hello wonderful world! guess why, it is that simple'}  # a dict like this can be passed as the third (context) argument
def article_page(request,article_id):
article = models.Article.objects.get(pk=article_id)
return render(request,'blog/article_page.html',{'article':article})
def edit_page(request,article_id):
if str(article_id)=='0':
return render(request,'blog/edit_page.html')
article = models.Article.objects.get(pk=article_id)
return render(request,'blog/edit_page.html',{'article':article})
def edit_action(request):
title = request.POST.get('title','TITLE')
content = request.POST.get('content','CONTENT')
article_id = request.POST.get('article_id','0')
if article_id == '0':
models.Article.objects.create(title=title, content=content)
articles = models.Article.objects.all()
return render(request,'blog/index.html',{'articles':articles})
article = models.Article.objects.get(pk=article_id)
article.title = title
article.content=content
article.save()
return render(request,'blog/article_page.html',{'article':article})
|
def calPrize(mydice):
    # Prize rules for four dice (values sorted ascending):
    # four of a kind, three of a kind, two pairs, one pair, or all different.
    sorted_dice = sorted(mydice)
    count = len(set(sorted_dice))
    if count == 1:  # four of a kind
        return 50000 + sorted_dice[0] * 5000
    elif count == 2:
        if sorted_dice[1] == sorted_dice[2]:  # three of a kind
            return 10000 + sorted_dice[1] * 1000
        return 2000 + sorted_dice[1] * 500 + sorted_dice[2] * 500  # two pairs
    elif count == 3:  # exactly one pair
        if sorted_dice[0] == sorted_dice[1] or sorted_dice[1] == sorted_dice[2]:
            return 1000 + sorted_dice[1] * 100
        return 1000 + sorted_dice[2] * 100
    else:  # all four dice differ
        return max(sorted_dice) * 100
N = int(input())
prizes = []
for n in range(N):
prize = calPrize([int(x) for x in input().split()])
prizes.append(prize)
print(max(prizes))
|
import pygame as pg
from asset import FLOOR, get_sprite, get_player_sprites, OHNOES1, OHNOES2, get_light_halo
from config import SCREEN_HEIGHT, SCREEN_WIDTH, TILE_WIDTH, TILE_HEIGHT, PLAYER_WIDTH, PLAYER_HEIGHT
from events import schedule_event
from screen import Screen
class DefeatScreen(Screen):
def __init__(self):
self.step = 0
schedule_event(lambda: self.set_step(1), 8, oneshot=True)
schedule_event(lambda: self.set_step(2), 13, oneshot=True)
schedule_event(lambda: self.set_step(3), 21, oneshot=True)
self.light = 128
def set_step(self, step):
if step == 3:
schedule_event(self.decrease_light, 1, oneshot=False)
else:
self.step = step
def decrease_light(self):
if self.light > 8:
self.light -= 8
def draw(self, screen: pg.Surface, clock: pg.time.Clock):
cx, cy = SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2
for i in range(-2, 3):
for j in range(-2, 3):
screen.blit(get_sprite(FLOOR), (cx + i*TILE_WIDTH, cy + j * TILE_HEIGHT))
if self.step == 1:
screen.blit(get_sprite(OHNOES1), (cx - TILE_WIDTH // 2, cy - TILE_HEIGHT // 2))
if self.step == 2:
screen.blit(get_sprite(OHNOES2), (cx - TILE_WIDTH // 2, cy - TILE_HEIGHT // 2))
screen.blit(get_player_sprites()[3][0], (cx, cy))
light_mask = pg.Surface((screen.get_width(), screen.get_height()), flags=pg.SRCALPHA)
light_mask.fill(pg.Color(0, 0, 0))
halo = get_light_halo(self.light)
light_mask.blit(halo, (cx - self.light + PLAYER_WIDTH // 2,
cy - self.light + PLAYER_HEIGHT // 2),
special_flags=pg.BLEND_RGBA_MIN)
screen.blit(light_mask, (0, 0))
if self.light <= 8:
from title_screen.title_screen import TitleScreen
return TitleScreen
|
from turtle import *
color('red')
for i in range(4):
if i == 2:
left(120)
else:
left(60)
fd(100)
left(60)
for i in range (4):
if i == 0:
left(30)
elif i == 2:
right(120)
else:
right(60)
fd(100)
left(30)
for i in range (4):
if i == 0:
right(30)
elif i == 2:
left(120)
else:
left(60)
fd(100)
right(120)
for i in range (4):
if i == 0:
right(30)
elif i == 2:
left(120)
else:
left(60)
fd(100)
mainloop()
|
import win32com
import win32com.client
import pythoncom
class XASessionEvents:
logInState = 0
def OnLogin(self, code, msg):
print("OnLogin method is called")
if str(code) == '0000':
XASessionEvents.logInState = 1
def OnLogout(self):
print("OnLogout method is called")
def OnDisconnect(self):
print("OnDisconnect method is called")
class XAQueryEvents:
queryState = 0
def OnReceiveData(self,szTrCode):
print("ReceiveData")
XAQueryEvents.queryState = 1
def OnReceiveMessage(self, systemError, messageCode, message):
print("ReceiveMessage")
if __name__ == "__main__":
server_addr = "hts.ebestsec.co.kr"
server_port = 20001
server_type = 0
user_id = "songdh10"
user_pass ="gusdl57"
user_certificate_pass="gusdlsla57"
inXASession = win32com.client.DispatchWithEvents("XA_Session.XASession", XASessionEvents)
inXASession.ConnectServer(server_addr, server_port)
inXASession.Login(user_id, user_pass, user_certificate_pass, server_type, 0)
while XASessionEvents.logInState == 0:
pythoncom.PumpWaitingMessages()
class XARealEvents:
def OnReceiveRealData(self, *args):
hotime = XAReal.GetFieldData('OutBlock', 'hotime')
offerho1 = XAReal.GetFieldData('OutBlock', 'offerho1')
bidho1 = XAReal.GetFieldData('OutBlock', 'bidho1')
print ("%s %s %s" % (hotime, offerho1, bidho1))
XAReal = win32com.client.DispatchWithEvents("XA_DataSet.XAReal", XARealEvents)
XAReal.LoadFromResFile("C:\\eBEST\\xingAPI\\Res\\" + "H1_.res")
XAReal.SetFieldData('InBlock', 'shcode', '000270')
XAReal.AdviseRealData()
XARealEvents().OnReceiveRealData()
while XAQueryEvents.queryState == 0:
pythoncom.PumpWaitingMessages()
|
from django.shortcuts import render
# Create your views here.
def upload(request):
return render(request,"upload/upload.html")
|
# -*- coding: utf-8 -*-
import logging
import markdown
import base64
from django.conf import settings
from django.shortcuts import resolve_url
from Crypto.Cipher import AES
from docutils.core import publish_parts
from custom.cryptographer import Wrapper
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
CRYPT_COOKIE_SID = '_xxx_session_id'
CRYPT_SIGN_SALT = 'custom_crypt_cookies'
CRYPT_KEY = settings.SECRET_KEY[:16]
CRYPT_IV = settings.SECRET_KEY[-16:]
CRYPT_MODE = AES.MODE_CBC
logger = logging.getLogger(__name__)
_crypto_wrapper = Wrapper(AES)
def md2html(md):
"""解析markdown格式文本
Args:
md: raw markdown text
"""
markdown_processor = markdown.Markdown(extensions=['codehilite']) # Setup Markdown with the code highlighter
html = markdown_processor.convert(md) # Convert the md (markdown) into html
# to mimic the same output as Docutils, I'm wrapping the output in a classed div.
# html = '<div class="document">'+html+'</div>'
return html
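# Illustrative usage (assuming the `markdown` package with the codehilite extension is installed):
#   md2html('# Title\n\nSome *text*.')
#   would return roughly '<h1>Title</h1>\n<p>Some <em>text</em>.</p>'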
def rst2html(str):
"""rst2html(str)
By default, docutils tries to read configuration files from various locations.
But this will fail in the App Engine environment.
So you have to disable it by overwriting the default settings.
changed to return 'html_body' instead of 'fragment'; 'fragment' doesn't
include beginning headings unless there is a paragraph before it.
Args:
str: string - raw text
"""
import docutils.parsers.rst.directives.sourcecode
parts = publish_parts(source=str, writer_name='html4css1',
settings_overrides={'_disable_config': True})
return "\n".join([parts['body_pre_docinfo'], parts['body']])
def encrypt_str(plain):
_crypto_wrapper.new(CRYPT_KEY, CRYPT_MODE, CRYPT_IV)
return _crypto_wrapper.encrypt(plain)
def decrypt_str(ciph):
_crypto_wrapper.new(CRYPT_KEY, CRYPT_MODE, CRYPT_IV)
return _crypto_wrapper.decrypt(ciph)
def set_user_cookie(response, vars, *args, **kwargs):
data_string = encrypt_str(pickle.dumps(vars))
response.set_cookie(CRYPT_COOKIE_SID,
base64.urlsafe_b64encode(data_string),
**kwargs)
return response
def unset_user_cookie(response, *args, **kwargs):
response.delete_cookie(CRYPT_COOKIE_SID, *args, **kwargs)
return response
def get_user_cookie(request):
cookie = request.COOKIES.get(CRYPT_COOKIE_SID, None)
if cookie is None:
return cookie
data_string = base64.urlsafe_b64decode(cookie)
return pickle.loads(decrypt_str(data_string))
def render_to_alter(request, msg, level=None, title=None, next_url=None):
"""输出警示窗口
Args:
msg: string, 警告信息
leve: string, 层级(error, info)
title: stirng, 标题
Returns:
object, HttpResponse
"""
if isinstance(msg, (dict, )):
vars = msg
else:
if level is None:
level = 'info'
if 'error' == level:
level = 'danger'
if title is None:
            title = u'提示'  # Chinese for "Notice"
vars = locals()
from django.shortcuts import render
if not vars.get('next_url'):
vars['next_url'] = resolve_url('base.views.home')
return render(request, 'admin/alert.html', vars)
def signin_required(handler_method):
"""A decorator to require that a user be logged in to access a handler.
To use it, decorate your get() method like this:
@signin_required
def get():
...
We will redirect to a login page if the user is not logged in.
"""
def check_login(request, *args, **kwargs):
assert hasattr(request, 'account')
if request.account.get('is_login'):
return handler_method(request, *args, **kwargs)
else:
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(path, 'admin.views.signin', 'next')
return check_login
|
# Generated by Django 2.2.24 on 2021-10-31 20:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('estudiantes', '0003_auto_20211031_1446'),
]
operations = [
migrations.RenameModel(
old_name='Clases',
new_name='Clase',
),
]
|
import RPi.GPIO as GPIO
import time
import sys
import socket
GPIO.setmode(GPIO.BCM)
GPIO_PIN = 24
GPIO.setup(GPIO_PIN,GPIO.IN,pull_up_down = GPIO.PUD_UP)
delay_time = 1.0
Host = "127.0.0.5"
Port = 5000
sensor_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sensor_socket.connect((Host,Port))
print("Sensor de Linea KY-033 [Presione CTR + C para finalizar]")
try:
while True:
raw_data = GPIO.input(GPIO_PIN)
data = str(raw_data) + '\n'
        sensor_socket.send(data.encode())
time.sleep(delay_time)
except KeyboardInterrupt:
GPIO.cleanup()
|
import os
from easydict import EasyDict as edict
cfg2 = edict()
cfg2.PATH = edict()
cfg2.PATH.DATA = ['/home/liuhaiyang/dataset/CUB_200_2011/images.txt',
'/home/liuhaiyang/dataset/CUB_200_2011/train_test_split.txt',
'/home/liuhaiyang/dataset/CUB_200_2011/images/']
cfg2.PATH.LABEL = '/home/liuhaiyang/dataset/CUB_200_2011/image_class_labels.txt'
cfg2.PATH.EVAL = ['/home/liuhaiyang/dataset/CUB_200_2011/images.txt',
'/home/liuhaiyang/dataset/CUB_200_2011/train_test_split.txt',
'/home/liuhaiyang/dataset/CUB_200_2011/images/']
cfg2.PATH.TEST = '/home/liuhaiyang/liu_kaggle/cifar/dataset/cifar-10-batches-py/data_batch_1'
cfg2.PATH.RES_TEST = './res_imgs/'
cfg2.PATH.EXPS = './exps/'
cfg2.PATH.NAME = 'rest_cub_v3_stone'
cfg2.PATH.MODEL = '/model.pth'
cfg2.PATH.BESTMODEL = '/bestmodel.pth'
cfg2.PATH.LOG = '/log.txt'
cfg2.PATH.RESULTS = '/results/'
cfg2.DETERMINISTIC = edict()
cfg2.DETERMINISTIC.SEED = 60
cfg2.DETERMINISTIC.CUDNN = True
cfg2.TRAIN = edict()
cfg2.TRAIN.EPOCHS = 60
cfg2.TRAIN.BATCHSIZE = 8
cfg2.TRAIN.L1SCALING = 100
cfg2.TRAIN.TYPE = 'sgd'
cfg2.TRAIN.LR = 1e-3
cfg2.TRAIN.BETA1 = 0.9
cfg2.TRAIN.BETA2 = 0.999
cfg2.TRAIN.LR_TYPE = 'cos'
cfg2.TRAIN.LR_REDUCE = [26,36]
cfg2.TRAIN.LR_FACTOR = 0.1
cfg2.TRAIN.WEIGHT_DECAY = 1e-4
cfg2.TRAIN.NUM_WORKERS = 16
cfg2.TRAIN.WARMUP = 0
cfg2.TRAIN.LR_WARM = 1e-7
#-------- data aug --------#
cfg2.TRAIN.USE_AUG = True
cfg2.TRAIN.CROP = 224
cfg2.TRAIN.PAD = 0
cfg2.TRAIN.RESIZE = 300
cfg2.TRAIN.ROATION = 30
cfg2.MODEL = edict()
cfg2.MODEL.NAME = 'resnext'
cfg2.MODEL.IN_DIM = 3
cfg2.MODEL.CLASS_NUM = 200
cfg2.MODEL.USE_FC = True
cfg2.MODEL.PRETRAIN = None
cfg2.MODEL.PRETRAIN_PATH = './exps/pretrain/'
cfg2.MODEL.DROPOUT = 0
cfg2.MODEL.LOSS = 'bce_only_g'
#-------- for resnet --------#
cfg2.MODEL.BLOCK = 'bottleneck'
cfg2.MODEL.BLOCK_LIST = [3,4,6,3]
cfg2.MODEL.CONV1 = (7,2,3)
cfg2.MODEL.OPERATION = 'B'
cfg2.MODEL.STRIDE1 = 1
cfg2.MODEL.MAX_POOL = True
cfg2.MODEL.BASE = 64
#-------- for regnet --------#
cfg2.MODEL.REGNET = edict()
cfg2.MODEL.REGNET.STEM_TYPE = "simple_stem_in"
cfg2.MODEL.REGNET.STEM_W = 32
cfg2.MODEL.REGNET.BLOCK_TYPE = "res_bottleneck_block"
cfg2.MODEL.REGNET.STRIDE = 2
cfg2.MODEL.REGNET.SE_ON = True
cfg2.MODEL.REGNET.SE_R = 0.25
cfg2.MODEL.REGNET.BOT_MUL = 1.0
cfg2.MODEL.REGNET.DEPTH = 20
cfg2.MODEL.REGNET.W0 = 232
cfg2.MODEL.REGNET.WA = 115.89
cfg2.MODEL.REGNET.WM = 2.53
cfg2.MODEL.REGNET.GROUP_W = 232
#-------- for anynet -------#
cfg2.MODEL.ANYNET = edict()
cfg2.MODEL.ANYNET.STEM_TYPE = "res_stem_in"
cfg2.MODEL.ANYNET.STEM_W = 64
cfg2.MODEL.ANYNET.BLOCK_TYPE = "res_bottleneck_block"
cfg2.MODEL.ANYNET.STRIDES = [1,2,2,2]
cfg2.MODEL.ANYNET.SE_ON = False
cfg2.MODEL.ANYNET.SE_R = 0.25
cfg2.MODEL.ANYNET.BOT_MULS = [0.5,0.5,0.5,0.5]
cfg2.MODEL.ANYNET.DEPTHS = [3,4,6,3]
cfg2.MODEL.ANYNET.GROUP_WS = [4,8,16,32]
cfg2.MODEL.ANYNET.WIDTHS = [256,512,1024,2048]
#-------- for effnet --------#
cfg2.MODEL.EFFNET = edict()
cfg2.MODEL.EFFNET.STEM_W = 32
cfg2.MODEL.EFFNET.EXP_RATIOS = [1,6,6,6,6,6,6]
cfg2.MODEL.EFFNET.KERNELS = [3,3,5,3,5,5,3]
cfg2.MODEL.EFFNET.HEAD_W = 1408
cfg2.MODEL.EFFNET.DC_RATIO = 0.0
cfg2.MODEL.EFFNET.STRIDES = [1,2,2,2,1,2,1]
cfg2.MODEL.EFFNET.SE_R = 0.25
cfg2.MODEL.EFFNET.DEPTHS = [2, 3, 3, 4, 4, 5, 2]
cfg2.MODEL.EFFNET.GROUP_WS = [4,8,16,32]
cfg2.MODEL.EFFNET.WIDTHS = [16,24,48,88,120,208,352]
cfg2.GPUS = [0]
cfg2.PRINT_FRE = 300
cfg2.DATASET_TRPE = 'cub200_2011'
cfg2.SHORT_TEST = False
if __name__ == "__main__":
from utils import load_cfg2
logger = load_cfg2(cfg2)
print(cfg2)
|
invocations = list()
with open('message.txt') as f:
lines = f.readlines()
for line in lines:
    if line not in invocations:
        invocations.append(line)
with open("sortie.txt", "w") as outF:
    for line in invocations:
        outF.write(line)
        print("Line : {}".format(line))
|
# -*- coding: utf-8 -*-
import inject
import logging
import psycopg2
import asyncio
from asyncio import coroutine
from autobahn.asyncio.wamp import ApplicationSession
from model.config import Config
from model.systems.task.task import Task
from model.profiles import Profiles
class WampTask(ApplicationSession):
def __init__(self, config=None):
        logging.debug('instantiating')
ApplicationSession.__init__(self, config)
self.serverConfig = inject.instance(Config)
self.task = inject.instance(Task)
self.profiles = inject.instance(Profiles)
@coroutine
def onJoin(self, details):
logging.debug('registering methods')
yield from self.register(self.getTasks_async,'task.getTasks')
yield from self.register(self.createTask_async,'task.createTask')
yield from self.register(self.updateStatus_async,'task.updateStatus')
yield from self.register(self.removeTask_async,'task.removeTask')
yield from self.register(self.removeTaskByStatus_async,'task.removeTaskByStatus')
def _getDatabase(self):
host = self.serverConfig.configs['database_host']
dbname = self.serverConfig.configs['database_database']
user = self.serverConfig.configs['database_user']
passw = self.serverConfig.configs['database_password']
return psycopg2.connect(host=host, dbname=dbname, user=user, password=passw)
def getTasks(self, sessionId):
con = self._getDatabase()
try:
userId = self.profiles.getLocalUserId(sessionId)
return self.task.getTasks(con,userId)
except Exception as e:
logging.exception(e)
return None
finally:
con.close()
@coroutine
def getTasks_async(self, sessionId):
loop = asyncio.get_event_loop()
r = yield from loop.run_in_executor(None, self.getTasks, sessionId)
return r
def createTask(self, sessionId, text):
con = self._getDatabase()
try:
userId = self.profiles.getLocalUserId(sessionId)
id = self.task.createTask(con,userId,text)
con.commit()
task = self.task.find(con,id)
self.publish('task.newTaskEvent', task)
return id
except Exception as e:
logging.exception(e)
return None
finally:
con.close()
@coroutine
def createTask_async(self, sessionId, text):
loop = asyncio.get_event_loop()
r = yield from loop.run_in_executor(None, self.createTask, sessionId, text)
return r
def updateStatus(self, sessionId, taskId, status):
con = self._getDatabase()
try:
id = self.task.updateStatus(con,taskId,status)
con.commit()
task = self.task.find(con,id)
self.publish('task.changeTaskEvent', task)
return id
except Exception as e:
logging.exception(e)
return None
finally:
con.close()
@coroutine
def updateStatus_async(self, sessionId, taskId, status):
loop = asyncio.get_event_loop()
r = yield from loop.run_in_executor(None, self.updateStatus, sessionId, taskId, status)
return r
def removeTask(self, sessionId, taskId):
con = self._getDatabase()
try:
userId = self.profiles.getLocalUserId(sessionId)
id = self.task.removeTask(con,taskId)
con.commit()
self.publish('task.removeTaskEvent', id)
return id
except Exception as e:
logging.exception(e)
return None
finally:
con.close()
@coroutine
def removeTask_async(self, sessionId, taskId):
loop = asyncio.get_event_loop()
r = yield from loop.run_in_executor(None, self.removeTask, sessionId, taskId)
return r
def removeTaskByStatus(self, sessionId, status):
con = self._getDatabase()
try:
userId = self.profiles.getLocalUserId(sessionId)
ids = self.task.removeTaskByStatus(con,userId,status)
con.commit()
for id in ids:
self.publish('task.removeTaskEvent', id)
return ids
except Exception as e:
logging.exception(e)
return None
finally:
con.close()
@coroutine
def removeTaskByStatus_async(self, sessionId, status):
loop = asyncio.get_event_loop()
r = yield from loop.run_in_executor(None, self.removeTaskByStatus, sessionId, status)
return r
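if __name__ == '__main__':
    # Minimal launch sketch, assuming the standard autobahn ApplicationRunner is used to
    # start this session. The router URL and realm below are hypothetical placeholders;
    # the actual project may wire the session up elsewhere.
    from autobahn.asyncio.wamp import ApplicationRunner
    runner = ApplicationRunner(url='ws://127.0.0.1:8080/ws', realm='realm1')
    runner.run(WampTask)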
|
import unittest
import os
import numpy as np
from gmc.core.models import cnn
from gmc.dataset import musicset, reduce
from gmc.conf import settings
@unittest.skipIf(os.environ.get("DUMMY") == "TRUE",
"not necessary when real dataset not supplied")
class TestNN(unittest.TestCase):
def test_training(self):
dataset = musicset.MusicSet(dirname='features_m_10_fl100')
dataset.load_files()
dataset.load_train_data()
dataset.load_test_data()
if settings.PCA:
components = reduce.pca_comp(dataset.train.music)
k = components.shape[1]
dataset.train.music = np.matmul(components.T, dataset.train.music.T)
dataset.train.music = dataset.train.music.T
dataset.test.music = np.matmul(components.T, dataset.test.music.T)
dataset.test.music = dataset.test.music.T
cnn_t = cnn.CNN(dataset)
cnn_t.train(display_step=100)
|
from typing import Iterator, Tuple
from sympy import Expr, Symbol, I, pi, cos, arg, sqrt, cancel, simplify
__all__ = [
"expend_cos",
"amp_and_shift",
]
def expend_cos(expr: Expr, x: Symbol) -> Iterator[Expr]:
    """Yield the coefficients of ``expr`` treated as a polynomial in cos(x), lowest order first."""
    while True:
        # cos(x) vanishes at x = pi/2, so this substitution picks out the current constant term
        term = expr.subs(x, pi / 2)
        yield term
        expr = cancel((expr - term) / cos(x))
        if expr == 0:
            return
def amp_and_shift(expr: Expr, x: Symbol) -> Tuple[Expr, Expr]:
    """Return the amplitude and phase shift of a sinusoidal expression in x."""
    amp = simplify(cancel(sqrt(expr ** 2 + expr.diff(x) ** 2).subs(x, 0)))
    shift = arg(expr.subs(x, 0) + I * expr.diff(x).subs(x, 0))
    return amp, shift
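if __name__ == "__main__":
    # Quick illustrative sanity check of the two helpers above: expend_cos yields the
    # coefficients of an expression that is polynomial in cos(x), and amp_and_shift
    # recovers amplitude and phase from the value and derivative at x = 0.
    from sympy import sin, symbols
    x = symbols("x")
    print(list(expend_cos(2 + 3 * cos(x) + 5 * cos(x) ** 2, x)))  # expected: [2, 3, 5]
    print(amp_and_shift(3 * cos(x) + 4 * sin(x), x))  # expected: (5, atan(4/3))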
|
"""
This file generates the header file defining the trained and quantized network.
The following files are required:
- [project_root]/data/config.json, containing the QuantLab configuration with which the network was trained
- [project_root]/data/net.npz, containing the entire network
"""
__author__ = "Tibor Schneider"
__email__ = "sctibor@student.ethz.ch"
__version__ = "0.0.2"
__date__ = "2020/01/28"
__license__ = "Apache 2.0"
__copyright__ = """
Copyright (C) 2020 ETH Zurich. All rights reserved.
Author: Tibor Schneider, ETH Zurich
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the License); you may
not use this file except in compliance with the License.
You may obtain a copy of the License at
www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import json
import numpy as np
from header_file import HeaderFile, HeaderConstant, HeaderScalar, HeaderArray, HeaderComment
from header_file import align_array, align_array_size
import convert_torch_format as convert
DEFAULT_HEADER_NAME = "../src/cl/net/net.h"
DEFAULT_CONFIG_JSON = "config.json"
DEFAULT_NET_NPZ = "net.npz"
WEIGHT_L1_PAD = 4 * 0
def gen_net_header(net_file, config_file, output_file):
# load network
net = np.load(net_file)
# load configuration file
with open(config_file, "r") as _f:
config = json.load(_f)
# we only need the network parameters
net_params = config["indiv"]["net"]["params"]
# only allow nets with 255 levels
assert net_params["weightInqNumLevels"] == 255
assert net_params["actSTENumLevels"] == 255
assert net_params["F2"] % 4 == 0
assert net_params["N"] == 4
# prepare params
if net_params["F2"] is None:
net_params["F2"] = net_params["F1"] * net_params["D"]
# only allow F2 = F1 * D
assert net_params["F2"] == net_params["F1"] * net_params["D"]
# start the header file
header = HeaderFile(output_file, "__NET_NET_H__", with_c=True)
# add network dimensions
header.add(HeaderComment("Network Dimensions", blank_line=False))
header.add(HeaderConstant("NET_F1", net_params["F1"], blank_line=False))
header.add(HeaderConstant("NET_F2", net_params["F2"], blank_line=False))
header.add(HeaderConstant("NET_D", net_params["D"], blank_line=False))
header.add(HeaderConstant("NET_C", net_params["C"], blank_line=False))
header.add(HeaderConstant("NET_C_ALIGN", align_array_size(net_params["C"]), blank_line=False))
header.add(HeaderConstant("NET_T", net_params["T"], blank_line=False))
header.add(HeaderConstant("NET_T_ALIGN", align_array_size(net_params["T"]), blank_line=False))
header.add(HeaderConstant("NET_T8", net_params["T"] // 8, blank_line=False))
header.add(HeaderConstant("NET_T8_ALIGN", align_array_size(net_params["T"] // 8), blank_line=False))
header.add(HeaderConstant("NET_T64", (net_params["T"] // 8) // 8, blank_line=False))
header.add(HeaderConstant("NET_T64_ALIGN", align_array_size((net_params["T"] // 8) // 8), blank_line=False))
header.add(HeaderConstant("NET_N", net_params["N"], blank_line=True))
# Layer 1
input_scale = convert.ste_quant(net, "quant1")
weight, weight_scale = convert.inq_conv2d(net, "conv1")
weight = weight.reshape(net_params["F1"], 64)
weight_reverse, _ = convert.inq_conv2d(net, "conv1", store_reversed=True)
weight_reverse = weight_reverse.reshape(net_params["F1"], 64)
bn_scale, bn_offset = convert.batch_norm(net, "batch_norm1")
output_scale = convert.ste_quant(net, "quant2")
factor, offset = convert.div_factor_batch_norm(input_scale, weight_scale, output_scale, bn_scale, bn_offset)
    # optionally pad the reversed weight vector with WEIGHT_L1_PAD extra zero entries (no-op when it is 0)
if WEIGHT_L1_PAD > 0:
weight_reverse_pad = np.zeros((net_params["F1"], 64 + WEIGHT_L1_PAD))
weight_reverse_pad[:, :-WEIGHT_L1_PAD] = weight_reverse
else:
weight_reverse_pad = weight_reverse
header.add(HeaderComment("Layer 1\n"
"=======\n"
"Convolution + BN\n\n"
"Input: [C, T]\n"
"Weight: [F1, 64]\n"
"Output: [F1, C, T]",
mode="/*"))
header.add(HeaderConstant("NET_L1_PAD_START", 31))
header.add(HeaderConstant("NET_L1_PAD_END", 32))
header.add(HeaderConstant("NET_L1_PAD_INPUT_LEN", net_params["T"] + 31 + 32))
header.add(HeaderConstant("NET_L1_PAD_INPUT_LEN_ALIGN", align_array_size(net_params["T"] + 31 + 32)))
header.add(HeaderArray("net_l1_factor", "int32_t", factor.ravel()))
header.add(HeaderArray("net_l1_offset", "int32_t", offset.ravel()))
header.add(HeaderConstant("NET_L1_WEIGHT_LEN", weight.shape[-1]))
header.add(HeaderConstant("NET_L1_WEIGHT_LEN_ALIGN", weight_reverse_pad.shape[-1]))
header.add(HeaderArray("net_l1_weight", "int8_t", weight.ravel()))
header.add(HeaderArray("net_l1_weight_reverse", "int8_t", weight_reverse.ravel()))
header.add(HeaderArray("net_l1_weight_reverse_pad", "int8_t", weight_reverse_pad.ravel()))
# layer2
input_scale = convert.ste_quant(net, "quant2")
weight, weight_scale = convert.inq_conv2d(net, "conv2", store_reversed=True)
bn_scale, bn_offset = convert.batch_norm(net, "batch_norm2")
output_scale = convert.ste_quant(net, "quant3")
factor, offset = convert.div_factor_batch_norm(input_scale, weight_scale, output_scale, bn_scale, bn_offset, pool=8)
weight = weight.reshape(net_params["F2"], net_params["C"])
weight = align_array(weight)
header.add(HeaderComment("Layer 2\n"
"=======\n"
"Convolution + BN + ReLU + Pooling\n\n"
"Input: [F1, C, T]\n"
"Weight: [F2, C] (aligned to [F2, 24]\n"
"Output: [F2, T // 8]",
mode="/*"))
header.add(HeaderArray("net_l2_factor", "int32_t", factor.ravel()))
header.add(HeaderArray("net_l2_offset", "int32_t", offset.ravel()))
header.add(HeaderConstant("NET_L2_WEIGHT_LEN", weight.shape[-1]))
header.add(HeaderArray("net_l2_weight", "int8_t", weight.ravel()))
header.add(HeaderArray("net_l2_weight_32", "int32_t", weight.ravel()))
# layer3
input_scale = convert.ste_quant(net, "quant3")
weight, weight_scale = convert.inq_conv2d(net, "sep_conv1")
output_scale = convert.ste_quant(net, "quant4")
factor = convert.div_factor(input_scale, weight_scale, output_scale)
weight = weight.reshape(net_params["F2"], 16)
header.add(HeaderComment("Layer 3\n"
"=======\n"
"Convolution\n\n"
"Input: [F2, T // 8]\n"
"Weight: [F2, 16]\n"
"Output: [F2, T // 8]",
mode="/*", blank_line=False))
header.add(HeaderConstant("NET_L3_PAD_START", 7))
header.add(HeaderConstant("NET_L3_PAD_END", 8))
header.add(HeaderConstant("NET_L3_PAD_INPUT_LEN", net_params["T"] // 8 + 7 + 8))
header.add(HeaderConstant("NET_L3_PAD_INPUT_LEN_ALIGN", align_array_size(net_params["T"] // 8 + 7 + 8)))
header.add(HeaderConstant("NET_L3_FACTOR", factor))
header.add(HeaderConstant("NET_L3_WEIGHT_LEN", weight.shape[-1]))
header.add(HeaderArray("net_l3_weight", "int8_t", weight.ravel()))
# layer4
input_scale = convert.ste_quant(net, "quant4")
weight, weight_scale = convert.inq_conv2d(net, "sep_conv2")
output_scale = convert.ste_quant(net, "quant5")
bn_scale, bn_offset = convert.batch_norm(net, "batch_norm3")
factor, offset = convert.div_factor_batch_norm(input_scale, weight_scale, output_scale, bn_scale, bn_offset, pool=8)
weight = weight.reshape(net_params["F2"], net_params["F2"])
header.add(HeaderComment("Layer 4\n"
"=======\n"
"Convolution + BN + ReLU + Pooling\n\n"
"Input: [F2, T // 8]\n"
"Weight: [F2, F2]\n"
"Output: [F2, T // 64]",
mode="/*"))
header.add(HeaderArray("net_l4_factor", "int32_t", factor.ravel()))
header.add(HeaderArray("net_l4_offset", "int32_t", offset.ravel()))
header.add(HeaderConstant("NET_L4_WEIGHT_LEN", weight.shape[-1]))
header.add(HeaderArray("net_l4_weight", "int8_t", weight.ravel()))
# layer5
input_scale = convert.ste_quant(net, "quant5")
output_scale = convert.ste_quant(net, "quant6")
weight, bias, weight_scale = convert.inq_linear(net, "fc")
weight = weight.reshape(net_params["N"], net_params["F2"] * (net_params["T"] // 64))
#weight = align_array(weight)
    # we want to align each T // 64 block individually, not the flattened product F2 * (T // 64)
t64 = net_params["T"] // 64
t64_align = align_array_size(t64)
weight_align = np.zeros((net_params["N"], net_params["F2"] * t64_align), dtype=int)
for i in range(net_params["F2"]):
weight_align[:, i * t64_align: i * t64_align + t64] = weight[:, i * t64: (i + 1) * t64]
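    # e.g. (hypothetical numbers, assuming align_array_size rounds a length up to a
    # multiple of 4): with T = 1152, t64 = 18 and t64_align = 20, so each 18-entry block
    # of a row is copied into a 20-entry slot, leaving 2 zero-padded entries per block.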
factor = convert.div_factor(input_scale, weight_scale, output_scale)
header.add(HeaderComment("Layer 5\n"
"=======\n"
"Linear Layer (without scaling in the end)\n\n"
"Input: [F2, T // 64]\n"
"Weight: [N, F2 * (T // 64)]\n"
"Bias: [N]\n"
"Output: [N]",
mode="/*"))
header.add(HeaderConstant("NET_L5_FACTOR", factor))
header.add(HeaderArray("net_l5_bias", "int8_t", bias.ravel()))
header.add(HeaderConstant("NET_L5_WEIGHT_LEN", weight_align.shape[-1]))
header.add(HeaderArray("net_l5_weight", "int8_t", weight_align.ravel()))
# store the header file
header.write()
if __name__ == "__main__":
parser = argparse.ArgumentParser("Generates the header file defining the trained EEGNet")
parser.add_argument("-o", "--output", help="Export header file name", default=DEFAULT_HEADER_NAME)
parser.add_argument("-n", "--net", help="numpy file containing the network", default=DEFAULT_NET_NPZ)
parser.add_argument("-c", "--config", help="configuration file name", default=DEFAULT_CONFIG_JSON)
args = parser.parse_args()
gen_net_header(args.net, args.config, args.output)
|
#!/usr/bin/env python
# encoding:utf-8
'''
#=============================================================================
# FileName: cmdLine.py
# Desc: Parse command-line arguments, used by zoomeye
# Author: Crow
# Email: lrt_no1@163.com
# HomePage: @_@"
# Version: 2.0.1
# LastChange: 2017-01-01 17:25:04
# History:
#=============================================================================
'''
import sys
#from optparse import OptionError
#from optparse import OptionGroup
from optparse import OptionParser
#from optparse import SUPPRESS_HELP
from lib.core.data import asys
def cmdLineParser(argv=None):
if not argv:
argv = sys.argv
try:
parser = OptionParser()
"""
parser.add_option("--hh",dest="advancedHelp",
action="store_true",
help="Show advanced help message and exit")
parser.add_option("--version",dest="showVersion",
action="store_true",
help="Show program's version number and exit")
parser.add_option("-v",dest="verbose",type="int",
help="")
"""
parser.add_option("-d", action="store_true",dest="qiantai",
help="")
parser.add_option("-s", action="store_true",dest="houtai",
help="")
parser.add_option("--app",dest="app",help="")
(options, args) = parser.parse_args()
if options.app:
asys.RUN_APPS_NAME = options.app.split(',')
if options.qiantai:
asys.RUN_MODULE = 1
elif options.houtai:
asys.RUN_MODULE = 0
except Exception,e:
print str(e)
pass
return parser
|
import random
import json
# station = ["beijing","shanghai","nanjing","hangzhou","wuxi","ningbo","qingdao","wenzhou","shenzhen","tianjing"]
# stationID = ['10010','10011','10012','10013','10014','10015','10016','10017','10018','10019',"10020"]
# weekdays = ["Mon","Tues","Wed","Thurs","Fri","Sat",'Sun']
# def init():
# # the dict d used in each iteration must be a temporary local variable, not a global one
# for i in range(10):
# d = {}
# src = int(random.random() * 10)
# d['FightID'] = stationID[i]
# d['StaStat'] = station[src]
# des = int(random.random() * 10)
# des = (src + des) // 2
# if des == src and des <= 9:
# des = src + 1
# else:
# des = src // 2 -1
# d['EndStat'] = station[des]
# m = int(random.random() * 10) // 2
# d['DataSch'] = weekdays[m]
# planeSet.append(d)
def uploadFile(fileName,planeSet):
fp = open(fileName,'wb')
data = json.dumps(planeSet).encode('utf-8')
fp.write(data)
fp.close()
pass
def loadFile(fileName):
fp = open(fileName,'rb')
data = fp.readline()
planeSet = json.loads(data)
return planeSet
pass
def displayTicket(planeSet):
print("\033[41;1mSTART END TIME \033[0m")
for res in planeSet:
print("\033[2;31;42;1m{:<9} {:<8} {:<5}\033[0m".format(res['StaStat'],res['EndStat'],res['DataSch']))
pass
def addFligtINFO(planeSet):
d = {}
res = input("-->")
pass
if __name__ == "__main__":
# init()
# print(planeSet)
# uploadFile('planeSchedule',planeSet)
planeSet = []
planeSet = loadFile('planeSchedule')
# print(planeSet)
displayTicket(planeSet)
res = input("==>")
print(res,type(res))
|
import os
import re
import datetime
def whoisFunc(name):
    # run the whois command for the given domain
    try:
        output = os.popen("whois "+name)
    except OSError:
        return False
    # split the information returned by whois into a list of lines
    output = str(output.read()).split("\n")
    # result dictionary
    arResult = {}
    # list that will collect the name servers
    arServers = []
    # flag indicating that a creation date was found; initially False
    flagSuccess = False
    for elem in output:
        # the whois output format differs between domains; some domains prepend
        # irrelevant information marked with '%' before the actual domain data,
        # so such lines have to be excluded by searching for '%'
        # skip lines that contain '%' and lines that are completely empty
        if elem.find("%", 0) != -1 or elem.strip() == "":
            continue
        # stop iterating once we reach the line about the last update
        if elem.lower().find("last update") != -1:
            break
        # split the line into two values (key: value)
        elem = elem.strip().split(": ")
        # if there is only one value, skip the line
        if len(elem) <= 1:
            continue
        # lowercase the key
        elem[0] = elem[0].strip().lower()
        # look for the substring "creat" in the key, i.e. the creation date;
        # "creat" because whois key names differ between domains
        if elem[0].find("creat", 0) != -1:
            # on success, parse the date and store it in arResult
            arResult[elem[0]] = datetime.datetime.strptime(elem[1].strip(), "%Y-%m-%dT%H:%M:%SZ")
            # set the date-found flag to True
            flagSuccess = True
        # look for the substring "server" in the key, i.e. a name server; exclude "whois"
        # because the whois output also contains a "Registrar WHOIS Server" key
        if elem[0].find("server", 0) != -1 and elem[0].find("whois", 0) == -1:
            # on success, append the name to the server list
            arServers.append(elem[1].strip())
        # look for the substring "org" in the key, i.e. the organization name
        if elem[0].find("org", 0) != -1:
            # on success, record the organization name
            arResult[elem[0].strip()] = elem[1].strip()
    # store the list of name servers in the result dictionary if it has at least one entry
    if len(arServers) > 0:
        arResult["server lists"] = arServers
    # if the creation date was found, return the arResult dictionary
    if flagSuccess:
        return arResult
    else:
        return False
print(whoisFunc("drweb.com"))
print(" ")
print(whoisFunc("drweb.ru"))
print(" ")
print(whoisFunc("drweb.net"))
print(" ")
print(whoisFunc("drweb.de"))
|
#! /usr/bin/python
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
plt.rc('axes', titlesize=16) # fontsize of the axes title
plt.rc('axes', labelsize=16) # fontsize of the x and y labels
plt.rc('xtick', labelsize=12) # fontsize of the tick labels
plt.rc('ytick', labelsize=12) # fontsize of the tick labels
plt.rc('legend', fontsize=12) # legend fontsize
fig, (ax1, ax2) = plt.subplots(2, figsize=(6, 5))
fig.subplots_adjust(hspace=.5)
#=========================
# RPA cache
#========================
x1=(1, 2, 3, 4, 5, 6)
rpao_x_lable=('4B', '8B', '16B', '32B', '64B', '128B')
youtube_rpao_hit_rate = (0.370, 0.579, 0.725, 0.824, 0.891, 0.934)
lj_rpao_hit_rate = (0.307, 0.496, 0.656, 0.778, 0.861, 0.916)
pokec_rpao_hit_rate = (0.242, 0.412, 0.590, 0.738, 0.840, 0.906)
rmat19_32_rpao_hit_rate = (0.211, 0.372, 0.557, 0.716, 0.826, 0.899)
rmat21_32_rpao_hit_rate = (0.180, 0.322, 0.504, 0.678, 0.808, 0.892)
ax1.set_title('RPA prefetch buffer')
ax1.plot(x1, youtube_rpao_hit_rate, '-<')
ax1.plot(x1, lj_rpao_hit_rate, '-s')
ax1.plot(x1, pokec_rpao_hit_rate, '->')
ax1.plot(x1, rmat19_32_rpao_hit_rate, '-^')
ax1.plot(x1, rmat21_32_rpao_hit_rate, '-p')
ax1.set_ylabel('Hit rate')
ax1.set_xticks(x1)
ax1.set_xticklabels(rpao_x_lable)
vals=ax1.get_yticks()
ax1.set_yticklabels(['{:3.1f}%'.format(100*x) for x in vals])
ax1.grid(linewidth=0.5)
ax1.xaxis.grid(False)
#==============================
# CIA Cache
#===============================
x2=(1, 2, 3, 4, 5, 6)
ciao_x_lable=('4B', '8B', '16B', '32B', '64B', '128B')
youtube_ciao_hit_rate = (0.0600, 0.475, 0.718, 0.844, 0.912, 0.950)
lj_ciao_hit_rate = (0.0028, 0.489, 0.734, 0.858, 0.922, 0.956)
pokec_ciao_hit_rate = (0.0002, 0.489, 0.734, 0.857, 0.920, 0.954)
rmat19_32_ciao_hit_rate = (0.0002, 0.497, 0.746, 0.871, 0.934, 0.966)
rmat21_32_ciao_hit_rate = (0.0001, 0.496, 0.745, 0.869, 0.932, 0.964)
ax2.set_title('CIA prefetch buffer')
ax2.plot(x2, youtube_ciao_hit_rate, '-<')
ax2.plot(x2, lj_ciao_hit_rate, '-s')
ax2.plot(x2, pokec_ciao_hit_rate, '->')
ax2.plot(x2, rmat19_32_ciao_hit_rate, '-^')
ax2.plot(x2, rmat21_32_ciao_hit_rate, '-p')
ax2.set_ylabel('Hit rate')
ax2.set_xticks(x2)
ax2.set_xticklabels(ciao_x_lable)
vals=ax2.get_yticks()
ax2.set_yticklabels(['{:3.1f}%'.format(100*x) for x in vals])
ax2.grid(linewidth=0.5)
ax2.xaxis.grid(False)
#=========================
# Depth read cache
#========================
#fig.subplots_adjust(hspace=.6)
#x3=(1, 2, 3, 4, 5, 6, 7)
#depth_x_lable=('1Kx64B', '2Kx64B', '4Kx64B', '8Kx64B', '16Kx64B', '32Kx64B', '64Kx64B')
#youtube_depth_read_rate = (0.57, 0.65, 0.75, 0.86, 0.95, 0.97, 0.97)
#lj_depth_read_rate = (0.35, 0.40, 0.46, 0.56, 0.70, 0.86, 0.99)
#pokec_depth_read_rate = (0.16, 0.23, 0.35, 0.57, 0.89, 0.99, 0.99)
#rmat19_32_depth_read_rate = (0.12, 0.25, 0.50, 0.99, 0.99, 0.99, 0.99)
#rmat21_32_depth_read_rate = (0.03, 0.06, 0.13, 0.26, 0.51, 0.99, 0.99)
#ax3.set_title('Depth read cache')
#
#ax3.plot(x3, youtube_depth_read_rate, '-<')
#ax3.plot(x3, lj_depth_read_rate, '-s')
#ax3.plot(x3, pokec_depth_read_rate, '->')
#ax3.plot(x3, rmat19_32_depth_read_rate, '-^')
#ax3.plot(x3, rmat21_32_depth_read_rate, '-p')
#ax3.set_ylabel('Hit rate')
#ax3.set_xticks(x3)
#ax3.set_xticklabels(depth_x_lable)
#for tick in ax3.get_xticklabels():
# tick.set_rotation(15)
#
#
#vals=ax3.get_yticks()
#ax3.set_yticklabels(['{:3.1f}%'.format(100*x) for x in vals])
#ax3.grid(linewidth=0.5)
#ax3.xaxis.grid(False)
#
##=========================
## Depth write cache
##========================
#x4=(1, 2, 3, 4, 5, 6, 7)
#depth_x_lable=('1Kx64B', '2Kx64B', '4Kx64B', '8Kx64B', '16Kx64B', '32Kx64B', '64Kx64B')
#youtube_depth_write_rate = (0.73, 0.77, 0.84, 0.91, 0.98, 0.99, 0.99)
#lj_depth_write_rate = (0.40, 0.44, 0.49, 0.57, 0.70, 0.87, 0.99)
#pokec_depth_write_rate = (0.20, 0.26, 0.37, 0.58, 0.88, 0.99, 0.99)
#rmat19_32_depth_write_rate = (0.12, 0.25, 0.50, 0.99, 0.99, 0.99, 0.99)
#rmat21_32_depth_write_rate = (0.03, 0.06, 0.13, 0.26, 0.51, 0.99, 0.99)
#ax4.set_title('Depth write cache')
#
#ax4.plot(x4, youtube_depth_write_rate, '-<')
#ax4.plot(x4, lj_depth_write_rate, '-s')
#ax4.plot(x4, pokec_depth_write_rate, '->')
#ax4.plot(x4, rmat19_32_depth_write_rate, '-^')
#ax4.plot(x4, rmat21_32_depth_write_rate, '-p')
#ax4.set_ylabel('Hit rate')
#ax4.set_xticks(x4)
#ax4.set_xticklabels(depth_x_lable)
#vals=ax4.get_yticks()
#ax4.set_yticklabels(['{:3.1f}%'.format(100*x) for x in vals])
#ax4.grid(linewidth=0.5)
#ax4.xaxis.grid(False)
#ax4.set_xlabel('prefetch buffer/cache Size')
# Adding the legend and showing the plot
#ax1.legend(['youtube', 'lj', 'pokec', 'ramt-19-32', 'rmat-21-32'],
# loc='lower right',
# ncol=3)
ax1.legend(['Youtube', 'LJ', 'Pokec', 'R-MATI', 'R-MATII'],
loc='lower right',
ncol=3)
#ax3.legend(['youtube', 'lj', 'pokec', 'ramt-19-32', 'rmat-21-32'],
# loc='lower right',
# ncol=3)
#=======================================================================
#ret.get_frame().set_alpha(0.4)
plt.savefig("../prefetch-hit.pdf", bbox_inches='tight')
#plt.show()
|
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from tastypie.authentication import SessionAuthentication
from tastypie import fields
from .models import Review, Book
from django.contrib.auth.models import User
class UserResource(ModelResource):
class Meta:
queryset = User.objects.all()
fields = ['username'] # Only username field
authentication = SessionAuthentication() # Access only for authenticated user
class BookResource(ModelResource):
class Meta:
queryset = Book.objects.all()
allowed_methods = ['get']
authentication = SessionAuthentication() # Access only for authenticated user
class ReviewResource(ModelResource):
book = fields.ToOneField(BookResource, 'book')
user = fields.ToOneField(UserResource, 'user', full=True)
class Meta:
queryset = Review.objects.all()
allowed_methods = ['get']
authentication = SessionAuthentication() # Access only for authenticated user
filtering = {'book': ALL_WITH_RELATIONS}
|
../../Sum-Exp-Data.py
|
s,b2=map(str,input().split())
d3=s+b2
print(d3)
|
# intraday_ml_strategy.py
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from qstrader.price_parser import PriceParser
from qstrader.event import (SignalEvent, EventType)
from qstrader.strategy.base import AbstractStrategy
class IntradayMachineLearningPredictionStrategy(AbstractStrategy):
"""
Requires:
tickers - The list of ticker symbols
events_queue - A handle to the system events queue
"""
def __init__(
self, tickers, events_queue,
model_pickle_file, lags=5
):
self.tickers = tickers
self.events_queue = events_queue
self.model_pickle_file = model_pickle_file
self.lags = lags
self.invested = False
self.cur_prices = np.zeros(self.lags+1)
self.cur_returns = np.zeros(self.lags)
self.minutes = 0
self.qty = 10000
self.model = joblib.load(model_pickle_file)
def _update_current_returns(self, event):
"""
Updates the array of current returns "features"
used by the machine learning model for prediction.
"""
# Adjust the feature vector to move all lags by one
# and then recalculate the returns
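        # e.g. with lags=2 the price buffer holds [p_t, p_{t-1}, p_{t-2}] and the
        # returns features become [p_t/p_{t-1} - 1, p_{t-1}/p_{t-2} - 1], scaled to percent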
for i, f in reversed(list(enumerate(self.cur_prices))):
if i > 0:
self.cur_prices[i] = self.cur_prices[i-1]
else:
self.cur_prices[i] = event.close_price/float(
PriceParser.PRICE_MULTIPLIER
)
if self.minutes > (self.lags + 1):
for i in range(0, self.lags):
self.cur_returns[i] = ((
self.cur_prices[i]/self.cur_prices[i+1]
)-1.0)*100.0
def calculate_signals(self, event):
"""
Calculate the intraday machine learning
prediction strategy.
"""
if event.type == EventType.BAR:
self._update_current_returns(event)
self.minutes += 1
# Allow enough time to pass to populate the
# returns feature vector
if self.minutes > (self.lags + 2):
pred = self.model.predict(self.cur_returns.reshape((1, -1)))[0]
# Long only strategy
if not self.invested and pred == 1:
print("LONG: %s" % event.time)
self.events_queue.put(
SignalEvent(self.tickers[0], "BOT", self.qty)
)
self.invested = True
if self.invested and pred == -1:
print("CLOSING LONG: %s" % event.time)
self.events_queue.put(
SignalEvent(self.tickers[0], "SLD", self.qty)
)
self.invested = False
|
from loguru import logger
from sc2.data import Race
from sc2.constants import *
from sc2.ids.unit_typeid import *
from sc2.ids.ability_id import *
from sc2.unit import Unit
from sc2.units import Units
class ArmyGroup:
"""
Army groups allow control of an individual group of units
Units in an army group are generally organized such that they all follow the same behavior
i.e. all units in the marine army group focus banelings when they're nearby
while all units in the marauder army group focus lurkers first
Usage in bot class:
self.my_army = ArmyGroup(self) # make sure to pass self as an argument when initializing
self.my_army.add_to(self.forces) # add to army
for unit in my_army.get_units():
# for unit in self.units.tags_in(self.my_army.unit_tags): # how to get units object
# do things with army units
"""
def __init__(self, bot):
self.bot = bot
self.unit_tags = set()
self.state = "DEFENDING"
self.attack_position = self.bot.enemy_start_locations[0]
self.defense_idle_position = self.bot.start_location.sort_by_distance(self.bot.expansion_locations_list)[1].towards(self.bot.game_info.map_center, 16)
self.defense_range = 30 # Attack threats this distance from any friendly townhalls
self.threats = None # use get_threats() instead of getting this
#self.respond_to_nearby_threats = True
#self.target_fire_units = set() # set of UnitTypeIds
# Marine - {BANELING, INFESTOR, HIGHTEMPLAR, DARKTEMPLAR}
# Marauder - {BANELING, INFESTOR, ULTRALISK, HIGHTEMPLAR, DARKTEMPLAR, STALKER, SIEGETANKSIEGED, SIEGETANK}
def set_state(self, s):
assert s in ["DEFENDING", "ATTACKING"] # possible states
self.state = s
def trigger_attack(self, pos=None):
if pos: self.attack_position = pos
self.state = "ATTACKING"
#for u in self.get_units(): u.attack(self.attack_position) # sends all forces, even if doing something else
self.do_state()
def end_attack(self, pos=None):
if pos: self.defense_idle_position = pos
self.state = "DEFENDING"
for u in self.get_units(): u.attack(self.defense_idle_position)
self.do_state()
def do_state(self): # this should be called often
if self.state == "DEFENDING":
for u in self.get_units().idle:
if self.bot.iteration % 4 == 0: u.attack(self.defense_idle_position)
elif self.state == "ATTACKING":
for u in self.get_units().idle: # only sends idle forces
u.attack(self.attack_position) # TODO: if nearby threats attack those instead
else:
raise Exception # invalid state for ArmyGroup
def get_units(self, of_type=None):
"""
Returns a Units object.
of_type (optional) - a {set} of UnitTypeIds (i.e. MARINE) or single UnitTypeId
"""
if of_type:
assert isinstance(of_type, (set, UnitTypeId))
if self.bot.units:
return_units = self.bot.units.tags_in(self.unit_tags)
if of_type:
return return_units.of_type(of_type)
else:
return return_units
def add_to(self, to_add):
"""
Input: Units, Unit, tag or list
Add to this army group if not already added
"""
if isinstance(to_add, Units): # if units object, feed each unit back in
for unit in to_add:
self.add_to(unit)
elif isinstance(to_add, Unit): # if unit, convert to tag and feed back in
self.add_to(to_add.tag)
elif isinstance(to_add, int): # if tag, save to set
if to_add not in self.unit_tags:
self.unit_tags.add(to_add)
elif isinstance(to_add, list): # if list, iterate and feed back in
for e in to_add: # e could be a tag or a unit
self.add_to(e)
else: raise Exception # invalid argument type in ArmyGroup.add_to()
def remove_from(self, to_rm):
"""
Input: Units, Unit, tag or list
Remove from this army group if a member
"""
if isinstance(to_rm, Units): # if units object, feed each unit back in
for unit in to_rm:
self.remove_from(unit)
elif isinstance(to_rm, Unit): # if unit, convert to tag and feed back in
self.remove_from(to_rm.tag)
        elif isinstance(to_rm, int): # if tag, remove it from the set
            self.unit_tags.discard(to_rm) # discard instead of remove because we don't want an exception if the tag isn't in the group
elif isinstance(to_rm, list): # if list, iterate and feed back in
for e in to_rm:
self.remove_from(e)
else: raise Exception # invalid argument type in ArmyGroup.remove_from()
    def get_threats(self):
        # threats are visible enemy units closer than self.defense_range to any friendly townhall
        threat_tags = set()
        for townhall in self.bot.townhalls:
            threat_tags |= self.bot.enemy_units.visible.closer_than(self.defense_range, townhall.position).tags
        self.threats = self.bot.enemy_units.tags_in(threat_tags)
        return self.threats.exclude_type({LARVA})
### From burny-bots-python-sc2 CreepyBot
###
def get_unit_info(bot, unit, field="food_required"):
# get various unit data, see list below
# usage: get_unit_info(ROACH, "mineral_cost")
assert isinstance(unit, (Unit, UnitTypeId))
if isinstance(unit, Unit):
# unit = unit.type_id
unit = unit._type_data._proto
else:
unit = bot._game_data.units[unit.value]._proto
# unit = bot._game_data.units[unit.value]
# print(vars(unit)) # uncomment to get the list below
if hasattr(unit, field):
return getattr(unit, field)
else:
return None
"""
name: "Drone"
available: true
cargo_size: 1
attributes: Light
attributes: Biological
movement_speed: 2.8125
armor: 0.0
weapons {
type: Ground
damage: 5.0
attacks: 1
range: 0.10009765625
speed: 1.5
}
mineral_cost: 50
vespene_cost: 0
food_required: 1.0
ability_id: 1342
race: Zerg
build_time: 272.0
sight_range: 8.0
"""
# update scouting info depending on visible units that we can see
# if we see buildings that are not
# depot, rax, bunker, spine crawler, spore, nydus, pylon, cannon, gateway
# then assume that is the enemy spawn location
def process_scouting(bot):
if not hasattr(bot, 'opponent_data'):
bot.opponent_data = {
"spawn_location": None, # for 4player maps
"expansions": [], # stores a list of Point2 objects of expansions
"expansions_tags": set(), # stores the expansions above as tags so we dont count them double
"race": None,
"army_tags_scouted": [], # list of dicts with entries: {"tag": 123, "scout_time": 15.6, "supply": 2}
"army_supply_scouted": 0,
"army_supply_nearby": 0,
"army_supply_visible": 0
}
# set enemy spawn location
ignore_these_buildings = [SUPPLYDEPOT, SUPPLYDEPOTLOWERED, BARRACKS, BUNKER, SPINECRAWLER, SPORECRAWLER, NYDUSNETWORK, NYDUSCANAL, PYLON, PHOTONCANNON, GATEWAY]
if bot.opponent_data["spawn_location"] is None and len(bot.enemy_start_locations) > 0:
if bot.enemy_structures.exists:
filtered_units = bot.enemy_structures.filter(lambda x:x.type_id not in ignore_these_buildings)
if filtered_units.exists:
bot.opponent_data["spawn_location"] = filtered_units.random.position.closest(bot.enemy_start_locations)
# figure out the race of the opponent
if bot.opponent_data["race"] is None and bot.enemy_units.exists:
unit_race = get_unit_info(bot, bot.enemy_units.random, "race")
racesDict = {
Race.Terran.value: "Terran",
Race.Zerg.value: "Zerg",
Race.Protoss.value: "Protoss",
}
bot.opponent_data["race"] = unit_race
# figure out how much army supply enemy has:
visible_enemy_units = bot.enemy_units.not_structure.filter(lambda x:x.type_id not in [DRONE, SCV, PROBE, LARVA, EGG])
for unit in visible_enemy_units:
isUnitInInfo = next((x for x in bot.opponent_data["army_tags_scouted"] if x["tag"] == unit.tag), None)
if isUnitInInfo is not None:
bot.opponent_data["army_tags_scouted"].remove(isUnitInInfo)
# if unit.tag not in bot.opponent_data["army_tags_scouted"]:
if bot.townhalls.ready.exists:
bot.opponent_data["army_tags_scouted"].append({
"tag": unit.tag,
"scout_time": bot.time,
"supply": get_unit_info(bot, unit) or 0,
"distance_to_base": bot.townhalls.ready.closest_to(unit).distance_to(unit),
})
# get opponent army supply (scouted / visible)
scout_timeout_duration = 300 # TODO: set the time on how long until the scouted army supply times out
bot.opponent_data["army_supply_scouted"] = sum(x["supply"] for x in bot.opponent_data["army_tags_scouted"] if x["scout_time"] > bot.time - scout_timeout_duration)
bot.opponent_data["army_supply_nearby"] = sum(x["supply"] for x in bot.opponent_data["army_tags_scouted"] if x["scout_time"] > bot.time - scout_timeout_duration and x["distance_to_base"] < 60)
bot.opponent_data["army_supply_visible"] = sum(get_unit_info(bot, x) or 0 for x in visible_enemy_units)
# get opponent expansions
if bot.iteration % 20 == 0:
enemy_townhalls = bot.enemy_structures.filter(lambda x:x.type_id in [HATCHERY, LAIR, HIVE, COMMANDCENTER, PLANETARYFORTRESS, ORBITALCOMMAND, NEXUS])
for th in enemy_townhalls:
if len(bot.opponent_data["expansions"]) > 0 and th.position.closest(bot.opponent_data["expansions"]).distance_to(th.position.to2) < 20:
continue
if th.tag not in bot.opponent_data["expansions_tags"]:
bot.opponent_data["expansions_tags"].add(th.tag)
bot.opponent_data["expansions"].append(th.position.to2)
logger.success(f"Found New Enemy Townhall at {th.position.to2}")
def is_valid_chrono_target(unit: Unit):
# do not chrono idle buildings or buildings that already have chrono
if unit.is_idle or unit.has_buff(BuffId.CHRONOBOOSTENERGYCOST): # what a terrible ID for this
return False
else: # just to make it clear
return True
|
import unittest
import doctest
def additional_tests():
import simplejson
import simplejson.encoder
import simplejson.decoder
suite = unittest.TestSuite()
for mod in (simplejson, simplejson.encoder, simplejson.decoder):
suite.addTest(doctest.DocTestSuite(mod))
suite.addTest(doctest.DocFileSuite('../../index.rst'))
return suite
def main():
suite = additional_tests()
runner = unittest.TextTestRunner()
runner.run(suite)
if __name__ == '__main__':
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
main()
|
"""Functionality for specifying and cycling through multiple calculations."""
from __future__ import print_function
from distutils.version import LooseVersion
from multiprocessing import cpu_count
import dask
import dask.bag as db
import distributed
import itertools
import logging
import pprint
import traceback
from .calc import Calc, _TIME_DEFINED_REDUCTIONS
from .region import Region
from .var import Var
_OBJ_LIB_STR = 'library'
_PROJECTS_STR = 'projects'
_MODELS_STR = 'models'
_RUNS_STR = 'runs'
_REGIONS_STR = 'regions'
_VARIABLES_STR = 'variables'
_TAG_ATTR_MODIFIERS = dict(all='', default='default_')
class AospyException(Exception):
"""Base exception class for the aospy package."""
pass
def _get_attr_by_tag(obj, tag, attr_name):
"""Get attribute from an object via a string tag.
Parameters
----------
obj : object from which to get the attribute
attr_name : str
Unmodified name of the attribute to be found. The actual attribute
        that is returned may be modified by 'tag'.
tag : str
Tag specifying how to modify 'attr_name' by pre-pending it with 'tag'.
Must be a key of the _TAG_ATTR_MODIFIERS dict.
Returns
-------
the specified attribute of obj
"""
attr_name = _TAG_ATTR_MODIFIERS[tag] + attr_name
return getattr(obj, attr_name)
def _permuted_dicts_of_specs(specs):
"""Create {name: value} dict, one each for every permutation.
Each permutation becomes a dictionary, with the keys being the attr names
and the values being the corresponding value for that permutation. These
dicts can then be directly passed to the Calc constructor.
"""
permuter = itertools.product(*specs.values())
return [dict(zip(specs.keys(), perm)) for perm in permuter]
def _merge_dicts(*dict_args):
"""Merge the given dictionaries into single dict.
Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
From http://stackoverflow.com/a/26853961/1706640
"""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
def _user_verify(prompt='Perform these computations? [y/n] '):
"""Prompt the user for verification."""
if not input(prompt).lower()[0] == 'y':
raise AospyException('Execution cancelled by user.')
def _get_all_objs_of_type(type_, parent):
"""Get all attributes of the given type from the given object.
Parameters
----------
type_ : The desired type
parent : The object from which to get the attributes with type matching
'type_'
Returns
-------
A list (possibly empty) of attributes from 'parent'
"""
return set([obj for obj in parent.__dict__.values()
if isinstance(obj, type_)])
class CalcSuite(object):
"""Suite of Calc objects generated from provided specifications."""
_CORE_SPEC_NAMES = {_OBJ_LIB_STR, _PROJECTS_STR, _MODELS_STR, _RUNS_STR}
_AUX_SPEC_NAMES = {_VARIABLES_STR,
_REGIONS_STR,
'date_ranges',
'input_time_intervals',
'input_time_datatypes',
'input_time_offsets',
'input_vertical_datatypes',
'output_time_intervals',
'output_time_regional_reductions',
'output_vertical_reductions'}
_NAMES_SUITE_TO_CALC = {
_PROJECTS_STR: 'proj',
_MODELS_STR: 'model',
_RUNS_STR: 'run',
_VARIABLES_STR: 'var',
_REGIONS_STR: 'region',
'date_ranges': 'date_range',
'input_time_intervals': 'intvl_in',
'input_time_datatypes': 'dtype_in_time',
'input_time_offsets': 'time_offset',
'input_vertical_datatypes': 'dtype_in_vert',
'output_time_intervals': 'intvl_out',
'output_time_regional_reductions': 'dtype_out_time',
'output_vertical_reductions': 'dtype_out_vert',
}
def __init__(self, calc_suite_specs):
self._specs_in = calc_suite_specs
self._obj_lib = self._specs_in[_OBJ_LIB_STR]
def _get_requested_spec(self, obj, spec_name):
"""Helper to translate user specifications to needed objects."""
requested = self._specs_in[spec_name]
if isinstance(requested, str):
return _get_attr_by_tag(obj, requested, spec_name)
else:
return requested
def _permute_core_specs(self):
"""Generate all requested combinations of the core objects."""
obj_trees = []
projects = self._get_requested_spec(self._obj_lib, _PROJECTS_STR)
for project in projects:
models = self._get_requested_spec(project, _MODELS_STR)
for model in models:
runs = self._get_requested_spec(model, _RUNS_STR)
for run in runs:
obj_trees.append({
self._NAMES_SUITE_TO_CALC[_PROJECTS_STR]: project,
self._NAMES_SUITE_TO_CALC[_MODELS_STR]: model,
self._NAMES_SUITE_TO_CALC[_RUNS_STR]: run,
})
return obj_trees
def _get_regions(self):
"""Get the requested regions."""
if self._specs_in[_REGIONS_STR] == 'all':
return [_get_all_objs_of_type(
Region, getattr(self._obj_lib, 'regions', self._obj_lib)
)]
else:
return [set(self._specs_in[_REGIONS_STR])]
def _get_variables(self):
"""Get the requested variables."""
if self._specs_in[_VARIABLES_STR] == 'all':
return _get_all_objs_of_type(
Var, getattr(self._obj_lib, 'variables', self._obj_lib)
)
else:
return set(self._specs_in[_VARIABLES_STR])
def _get_date_ranges(self):
"""Parse the input to get the desired date ranges."""
if self._specs_in['date_ranges'] == 'default':
return ['default']
else:
return self._specs_in['date_ranges']
def _get_time_reg_reducts(self):
"""Parse the input to get the desired spatiotemporal reductions."""
return [self._specs_in['output_time_regional_reductions']]
def _get_aux_specs(self):
"""Get and pre-process all of the non-core specifications."""
# Drop the "core" specifications, which are handled separately.
specs = self._specs_in.copy()
[specs.pop(core) for core in self._CORE_SPEC_NAMES]
specs[_REGIONS_STR] = self._get_regions()
specs[_VARIABLES_STR] = self._get_variables()
specs['date_ranges'] = self._get_date_ranges()
specs['output_time_regional_reductions'] = self._get_time_reg_reducts()
return specs
def _permute_aux_specs(self):
"""Generate all permutations of the non-core specifications."""
# Convert to attr names that Calc is expecting.
calc_aux_mapping = self._NAMES_SUITE_TO_CALC.copy()
# Special case: manually add 'library' to mapping
calc_aux_mapping[_OBJ_LIB_STR] = None
[calc_aux_mapping.pop(core) for core in self._CORE_SPEC_NAMES]
specs = self._get_aux_specs()
for suite_name, calc_name in calc_aux_mapping.items():
specs[calc_name] = specs.pop(suite_name)
return _permuted_dicts_of_specs(specs)
def _combine_core_aux_specs(self):
"""Combine permutations over core and auxilliary Calc specs."""
all_specs = []
for core_dict in self._permute_core_specs():
for aux_dict in self._permute_aux_specs():
all_specs.append(_merge_dicts(core_dict, aux_dict))
return all_specs
def create_calcs(self):
"""Generate a Calc object for each requested parameter combination."""
specs = self._combine_core_aux_specs()
for spec in specs:
spec['dtype_out_time'] = _prune_invalid_time_reductions(spec)
return [Calc(**sp) for sp in specs]
def _prune_invalid_time_reductions(spec):
"""Prune time reductions of spec with no time dimension."""
valid_reductions = []
if not spec['var'].def_time and spec['dtype_out_time'] is not None:
for reduction in spec['dtype_out_time']:
if reduction not in _TIME_DEFINED_REDUCTIONS:
valid_reductions.append(reduction)
else:
msg = ("Var {0} has no time dimension "
"for the given time reduction "
"{1} so this calculation will "
"be skipped".format(spec['var'].name, reduction))
logging.info(msg)
else:
valid_reductions = spec['dtype_out_time']
return valid_reductions
def _compute_or_skip_on_error(calc, compute_kwargs):
"""Execute the Calc, catching and logging exceptions, but don't re-raise.
Prevents one failed calculation from stopping a larger requested set
of calculations.
"""
try:
return calc.compute(**compute_kwargs)
except Exception:
msg = ("Skipping aospy calculation `{0}` due to error with the "
"following traceback: \n{1}")
logging.warning(msg.format(calc, traceback.format_exc()))
return None
def _submit_calcs_on_client(calcs, client, func):
"""Submit calculations via dask.bag and a distributed client"""
logging.info('Connected to client: {}'.format(client))
if LooseVersion(dask.__version__) < '0.18':
dask_option_setter = dask.set_options
else:
dask_option_setter = dask.config.set
with dask_option_setter(get=client.get):
return db.from_sequence(calcs).map(func).compute()
def _n_workers_for_local_cluster(calcs):
"""The number of workers used in a LocalCluster
An upper bound is set at the cpu_count or the number of calcs submitted,
depending on which is smaller. This is to prevent more workers from
being started than needed (but also to prevent too many workers from
being started in the case that a large number of calcs are submitted).
"""
return min(cpu_count(), len(calcs))
def _exec_calcs(calcs, parallelize=False, client=None, **compute_kwargs):
"""Execute the given calculations.
Parameters
----------
calcs : Sequence of ``aospy.Calc`` objects
parallelize : bool, default False
Whether to submit the calculations in parallel or not
client : distributed.Client or None
The distributed Client used if parallelize is set to True; if None
a distributed LocalCluster is used.
compute_kwargs : dict of keyword arguments passed to ``Calc.compute``
Returns
-------
A list of the values returned by each Calc object that was executed.
"""
if parallelize:
def func(calc):
"""Wrap _compute_or_skip_on_error to require only the calc
argument"""
if 'write_to_tar' in compute_kwargs:
compute_kwargs['write_to_tar'] = False
return _compute_or_skip_on_error(calc, compute_kwargs)
if client is None:
n_workers = _n_workers_for_local_cluster(calcs)
with distributed.LocalCluster(n_workers=n_workers) as cluster:
with distributed.Client(cluster) as client:
result = _submit_calcs_on_client(calcs, client, func)
else:
result = _submit_calcs_on_client(calcs, client, func)
if compute_kwargs['write_to_tar']:
_serial_write_to_tar(calcs)
return result
else:
return [_compute_or_skip_on_error(calc, compute_kwargs)
for calc in calcs]
def _serial_write_to_tar(calcs):
for calc in calcs:
if calc.proj.tar_direc_out:
for dtype_out_time in calc.dtype_out_time:
calc._write_to_tar(dtype_out_time)
def _print_suite_summary(calc_suite_specs):
"""Print summary of requested calculations."""
return ('\nRequested aospy calculations:\n' +
pprint.pformat(calc_suite_specs) + '\n')
def submit_mult_calcs(calc_suite_specs, exec_options=None):
"""Generate and execute all specified computations.
Once the calculations are prepped and submitted for execution, any
calculation that triggers any exception or error is skipped, and the rest
of the calculations proceed unaffected. This prevents an error in a single
calculation from crashing a large suite of calculations.
Parameters
----------
calc_suite_specs : dict
The specifications describing the full set of calculations to be
generated and potentially executed. Accepted keys and their values:
library : module or package comprising an aospy object library
The aospy object library for these calculations.
projects : list of aospy.Proj objects
The projects to permute over.
models : 'all', 'default', or list of aospy.Model objects
The models to permute over. If 'all', use all models in the
``models`` attribute of each ``Proj``. If 'default', use all
models in the ``default_models`` attribute of each ``Proj``.
runs : 'all', 'default', or list of aospy.Run objects
The runs to permute over. If 'all', use all runs in the
``runs`` attribute of each ``Model``. If 'default', use all
runs in the ``default_runs`` attribute of each ``Model``.
variables : list of aospy.Var objects
The variables to be calculated.
regions : 'all' or list of aospy.Region objects
The region(s) over which any regional reductions will be performed.
If 'all', use all regions in the ``regions`` attribute of each
``Proj``.
date_ranges : 'default' or a list of tuples
The range of dates (inclusive) over which to perform calculations.
If 'default', use the ``default_start_date`` and
``default_end_date`` attribute of each ``Run``. Else provide a
list of tuples, each containing a pair of start and end dates,
such as ``date_ranges=[(start, end)]`` where ``start`` and
``end`` are each ``datetime.datetime`` objects, partial
datetime strings (e.g. '0001'), ``np.datetime64`` objects, or
``cftime.datetime`` objects.
output_time_intervals : {'ann', season-string, month-integer}
The sub-annual time interval over which to aggregate.
- 'ann' : Annual mean
- season-string : E.g. 'JJA' for June-July-August
- month-integer : 1 for January, 2 for February, etc. Each one is
a separate reduction, e.g. [1, 2] would produce averages (or
other specified time reduction) over all Januaries, and
separately over all Februaries.
output_time_regional_reductions : list of reduction string identifiers
Unlike most other keys, these are not permuted over when creating
the :py:class:`aospy.Calc` objects that execute the calculations;
each :py:class:`aospy.Calc` performs all of the specified
reductions. Accepted string identifiers are:
- Gridpoint-by-gridpoint output:
- 'av' : Gridpoint-by-gridpoint time-average
- 'std' : Gridpoint-by-gridpoint temporal standard deviation
- 'ts' : Gridpoint-by-gridpoint time-series
- Averages over each region specified via `region`:
- 'reg.av', 'reg.std', 'reg.ts' : analogous to 'av', 'std', 'ts'
output_vertical_reductions : {None, 'vert_av', 'vert_int'}, optional
How to reduce the data vertically:
- None : no vertical reduction
- 'vert_av' : mass-weighted vertical average
- 'vert_int' : mass-weighted vertical integral
input_time_intervals : {'annual', 'monthly', 'daily', '#hr'}
A string specifying the time resolution of the input data. In
'#hr' above, the '#' stands for a number, e.g. 3hr or 6hr, for
sub-daily output. These are the suggested specifiers, but others
may be used if they are also used by the DataLoaders for the given
Runs.
input_time_datatypes : {'inst', 'ts', 'av'}
What the time axis of the input data represents:
- 'inst' : Timeseries of instantaneous values
- 'ts' : Timeseries of averages over the period of each time-index
- 'av' : A single value averaged over a date range
input_vertical_datatypes : {False, 'pressure', 'sigma'}, optional
The vertical coordinate system used by the input data:
- False : not defined vertically
- 'pressure' : pressure coordinates
- 'sigma' : hybrid sigma-pressure coordinates
input_time_offsets : {None, dict}, optional
How to offset input data in time to correct for metadata errors
- None : no time offset applied
- dict : e.g. ``{'hours': -3}`` to offset times by -3 hours
See :py:meth:`aospy.utils.times.apply_time_offset`.
exec_options : dict or None (default None)
Options regarding how the calculations are reported, submitted, and
saved. If None, default settings are used for all options. Currently
supported options (each should be either `True` or `False`):
- prompt_verify : (default False) If True, print summary of
calculations to be performed and prompt user to confirm before
submitting for execution.
- parallelize : (default False) If True, submit calculations in
parallel.
- client : distributed.Client or None (default None) The
dask.distributed Client used to schedule computations. If None
and parallelize is True, a LocalCluster will be started.
- write_to_tar : (default True) If True, write results of calculations
to .tar files, one for each :py:class:`aospy.Run` object.
          These tar files have a directory structure identical to that of the
          standard output, relative to their root directory, which is
specified via the `tar_direc_out` argument of each Proj
object's instantiation.
Returns
-------
A list of the return values from each :py:meth:`aospy.Calc.compute` call
If a calculation ran without error, this value is the
:py:class:`aospy.Calc` object itself, with the results of its
calculations saved in its ``data_out`` attribute. ``data_out`` is a
dictionary, with the keys being the temporal-regional reduction
identifiers (e.g. 'reg.av'), and the values being the corresponding
result.
If any error occurred during a calculation, the return value is None.
Raises
------
AospyException
If the ``prompt_verify`` option is set to True and the user does not
respond affirmatively to the prompt.
"""
if exec_options is None:
exec_options = dict()
if exec_options.pop('prompt_verify', False):
print(_print_suite_summary(calc_suite_specs))
_user_verify()
calc_suite = CalcSuite(calc_suite_specs)
calcs = calc_suite.create_calcs()
if not calcs:
raise AospyException(
"The specified combination of parameters yielded zero "
"calculations. Most likely, one of the parameters is "
"inadvertently empty."
)
return _exec_calcs(calcs, **exec_options)
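if __name__ == '__main__':
    # Minimal illustrative sketch of driving submit_mult_calcs. The object library and
    # names used below (example_obj_lib, example_proj, precip) are hypothetical
    # placeholders for a user's own aospy objects, not part of this module.
    import example_obj_lib as lib  # hypothetical user-supplied object library
    calc_suite_specs = dict(
        library=lib,
        projects=[lib.example_proj],
        models='default',
        runs='default',
        variables=[lib.precip],
        regions='all',
        date_ranges='default',
        input_time_intervals=['monthly'],
        input_time_datatypes=['ts'],
        input_time_offsets=[None],
        input_vertical_datatypes=[False],
        output_time_intervals=['ann'],
        output_time_regional_reductions=['av', 'reg.av'],
        output_vertical_reductions=[None],
    )
    submit_mult_calcs(calc_suite_specs, exec_options=dict(prompt_verify=True))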
|
# lowercase all of the letters in the input
a = input()
print(str.lower(a))
|
import pandas as pd
import numpy as np
import os
import json
import logging
import sys
logging.basicConfig(filename='logs.log',
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
# amount = invoice amount
# This function removes payments that have only a single subset
def removing_single_subsets(read_from, write_to):
for i in os.listdir(read_from):
logging.info('Processing '+str(i).split('.')[0])
dataset = pd.read_csv(r'' + read_from + os.sep + str(i), sep=',')
temp = dataset.groupby(['payment_id'])['subset_number'].nunique().reset_index()
valid_payments = temp[temp['subset_number']>1]['payment_id'].unique()
dataset = dataset[dataset['payment_id'].isin(valid_payments)]
if len(dataset)>1:
logging.info('Customer have multiple subsets.')
dataset.to_csv(write_to + str(i))
else:
logging.info('Customer ' + str(i).split('.')[0] + ' removed due to all single subsets')
# This function populates the binary output label: a subset is marked 1 when every row's payment_hdr_id equals the payment_id
def output_logic(read_from, write_to):
for i in os.listdir(read_from):
dataset = pd.read_csv(r'' + read_from + os.sep + str(i), sep=',',index_col=0)
temp=dataset.copy()
temp['diff_payment_id_&_payment_hdr_id'] = abs(temp['payment_id'] - temp['payment_hdr_id'])
agg_obj = temp[['payment_id', 'subset_number', 'diff_payment_id_&_payment_hdr_id', 'payment_hdr_id']].groupby(['payment_id', 'subset_number']).agg({'payment_hdr_id':'var','diff_payment_id_&_payment_hdr_id':'sum'}).reset_index()
agg_obj['payment_hdr_id']=agg_obj['payment_hdr_id'].fillna(0)
dataset['output'] = temp.apply(lambda row: 1 if ((agg_obj[(agg_obj['payment_id'] == row['payment_id']) & (
agg_obj['subset_number'] == row['subset_number'])][
'diff_payment_id_&_payment_hdr_id'].values[
0] == 0) & (agg_obj[(agg_obj['payment_id'] == row['payment_id']) & (agg_obj['subset_number'] == row['subset_number'])]['payment_hdr_id'].values[
0] == 0)) else 0, axis=1)
dataset.to_csv(write_to + str(i))
# This function computes, for each subset, the variance and mean of invoice payment delays
# and ranks them across the payment's subsets as variance_categorical and avg_delay_categorical
def variance_and_avg_delay_categorical(read_from, write_to):
for i in os.listdir(read_from):
dataset = pd.read_csv(r'' + read_from + os.sep + str(i), sep=',',index_col=0)
dataset['payment_date'] = pd.to_datetime(dataset['payment_date'])
dataset['invoice_date'] = pd.to_datetime(dataset['invoice_date'])
dataset['delay'] = dataset['payment_date'].subtract(dataset['invoice_date'], axis=0)
dataset['delay'] = dataset['delay'].apply(lambda x: pd.Timedelta(x).days)
temp = dataset.groupby(['payment_id', 'subset_number']).agg({'delay':'var'}).reset_index()
temp['delay']=temp['delay'].fillna(0)
temp['delay']=round(temp['delay'],5)
temp['variance_categorical']=temp.groupby(['payment_id'])['delay'].rank(ascending=True,method='dense')
dataset['variance']=dataset.apply(lambda row : temp[(temp['payment_id']==row['payment_id']) & (temp['subset_number']==row['subset_number'])]['delay'].values[0],axis=1)
dataset['variance_categorical']=dataset.apply(lambda row : temp[(temp['payment_id']==row['payment_id']) & (temp['subset_number']==row['subset_number'])]['variance_categorical'].values[0],axis=1)
dataset['variance_categorical'] = dataset['variance_categorical'] - 1
temp2 = dataset.groupby(['payment_id', 'subset_number']).agg({'delay':'mean'}).reset_index()
temp2['delay'] = round(temp2['delay'],5)
temp2['avg_delay_categorical'] = temp2.groupby(['payment_id'])['delay'].rank(ascending=False,method='dense')
dataset['average_delay'] = dataset.apply(lambda row : temp2[(temp2['payment_id']==row['payment_id']) & (temp2['subset_number']==row['subset_number'])]['delay'].values[0],axis=1)
dataset['avg_delay_categorical']=dataset.apply(lambda row : temp2[(temp2['payment_id']==row['payment_id']) & (temp2['subset_number']==row['subset_number'])]['avg_delay_categorical'].values[0],axis=1)
dataset['avg_delay_categorical'] = dataset['avg_delay_categorical'] - 1
dataset.to_csv(write_to + str(i))
#This function will create the feature number_invoices_closed
def number_unique_invoice_count(read_from, write_to):
for i in os.listdir(read_from):
dataset = pd.read_csv(r'' + read_from + os.sep + str(i), sep=',', index_col=0)
dataset['number_invoices_closed']=0
subset_invoice_count = dataset.groupby(['payment_id','subset_number'])['invoice'].nunique().reset_index()
payment_invoice_count = dataset.groupby('payment_id')['invoice'].nunique().reset_index()
dataset['number_invoices_closed'] = dataset.apply(lambda row: (subset_invoice_count[(subset_invoice_count['payment_id']==row['payment_id']) & (subset_invoice_count['subset_number']==row['subset_number'])]['invoice'].values[0]/payment_invoice_count[payment_invoice_count['payment_id']==row['payment_id']]['invoice'].values[0]), axis=1)
dataset['unique_invoice_count'] = dataset.apply(lambda row: payment_invoice_count[payment_invoice_count['payment_id']==row['payment_id']]['invoice'].values[0],axis=1)
dataset.to_csv(write_to + str(i))
def variance_categorical_old(read_from, write_to):
for i in os.listdir(read_from):
dataset = pd.read_csv(r'' + read_from + os.sep + str(i), sep=',',index_col=0)
if len(dataset)>1:
# dataset['subset_number'] = dataset.index
dataset['payment_date'] = pd.to_datetime(dataset['payment_date'])
dataset['invoice_date'] = pd.to_datetime(dataset['invoice_date'])
dataset['delay'] = dataset['payment_date'].subtract(dataset['invoice_date'], axis=0)
dataset['delay'] = dataset['delay'].apply(lambda x: pd.Timedelta(x).days)
group = dataset.groupby(by=['payment_id', 'subset_number'])
new_data = pd.DataFrame()
for name, data in group:
payment_id, subset_number = name[0], name[1]
data['output'] = 0
if len(data) > 1:
data['variance'] = data['delay'].var()
if data['payment_hdr_id'].unique()[0] == payment_id and data['payment_hdr_id'].var() == 0:
data['output'] = 1
else:
data['variance'] = 0
if data['payment_hdr_id'].unique()[0] == payment_id:
data['output'] = 1
data['average_delay'] = data['delay'].mean()
logging.info("here", name)
data['payment_id'] = payment_id
data['subset_number'] = subset_number
new_data = pd.concat([new_data, data])
logging.info("here 2", name)
grouped_payment_id = new_data.groupby(by=['payment_id'])
new_data['variance_categorical'] = 0
new_data_more_new = pd.DataFrame()
for name, data in grouped_payment_id:
unique_variance = data['variance'].unique()
logging.info(unique_variance)
data['payment_id'] = name
unique_variance.sort()
data['variance_categorical'] = data['variance'].apply(rank, args=(unique_variance,))
new_data_more_new = pd.concat([new_data_more_new, data])
new_data_more_new.to_csv(write_to + str(i))
# This function defines and assigns bins to the open invoice amount for each subset
def LMH_assign(val):
if val<=100:
return 'L1'
elif val<=500:
return 'L2'
elif val<=1000:
return 'L3'
elif val<=5000:
return 'M'
else:
return 'H'
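# e.g. LMH_assign(250) -> 'L2', LMH_assign(3000) -> 'M', LMH_assign(7500) -> 'H'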
#This function calculates LMH cumulative
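# For each (payment_id, subset_number) the subset's share of invoices in each bucket is weighted
# by the customer-level bucket percentages from the JSON:
# LMH_cumulative = L1*L1_perc + L2*L2_perc + L3*L3_perc + M*M_perc + H*H_perc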
def LMH_cumulative(read_from,write_to, customer_level_json):
local = json.load(open(customer_level_json, 'r'))
for i in os.listdir(read_from):
dataset = pd.read_csv(r'' + read_from + os.sep + str(i), sep=',',index_col=0)
cust_num=dataset['customer_number'].unique()[0]
if type(cust_num)!=str:
cust_num=cust_num.astype(str)
dataset['LMH'] = dataset['amount'].apply(LMH_assign)
# dataset['LMH'] = pd.cut(dataset['amount'], [0, 100, 500, 1000, 5000, 10000000000], labels=['L1', 'L2', 'L3', 'M', 'H'])
if local.get(cust_num).get('buckets_invoice').get('L1_perc'):
L1_perc = dataset['L1_perc'] = local.get(cust_num).get('buckets_invoice').get('L1_perc')[0]
else:
L1_perc = dataset['L1_perc'] = 0
if local.get(cust_num).get('buckets_invoice').get('L2_perc'):
L2_perc = dataset['L2_perc'] = local.get(cust_num).get('buckets_invoice').get('L2_perc')[0]
else:
L2_perc = dataset['L2_perc'] = 0
if local.get(cust_num).get('buckets_invoice').get('L3_perc'):
L3_perc = dataset['L3_perc'] = local.get(cust_num).get('buckets_invoice').get('L3_perc')[0]
else:
L3_perc = dataset['L3_perc'] = 0
if local.get(cust_num).get('buckets_invoice').get('M_perc'):
M_perc = dataset['M_perc'] = local.get(cust_num).get('buckets_invoice').get('M_perc')[0]
else:
M_perc = dataset['M_perc'] = 0
if local.get(cust_num).get('buckets_invoice').get('H_perc'):
H_perc = dataset['H_perc'] = local.get(cust_num).get('buckets_invoice').get('H_perc')[0]
else:
H_perc = dataset['H_perc'] = 0
dataset['LMH_cumulative']=0
for j in dataset['payment_id'].unique():
for k in dataset[dataset['payment_id']==j]['subset_number'].unique():
temp=dataset[(dataset['payment_id']==j) & (dataset['subset_number']==k)]
L1 = len(temp[temp['LMH'] == 'L1'])/len(temp)
L2 = len(temp[temp['LMH'] == 'L2']) / len(temp)
L3 = len(temp[temp['LMH'] == 'L3']) / len(temp)
M = len(temp[temp['LMH'] == 'M']) / len(temp)
H = len(temp[temp['LMH'] == 'H']) / len(temp)
dataset.loc[(dataset['payment_id'] == j) & (dataset['subset_number'] == k), 'LMH_cumulative']=L1*L1_perc + L2*L2_perc + L3*L3_perc + M*M_perc + H*H_perc
dataset.to_csv(write_to + str(i))
#This function populates quarter level payment and invoice level features AND avg_of_all_delays feature
def quarter_level_features(read_from, write_to, customer_level_json):
for i in os.listdir(read_from):
dictionary = json.load(open(customer_level_json, 'r'))
dataset = pd.read_csv(r''+read_from + os.sep + str(i), sep=',', index_col=0)
cust_num = dataset['customer_number'].unique()[0]
if type(cust_num)!=str:
cust_num=cust_num.astype(str)
dataset['avg_of_invoices_closed'] = dictionary.get(cust_num).get('avg_of_invoices_closed')
dataset['avg_of_all_delays'] = dictionary.get(cust_num).get('avg_of_all_delays')
dataset['payment_count_quarter_q1'] = dictionary.get(cust_num).get('payment_count_quarter').get('q1')
dataset['payment_count_quarter_q2'] = dictionary.get(cust_num).get('payment_count_quarter').get('q2')
dataset['payment_count_quarter_q3'] = dictionary.get(cust_num).get('payment_count_quarter').get('q3')
dataset['payment_count_quarter_q4'] = dictionary.get(cust_num).get('payment_count_quarter').get('q4')
dataset['invoice_count_quarter_q1'] = dictionary.get(cust_num).get('invoice_count_quarter').get('q1')
dataset['invoice_count_quarter_q2'] = dictionary.get(cust_num).get('invoice_count_quarter').get('q2')
dataset['invoice_count_quarter_q3'] = dictionary.get(cust_num).get('invoice_count_quarter').get('q3')
dataset['invoice_count_quarter_q4'] = dictionary.get(cust_num).get('invoice_count_quarter').get('q4')
dataset['avg_of_invoices_closed'] = dictionary.get(cust_num).get('avg_of_invoices_closed')
dataset['avg_of_all_delays'] = dictionary.get(cust_num).get('avg_of_all_delays')
dataset.to_csv(write_to + str(i))
#supporting function for avg_delay_categorical
def rank(x, un_var):
index = np.where(un_var == x)
return index[0][0]
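# e.g. rank(5, np.array([2, 5, 9])) -> 1, the position of x in the array of unique values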
def avg_delay_categorical_old(read_from,write_to):
for i in os.listdir(read_from):
data = pd.read_csv(r''+read_from + os.sep + str(i), sep=',',index_col=0)
if len(data)>1:
grouped_payment_id = data.groupby(by=['payment_id'])
new_data_final=pd.DataFrame()
for name,group in grouped_payment_id:
unique_delay = group['average_delay'].unique()
sorted_delay = np.asarray(sorted(unique_delay, reverse=True))
group['avg_delay_categorical'] = group['average_delay'].apply(rank, args=(sorted_delay,))
new_data_final = pd.concat([new_data_final, group], axis=0)
new_data_final.to_csv(write_to + str(i))
#This function rolls up the payment information to the subsets level
def subset_rolled_up(read_from, write_to):
variables = ['account_id','customer_number', 'payment_id', 'subset_number', 'output',
'variance_categorical','avg_delay_categorical',
'L1_perc', 'L2_perc', 'L3_perc', 'M_perc', 'H_perc',
'LMH_cumulative',
'avg_of_invoices_closed',
'avg_of_all_delays',
'payment_count_quarter_q1', 'payment_count_quarter_q2', 'payment_count_quarter_q3',
'payment_count_quarter_q4',
'invoice_count_quarter_q1', 'invoice_count_quarter_q2', 'invoice_count_quarter_q3',
'invoice_count_quarter_q4', 'payment_amount', 'number_invoices_closed',
'payment_date','unique_invoice_count']
for i in os.listdir(read_from):
final = pd.DataFrame()
dataset = pd.read_csv(r''+read_from + os.sep + str(i), sep=',', index_col=0)
for j in dataset['payment_id'].unique():
for k in dataset[dataset['payment_id'] == j]['subset_number'].unique():
temp = dataset[(dataset['payment_id'] == j) & (dataset['subset_number'] == k)][variables]
                final = pd.concat([final, temp.iloc[[0]]], ignore_index=True)
final['subset_count']=final.groupby(['payment_id'])['payment_id'].transform('count')
final=final[final['subset_count']>=1]
final.to_csv(write_to + str(i))
def create_all_features(read_from_, read_from, write_to, write_to_, customer_level_json):
logging.info('Feature Creation Started.')
#Removing Single Subsets
logging.info('Removing Single Subsets..')
removing_single_subsets(read_from_, write_to)
logging.info('Single Subsets Removed.')
#Output Logic
logging.info('Output Variable getting created.')
output_logic(read_from, write_to)
logging.info('Output Variable got created.')
#Variance and Average Categorical Feature
logging.info('Avg delay categorical started.')
variance_and_avg_delay_categorical(read_from, write_to)
logging.info('Avg delay categorical Finished.')
#Number of invoices in a subset divided by the total open invoices
logging.info('unique invoice count started.')
number_unique_invoice_count(read_from, write_to)
logging.info('unique invoice count finished.')
# LMH_cumulative
logging.info('LMH Started')
LMH_cumulative(read_from,write_to, customer_level_json)
logging.info('LMH Finished')
#quarter_level
logging.info('Quarter Features Started.')
quarter_level_features(read_from, write_to, customer_level_json)
logging.info('Quarter Features Finished.')
logging.info('Feature Generation Ended.')
#Subset Filtering and rolling up
logging.info('Subsets Rolling up.')
subset_rolled_up(read_from, write_to_)
logging.info('Subsets Rolled up.')
def prepare_predictions_data(read_from, write_to):
predictions_data = pd.DataFrame()
for i in os.listdir(read_from):
data = pd.read_csv(r'' + read_from + os.sep + str(i), sep=',',index_col=0)
        predictions_data = pd.concat([predictions_data, data])
predictions_data.to_csv(write_to)
if __name__ == '__main__':
acct_id = str(sys.argv[1])
path = str(sys.argv[2])
read_from_ = path+"/account_"+acct_id+"/customer_wise_subsets"
read_from = path+"/account_"+acct_id+"/customer_subsets_features"
write_to = path+"/account_"+acct_id+"/customer_subsets_features/"
write_to_ = path+"/account_"+acct_id+"/subsets_rolled_up/"
customer_level_json = path+"/account_"+acct_id+"/customersJson/customersJson.json"
log_path=path+'/account_'+str(acct_id)+'/logs/'
#create features for all subsets
create_all_features(read_from_, read_from, write_to, write_to_, customer_level_json)
progress=pd.read_csv(log_path+"progress.csv")
progress['Status']='FeaturesCreation.py'
progress.to_csv(log_path+"progress.csv",index=False)
|
import pynetbox
import napalm
import pprint
import secrets
import role_mapping
class Network_Device:
'''An object of the network device you are connecting to.
Attributes:
facts: network device facts available from NAPALM
interfaces: network device interfaces available from NAPALM
'''
def __init__(self,name,driver):
self.name = name
self.network_device_username = secrets.Secrets.napalm_username
self.network_device_password = secrets.Secrets.napalm_password
self.network_device_driver = napalm.get_network_driver(driver)
self.network_device = {'hostname':self.name,'username':self.network_device_username,'password':self.network_device_password}
self.network_device_facts = self.get_device_facts(network_device_driver=self.network_device_driver, network_device=self.network_device)
self.network_device_hostname = self.network_device_facts['hostname']
# self.network_device_interfaces = self.get_device_interfaces(network_device_driver=self.network_device_driver, network_device=self.network_device)
# self.network_device_interfaces_ip = self.get_device_interfaces_ip(network_device_driver=self.network_device_driver, network_device=self.network_device)
# self.network_device_bgp_neighbors = self.get_device_bgp_neighbors(network_device_driver=self.network_device_driver, network_device=self.network_device)
# self.network_device_lldp_neighbors = self.get_device_lldp_neighbors(network_device_driver=self.network_device_driver, network_device=self.network_device)
def get_device_facts(self, network_device_driver, network_device):
# with network_device_driver(**network_device) as device:
# return device.get_interfaces()
device = network_device_driver(**network_device)
device.open()
device_facts = device.get_facts()
device.close()
return device_facts
def get_device_interfaces(self, network_device_driver, network_device):
with network_device_driver(**network_device) as device:
return device.get_interfaces()
def get_device_interfaces_ip(self, network_device_driver, network_device):
with network_device_driver(**network_device) as device:
return device.get_interfaces_ip()
def get_device_bgp_neighbors(self, network_device_driver, network_device):
with network_device_driver(**network_device) as device:
return device.get_bgp_neighbors()
def get_device_lldp_neighbors(self, network_device_driver, network_device):
with network_device_driver(**network_device) as device:
return device.get_lldp_neighbors()
class Netbox_Connection:
'''
Netbox connection using pynetbox
'''
def __init__(self):
self.netbox_url = secrets.Secrets.netbox_url
self.netbox_ssl_validation = False
self.netbox_token = secrets.Secrets.netbox_token
self.netbox_connection = pynetbox.api(self.netbox_url, token=self.netbox_token, ssl_verify=self.netbox_ssl_validation)
self.netbox_1g_sfp_id = 1100
self.netbox_10g_sfpp_id = 1200
self.netbox_1g_base_t_id = 1000
self.netbox_virtual_id = 0
self.netbox_lag_id = 200
self.netbox_40g_qsfpp_id = 1400
class Netbox_Device:
'''
A device as represented in NetBox
'''
pass
def main():
device = Network_Device('172.16.216.39','junos')
netbox_connection = Netbox_Connection()
def get_netbox_device_id():
device_object = netbox_connection.netbox_connection.dcim.devices.get(name=device.network_device_hostname)
return device_object.id
if str(device.network_device_hostname) in str(netbox_connection.netbox_connection.dcim.devices.all()):
for interface in device.network_device_facts['interface_list']:
if interface.startswith('ge'):
try:
netbox_connection.netbox_connection.dcim.interfaces.create(device=str(get_netbox_device_id()),name=interface,type=netbox_connection.netbox_1g_sfp_id,enabled=True)
except: pass
elif interface.startswith('xe'):
try:
netbox_connection.netbox_connection.dcim.interfaces.create(device=str(get_netbox_device_id()),name=interface,type=netbox_connection.netbox_10g_sfpp_id,enabled=True)
except: pass
elif interface.startswith('lo'):
try:
netbox_connection.netbox_connection.dcim.interfaces.create(device=str(get_netbox_device_id()),name=interface,type=netbox_connection.netbox_virtual_id,enabled=True)
except: pass
# elif interface.startswith('ae'):
# try:
# netbox_connection.netbox_connection.dcim.interfaces.create(device=str(get_netbox_device_id()),name=interface,type=netbox_connection.netbox_lag_id,enabled=True)
# except: pass
elif interface.startswith('me'):
try:
netbox_connection.netbox_connection.dcim.interfaces.create(device=str(get_netbox_device_id()),name=interface,type=netbox_connection.netbox_1g_base_t_id,enabled=True)
except: pass
elif interface.startswith('em0'):
try:
netbox_connection.netbox_connection.dcim.interfaces.create(device=str(get_netbox_device_id()),name=interface,type=netbox_connection.netbox_1g_base_t_id,enabled=True)
except: pass
elif interface.startswith('em1'):
try:
netbox_connection.netbox_connection.dcim.interfaces.create(device=str(get_netbox_device_id()),name=interface,type=netbox_connection.netbox_1g_sfp_id,enabled=True)
except: pass
elif interface.startswith('fxp'):
try:
netbox_connection.netbox_connection.dcim.interfaces.create(device=str(get_netbox_device_id()),name=interface,type=netbox_connection.netbox_1g_base_t_id,enabled=True)
except: pass
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import os
import re
import sys
import unittest
pkgpath = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) or '..'
sys.path.insert(0, pkgpath)
def suite():
s = unittest.TestSuite()
# Get the suite() of every module in this directory beginning with
# "test_".
for fname in os.listdir(os.path.join(pkgpath, 'test')):
match = re.match(r'(test_\S+)\.py$', fname)
if match:
modname = match.group(1)
s.addTest(__import__(modname).suite())
return s
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
from django.test import TestCase
class ExchangeTests(TestCase):
pass
|
#!/usr/bin/python
# Orthanc - A Lightweight, RESTful DICOM Store
# Copyright (C) 2012-2016 Sebastien Jodogne, Medical Physics
# Department, University Hospital of Liege, Belgium
# Copyright (C) 2017-2020 Osimis S.A., Belgium
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import sys
import RestToolbox
import md5
##
## Print help message
##
if len(sys.argv) != 3:
print("""
Sample script that anonymizes patients in real-time. A patient gets
anonymized as soon as she gets stable (i.e. when no DICOM instance has
been received for this patient for a sufficient amount of time - cf.
the configuration option "StableAge").
Usage: %s [hostname] [HTTP port]
For instance: %s 127.0.0.1 8042
""" % (sys.argv[0], sys.argv[0]))
exit(-1)
URL = 'http://%s:%d' % (sys.argv[1], int(sys.argv[2]))
##
## The following function is called whenever a patient gets stable
##
COUNT = 1
def AnonymizePatient(path):
global URL
global COUNT
patient = RestToolbox.DoGet(URL + path)
patientID = patient['MainDicomTags']['PatientID']
# Ignore anonymized patients
if not 'AnonymizedFrom' in patient:
print('Patient with ID "%s" is stabilized: anonymizing it...' % (patientID))
# The PatientID after anonymization is taken as the 8 first
# characters from the MD5 hash of the original PatientID
anonymizedID = md5.new(patientID).hexdigest()[:8]
anonymizedName = 'Anonymized patient %d' % COUNT
COUNT += 1
RestToolbox.DoPost(URL + path + '/anonymize',
{ 'Replace' : { 'PatientID' : anonymizedID,
'PatientName' : anonymizedName } })
# Delete the source patient after the anonymization
        RestToolbox.DoDelete(URL + path)
##
## Main loop that listens to the changes API.
##
current = 0
while True:
r = RestToolbox.DoGet(URL + '/changes', {
'since' : current,
'limit' : 4 # Retrieve at most 4 changes at once
})
for change in r['Changes']:
if change['ChangeType'] == 'StablePatient':
AnonymizePatient(change['Path'])
current = r['Last']
if r['Done']:
print('Everything has been processed: Waiting...')
time.sleep(1)
|
# Generated by Django 2.2.13 on 2020-07-12 06:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product', '0020_auto_20200710_2318'),
]
operations = [
migrations.AlterField(
model_name='category',
name='status',
field=models.CharField(choices=[('T', 'T'), ('F', 'F')], max_length=20),
),
]
|
from django.contrib import admin
# Register your models here.
from location.models import Location, Student, Grade
class LocationAdmin(admin.ModelAdmin):
list_display = ('lat', 'long', 'student', 'timestamp')
list_filter = ('student', 'timestamp')
class GradeAdmin(admin.ModelAdmin):
list_display = ('date', 'item', 'itemCode', 'grade', 'passed', 'student')
    list_filter = ('student', 'item', 'itemCode')
class StudentAdmin(admin.ModelAdmin):
list_display = ('id', 'givenName', 'mail')
search_fields = ['u', 'givenName']
admin.site.register(Location, LocationAdmin)
admin.site.register(Student, StudentAdmin)
admin.site.register(Grade, GradeAdmin)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: Cleaner library is property of isMOOD and is not publicly distributed
# The script fails without this library at the moment
from __future__ import print_function
import Cleaner # TO IMPLEMENT
import csv
import re
import settings
import spacy
__author__ = 'Zoe Kotti'
__email__ = 'kotti@ismood.com'
__copyright__ = 'Copyright 2019, isMOOD'
# Prepare Cleaner -- TO IMPLEMENT
class_setter = {
'polytonic': True,
'lower': True
}
# Load Cleaner -- TO IMPLEMENT
cleaner = Cleaner(class_setter)
# Load Greek core from spacy
nlp = spacy.load('el_core_news_md')
# Sources of Greek terms
INPUT_FILE_ASPELL = 'lexicons/el_GR-0.9.csv'
INPUT_FILE_WIKI = 'lexicons/elwords_from_wiktionary.csv'
INPUT_FILE_LEMMAS = 'lexicons/greek_lemmas.csv'
# Mapping dictionary for the spacy POS tags found at:
# https://spacy.io/api/annotation#pos-tagging
POS_DICT = {
'ADJ': 'adjective',
'ADP': 'adposition',
'ADV': 'adverb',
'AUX': 'auxiliary',
'CONJ': 'conjunction',
'CCONJ': 'coordinating_conjunction',
'DET': 'determiner',
'INTJ': 'interjection',
'NOUN': 'noun',
'NUM': 'numeral',
'PART': 'particle',
'PRON': 'pronoun',
'PROPN': 'proper_noun',
'PUNCT': 'punctuation',
'SCONJ': 'subordinating_conjunction',
'SYM': 'symbol',
'VERB': 'verb',
'X': 'other',
'SPACE': 'space'
}
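# magic_number measures how far the top score sits above the other two:
# (top - second) + (top - third); it is 0 when all three scores are equal.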
def find_sentiment(pos_score, neg_score, obj_score):
scores = [pos_score, neg_score, obj_score]
scores.sort(reverse=True)
magic_number = (scores[0] - scores[1]) + (scores[0] - scores[2])
# Priority is given first to positive, then to negative, and lastly to objective
if scores[0] == pos_score:
majority = 'positive'
elif scores[0] == neg_score:
majority = 'negative'
else:
majority = 'objective'
sentiment = {
'PosScore': round(pos_score, 3),
'NegScore': round(neg_score, 3),
'ObjScore': round(obj_score, 3),
'magic_number': round(magic_number, 3),
'majority': majority
}
return sentiment
def prepare_insert(term, source):
    words_count = len(re.split(r'\s+', term))
insert = {
'_id': term,
'sources': [source],
'sources_count': 1,
'clean': cleaner.clean_text(term)['text'],
'words_count': words_count
}
if words_count == 1:
doc = nlp(term.encode('utf-8'))
spacy = {
'lemma': doc[0].lemma_,
'pos': doc[0].pos_,
'tag': doc[0].tag_,
'dep': doc[0].dep_,
'shape': doc[0].shape_,
'is_alpha': doc[0].is_alpha,
'is_stop': doc[0].is_stop
}
insert['spacy'] = spacy
return insert
def init_greek_terms(greek_terms):
# Aspell
with open(INPUT_FILE_ASPELL, 'r') as dict_aspell:
csv_reader = csv.DictReader(dict_aspell)
for row in csv_reader:
insert = prepare_insert(row['term'], 'aspell')
greek_terms.insert_one(insert)
# Wiktionary
with open(INPUT_FILE_WIKI, 'r') as dict_wiki:
csv_reader = csv.DictReader(dict_wiki)
for row in csv_reader:
term = row['term']
if greek_terms.count({'_id': term}) == 0:
insert = prepare_insert(term, 'wiktionary')
greek_terms.insert_one(insert)
else:
existing_term = greek_terms.find_one({'_id': term}, {'sources_count': 1})
update = {
'$addToSet': {'sources': 'wiktionary'},
'$set': {'sources_count': existing_term['sources_count'] + 1}
}
greek_terms.update({'_id': term}, update)
# Greek Lemmas
with open(INPUT_FILE_LEMMAS, 'r') as dict_lemmas:
csv_reader = csv.DictReader(dict_lemmas)
for row in csv_reader:
term = row['term']
if greek_terms.count({'_id': term}) == 0:
insert = prepare_insert(term, 'greek_lemmas')
greek_terms.insert_one(insert)
else:
existing_term = greek_terms.find_one({'_id': term}, {'sources_count': 1})
update = {
'$addToSet': {'sources': 'greek_lemmas'},
'$set': {'sources_count': existing_term['sources_count'] + 1}
}
greek_terms.update({'_id': term}, update)
def populate_lemmas(greek_terms):
documents = greek_terms.find({'spacy': {'$exists': 1}}, {'spacy': 1}, no_cursor_timeout=True)
lemmas = set()
ids = set()
for doc in documents:
spacy = doc['spacy']
lemmas.add(spacy['lemma'].lower())
ids.add(doc['_id'].lower())
for lemma in lemmas:
if lemma in ids:
# Ignore case sensitivity of ids
existing_lemmas = greek_terms.find({'_id': {'$regex': '^{}$'.format(lemma.encode('utf-8')), '$options': '-i'}}, {'sources': 1, 'sources_count': 1})
for existing in existing_lemmas:
update = {
'$addToSet': {'sources': 'lemmas_generated'},
'$set': {'sources_count': existing['sources_count'] + 1}
}
greek_terms.update({'_id': existing['_id']}, update)
else:
insert = prepare_insert(lemma, 'lemmas_generated')
greek_terms.insert_one(insert)
documents.close()
def map_sentiment(greek_terms, english_sentiment_terms):
english_documents = english_sentiment_terms.find({}, {'sentiment': 1, 'translation': 1}, no_cursor_timeout=True).sort('_id', 1)
index = 0
for doc in english_documents:
print("Index: {}".format(index))
translation = doc['translation']
translation_lowercase = translation['lowercase']
en_sentiment = doc['sentiment']
greek_documents = greek_terms.find({'_id': {'$regex': '^{}$'.format(translation_lowercase.encode('utf-8')), '$options': '-i'}}, {'sentiment': 1})
if greek_documents.count():
for gr_doc in greek_documents:
if 'sentiment' in gr_doc.keys():
gr_sentiment = gr_doc['sentiment']
pos_score = gr_sentiment['PosScore'] + en_sentiment['PosScore']
neg_score = gr_sentiment['NegScore'] + en_sentiment['NegScore']
obj_score = gr_sentiment['ObjScore'] + en_sentiment['ObjScore']
occurrences = gr_sentiment['occurrences'] + 1
else:
pos_score = en_sentiment['PosScore']
neg_score = en_sentiment['NegScore']
obj_score = en_sentiment['ObjScore']
occurrences = 1
update = {
'$set': {
'sentiment': {
'PosScore': pos_score,
'NegScore': neg_score,
'ObjScore': obj_score,
'occurrences': occurrences
}
}
}
greek_terms.update({'_id': gr_doc['_id']}, update)
print("Index: {} -- Term: {}".format(index, gr_doc['_id'].encode('utf-8')))
index += 1
english_documents.close()
greek_documents = greek_terms.find({'sentiment': {'$exists': 1}}, {'sentiment': 1}, no_cursor_timeout=True)
for doc in greek_documents:
gr_sentiment = doc['sentiment']
pos_score = gr_sentiment['PosScore'] / gr_sentiment['occurrences']
neg_score = gr_sentiment['NegScore'] / gr_sentiment['occurrences']
obj_score = gr_sentiment['ObjScore'] / gr_sentiment['occurrences']
sentiment = find_sentiment(pos_score, neg_score, obj_score)
greek_terms.update({'_id': doc['_id']}, {'$set': {'sentiment': sentiment}})
greek_documents.close()
def init_greek_sentiment_terms(greek_terms, greek_sentiment_terms):
gr_terms_documents = greek_terms.find({'$and': [{'sentiment': {'$exists': 1}}, {'words_count': 1}]}, {'clean': 1, 'sentiment': 1, 'spacy': 1}, no_cursor_timeout=True)
for doc in gr_terms_documents:
clean = doc['clean']
spacy = doc['spacy']
sentiment = doc['sentiment']
pos_score = sentiment['PosScore']
neg_score = sentiment['NegScore']
obj_score = sentiment['ObjScore']
gr_doc = greek_sentiment_terms.find_one({'_id': clean})
# Clean term exists
if gr_doc:
# Clean term has sentiment
if 'sentiment' in gr_doc.keys():
gr_sentiment = gr_doc['sentiment']
pos_score += gr_sentiment['PosScore']
neg_score += gr_sentiment['NegScore']
obj_score += gr_sentiment['ObjScore']
update = {
'$push': {
'pos': POS_DICT[spacy['pos']]
},
'$set': {
'sources_count': gr_doc['sources_count'] + 1,
'sentiment': {
'PosScore': pos_score,
'NegScore': neg_score,
'ObjScore': obj_score
}
}
}
greek_sentiment_terms.update({'_id': clean}, update)
# Clean term does not exist
else:
insert = {
'_id': clean,
'sources_count': 1,
                'words_count': len(re.split(r'\s+', clean)),
'pos': [POS_DICT[spacy['pos']]],
'sentiment': {
'PosScore': pos_score,
'NegScore': neg_score,
'ObjScore': obj_score
}
}
greek_sentiment_terms.insert_one(insert)
gr_terms_documents.close()
greek_documents = greek_sentiment_terms.find({}, {'sentiment': 1, 'pos': 1, 'sources_count': 1}, no_cursor_timeout=True)
for gr_doc in greek_documents:
gr_sentiment = gr_doc['sentiment']
pos_score = gr_sentiment['PosScore'] / gr_doc['sources_count']
neg_score = gr_sentiment['NegScore'] / gr_doc['sources_count']
obj_score = gr_sentiment['ObjScore'] / gr_doc['sources_count']
sentiment = find_sentiment(pos_score, neg_score, obj_score)
update = {
'$set': {
'sentiment': sentiment,
'pos': max(gr_doc['pos'], key=gr_doc['pos'].count)
}
}
greek_sentiment_terms.update({'_id': gr_doc['_id']}, update)
greek_documents.close()
def main():
greek_terms = settings.MONGO_CLIENT.lexicondb.greek_terms
greek_sentiment_terms = settings.MONGO_CLIENT.lexicondb.greek_sentiment_terms
english_sentiment_terms = settings.MONGO_CLIENT.lexicondb.english_sentiment_terms
init_greek_terms(greek_terms)
populate_lemmas(greek_terms)
map_sentiment(greek_terms, english_sentiment_terms)
init_greek_sentiment_terms(greek_terms, greek_sentiment_terms)
if __name__ == '__main__':
main()
|
# Generated by Django 3.2 on 2021-04-24 12:19
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('Tour_app', '0007_auto_20210424_1745'),
]
operations = [
migrations.RenameField(
model_name='tour_package',
old_name='dests',
new_name='places',
),
migrations.AlterField(
model_name='tour_package',
name='departure',
field=models.DateField(default=datetime.datetime(2021, 4, 24, 12, 19, 57, 919313, tzinfo=utc)),
),
]
|
def negativePower(a,b):
a=float(a)
if(a>0 and b==-1):
return (1/a) #base case
return (1/a)*negativePower(a,b+1)
result1=negativePower(2,-1)
result2=negativePower(5,-3)
result3=negativePower(10,-2)
print ("The results are:",result1,result2,result3)
|
def osamäärä(a, b):
    """
    :param a: dividend
    :param b: divisor
    :return c: value of the quotient
    """
    c = a / b
    return c
|
import numpy as np
from ..constants import COLOR_CHANNEL_INDICES
# Default trimmedness: discard anything more than 4 standard deviations from a central value
DEFAULT_TRIM_STDEV = 4
def _trim_data_to_stdev(sample, trim_stdev):
""" Trim the farther reaches of a data set based on a central value and standard deviation
Arguments:
sample: 1-dimensional numpy array to be trimmed
trim_stdev: number of standard deviations away from the median to keep
e.g. if 0, only values matching the median will be kept.
If 2, anything within 2 standard deviations of the median will be kept
Return:
trimmed version of the sample with anything outside of `trim_stdev` standard deviations of the mean removed
"""
median = np.median(sample)
stdev = np.std(sample)
allowed_half_width = stdev * trim_stdev
min_ = median - allowed_half_width
max_ = median + allowed_half_width
trimmed_sample = sample[(sample >= min_) & (sample <= max_)]
return trimmed_sample
def median_seeded_outlier_removed_mean(sample, trim_stdev=DEFAULT_TRIM_STDEV):
""" Calculate the Median-Seeded, Outlier-Removed Mean (~MSORM~) of a flat data sample
Arguments:
sample: n-dimensional numpy array to find the central value of.
NOTE: if you are trying to get msorms of an image or a stack of images, you probably want
to use one of the other functions in this file.
trim_stdev: number of standard deviations away from the median to keep
Return:
the mean of the sample after outliers have been removed.
Outliers are removed based on their distance from the median
"""
trimmed_sample = _trim_data_to_stdev(sample, trim_stdev)
return np.mean(trimmed_sample)
msorm = median_seeded_outlier_removed_mean
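def _msorm_usage_example():
    """Illustrative usage sketch, not called anywhere in this module.
    The sample values and trim_stdev=2 are assumptions chosen purely to show how
    trimming around the median discards an extreme outlier before the mean is taken.
    """
    sample = np.array([10.0, 11.0, 9.0, 10.5, 9.5, 500.0])
    plain_mean = np.mean(sample)               # ~91.7, dominated by the 500.0 outlier
    robust_mean = msorm(sample, trim_stdev=2)  # 10.0, the outlier lies outside 2 stdevs of the median
    return plain_mean, robust_mean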
def _validate_rgb_image_shape(rgb_image, image_name):
shape = rgb_image.shape
if len(shape) != 3:
raise ValueError(
f"{image_name} is expected to have 3 dimensions but had shape {rgb_image.shape}"
)
num_color_channels = rgb_image.shape[2]
expected_num_color_channels = len(COLOR_CHANNEL_INDICES)
if num_color_channels != expected_num_color_channels:
raise ValueError(
f"{image_name} is expected to have {expected_num_color_channels} "
f"channels but had {num_color_channels}. (shape={rgb_image.shape})"
)
def image_msorm(image, trim_stdev=DEFAULT_TRIM_STDEV):
""" Calculate the Median-Seeded, Outlier-Removed Mean (~MSORM~ for short) of an RGB image
Arguments:
image: RGB image numpy array to find the central value of
trim_stdev: number of standard deviations away from the median to keep
Return:
1D numpy array: for each channel, the mean of the sample after outliers have been removed.
Outliers are removed based on their distance from the median
"""
_validate_rgb_image_shape(image, "Image passed to image_msorm()")
flattened_channels = [
image[:, :, channel].flatten() for channel in COLOR_CHANNEL_INDICES
]
return np.array([msorm(channel, trim_stdev) for channel in flattened_channels])
def image_stack_msorm(image_stack, trim_stdev=DEFAULT_TRIM_STDEV):
""" Calculate the Median-Seeded, Outlier-Removed Mean (~MSORM~ for short) of a "stack" of RGB images
Arguments:
image_stack: RGB image "stack" numpy array to find the central value of.
This is a 4-dimensional numpy array where the first dimension iterates over images.
trim_stdev: number of standard deviations away from the median to keep
Return:
1D numpy array: for each channel,
the mean of the sample (across all images) after outliers have been removed.
Outliers are removed based on their distance from the median
"""
_validate_rgb_image_shape(
image_stack[0], "First image in stack passed to image_stack_msorm()"
)
flattened_channels = [
image_stack[:, :, :, channel].flatten() for channel in COLOR_CHANNEL_INDICES
]
return np.array([msorm(channel, trim_stdev) for channel in flattened_channels])
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 17 18:36:34 2019
@author: MAGESHWARAN
"""
import os
import json
import cv2
import numpy as np
from tqdm import tqdm
base_dir = os.getcwd()
data_folder = os.path.join(base_dir, "Dataset")
images_folder = os.path.join(data_folder, "Images")
crops_folder = os.path.join(data_folder, "Crops")
sample_testset = os.path.join(data_folder, "sample_testset")
model_sample_result = os.path.join(sample_testset, "sample_result.json")
sample_images = os.path.join(sample_testset, "images")
sample_crops = os.path.join(sample_testset, "crops")
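# ModifiedFLANN (below) locates a cropped patch inside a full image: SIFT keypoints and
# descriptors are computed for both images, matched with a FLANN KD-tree matcher, filtered
# with Lowe's ratio test (match1.distance < 0.7 * match2.distance), and, when more than
# mini_match_count good matches remain, a RANSAC homography projects the crop's corners
# into the coordinates of the original image.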
def ModifiedFLANN(img1, img2):
mini_match_count = 10
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=10)
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
orgBorder = None
flannMatch = True
if (des1 is None) or (des2 is None):
flannMatch = False
return flannMatch, orgBorder
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
good_matches = []
for match1, match2 in matches:
if match1.distance < (0.7 * match2.distance):
good_matches.append((match1))
if len(good_matches) > mini_match_count:
cropImg = []
orgImg = []
for m in good_matches:
cropImg.append(kp1[m.queryIdx].pt)
orgImg.append(kp2[m.trainIdx].pt)
cropImg, orgImg = np.float32((cropImg, orgImg))
H, _ = cv2.findHomography(cropImg, orgImg, cv2.RANSAC, 3.0)
if H is None:
return flannMatch, orgBorder
h, w, _ = img1.shape
cropBorder = np.float32([[[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]])
orgBorder = cv2.perspectiveTransform(cropBorder, H)
return flannMatch, orgBorder
def findMinMax(border):
x, y = np.absolute(np.transpose(border)[0]), np.absolute(np.transpose(border)[1])
x1, x2 = int(x.min()), int(x.max())
y1, y2 = int(y.min()), int(y.max())
return [x1, y1, x2, y2]
completeTracker = {}
noAssociationCropImages = os.listdir(sample_crops)
noAssociationImages = os.listdir(sample_images)
for imagefile in tqdm(os.listdir(sample_images)):
img = cv2.imread(os.path.join(sample_images, imagefile))
imageTracker = []
for cropfile in os.listdir(sample_crops):
crop_img = cv2.imread(os.path.join(sample_crops,
cropfile))
flannMatch, crop_border = ModifiedFLANN(crop_img, img)
if flannMatch:
if crop_border is not None:
pts = findMinMax(crop_border[0])
imageTracker.append((cropfile.replace(".jpg", ""), pts))
if cropfile in noAssociationCropImages:
noAssociationCropImages.remove(cropfile)
completeTracker[imagefile.replace(".jpg", "")] = imageTracker
NA_Crops = []
for crop in noAssociationCropImages:
NA_Crops.append([crop.replace(".jpg", ""), []])
completeTracker["NA"] = NA_Crops
with open(model_sample_result, "w") as f:
json.dump(completeTracker, f, sort_keys=True, indent = 4)
print("Output Json File is generated")
|
import re
def show_me(name):
return bool(re.match(r'(-[A-Z][a-z]+)+$', '-' + name))
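# re.match anchors at the start of '-' + name and the trailing $ anchors the end, so the
# whole name must be one or more hyphen-separated parts, each a single capital letter
# followed by lowercase letters, e.g. show_me('Jean-Luc') -> True, show_me('Jean-luc') -> False.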
|
# read n integers into a list, then print only the positive ones
numbers = []
n = int(input("Enter number of elements : "))
for i in range(0, n):
    ele = int(input())
    numbers.append(ele)
print(numbers)
for ele in numbers:
    if ele > 0:
        print(ele, end=" ")
|
# -*- coding: utf-8 -*-
import base64
from django.core.urlresolvers import resolve
from django.contrib.auth.models import AnonymousUser
from rest_framework import exceptions
from rest_framework import authentication
from rest_framework.permissions import BasePermission
from django.contrib.auth.models import User
ALLOWED_PATHS = [
'customer-list',
]
ALLOWED_PATHS_ADMIN = [
'fleet-list',
'fleet-detail'
]
class Authenticate(authentication.BasicAuthentication):
"""
Custom auth method to authenticate the user trought the token
"""
def _get_path(self, request):
return resolve(request.path).url_name
def _allowed_path(self, request):
url_name = self._get_path(request)
        return url_name in ALLOWED_PATHS
def _allowed_path_admin(self, request):
url_name = self._get_path(request)
        return url_name in ALLOWED_PATHS_ADMIN
def bad_credentials(self):
raise exceptions.AuthenticationFailed('Bad credentials')
def get_authorization_header(self, request):
auth = request.META.get('HTTP_AUTHORIZATION', b'')
if type(auth) == type(''):
auth = auth.encode('iso-8859-1')
return auth
def authenticate_credentials(self, username=None, password=None,
anonymous=False, request=None):
if anonymous:
return (AnonymousUser(), None)
try:
user = User.objects.get(username=username)
if not user.check_password(password):
self.bad_credentials()
if self._allowed_path_admin(request) and not user.is_superuser:
self.bad_credentials()
except User.DoesNotExist:
self.bad_credentials()
return (user, None)
def authenticate(self, request, simple=False):
auth = self.get_authorization_header(request).split()
        if self._allowed_path(request):
return self.authenticate_credentials(anonymous=True)
try:
auth_parts = base64.b64decode(auth[1]).decode('iso-8859-1').partition(':')
except (IndexError, TypeError):
self.bad_credentials()
username, password = auth_parts[0], auth_parts[2]
return self.authenticate_credentials(username, password, request=request)
|
import unittest
from katas.kyu_7.sir_show_me_your_id import show_me
class ShowMeTestCase(unittest.TestCase):
def test_true_1(self):
self.assertTrue(show_me('Francis'))
def test_true_2(self):
self.assertTrue(show_me('Jean-Eluard'))
def test_true_3(self):
self.assertTrue(show_me('Bernard-Henry-Levy'))
def test_false_1(self):
self.assertFalse(show_me('Le Mec'))
def test_false_2(self):
self.assertFalse(show_me('Meme Gertrude'))
def test_false_3(self):
self.assertFalse(show_me('A-a-a-a----a-a'))
def test_false_4(self):
self.assertFalse(show_me('Z-------------'))
def test_false_5(self):
self.assertFalse(show_me('Jean-luc'))
def test_false_6(self):
self.assertFalse(show_me('Jean--Luc'))
def test_false_7(self):
self.assertFalse(show_me('JeanLucPicard'))
def test_false_8(self):
self.assertFalse(show_me('-Jean-Luc'))
def test_false_9(self):
self.assertFalse(show_me('Jean-Luc-Picard-'))
|
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
__author__ = 'Sebastian'
"""
A simple Python script to handle Secure Key Agreement communication through USB using ADB.
"""
import time
import json
import os.path
import socket
from adb.usb_exceptions import AdbCommandFailureException
from adb.adb_commands import AdbCommands, M2CryptoSigner
from ska_device_interface import ISKADevice
from pycloud.pycloud.security import rsa
from pycloud.pycloud.utils import fileutils
from pycloud.pycloud.ska import ska_constants
LOCAL_TEMP_FOLDER = 'adb/keys'
REMOTE_FOLDER = '/sdcard/cloudlet/adb/'
IN_DATA_SERVICE = 'edu.cmu.sei.cloudlet.client/.ska.adb.InDataService'
IN_FILE_SERVICE = 'edu.cmu.sei.cloudlet.client/.ska.adb.StoreFileService'
OUT_DATA_SERVICE = 'edu.cmu.sei.cloudlet.client/.ska.adb.OutDataService'
OUT_DATA_REMOTE_FILEPATH = REMOTE_FOLDER + 'out_data.json'
CLEANUP_SERVICE = 'edu.cmu.sei.cloudlet.client/.ska.adb.CleanupService'
# Global to store root folder.
root_data_folder = './data/'
######################################################################################################################
# Returns the full path for the adbkey file.
######################################################################################################################
def get_adb_key_path():
return os.path.join(root_data_folder, LOCAL_TEMP_FOLDER, 'adbkey')
######################################################################################################################
# Attempts to connect to an ADB daemon on a given USB device. Returns a proxy to that daemon if successful, or None if
# unsuccessful.
######################################################################################################################
def connect_to_adb_daemon(device):
# Load host keypair to authenticate with ADB daemon.
keySigner = M2CryptoSigner(get_adb_key_path())
try:
print 'Connecting to USB device'
device.Open()
print 'Connecting to ADB daemon'
adbDaemon = AdbCommands.Connect(device, banner="cloudlet", rsa_keys=[keySigner], auth_timeout_ms=15000)
return adbDaemon
except Exception, e:
print 'Error connecting to ADB daemon: ' + str(e)
return None
######################################################################################################################
#
######################################################################################################################
def disconnect_from_adb_daemon(adbDaemon, device):
if adbDaemon:
adbDaemon.Close()
if device:
device.Close()
######################################################################################################################
# Starts the given service on the given daemon.
######################################################################################################################
def start_service(adbDaemon, service_name, extras={}):
command = 'am startservice -n ' + service_name
for extra_key in extras:
command += ' -e ' + extra_key + ' ' + extras[extra_key]
print command
adbDaemon.Shell(command, timeout_ms=20000)
######################################################################################################################
# Implementation of a SKA device through ADB.
######################################################################################################################
class ADBSKADevice(ISKADevice):
serial_number = None
adb_daemon = None
usb_device = None
####################################################################################################################
# Sets up basic ADB stuff.
####################################################################################################################
@staticmethod
def initialize(root_folder):
global root_data_folder
root_data_folder = root_folder
####################################################################################################################
# Creates a device using the provided device info.
####################################################################################################################
def __init__(self, serial_number):
self.serial_number = serial_number
####################################################################################################################
# Returns an name for the device.
####################################################################################################################
def get_name(self):
return self.serial_number
####################################################################################################################
# Not used in ADB.
####################################################################################################################
def get_port(self):
return 0
####################################################################################################################
# Not different in ADB.
####################################################################################################################
def get_friendly_name(self):
return self.serial_number
####################################################################################################################
# Sets up basic ADB stuff.
####################################################################################################################
@staticmethod
def bootstrap():
# Set the data folder.
adb_data_folder = os.path.join(os.path.abspath(root_data_folder), LOCAL_TEMP_FOLDER)
fileutils.recreate_folder(adb_data_folder)
# Generate the adb keys. NOTE: this is a key pair required to connect to the ADB daemon on the Android device.
adb_private_key_path = get_adb_key_path()
adb_public_key_path = adb_private_key_path + '.pub'
adb_public_rsa_key_path = adb_private_key_path + '_rsa.pub'
# We create a stanard RSA key pair, but the format used by ADB is different, so we need to convert the public key.
rsa.create_key_pair(adb_private_key_path, adb_public_rsa_key_path)
rsa.convert_pub_rsa_to_adb(adb_public_rsa_key_path, adb_public_key_path)
####################################################################################################################
# Returns a list of ADBSKADevices. (Actually, it contains all USB devices connected to the computer).
####################################################################################################################
@staticmethod
def list_devices():
usb_devices = []
for device in AdbCommands.Devices():
usb_devices.append(ADBSKADevice(device.serial_number))
return usb_devices
####################################################################################################################
# Connects to the adb daemon on the given device (by serial number).
####################################################################################################################
def connect(self):
if self.serial_number is None:
print 'No serial number configured for device.'
return False
for device in AdbCommands.Devices():
if device.serial_number == self.serial_number:
self.adb_daemon = connect_to_adb_daemon(device)
if self.adb_daemon is not None:
self.usb_device = device
return True
else:
print 'Unable to connect to ADB daemon on device ' + self.serial_number + '.'
return False
print 'Given device ' + self.serial_number + ' not found.'
return False
####################################################################################################################
# Disconnects from the ADB daemon, and the USB device.
####################################################################################################################
def disconnect(self):
if self.adb_daemon:
disconnect_from_adb_daemon(self.adb_daemon, self.usb_device)
####################################################################################################################
# Gets a file from the device, retrying until the timeout is reached.
####################################################################################################################
def get_data_from_device(self, timeout=5):
# Wait some time to ensure the device creates the output file with the result.
start_time = time.time()
while time.time() - start_time < timeout:
print 'Attempting to download file with data.'
try:
# Wait a bit to ensure the data out file has been written.
time.sleep(1)
# Get and pars the result.
pull_timeout = 2000
data = self.adb_daemon.Pull(OUT_DATA_REMOTE_FILEPATH, timeout_ms=pull_timeout)
print 'Successfully downloaded output file.'
json_data = json.loads(data)
# Check error and log it.
if json_data[ska_constants.RESULT_KEY] != ska_constants.SUCCESS:
error_messge = 'Error processing command on device: ' + json_data[ska_constants.ERROR_MSG_KEY]
raise Exception(error_messge)
# Clean up remote output file, and give it some time to ensure it is cleaned.
start_service(self.adb_daemon, CLEANUP_SERVICE, {})
time.sleep(1)
return json_data
except AdbCommandFailureException, e:
print 'Could not get data file, file may not be ready. Will wait and retry.'
print 'Problem details: ' + str(e)
print 'Could not get data file, file may not exist on device.'
return []
####################################################################################################################
# Gets simple data through pulling a file with adb.
# DATA needs to be a dictionary of key-value pairs (the value is not used, only the key).
####################################################################################################################
def get_data(self, data):
print 'Starting data retrieval service.'
start_service(self.adb_daemon, OUT_DATA_SERVICE, data)
print 'Getting result.'
result = self.get_data_from_device()
return result
####################################################################################################################
# Sends simple data through pushing a file with adb.
# DATA needs to be a dictionary of key-value pairs.
####################################################################################################################
def send_data(self, data):
data['cloudlet_name'] = socket.gethostname()
# Everything is called "in" since, from the point of view of the Service, it is getting data.
print 'Starting data receiving service.'
start_service(self.adb_daemon, IN_DATA_SERVICE, data)
print 'Data sent.'
print 'Checking result.'
result = self.get_data_from_device()
####################################################################################################################
#
####################################################################################################################
def send_file(self, file_path, file_id):
print 'Pushing file.'
self.adb_daemon.Push(file_path, REMOTE_FOLDER + file_id)
data = {}
data['cloudlet_name'] = socket.gethostname()
data['file_id'] = file_id
print 'Starting file receiver service.'
start_service(self.adb_daemon, IN_FILE_SERVICE, data)
print 'File sent.'
print 'Checking result.'
result = self.get_data_from_device()
####################################################################################################################
# Test script.
####################################################################################################################
def test():
devices = ADBSKADevice.list_devices()
if len(devices) > 0:
device = devices[0]
print device
print device.serial_number
adbDevice = ADBSKADevice(device.serial_number)
try:
adbDevice.connect()
print 'Getting id'
data = adbDevice.get_data({'data': 'none'})
print data
print 'Sending files'
print 'Files sent'
finally:
adbDevice.disconnect()
# Execute the test.
#test()
|
import unittest
from katas.kyu_5.palindrome_chain_length import palindrome_chain_length
class PalindromeChainLengthTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(palindrome_chain_length(87), 4)
def test_equals_2(self):
self.assertEqual(palindrome_chain_length(1), 0)
def test_equals_3(self):
self.assertEqual(palindrome_chain_length(88), 0)
def test_equals_4(self):
self.assertEqual(palindrome_chain_length(89), 24)
def test_equals_5(self):
self.assertEqual(palindrome_chain_length(10), 1)
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#################################################################################
# #
# update_html_page.py: update disk space html page #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 04, 2021 #
# #
#################################################################################
import os
import sys
import re
import string
import time
import random
#
#--- reading directory list
#
path = '/data/mta/Script/Disk_check/house_keeping/dir_list_py'
with open(path, 'r') as f:
data = [line.strip() for line in f.readlines()]
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append a path to a private folder to python directory
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- converTimeFormat contains MTA time conversion routines
#
import mta_common_functions as mcf
#
#--- set a temporary file name
#
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#-----------------------------------------------------------------------------------------------
#-- update_html_page: update html page --
#-----------------------------------------------------------------------------------------------
def update_html_page():
"""
update html page
input none, but read <house_keeping>/disk_space_backup_py.html as a template
output: <web_dir>/disk_space.html
"""
#
#--- today's date
#
update = 'Last Update: ' + mcf.today_date_display()
#
#--- read the current disk capacities
#
cap1 = get_disk_capacity('/data/mta/')
cap2 = get_disk_capacity('/data/mta_www/')
cap3 = get_disk_capacity('/data/mta4/')
cap4 = get_disk_capacity('/data/mays/')
cap5 = get_disk_capacity('/data/swolk/')
cap6 = get_disk_capacity('/data/mta1/')
cap7 = get_disk_capacity('/data/mta2/')
cap8 = get_disk_capacity('/proj/rac/ops/')
#
#--- read template
#
line = house_keeping + 'disk_space_backup_py.html'
with open(line, 'r') as f:
data = f.read()
#
#--- update the blank lines
#
data = data.replace("#UPDATE#", update)
data = data.replace('#CAP1#', cap1)
data = data.replace('#CAP2#', cap2)
data = data.replace('#CAP3#', cap3)
data = data.replace('#CAP4#', cap4)
data = data.replace('#CAP5#', cap5)
data = data.replace('#CAP6#', cap6)
data = data.replace('#CAP7#', cap7)
data = data.replace('#CAP8#', cap8)
#
#--- print out the data
#
out = web_dir + 'disk_space.html'
with open(out, 'w') as fo:
fo.write(data)
#-----------------------------------------------------------------------------------------------
#-- get_disk_capacity: read a disk capacity --
#-----------------------------------------------------------------------------------------------
def get_disk_capacity(dname):
"""
read a disk capacity
input: dname --- disk name
    output: capacity    --- disk capacity in KB (the 1K-blocks field of the df output)
"""
cmd = 'df -k ' + dname + '>' + zspace
os.system(cmd)
data = mcf.read_data_file(zspace, remove=1)
    atemp = re.split(r'\s+', data[1])
capacity = atemp[1]
return capacity
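#
#--- a possible variant, shown only as a sketch: it reads the same 'df -k' output
#--- through subprocess (Python 3.7+) instead of the zspace temporary file
#
def get_disk_capacity_direct(dname):
    """
    sketch of get_disk_capacity without a temporary file
    input:  dname       --- disk name
    output: capacity    --- disk capacity in KB
    """
    import subprocess
    out = subprocess.run(['df', '-k', dname], capture_output=True, text=True).stdout
    atemp = re.split(r'\s+', out.splitlines()[1])
    return atemp[1]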
#--------------------------------------------------------------------
if __name__ == '__main__':
update_html_page()
|
import numpy as np
class Metric_Accuracy:
def calculate(self, output, y):
predictions = np.argmax(output, axis=1)
accuracy = np.mean(predictions == y)
return accuracy
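# A small usage sketch with made-up values: `output` holds one row of class
# scores per sample, `y` holds the integer labels, so argmax picks the predicted
# class and the mean of the comparison is the accuracy.
if __name__ == '__main__':
    output = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
    y = np.array([1, 0, 0])
    print(Metric_Accuracy().calculate(output, y))  # 2 of 3 correct -> 0.666...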
|
from BasicFunctions import *
player1 = Player("Player 1", 1)
player2 = Player("Player 2", 2)
player3 = Player("Player 3", 3)
player4 = Player("Player 4", 4)
talon = Player("Talon",5)
PLAYERS = [player1,player2,player3,player4]
GAMENUM = 0
ROUNDNUM = 0
MONDASOK = ["Négy király", "Dupla játék", "Tuletroá", "Centrum", "Kismadár", "Nagymadár", "Pagátkismadár", "Saskismadár",
"Királykismadár","Pagát ulti", "Sas ulti","Király ulti","Pagát uhu", "Sas uhu","Király uhu",
"Párosfácan", "Volát", "Huszonegyes fogás", "Színcsalád", "Három király", "Káró", "Kőr", "Treff", "Pikk"]
def findTarokk(player):
index = 0
findThat = False
for i in range(len(player.hand)):
if player.hand[i].suit == "Tarokk":
index = i
findThat = True
break
if findThat:
return index
else:
return 0
def findTheCard(temp, player):
index = 0
findThat = False
for i in range(len(player.hand)):
if temp[0].suit == player.hand[i].suit:
index = i
findThat = True
break
if findThat == False:
if temp[0].suit == "Tarokk":
index = 0
else:
index = findTarokk(player)
if findTarokk(player) == 0:
index = 0
return index
def findIndex(list,card):
index = 0
for i in range(len(list)):
if card.suit == list[i].suit and card.value == list[i].value:
index = i
break
return index
def tempShow(temp):
for c in temp:
c.show()
def whosNextPlayer(temp):
nextplayer = 0
if temp[0].suit == temp[1].suit and temp[1].suit == temp[2].suit and temp[2].suit == temp[3].suit:
maxvalue = 0
for i in range(0,4):
if temp[i].value > maxvalue:
maxvalue = temp[i].value
nextplayer = temp[i].whichPlayer
else:
maxvalue2 = 0
for i in range(0,4):
if temp[i].suit == "Tarokk" and temp[i].value > maxvalue2:
maxvalue2 = temp[i].value
nextplayer = temp[i].whichPlayer
return nextplayer
############ INITIALIZATION ###########
def gameInitialize():
deck = Deck()
deck.shuffle()
print("A pakli megkeverve")
for i in range(0,6):
talon.draw(deck)
for i in range(0,5):
player1.draw(deck)
player2.draw(deck)
player3.draw(deck)
player4.draw(deck)
for i in range(0,4):
player1.draw(deck)
player2.draw(deck)
player3.draw(deck)
player4.draw(deck)
print("A lapook kiosztva")
for p in PLAYERS:
p.sortingCards(p.hand)
def gameCounter(roundcounter, gamenumber):
if roundcounter == 9:
gamenumber+=1
return gamenumber
########### BIDDING ###########
def orderOfBidding(startingplayer):
licitOrder = []
if startingplayer == PLAYERS[0]:
licitOrder.append(PLAYERS[0])
licitOrder.append(PLAYERS[1])
licitOrder.append(PLAYERS[2])
licitOrder.append(PLAYERS[3])
elif startingplayer == PLAYERS[1]:
licitOrder.append(PLAYERS[1])
licitOrder.append(PLAYERS[2])
licitOrder.append(PLAYERS[3])
licitOrder.append(PLAYERS[0])
elif startingplayer == PLAYERS[2]:
licitOrder.append(PLAYERS[2])
licitOrder.append(PLAYERS[3])
licitOrder.append(PLAYERS[0])
licitOrder.append(PLAYERS[1])
else:
licitOrder.append(PLAYERS[3])
licitOrder.append(PLAYERS[0])
licitOrder.append(PLAYERS[1])
licitOrder.append(PLAYERS[2])
return licitOrder
def gainingCard(order,actuallicit):
if actuallicit == 3:
if order[0].winBidding:
order[0].gainingCard = 3
order[1].gainingCard = 1
order[2].gainingCard = 1
order[3].gainingCard = 1
elif order[1].winBidding:
order[1].gainingCard = 3
order[2].gainingCard = 1
order[3].gainingCard = 1
order[0].gainingCard = 1
elif order[2].winBidding:
order[2].gainingCard = 3
order[3].gainingCard = 1
order[0].gainingCard = 1
order[1].gainingCard = 1
else:
order[3].gainingCard = 3
order[0].gainingCard = 1
order[1].gainingCard = 1
order[2].gainingCard = 1
elif actuallicit == 2:
if order[0].winBidding:
order[0].gainingCard = 2
order[1].gainingCard = 2
order[2].gainingCard = 1
order[3].gainingCard = 1
elif order[1].winBidding:
order[1].gainingCard = 2
order[2].gainingCard = 2
order[3].gainingCard = 1
order[0].gainingCard = 1
elif order[2].winBidding:
order[2].gainingCard = 2
order[3].gainingCard = 2
order[0].gainingCard = 1
order[1].gainingCard = 1
else:
order[3].gainingCard = 2
order[0].gainingCard = 2
order[1].gainingCard = 1
order[2].gainingCard = 1
elif actuallicit == 1:
if order[0].winBidding:
order[0].gainingCard = 1
order[1].gainingCard = 2
order[2].gainingCard = 2
order[3].gainingCard = 1
elif order[1].winBidding:
order[1].gainingCard = 1
order[2].gainingCard = 2
order[3].gainingCard = 2
order[0].gainingCard = 1
elif order[2].winBidding:
order[2].gainingCard = 1
order[3].gainingCard = 2
order[0].gainingCard = 2
order[1].gainingCard = 1
else:
order[3].gainingCard = 1
order[0].gainingCard = 2
order[1].gainingCard = 2
order[2].gainingCard = 1
else:
if order[0].winBidding:
order[0].gainingCard = 0
order[1].gainingCard = 2
order[2].gainingCard = 2
order[3].gainingCard = 2
elif order[1].winBidding:
order[1].gainingCard = 0
order[2].gainingCard = 2
order[3].gainingCard = 2
order[0].gainingCard = 2
elif order[2].winBidding:
order[2].gainingCard = 0
order[3].gainingCard = 2
order[0].gainingCard = 2
order[1].gainingCard = 2
else:
order[3].gainingCard = 0
order[0].gainingCard = 2
order[1].gainingCard = 2
order[2].gainingCard = 2
def searchPartner(partnertarock,player):
for card in player1.hand:
if card.suit == "Tarokk" and card.value == partnertarock:
player.partnerNumber = 1
player1.partnerNumber = player.position
break
for card in player2.hand:
if card.suit == "Tarokk" and card.value == partnertarock:
player.partnerNumber = 2
player2.partnerNumber = player.position
break
for card in player3.hand:
if card.suit == "Tarokk" and card.value == partnertarock:
player.partnerNumber = 3
player3.partnerNumber = player.position
break
for card in player4.hand:
if card.suit == "Tarokk" and card.value == partnertarock:
player.partnerNumber = 4
player4.partnerNumber = player.position
break
return player.partnerNumber
def isMondasIn(mondas,mondasok):
isin = False
for m in mondasok:
if mondas == m:
isin = True
break
return isin
def getMondasok(mondasok,partnertarock):
mondasokback = []
pnumber = searchPartner(partnertarock,player1)
partnerplayer = 0
if pnumber == 1:
partnerplayer = player1
if pnumber == 2:
partnerplayer = player2
if pnumber == 3:
partnerplayer = player3
if pnumber == 4:
partnerplayer = player4
if partnertarock == 20:
if isMondasIn("Négy király", mondasok):
if hasHuszegy(partnerplayer) or hasSkiz(partnerplayer):
mondasokback.append("Tuletroá")
if hasTnyolc(partnerplayer) and tarokkCounter(partnerplayer) >=7:
mondasokback.append("Dupla játék")
if isMondasIn("Dupla játék", mondasok):
if hasHuszegy(partnerplayer) or hasSkiz(partnerplayer):
mondasokback.append("Tuletroá")
if hasTkilenc(partnerplayer):
mondasokback.append("Négy király")
if isMondasIn("Tuletroá",mondasok):
if hasTkilenc(partnerplayer):
mondasokback.append("Négy király")
if hasTnyolc(partnerplayer) and tarokkCounter(partnerplayer) >=7:
mondasokback.append("Dupla játék")
def wantToBiddingMore(player,actuallicit,order):
want = False
if hasTwoBig(player):
want = True
if hasPagat(player) and not hasHusz(player) and not hasTkilenc(player):
want = True
if gainingCard(order,actuallicit) == 1:
want = True
if hasSkiz(player) and tarokkCounter(player) >=5:
want = True
if hasHuszegy(player) and tarokkCounter(player) >=7:
want = True
if hasHuszegy(player) and tarokkCounter(player)<=4 and gainingCard(order,actuallicit) == 2:
want = False
return want
def cardBidding():
order = []
gamenum = gameCounter(ROUNDNUM,GAMENUM)
licit = 0
if gamenum % 4 == 0:
order = orderOfBidding(player1)
if gamenum % 4 == 1:
order = orderOfBidding(player2)
if gamenum % 4 == 2:
order = orderOfBidding(player3)
if gamenum % 4 == 3:
order = orderOfBidding(player4)
if order[0].canLicit(order[0].hand):
licit = 3 #HÁRMAS
print("--HARMAS--")
order[0].winBidding = True
if order[1].canLicit(order[1].hand) and wantToBiddingMore(order[1],licit,order):
licit = 3 #TARTOM
print("--TARTOM--")
order[0].winBidding = False
order[1].winBidding = True
if order[2].canLicit(order[2].hand) and wantToBiddingMore(order[2],licit,order):
licit = 2 #KETTES PASSZ
print("--KETTES--")
order[1].winBidding = False
order[2].winBidding = True
elif order[3].canLicit(order[3].hand) and wantToBiddingMore(order[3],licit,order):
licit = 2
print("--KETTES--")
order[1].winBidding = False #KETTES PASSZ
order[3].winBidding = True
else:
if wantToBiddingMore(order[0],licit, order):
licit = 2 #KETTES
print("--KETTES--")
order[1].winBidding = False
order[0].winBidding = True
if wantToBiddingMore(order[1],licit,order):
licit = 2 #TARTOM
print("--TARTOM--")
order[1].winBidding = True
order[0].winBidding = False
if wantToBiddingMore(order[0],licit,order):
licit = 1 #EGYES
print("--EGYES--")
order[0].winBidding = True
order[1].winBidding = False
if wantToBiddingMore(order[1],licit,order):
licit = 1 #TARTOM
print("--TARTOM--")
order[1].winBidding = True
order[0].winBidding = False
if wantToBiddingMore(order[0],licit,order):
licit = 0 #SZÓLÓ
print("--SZÓLÓ--")
order[0].winBidding = True
order[1].winBidding = False
if wantToBiddingMore(order[1],licit,order):
licit = 0 #TARTOM
print("--TARTOM--")
order[1].winBidding = True
order[0].winBidding = False
else:
licit = 3
order[0].winBidding = False
order[1].winBidding = True
elif order[1].canLicit(order[1].hand) and wantToBiddingMore(order[1],licit,order):
licit = 3 #HÁRMAS
order[1].winBidding = True
if order[2].canLicit(order[2].hand) and wantToBiddingMore(order[2],licit,order):
licit = 3 #TARTOM
order[1].winBidding = False
order[2].winBidding = True
if order[3].canLicit(order[3].hand) and wantToBiddingMore(order[3],licit,order):
licit = 2 #KETTES PASSZ
order[2].winBidding = False
order[3].winBidding = True
elif order[0].canLicit(order[0].hand) and wantToBiddingMore(order[0],licit,order):
licit = 2
order[2].winBidding = False #KETTES PASSZ
order[0].winBidding = True
else:
if wantToBiddingMore(order[1],licit,order):
licit = 2 #KETTES
order[2].winBidding = False
order[1].winBidding = True
if wantToBiddingMore(order[2],licit,order):
licit = 2 #TARTOM
order[2].winBidding = True
order[1].winBidding = False
if wantToBiddingMore(order[1],licit,order):
licit = 1 #EGYES
order[1].winBidding = True
order[2].winBidding = False
if wantToBiddingMore(order[2],licit,order):
licit = 1 #TARTOM
order[2].winBidding = True
order[1].winBidding = False
if wantToBiddingMore(order[1],licit,order):
licit = 0 #SZÓLÓ
order[1].winBidding = True
order[2].winBidding = False
if wantToBiddingMore(order[2],licit,order):
licit = 0 #TARTOM
order[2].winBidding = True
order[1].winBidding = False
elif order[2].canLicit(order[2].hand) and wantToBiddingMore(order[2],licit,order):
licit = 3 #HÁRMAS
order[2].winBidding = True
if order[3].canLicit(order[3].hand) and wantToBiddingMore(order[3],licit,order):
licit = 3 #TARTOM
order[2].winBidding = False
order[3].winBidding = True
if order[0].canLicit(order[0].hand) and wantToBiddingMore(order[0],licit,order):
licit = 2 #KETTES PASSZ
order[3].winBidding = False
order[0].winBidding = True
elif order[1].canLicit(order[1].hand) and wantToBiddingMore(order[1],licit,order):
licit = 2
order[3].winBidding = False #KETTES PASSZ
order[1].winBidding = True
else:
if wantToBiddingMore(order[2],licit,order):
licit = 2 #KETTES
order[3].winBidding = False
order[2].winBidding = True
if wantToBiddingMore(order[3],licit,order):
licit = 2 #TARTOM
order[3].winBidding = True
order[2].winBidding = False
if wantToBiddingMore(order[2],licit,order):
licit = 1 #EGYES
order[2].winBidding = True
order[3].winBidding = False
if wantToBiddingMore(order[3],licit,order):
licit = 1 #TARTOM
order[3].winBidding = True
order[2].winBidding = False
if wantToBiddingMore(order[2],licit,order):
licit = 0 #SZÓLÓ
order[2].winBidding = True
order[3].winBidding = False
if wantToBiddingMore(order[3],licit,order):
licit = 0 #TARTOM
order[3].winBidding = True
order[2].winBidding = False
elif order[3].canLicit(order[3].hand) and wantToBiddingMore(order[3],licit,order):
licit = 3 #HÁRMAS
order[3].winBidding = True
if order[0].canLicit(order[0].hand) and wantToBiddingMore(order[0],licit,order):
licit = 3 #TARTOM
order[3].winBidding = False
order[0].winBidding = True
if order[1].canLicit(order[1].hand) and wantToBiddingMore(order[1],licit,order):
licit = 2 #KETTES PASSZ
order[0].winBidding = False
order[1].winBidding = True
elif order[2].canLicit(order[2].hand) and wantToBiddingMore(order[2],licit,order):
licit = 2
order[0].winBidding = False #KETTES PASSZ
order[2].winBidding = True
else:
if wantToBiddingMore(order[3],licit,order):
licit = 2 #KETTES
order[0].winBidding = False
order[3].winBidding = True
if wantToBiddingMore(order[0],licit,order):
licit = 2 #TARTOM
order[0].winBidding = True
order[3].winBidding = False
if wantToBiddingMore(order[3],licit,order):
licit = 1 #EGYES
order[3].winBidding = True
order[0].winBidding = False
if wantToBiddingMore(order[0],licit,order):
licit = 1 #TARTOM
order[0].winBidding = True
order[3].winBidding = False
if wantToBiddingMore(order[3],licit,order):
licit = 0 #SZÓLÓ
order[3].winBidding = True
order[0].winBidding = False
if wantToBiddingMore(order[0],licit,order):
licit = 0 #TARTOM
order[0].winBidding = True
order[3].winBidding = False
return licit
########### MAIN ###########
gameInitialize()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 22 20:04:07 2014
@author: Goren
"""
import csv
import os
import numpy as np
import random
class train_loader:
"""Loads data """
TestcasesRatio=0.1
def __init__(self,csv_name,shuffle=False):
self.readcsv(csv_name,shuffle)
def readcsv (self,csv_name,shuffle=False):
"""Load the data from the csv file"""
csv_file=os.getcwd()+'\\data\\{}.csv'.format(csv_name)
#read csv
infile=open(csv_file,"rb")
reader=csv.reader(infile,delimiter=',')
next(reader, None)#skip the headers
x=list(reader)
if (shuffle):
random.shuffle(x)
self.data=np.array(x).astype('float')
def scale(self,cols,scale_min=0,scale_max=0):
"""
Scales the columns such that:
1 = maximal value of each column
0 = minimal value of each column
"""
for col in cols:
col_min=scale_min
col_max=scale_max
#if scale bounds aren't specified or invalid:
if (scale_min>=scale_max):
col_min=self.data[:,col].min()
col_max=self.data[:,col].max()
#do the scaling
self.data[:,col]=(self.data[:,col]-col_min)/(col_max-col_min)
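    # A small worked example of the scaling above (made-up values): with the
    # default bounds, a column holding [2, 6, 10] gets col_min=2 and col_max=10
    # and is mapped to [0.0, 0.5, 1.0]; passing scale_min=0, scale_max=100
    # instead maps a percentage column such as humidity onto the same 0-1 range.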
def training_data(self,data_cols,target_col):
"""
returns a partial list of (1-TestcasesRatio) percent of the data
for training purposes
and splits the input data into data and target
"""
test_size=int(round(self.data.shape[0]*self.TestcasesRatio))
X= self.data[0:-test_size,data_cols]
y=self.data[0:-test_size,target_col]
return (X,y)
def test_data(self,data_cols,target_col):
"""
returns a partial list of TestcasesRatio percent of the data
for the test set
and splits the input data into data and target
"""
test_size=int(round(self.data.shape[0]*self.TestcasesRatio))
X= self.data[-test_size:,data_cols]
y=self.data[-test_size:,target_col]
return (X,y)
class bikeshare_loader(train_loader):
def __init__(self):
self.readcsv('train_processed',True)
def preprocess(self):
"""Preprocessing for the bikeshare problem"""
self.scale([1,4,8])#season(1-4),weather(1-4),humidity,windspeed
self.scale([5,6,7],0,100)#temp,atemp,humidity
        self.data[:,0]=(1.0/24)*(self.data[:,0]%24)#time of day, scaled to [0,1)
#for tests
if __name__=='__main__':
loader=bikeshare_loader()
loader.preprocess()
(X,y)=loader.training_data(range(9),9)
print X[:,0]
#print train[:,[6,11]]
|
# Generated by Django 3.1 on 2020-08-04 17:34
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CONtacts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('lastname', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254)),
('phone', models.IntegerField()),
],
),
migrations.CreateModel(
name='LastDay',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('month', models.IntegerField()),
('year', models.IntegerField()),
],
),
migrations.CreateModel(
name='Employer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('identifier', models.IntegerField()),
('phone', models.IntegerField()),
('email', models.EmailField(max_length=254)),
('contacts', models.ManyToManyField(to='employer.CONtacts')),
],
),
]
|
#!/usr/bin/env python3
"""Script to run the auacm cli app"""
import sys
from auacm import main
main.main(sys.argv[1:])
|
from Token import Token
class AST(object):
def __init__(self, nome):
self.nome = nome;
self.children = []
        self.tipo = None  # node type: Compound, Assign, ArithOp, etc.
self.value = None
def __str__(self, level=0):
ret = "\t"*level+ repr(self) +"\n"
for child in self.children:
if (child != None):
ret += child.__str__(level+1) #level+1
return ret
def __repr__(self):
return self.nome
def __evaluate__(self):
for child in self.children:
if (child != None):
return child.__evaluate__()
class Compound(AST):
"""Represents a 'BEGIN ... END' block"""
def __init__(self):
AST.__init__(self,'Block')
print('Criando um nó do tipo Block.')
#self.children = []
def __repr__(self):
return self.nome
class Assign(AST):
def __init__(self, left, op, right):
AST.__init__(self,'Assign');
print('Criando um nó do tipo Assign.')
if(not(left is None)):
self.children.append(left)
if(not(right is None)):
self.children.append(right)
self.left = left
self.token = self.op = op
self.right = right
def __repr__(self):
return self.nome
class If(AST):
def __init__(self, exp, c_true, c_false):
AST.__init__(self, 'If')
print('Criando um nó do tipo If.')
if(not(exp is None)):
self.children.append(exp)
if(not(c_true is None)):
self.children.append(c_true)
if(not(c_false is None)):
self.children.append(c_false)
self.exp = exp;
self.c_true = c_true;
self.c_false = c_false;
def __repr__(self):
return self.nome
class While(AST):
def __init__(self, exp, commands):
AST.__init__(self,'While')
print('Criando um nó do tipo While.')
if(not(exp is None)):
self.children.append(exp)
if(not (commands is None)):
self.children.append(commands)
self.exp = exp;
self.commands = commands;
def __repr__(self):
return self.nome
class For(AST):
def __init__(self, attr, exp, attr2, commands):
AST.__init__(self,'For')
print('Criando um nó do tipo For.')
if (not(attr is None)):
self.children.append(attr)
self.attr = attr
if(not(exp is None)):
self.children.append(exp)
self.exp = exp;
if (not(attr is None)):
self.children.append(attr2)
self.attr2 = attr2
if(not (commands is None)):
self.children.append(commands)
self.commands = commands
def __repr__(self):
return self.nome
class Read(AST):
def __init__(self, id_):
AST.__init__(self,'Read')
print('Criando um nó do tipo Read.')
if(not(id_ is None)):
self.children.append(id_)
self.id = id_;
def __repr__(self):
return self.nome
class Print(AST):
def __init__(self, exp):
AST.__init__(self,'Print')
print('Criando um nó do tipo Print.')
if(not(exp is None)):
self.children.append(exp)
self.exp = exp;
def __repr__(self):
return self.nome
class Expr(AST):
def __init__(self, nome, op, left, right):
AST.__init__(self,nome)
if(not(left is None)):
self.children.append(left)
if(not(right is None)):
self.children.append(right)
self.left = left
self.op = op
self.right = right
def __repr__(self):
#self.left.repr();
return self.op
class LogicalOp(Expr):
def __init__(self, op, left, right):
Expr.__init__(self,'LogicalOp', op, left, right)
print('Criando um nó do tipo LogicalOp com operador ' + str(op))
class ArithOp(Expr):
def __init__(self, op, left, right):
Expr.__init__(self,'ArithOp', op, left, right)
print('Criando um nó do tipo ArithOp com operador ' + str(op))
class RelOp(Expr):
def __init__(self, left, op, right):
Expr.__init__(self,'RelOp', op, left, right)
print('Criando um nó do tipo RelOp com operador ' + str(op))
class Id(AST):
"""The Var node is constructed out of ID token."""
def __init__(self, token):
AST.__init__(self,'Id')
print('Criando um nó do tipo Id.')
#self.children.append(token)
self.token = token
self.value = token.value
def __repr__(self):
return repr(self.token.getLexema())
def __evaluate__(self):
return self.value
class Num(AST):
def __init__(self, token):
AST.__init__(self,'Num')
print('Criando um nó do tipo Num.')
#self.children.append(token)
self.token = token
        self.value = token.value  # in Python, we don't need to worry about value's type
def __repr__(self):
return repr(self.token.getLexema())
def __evaluate__(self):
return self.value
def print_tree(current_node, indent="", last='updown'):
nb_children = lambda node: sum(nb_children(child) for child in node.children) + 1
size_branch = {child: nb_children(child) for child in current_node.children}
""" Creation of balanced lists for "up" branch and "down" branch. """
up = sorted(current_node.children, key=lambda node: nb_children(node))
down = []
while up and sum(size_branch[node] for node in down) < sum(size_branch[node] for node in up):
down.append(up.pop())
""" Printing of "up" branch. """
for child in up:
        next_last = 'up' if up.index(child) == 0 else ''
next_indent = '{0}{1}{2}'.format(indent, ' ' if 'up' in last else '│', " " * len(current_node.__repr__()))
print_tree(child, indent=next_indent, last=next_last)
""" Printing of current node. """
if last == 'up': start_shape = '┌'
elif last == 'down': start_shape = '└'
elif last == 'updown': start_shape = ' '
else: start_shape = '├'
if up: end_shape = '┤'
elif down: end_shape = '┐'
else: end_shape = ''
print('{0}{1}{2}{3}'.format(indent, start_shape, current_node.__repr__(), end_shape))
""" Printing of "down" branch. """
for child in down:
        next_last = 'down' if down.index(child) == len(down) - 1 else ''
next_indent = '{0}{1}{2}'.format(indent, ' ' if 'down' in last else '│', " " * len(current_node.__repr__()))
print_tree(child, indent=next_indent, last=next_last)
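# A small usage sketch of print_tree (hypothetical nodes): any subtree works,
# since only `children` and __repr__ are used, e.g.
#
#     root = AST('Root')
#     root.children.append(AST('Left'))
#     root.children.append(AST('Right'))
#     print_tree(root)
#
# draws 'Root' in the middle with one child on the up branch and one on the down branch.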
class ToXML:
@staticmethod
def toXML(no):
count = 1
        arvoreToXML = open('../../tp2/output/arvoreToXML.txt','w')
arvoreToXML.write('<' + no.nome + '>\r\n')
for child in no.children:
i = 0
for i in range(0,count):
arvoreToXML.write('\t')
if(child.nome == 'Id' or child.nome == 'Num'):
                arvoreToXML.write('<' + child.nome + ToXML.classifierPrint(child) + '/>\r\n')
else:
arvoreToXML.write('<' + child.nome + ToXML.classifierPrint(child) + '>\r\n')
ToXML.deepSearch(child, count, arvoreToXML)
for i in range(0,count):
arvoreToXML.write('\t')
arvoreToXML.write('</' + child.nome + '>\r\n')
arvoreToXML.write('</' + no.nome + '>\r\n')
@staticmethod
def deepSearch( no, count,arvoreToXML):
count = count + 1
for child in no.children:
i = 0
for i in range(0,count):
arvoreToXML.write('\t')
if(child.nome == 'Id' or child.nome == 'Num'):
arvoreToXML.write('<' + child.nome + ToXML.classifierPrint(child) + '/>\r\n')
else:
arvoreToXML.write('<' + child.nome + ToXML.classifierPrint(child) + '>\r\n')
ToXML.deepSearch(child, count, arvoreToXML)
for i in range(0,count):
arvoreToXML.write('\t')
arvoreToXML.write('</' + child.nome + '>\r\n')
@staticmethod
def classifierPrint(no):
if(no.nome == 'Id'):
return ' lexema=\'' + no.token.getLexema() + '\''
elif(no.nome == 'Num'):
            return ' value=\'' + no.token.getLexema() + '\' type=\'' + str(no.value) + '\''
elif(no.nome == 'ArithOp' or no.nome == 'RelOp' or no.nome == 'LogicalOp'):
return ' op=\'' + no.op + '\''
else:
return ''
|
# import urllib.request
# url = 'http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing='
# data = urllib.request.urlopen(url + '37278').read().decode('utf-8')
# while data.startswith('and'):
# print(data)
# data = urllib.request.urlopen(url + data.split(' ')[-1]).read().decode('utf-8')
# print(data)
# peak.html
import requests
url = 'http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing='
num = '63579'
while True:
text = requests.get(url + num).text
print(text)
if text.startswith('and'): # and the next nothing is 53548
num = text.split()[-1]
else:
break
# initial num = 12345
# hint1: <font color=red>Your hands are getting tired </font>and the next nothing is 94485
# hint2: and the next nothing is 16044
# Yes. Divide by two and keep going.
# hint3: There maybe misleading numbers in the
# text. One example is 82683. Look only for the next nothing and the next nothing is 63579
# hint4: peak.html
|
#!/usr/bin/env python3
# @File:send_email.py
# @Date:2018/05/27
# Author:Cat.1
from email.mime.text import MIMEText
import smtplib
import config
msg_from = config.getConfig("send_email", "msg_from")
passwd = config.getConfig("send_email", "passwd")
msg_to = config.getConfig("send_email", "msg_to")
smtp_server = config.getConfig("send_email", "smtp_server")
subject = "测试专用"
content = "厉害了我的哥"
msg = MIMEText(content)
msg['Subject'] = subject
msg['From'] = msg_from
msg['To'] = msg_to
server = smtplib.SMTP_SSL(smtp_server, 465)
server.login(msg_from, passwd)
server.sendmail(msg_from, msg_to, msg.as_string())
server.quit()
# print(server.set_debuglevel(1))
|
# 2. Read the file 'a.txt', count how many times each word occurs in the file, and print the result.
# The contents of the txt file are:
#a:a. Every single time you access a website,
# you leave tracks. Tracks that others can access.
# If you don't like the idea, find out what software can help you cover them
f=open("D://a.txt","r")
a=f.readlines()
print(a)
|
#! -*- coding:utf8 -*-
import os
import sys
import json
reload(sys)
sys.setdefaultencoding("utf-8")
program_path = os.path.abspath(__file__ + "/../..")
def gen_file_abspath(file_path, root_path=None):
if root_path is None:
return program_path + "/" + file_path
else:
return root_path + "/" + file_path
|
#!/usr/bin/env python3
"""
Revision By Changes
---------------------------------------------------------------
0.0.1        Ramanuj Pandey [ramanuj.p7@gmail.com]   Ported the whole code to Python3
                                                     from CPP; a first pass of
                                                     optimization reduced the
                                                     source lines to less than half.
License
-------
Same as original CPP code.
"""
import os
import sys
import logging
import argparse
# TODO: Change variable names and functions to more meaningful ones,
# Initially I didn't change them, to keep the port easy to compare against the original source.
pgm_ver = '0.0.1-beta'
separator = '\t'
rules = {}
words = {}
weights = {}
check = 0
index = [0,]
no_of_splits = [0,]
split = []
rule_applied = ['',]
new_splits = []
new_rule_applied = []
initial = 0
op = {}
costs = {}
final_costs = []
output1 = {}
temp_result_file='temp_result'
ltproc_bin = '/usr/bin/lt-proc'
# morph_bin = '../../../morph_bin/skt_morf.bin'
morph_bin = '../scl/morph_bin/all_morf.bin'
temp_res_intrm_file = 'temp_result_mo'
res_file = 'result'
input_files = {
'sandhi': ('sandhi_rules.txt', 'sandhi_words.txt', 'skt_morf.bin'),
'samasa': ('samAsa_rules.txt', 'samAsa_words.txt', 'skt_samAsanoun_morf.bin'),
'both': ('all_rules.txt', 'word_freq.txt', 'all_morf.bin')
}
"""
Data structure selection thoughts:
1. We need an efficiently searchable data structure, because lookups of words
   and their expansions happen very frequently.
2. We need each dict value to be a list so that we can add components, as the
   same result can have many possible expansions.
With the above considerations, the rule db is a dictionary whose values are lists.
"""
def load_rules_and_words(rules_file, words_file):
logging.info("Rule loader called with option: (" + rules_file + ', ' + words_file + ')')
global separator
rules['tot_freq'] = 0
rule_val_extra = ''
# If we load with default utf encoding some strings from file fail to load.
with open(rules_file, encoding='latin-1') as fr:
for line in fr:
line_by_line = line.strip().split('{}'.format(separator))
rules['tot_freq'] += int(line_by_line[2])
with open(rules_file, encoding='latin-1') as fr:
for line in fr:
line_by_line = line.strip().split('{}'.format(separator))
rule_index = line_by_line[0]
rule_val = line_by_line[1]
if args.choice == 'sandhi':
weights[rule_val+'='+rule_index] = int(line_by_line[2])/rules['tot_freq']
elif args.choice == 'samasa':
rule_val = rule_val.split('+')[0]+"-+"+rule_val.split('+')[1]+'='+rule_index
weights[rule_val] = int(line_by_line[2])/rules['tot_freq']
elif args.choice == 'both':
rule_val_extra = rule_val.split('+')[0]+"-+"+rule_val.split('+')[1]+'='+rule_index
weights[rule_val_extra] = int(line_by_line[2])/rules['tot_freq']
weights[rule_val+'='+rule_index[0]] = int(line_by_line[2])/rules['tot_freq']
if rule_index in rules.keys():
rules[rule_index].append(rule_val)
if rule_val_extra != '':
rules[rule_index].append(rule_val_extra)
else:
rules[rule_index] = [rule_val]
if rule_val_extra != '':
                    rules[rule_index].append(rule_val_extra)
separator = ' '
words['corpus_size'] = 0
with open(words_file, encoding='latin-1') as fr:
for line in fr:
line_by_line = line.strip().split('{}'.format(separator))
words[line_by_line[1]] = line_by_line[0]
words['corpus_size'] += 1
def split_word(input_word):
global check, index, no_of_splits, split, rule_applied, rules
first = 0
split.append(input_word[0])
while check == 0:
check = 1
temp_split = []
temp_index = []
temp_no_of_splits = []
temp_rule_applied = []
for split_cntr in range(0, len(split)):
if no_of_splits[split_cntr] <= 4:
if index[split_cntr] >= len(input_word):
temp_split.append(split[split_cntr])
temp_index.append(index[split_cntr])
temp_no_of_splits.append(no_of_splits[split_cntr])
temp_rule_applied.append(rule_applied[split_cntr])
else:
one_char_tok = input_word[index[split_cntr]]
two_char_tok = input_word[index[split_cntr] : (index[split_cntr] + 2)]
tre_char_tok = input_word[index[split_cntr] : (index[split_cntr] + 3)]
tokens=[one_char_tok, two_char_tok, tre_char_tok]
if first == 0:
temp_split.append(one_char_tok)
else:
temp_split.append(split[split_cntr]+one_char_tok)
temp_index.append(index[split_cntr]+1)
temp_no_of_splits.append(no_of_splits[split_cntr])
temp_rule_applied.append(rule_applied[split_cntr])
if index[split_cntr] + 1 < len(input_word):
check = 0
for token in tokens:
if token in rules.keys():
for rule_cntr in range(0, len(rules[token])):
sutra = rules[token][rule_cntr]
if first == 0:
temp_split.append(sutra)
else:
temp_split.append(split[split_cntr] + sutra)
temp_index.append(index[split_cntr] + len(token))
temp_no_of_splits.append(no_of_splits[split_cntr]+1)
temp_rule_applied.append(rule_applied[split_cntr]+sutra+"="+token+"|");
if index[split_cntr] + 1 < len(input_word):
check = 0
first = 1
split = temp_split
index = temp_index
no_of_splits = temp_no_of_splits
rule_applied = temp_rule_applied
def split_final():
vechar = []
vechar1 = []
output = {}
global new_splits, new_rule_applied, initial, temp_result_file, op, res_file
for cntr in range(0, len(split)):
split_word = split[cntr]
if split_word[-2] != '+':
rule_token = rule_applied[cntr]
pada_list = split_word.split('+')
rule_list = rule_token[:-1].split('|')
vechar.append(pada_list)
vechar1.append(rule_list)
for p in pada_list:
output[p] = 1
new_splits = vechar
new_rule_applied = vechar1
initial=initial + len(split)
with open(temp_result_file, mode='wt', encoding='utf-8') as myfile:
myfile.write('\n'.join(output.keys()))
myfile.write('\n')
cmd_buf = "%s -c %s < %s > %s; grep '*' %s > %s" % (ltproc_bin, morph_bin, temp_result_file,
temp_res_intrm_file, temp_res_intrm_file,
res_file)
logging.info("Going to call ltproc as: " + cmd_buf)
if os.system(cmd_buf):
logging.error("Executing command (%s) failed" % cmd_buf)
sys.exit(1)
with open(res_file) as myfile:
content = myfile.readlines()
for val in content:
op[val.split('/')[0].split('^')[1]] = 1
def calculate_costs():
global tot_cost, costs, final_costs, output1, op
output = op
tot_cost = [1] * len(new_splits)
for cntr in range(1, len(new_splits)):
flag = 0
for k in range(0, len(new_splits[cntr]) -1):
if not new_splits[cntr][k] in output.keys():
output[new_splits[cntr][k]] = 0
if not new_splits[cntr][k+1] in output.keys():
output[new_splits[cntr][k+1]] = 0
if ((output[new_splits[cntr][k]] == 1) or (output[new_splits[cntr][k+1]] == 1)):
logging.debug("Got %s or %s in ouput keys" % (new_splits[cntr][k],new_splits[cntr][k+1]))
flag = 1
break
else:
                if new_splits[cntr][k].endswith('-'):
                    val = new_splits[cntr][k].split('-')
                    if val[0] not in words:
                        tot_cost[cntr] = tot_cost[cntr] * (1 / (words['corpus_size'] * 1.0)) * weights[new_rule_applied[cntr][k]]
                    else:
                        tot_cost[cntr] = tot_cost[cntr] * (int(words[val[0]]) / (words['corpus_size'] * 1.0)) * weights[new_rule_applied[cntr][k]]
else:
if not new_splits[cntr][k] in words.keys():
tot_cost[cntr] = tot_cost[cntr] * (1 / (words['corpus_size'] * 1.0) ) * weights[new_rule_applied[cntr][k]]
logging.debug("1 Total cost is %s for %s" % (tot_cost[cntr], new_splits[cntr][k]))
else:
tot_cost[cntr] = tot_cost[cntr] * (int(words[new_splits[cntr][k]]) / words['corpus_size'] * 1.0 ) * weights[new_rule_applied[cntr][k]]
logging.debug("1.5 Total cost is %s for %s" % (tot_cost[cntr], new_splits[cntr][k]))
if not (new_splits[cntr][-1] in words.keys()):
tot_cost[cntr] = tot_cost[cntr] * (1 / (words['corpus_size'] * 1.0))
logging.debug("2 Got %s %s in ouput keys" % (new_splits[cntr][-1], tot_cost[cntr]))
else:
tot_cost[cntr] = tot_cost[cntr] * ((int(words[new_splits[cntr][-1]]) / (words['corpus_size'] * 1.0 )))
logging.debug("3 Got %s %s in ouput keys" % (new_splits[cntr][-1], tot_cost[cntr]))
if not flag:
logging.debug("Came in cost updater.")
tot_cost[cntr] = tot_cost[cntr] / (len(new_splits[cntr]))
if not (tot_cost[cntr] in costs.keys()):
final_costs.append(tot_cost[cntr])
costs[tot_cost[cntr]] = 1
if tot_cost[cntr] in output1.keys():
output1[tot_cost[cntr]].append(new_splits[cntr])
else:
output1[tot_cost[cntr]] = [new_splits[cntr],]
logging.debug(tot_cost)
if not len(output1):
if args.switch == "compare":
print("%s\t=>\t%s" % (args.word, args.word))
else:
print("%s\t=>No splittings found" % args.word)
print("%s\t=>\t0" % args.word)
def is_readable(binary):
return os.path.isfile(binary) and os.access(binary, os.R_OK)
def is_executable(binary):
return os.path.isfile(binary) and os.access(binary, os.X_OK)
def is_writable(binary):
return os.path.isfile(binary) and os.access(binary, os.W_OK)
# ---------------------------------------------------- Main section ----------------------------------------------------
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Sandhi splitting program:\nVersion: %s.' % pgm_ver)
parser.add_argument(
'-c', '--choice', nargs='?',
choices=('sandhi', 'samasa', 'both'), default='sandhi', help="Choose what openration to do.")
parser.add_argument('-m', '--morphbin', required=True, help='Path of morph binary to use.')
parser.add_argument('-o', '--output', help='Result should be written to this file.')
parser.add_argument(
'-s', '--switch', nargs='?', choices=('compare', 'testing'),
default='testing', help="Choose the right switch.")
parser.add_argument('-v', '--verbose', type=int, choices=[1,2], help='Adjust verbocity level.')
parser.add_argument('word', nargs='?', help='Word to split.')
args = parser.parse_args()
if not args.verbose:
logging.basicConfig(level=logging.WARNING)
elif args.verbose == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbose == 2:
logging.basicConfig(level=logging.DEBUG)
logging.info("Arguments to program status: \n %s" % args)
logging.info("This run is for: " + args.choice)
''' Load rules '''
optn_selected = input_files[args.choice]
load_rules_and_words(optn_selected[0], optn_selected[1])
''' Load rule ends'''
''' Sanity begins '''
# Not doing sanity on rule and word files as we are not taking these from user now.
if not args.output:
logging.info("Will use default file name and location.")
if not is_readable(args.morphbin):
logging.error("Morph bin not reable, Exiting...")
sys.exit(1)
if not (is_readable(optn_selected[0]) and is_readable(optn_selected[1])):
logging.error("Check rules and word file, Exiting...")
sys.exit(1)
if not args.word:
logging.error("Provide word to split. Exiting...")
sys.exit(2)
''' Sanity ends '''
logging.info("Loaded corpus size: %d" % words['corpus_size'])
logging.info("Total frequency of rules: %d" % rules['tot_freq'])
logging.debug("Loaded rules: %s " % rules)
logging.debug("Loaded weight: %s" % weights)
''' Call word split routing and update globals with output. '''
split_word(args.word)
logging.debug("------------------------")
logging.debug("Printing Split: \n%s" % split)
logging.debug("Printing Index: \n%s" % index)
logging.debug("Printing no of splits: \n%s" % no_of_splits)
logging.debug("Printing rules applied: \n%s" % rule_applied)
logging.debug("------------------------")
''' Does further split of rules and words and queries morpholigical place and writes result in a result file '''
split_final()
''' Output of result file is matching in b/w Python and CPP code with only order difference. '''
logging.debug("New splits are: %s" % new_splits)
logging.debug("Size of new splits is: %d" % len(new_splits))
logging.debug("New rules applied are: %s" % new_rule_applied)
logging.debug("Value loaded in os is: %s" % op)
# Many of the code snippets are just translated, not optimized
calculate_costs()
logging.debug('Total cost (tot_cost) :\n %s' % tot_cost)
logging.debug('Cost (costs) :\n %s' % costs)
logging.debug('Output1 (output1) :\n %s' % output1)
logging.debug('Final_cost before sorting :\n %s' % final_costs)
final_costs.sort(reverse = True)
logging.debug('Final cost after sorting :\n %s' % final_costs)
count = 0
    found = 0
correct_ones = 0
for each in range(0, len(final_costs)):
temp1 = output1[final_costs[each]]
count = each + 1
logging.debug(temp1[0])
word = "+".join(temp1[0])
logging.debug(args.switch)
if args.switch == "testing" and (count == 1):
print("%s = %s\t%s" % (args.word, word, final_costs[each]))
found = 1
elif args.switch == "compare" and (word == 'sandhi'):
correct_ones += 1
print("\nThe expected split for : " + args.word)
print("%s\t %s\t %s" % (word, final_costs[i], count))
found = 1
# Put ranks logic
# ranks[count]=ranks[count]+1;
# Not porting further as it seems all is for -C option, which is not used.
|
from django.conf.urls import url, include
from django.contrib import admin
import helpdesk_portal.views as views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^admin/', admin.site.urls),
url(r'^accounts/', include('accounts.urls')),
url(r'^accounts/', include('django.contrib.auth.urls', namespace='accounts')),
url(r'^tickets/', include('tickets.urls', namespace='tickets')),
] + static(settings.MEDIA_URL, document_root= settings.MEDIA_ROOT)
|
from django.shortcuts import render
from django.shortcuts import render_to_response
from .email import send_welcome_email
from django.views.generic.edit import FormView
from django.utils import timezone
from django.contrib.gis.geos import Point
from django.contrib.gis.db.models.functions import Distance
from .forms import SignUpForm, PlacesForm, LookupForm, NowForm, ProneForm, ProfileForm
from .models import Places, Profile, RiotPronePlaces, NowRioting
from django.contrib.auth.decorators import login_required
from django.views.generic.base import View
from django.template import RequestContext
from django.http import HttpResponseRedirect
# Create your views here.
class LookupView(View):
form_class = LookupForm
def get(self, request):
return render(request, 'riot/lookup.html')
def form_valid(self, form):
# Get data
latitude = form.cleaned_data['latitude']
longitude = form.cleaned_data['longitude']
# Get today's date
now = timezone.now()
# Get next week's date
next_week = now + timezone.timedelta(weeks=1)
# Get Point
location = Point(longitude, latitude, srid=4326)
# Look up events
events = Profile.objects.filter(datetime__gte=now).filter(datetime__lte=next_week).annotate(distance=Distance('venue__location', location)).order_by('distance')[0:5]
# Render the template
return render_to_response('riot/lookupresults.html', {
'events': events
})
def signUp(request):
form = SignUpForm(request.POST)
if request.method == 'POST':
if form.is_valid():
username = form.cleaned_data['your_name']
email = form.cleaned_data['email']
recipient = Profile(username = username,email =email)
recipient.save()
send_welcome_email(username,email)
            return HttpResponseRedirect('home')
#.................
return render(request, 'registration/signUp.html', {"SignUpForm":form})
|
import copy
from typing import List, Any
from kts_linguistics.string_transforms.abstract_transform import AbstractTransform
class TransformPipeline:
def __init__(self, do_cache=False, cache=None):
self.transforms = list()
self.cache = cache if cache is not None else dict()
self.do_cache = do_cache
def add_transform(self, transform: AbstractTransform):
self.transforms.append(transform)
def remove_transform_by_class(self, cls):
self.transforms = [t for t in self.transforms if not isinstance(t, cls)]
def copy(self):
c = copy.copy(self)
c.transforms = [it for it in c.transforms] # copy list
return c
def fit(self, groups: List[List[str]]):
for t in self.transforms:
t.fit(groups, pipeline=self)
def transform(self, s: Any) -> Any:
if isinstance(s, list):
s = tuple(s)
if s in self.cache:
return self.cache[s]
transformed_s = s
for t in self.transforms:
transformed_s = t.transform(transformed_s)
if self.do_cache:
if isinstance(transformed_s, list):
transformed_s = tuple(transformed_s)
self.cache[s] = transformed_s
return transformed_s
def custom_transform(self, s: Any, apply_before_transform: AbstractTransform) -> Any:
for t in self.transforms:
if t == apply_before_transform:
break
s = t.transform(s)
return s
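# A minimal usage sketch; the Lowercase transform below is hypothetical and only
# assumes AbstractTransform expects fit(groups, pipeline=...) and transform(s):
#
#     class Lowercase(AbstractTransform):
#         def fit(self, groups, pipeline=None):
#             pass
#         def transform(self, s):
#             return s.lower()
#
#     pipeline = TransformPipeline(do_cache=True)
#     pipeline.add_transform(Lowercase())
#     pipeline.fit([['Foo', 'FOO'], ['Bar']])
#     print(pipeline.transform('FooBar'))  # 'foobar', cached for repeated calls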
|
# Definition for a singly-linked list
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __str__(self):
result = str(self.val)
if self.next:
result += str(self.next)
return result
class Solution:
# ex. head = 1,2,3,4
def reverseList(self, head):
# CALL STACK 1 reverseList(1)
if head is None or head.next is None:
return head
p = self.reverseList(head.next)
# p = reverseList(2)
head.next.next = head
# head.next.next = 1
head.next = None
# head.next = None
return p
# CALL STACK 2 reverseList(2)
node = ListNode(1)
node.next = ListNode(2)
node.next.next = ListNode(3)
node.next.next.next = ListNode(4)
print(Solution().reverseList(node))
# 4321
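# An equivalent iterative version, shown only as a sketch for comparison with the
# recursive reverseList above: it walks the list once, repointing each node's
# `next` to the previous node.
def reverse_list_iterative(head):
    prev = None
    while head:
        nxt = head.next
        head.next = prev
        prev = head
        head = nxt
    return prev

node = ListNode(1)
node.next = ListNode(2)
node.next.next = ListNode(3)
node.next.next.next = ListNode(4)
print(reverse_list_iterative(node))
# 4321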
|
from __future__ import print_function
import torch
# uninitialized tensor: holds whatever values happen to be in memory
x = torch.empty(5, 3)
print(x)
x = torch.rand(5, 3)
print(x)
x = torch.zeros(5, 3, dtype=torch.long)
print(x)
x = torch.tensor([5.5, 3])
print(x)
x = x.new_ones(5, 3, dtype=torch.double)
print(x)
x = torch.randn_like(x, dtype=torch.float)
print(x)
print(x.size())
# add, get result
y = torch.rand(5, 3)
print(x + y)
# add, get result syntax 2
print(torch.add(x, y))
# add and set result to other tensor
result = torch.empty(5, 3)
torch.add(x, y, out=result)
print(result)
# add x to y, overwriting y
y.add_(x)
print(y)
# all rows, second column (index 1)
print(x[:, 1])
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8) # the size -1 is inferred from other dimensions
print(x.size(), y.size(), z.size())
# tensor with one element, get value as number
x = torch.randn(1)
print(x)
print(x.item())
if torch.cuda.is_available():
device = torch.device("cuda")
y = torch.ones_like(x, device=device) # tensor on GPU
x = x.to(device)
z = x + y
print(z)
print(z.to("cpu", torch.double))
|