| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
Truck.py
|
GLayton-TX/Delivery_application
| 0
|
12776751
|
# class for the truck object
# max of 16 packages
# travel at 18mph
# 3 trucks but only 2 drivers
# 8am earliest departure from hub
import Location
import Utility
import Package
# the truck object, initializes with parameters set in the assessment
# time_space complexity of O(1)
class Truck:
def __init__(self):
self.truck_number = 1
self.speed = 18
self.trip_odometer = 0.0
self.odometer = 0.0
self.time_out = 0.0
self.location: Location = Location.get_location(0)
self.max_cargo = 16
self.cargo = []
self.is_full = False
self.delivered = []
self.start_time = 0
# takes a package object and loads it into the truck object's cargo list
# updates the package's values
# ensures that the maximum number of packages the truck can hold is not exceeded
# time_space complexity of O(1)
def load_package(self, package: Package):
if len(self.cargo) < self.max_cargo:
self.cargo.append(package)
package.package_status = "En Route"
package.package_truck_number = self.truck_number
package.package_load_time = Utility.format_min_to_time(self.time_out + self.start_time)
else:
self.is_full = True
print(f"Truck is full did not load package #{package.package_id}")
# removes a package from the truck's cargo
# could be used if packages were transferred between trucks or returned to the hub without being delivered
# time_space complexity of O(1)
def remove_package(self, package):
self.cargo.remove(package)
# delivers a package from a truck's cargo
# updates the package's info
# moves package data from cargo to delivered
# time_space complexity of O(N)
def deliver_package(self, package_id):
delivered_at = self.start_time + self.time_out
# updates the relevant package data upon delivery
# time_space complexity of O(1)
def update_on_delivery(package):
package.package_delivered_at = delivered_at
package.package_status = "Delivered"
self.delivered.append(package)
self.remove_package(package)
# iterate over a copy so that removing from cargo does not skip packages
for package in list(self.cargo):
    if package.package_id == package_id:
        update_on_delivery(package)
# resets truck data for the start of a route
# could be used if you wanted to see data from each run the truck makes as opposed to total data
# time_space complexity of O(1)
def start_route(self):
self.time_out = 0.0
self.location = Location.get_location(0)
self.trip_odometer = 0.0
self.cargo = []
self.is_full = False
self.delivered = []
# simulates the truck moving from location to location
# updates the location attribute, as well as the odometers and the trip timer
# time_space complexity of O(1)
def drive_truck(self, destination_id):
destination = Location.get_location(destination_id)
distance = Location.get_distance(self.location, destination)
self.time_out += (distance / self.speed) * 60
self.trip_odometer += distance
self.odometer += distance
self.location = destination
# boolean value for whether the truck has no more packages in cargo
# time_space complexity of O(1)
def truck_is_empty(self):
    return len(self.cargo) == 0
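# --- Illustrative usage sketch (assumed, not part of the original file) ---
# Shows how the Truck class above might be exercised; the Package objects and
# the location/package ids below are hypothetical.
#
# truck = Truck()
# truck.start_route()
# for pkg in packages_for_route:        # hypothetical list of Package objects
#     truck.load_package(pkg)
# truck.drive_truck(destination_id=5)   # hypothetical location id
# truck.deliver_package(package_id=12)  # hypothetical package id
# print(truck.trip_odometer, truck.truck_is_empty())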
| 3.921875
| 4
|
tests/core/tests_category.py
|
caputomarcos/django-bmf
| 0
|
12776752
|
#!/usr/bin/python
# ex:set fileencoding=utf-8:
# flake8: noqa
from __future__ import unicode_literals
from django.test import TestCase
from django.utils.translation import ugettext_lazy as _
from djangobmf.core.category import Category
from collections import OrderedDict
class ClassTests(TestCase):
pass
# def setUp(self): # noqa
# self.view1 = View(model='empty', name="Test1", slug="test1")
# self.view2 = View(model='empty', name="Test2", slug="test2")
# super(ClassTests, self).setUp()
# def test_init_empty(self):
# class TestCategory(Category):
# name = "test"
# slug = "test"
# td = TestCategory()
# self.assertEqual(td.data, OrderedDict([
# ]))
# def test_init_data1(self):
# class TestCategory(Category):
# name = "test"
# slug = "test"
# td = TestCategory(self.view1)
# self.assertEqual(td.data, OrderedDict([
# ('test1', self.view1),
# ]))
# def test_init_data2(self):
# class TestCategory(Category):
# name = "test"
# slug = "test"
# td = TestCategory(self.view1, self.view2)
# self.assertEqual(td.data, OrderedDict([
# ('test1', self.view1),
# ('test2', self.view2),
# ]))
# def test_add_view(self):
# class TestCategory(Category):
# name = "test"
# slug = "test"
# td = TestCategory()
# td.add_view(self.view1)
# self.assertEqual(td.data, OrderedDict([
# ('test1', self.view1),
# ]))
# td.add_view(self.view1)
# self.assertEqual(td.data, OrderedDict([
# ('test1', self.view1),
# ]))
# td.add_view(self.view2)
# self.assertEqual(td.data, OrderedDict([
# ('test1', self.view1),
# ('test2', self.view2),
# ]))
# td.add_view(self.view1)
# self.assertEqual(td.data, OrderedDict([
# ('test1', self.view1),
# ('test2', self.view2),
# ]))
# td.add_view(self.view2)
# self.assertEqual(td.data, OrderedDict([
# ('test1', self.view1),
# ('test2', self.view2),
# ]))
# def test_key(self):
# class TestCategory(Category):
# name = "Test"
# slug = "test"
# td = TestCategory()
# self.assertEqual(td.key, "test")
# def test_merge(self):
# class TestCategory1(Category):
# name = "Test1"
# slug = "test1"
# class TestCategory2(Category):
# name = "Test1"
# slug = "test1"
# td1 = TestCategory1()
# td2 = TestCategory2()
# td1.merge(td2)
# self.assertEqual(td1.data, OrderedDict([
# ]))
# td1 = TestCategory1()
# td2 = TestCategory2(self.view1)
# td1.merge(td2)
# self.assertEqual(td1.data, OrderedDict([
# ('test1', self.view1),
# ]))
# td1 = TestCategory1(self.view1)
# td2 = TestCategory2(self.view2)
# td1.merge(td2)
# self.assertEqual(td1.data, OrderedDict([
# ('test1', self.view1),
# ('test2', self.view2),
# ]))
# td1 = TestCategory1(self.view2)
# td2 = TestCategory2(self.view2)
# td1.merge(td2)
# self.assertEqual(td1.data, OrderedDict([
# ('test2', self.view2),
# ]))
# td1 = TestCategory1(self.view2, self.view1)
# td2 = TestCategory2(self.view2)
# td1.merge(td2)
# self.assertEqual(td1.data, OrderedDict([
# ('test2', self.view2),
# ('test1', self.view1),
# ]))
# def test_bool(self):
# class TestCategory(Category):
# name = "test"
# slug = "test"
# td = TestCategory()
# self.assertFalse(td)
# td.add_view(self.view1)
# self.assertTrue(td)
# def test_len(self):
# class TestCategory(Category):
# name = "test"
# slug = "test"
# td = TestCategory()
# self.assertEqual(len(td), 0)
# td = TestCategory(self.view1, self.view2)
# self.assertEqual(len(td), 2)
# def test_eq(self):
# class TestCategory1(Category):
# name = "test1"
# slug = "test1"
# class TestCategory2(Category):
# name = "test2"
# slug = "test2"
# class TestCategory3(Category):
# name = "test1"
# slug = "test1"
# td1 = TestCategory1()
# td2 = TestCategory2()
# td3 = TestCategory3()
# self.assertEqual(td1, td3)
# self.assertNotEqual(td1, td2)
# self.assertNotEqual(td1, self.view1)
# def test_contains(self):
# class TestCategory(Category):
# name = "test"
# slug = "test"
# td = TestCategory(self.view1)
# self.assertFalse('test2' in td)
# self.assertFalse(self.view2 in td)
# self.assertTrue('test1' in td)
# self.assertTrue(self.view1 in td)
# def test_getitem(self):
# class TestCategory(Category):
# name = "test"
# slug = "test"
# td = TestCategory(self.view1, self.view2)
# self.assertEqual(td['test1'], self.view1)
# self.assertEqual(td['test2'], self.view2)
# with self.assertRaises(KeyError):
# test = td['test3']
# def test_iter(self):
# class TestCategory(Category):
# name = "test"
# slug = "test"
# td = TestCategory(self.view1)
# for i in td:
# self.assertEqual(i, self.view1)
# td = TestCategory(self.view1, self.view2)
# self.assertEqual([i for i in td], [self.view1, self.view2])
| 2.390625
| 2
|
vega/core/trainer/callbacks/model_checkpoint.py
|
qixiuai/vega
| 0
|
12776753
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""ModelCheckpoint callback defination."""
from .callbacks import Callback
from vega.core.common.class_factory import ClassFactory, ClassType
@ClassFactory.register(ClassType.CALLBACK)
class ModelCheckpoint(Callback):
"""Callback that saves the evaluated Performance."""
def before_train(self, logs=None):
"""Be called before the training process."""
self.is_chief = self.params['is_chief']
self.do_validation = self.params['do_validation']
def after_epoch(self, epoch, logs=None):
"""Be called after each epoch."""
if self.is_chief:
self.trainer._save_checkpoint(epoch)
if not self.trainer.cfg.get('save_best_model', False):
return
self.performance = logs.get('summary_perfs', None)
best_changed = self.performance['best_valid_perfs_changed']
if best_changed:
self.trainer.output_model()
| 2.171875
| 2
|
kernel/type.py
|
zhouwenfan/temp
| 0
|
12776754
|
# Author: <NAME>
from collections import OrderedDict
class TypeMatchException(Exception):
pass
class HOLType():
"""Represents a type in higher-order logic.
Types in HOL are formed by two kinds of constructors: TVar and Type.
TVar(name) represents a type variable with the given name. Type(f, args)
represents a type constant applied to a list of arguments.
There are two fundamental type constants:
- booleans, with name "bool" and no arguments.
- functions, with name "fun" and two arguments: the domain and codomain
types. Type("fun", a, b) is printed as a => b. The => sign associates to
the right.
Further defined type constants include:
- natural numbers, with name "nat" and no arguments.
- lists, with name "list" and one argument.
- product, with name "prod" and two arguments. Type("prod", a, b) is
printed as a * b.
Examples:
nat => bool: functions from natural numbers to booleans (predicates on
natural numbers).
nat => nat: functions from natural numbers to natural numbers.
nat => nat => nat: or nat => (nat => nat), functions from two natural
numbers to natural numbers.
nat * nat => nat: functions from a pair of natural numbers to natural
numbers.
nat list: list of natural numbers.
nat list list: list of lists of natural numbers.
"""
(TVAR, TYPE) = range(2)
def is_fun(self):
"""Whether self is of the form a => b."""
return self.ty == HOLType.TYPE and self.name == "fun"
def domain_type(self):
"""Given a type of form a => b, return a."""
assert(self.is_fun())
return self.args[0]
def range_type(self):
"""Given a type of form a => b, return b."""
assert(self.is_fun())
return self.args[1]
def strip_type(self):
"""Given a type of form a_1 => ... => a_n, b, return the pair
[a_1, ... a_n], b.
"""
if self.is_fun():
domains, range = self.range_type().strip_type()
return ([self.domain_type()] + domains, range)
else:
return ([], self)
def __str__(self):
if self.ty == HOLType.TVAR:
return "'" + self.name
elif self.ty == HOLType.TYPE:
if len(self.args) == 0:
return self.name
elif len(self.args) == 1:
# Insert parenthesis if the single argument is a function.
if HOLType.is_fun(self.args[0]):
return "(" + str(self.args[0]) + ") " + self.name
else:
return str(self.args[0]) + " " + self.name
elif HOLType.is_fun(self):
# 'a => 'b => 'c associates to the right. So parenthesis is
# needed to express ('a => 'b) => 'c.
if HOLType.is_fun(self.args[0]):
return "(" + str(self.args[0]) + ") => " + str(self.args[1])
else:
return str(self.args[0]) + " => " + str(self.args[1])
else:
return "(" + ", ".join(str(t) for t in self.args) + ") " + self.name
else:
raise TypeError()
def __repr__(self):
if self.ty == HOLType.TVAR:
return "TVar(" + self.name + ")"
elif self.ty == HOLType.TYPE:
return "Type(" + self.name + ", " + str(list(self.args)) + ")"
else:
raise TypeError()
def __hash__(self):
if hasattr(self, "_hash_val"):
return self._hash_val
if self.ty == HOLType.TVAR:
self._hash_val = hash(("VAR", self.name))
elif self.ty == HOLType.TYPE:
self._hash_val = hash(("COMB", self.name, tuple(hash(arg) for arg in self.args)))
return self._hash_val
def __eq__(self, other):
if not isinstance(other, HOLType):
return False
if self.ty != other.ty:
return False
elif self.ty == HOLType.TVAR:
return self.name == other.name
elif self.ty == HOLType.TYPE:
return self.name == other.name and self.args == other.args
else:
raise TypeError()
def subst(self, tyinst):
"""Given a dictionary tyinst mapping from names to types,
simultaneously substitute for the type variables using the
dictionary.
"""
assert isinstance(tyinst, dict), "tyinst must be a dictionary"
if self.ty == HOLType.TVAR:
if self.name in tyinst:
return tyinst[self.name]
else:
return self
elif self.ty == HOLType.TYPE:
return Type(self.name, *(T.subst(tyinst) for T in self.args))
else:
raise TypeError()
def match_incr(self, T, tyinst, internal_only=False):
"""Incremental match. Match self (as a pattern) with T. Here tyinst
is the current instantiation. This is updated by the function.
"""
if self.ty == HOLType.TVAR:
if internal_only and not self.name.startswith('_'):
if self != T:
raise TypeMatchException()
elif self.name in tyinst:
if T != tyinst[self.name]:
raise TypeMatchException()
else:
tyinst[self.name] = T
elif self.ty == HOLType.TYPE:
if T.ty != HOLType.TYPE or T.name != self.name:
raise TypeMatchException()
else:
for arg, argT in zip(self.args, T.args):
arg.match_incr(argT, tyinst, internal_only=internal_only)
else:
raise TypeError()
def match(self, T, internal_only=False):
"""Match self (as a pattern) with T. Returns either a dictionary
containing the match, or raise TypeMatchException.
"""
tyinst = dict()
self.match_incr(T, tyinst, internal_only=internal_only)
return tyinst
def get_tvars(self):
"""Return the list of type variables."""
def collect(T):
if T.ty == HOLType.TVAR:
return [T]
else:
return sum([collect(arg) for arg in T.args], [])
return list(OrderedDict.fromkeys(collect(self)))
def get_tsubs(self):
"""Return the list of types appearing in self."""
def collect(T):
if T.ty == HOLType.TVAR:
return [T]
else:
return sum([collect(arg) for arg in T.args], [T])
return list(OrderedDict.fromkeys(collect(self)))
class TVar(HOLType):
"""Type variable."""
def __init__(self, name):
self.ty = HOLType.TVAR
self.name = name
class Type(HOLType):
"""Type constant, applied to a list of arguments."""
def __init__(self, name, *args):
self.ty = HOLType.TYPE
self.name = name
self.args = args
def TFun(*args):
"""Returns the function type arg1 => arg2 => ... => argn."""
if isinstance(args[0], list):
args = tuple(args[0])
assert all(isinstance(arg, HOLType) for arg in args), \
"TFun: each argument of TFun must be a type."
res = args[-1]
for arg in reversed(args[:-1]):
res = Type("fun", arg, res)
return res
"""Boolean type."""
boolT = Type("bool")
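# --- Illustrative usage sketch (assumed example, not part of the original file) ---
if __name__ == "__main__":
    natT = Type("nat")                        # the "nat" type constant described in the docstring
    f = TFun(TVar("a"), natT, boolT)          # prints as 'a => nat => bool
    print(f)
    print(f.strip_type())                     # pair of (argument types, result type)
    print(f.subst({"a": natT}))               # nat => nat => bool
    print(f.match(TFun(natT, natT, boolT)))   # maps 'a to the nat type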
| 3.953125
| 4
|
test/fake_combo_dataset_generation.py
|
EdwardDixon/facenet
| 3
|
12776755
|
<gh_stars>1-10
import json
import numpy as np
from itertools import combinations
from sklearn.externals import joblib
def compare(x,y):
dist = np.sqrt(np.sum(np.square(np.subtract(x, y))))
return (dist)
def get_sim(vecs):
n=len(vecs)
if(n<=1):
return 1,0,0,n
combe12=list(combinations(range(n),2))
ncomb=len(combe12)
sim=np.zeros(ncomb)
for i in range(ncomb):
e12=combe12[i]
sim[i]=compare(vecs[e12[0]],vecs[e12[1]].T)
mean=np.mean(sim)
std=np.std(sim)
min_sim=np.min(sim) ## why do I use np.min here and np.argmin later?
return mean, min_sim, std, n ### what else can I have?? max sim is far less useful,
def print_summary_stats(scores):
print("Average distance: " + str(np.average(scores)))
print("Minimum distance: " + str(np.min(scores)))
print("Maximum distance: " + str(np.max(scores)))
return
def make_fake_combos(max_size): # maybe start with 10k
fake_women_combo_counter = 0
while fake_women_combo_counter < max_size:
holder = np.asarray(list(d.keys()))  # list() so np.asarray gets a sequence on Python 3
random_numbers = np.random.randint(len(holder), size=2) ## how to ensure it's not the same number?
if random_numbers[0] != random_numbers[1]:
selector_a = holder[random_numbers[0]]
a = d[selector_a].keys()
woman_1_sample = np.empty(shape=(2, 128))
if len(a) > 1:
#print("len a" + " " + str(len(a)))
woman_1 = []
for x in a:
woman_1.append(d[selector_a][x])
all_woman1_array = np.asarray(woman_1)
#print("all_woman1_array" , all_woman1_array.shape)
woman_1_sample = all_woman1_array[0:2, :]
#print(woman_1_sample.shape)
selector_b = holder[random_numbers[1]]
b = d[selector_b].keys()
woman_2_sample = np.empty(shape=(2, 128))
if len(b) > 1:
#print("len b" + " " + str(len(b)))
woman_2 = []
for y in b:
woman_2.append(d[selector_b][y])
all_woman2_array = np.asarray(woman_2)
woman_2_sample = all_woman2_array[0:2, :]
#print(woman_2_sample.shape)
fake_combo_woman = np.append(woman_1_sample, woman_2_sample, axis = 0)
#print(fake_combo_woman.shape)
if len(fake_combo_woman) == 4:
sim = get_sim(fake_combo_woman)
simapp = np.append(selector_b, sim)
name_and_score = np.append(selector_a, simapp)
flag_name_score = np.append("1", name_and_score)
print(flag_name_score)
fake_combos_scores.append(flag_name_score)
fake_women_combo_counter = fake_women_combo_counter +1
return fake_combos_scores
output_path = ('/home/iolie/thorn/sharon/MINIVECS')
d = json.load(open(output_path))
fake_combos_scores = [] ## why can't this be defined inside the function??
make_fake_combos(10)
print(len(fake_combos_scores))
aa = np.asarray(fake_combos_scores)
print(aa.shape)
# Need to shuffle the two sets together
# pull in earlier stuff
# Need to add a classifier - try: svm, decision tree, ??
# how do I ensure here that the labels are kept == will need to make column(s) in the data to match up = (Y?N same girl; girl 1; girl 2; stat1;stat2; etc)
## make me a classifier
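# --- Hedged sketch of the classifier step noted above (assumed, not part of the original file) ---
# Assumes each row of fake_combos_scores is [flag, name_a, name_b, mean, min_sim, std, n]
# and that a matching set of real (same-person) rows with flag "0" is appended elsewhere.
#
# from sklearn.model_selection import train_test_split
# from sklearn.svm import SVC
#
# rows = np.asarray(fake_combos_scores)
# X = rows[:, 3:].astype(float)   # mean, min_sim, std, n
# y = rows[:, 0].astype(int)      # 1 = fake combo, 0 = real combo
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# clf = SVC(kernel="rbf").fit(X_train, y_train)
# print("held-out accuracy:", clf.score(X_test, y_test))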
| 2.921875
| 3
|
Stat_Calculator/Median.py
|
cy275/Statistics_Calculator
| 0
|
12776756
|
def median(a):
a = sorted(a)
list_length = len(a)
num = list_length//2
if list_length % 2 == 0:
median_num = (a[num - 1] + a[num])/2  # average of the two middle elements
else:
median_num = a[num]
return median_num
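# Example usage (illustrative, not part of the original file):
if __name__ == "__main__":
    print(median([3, 1, 2]))     # 2
    print(median([4, 1, 3, 2]))  # 2.5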
| 3.734375
| 4
|
tests/test_sample.py
|
harshkothari410/refocus-python
| 0
|
12776757
|
<reponame>harshkothari410/refocus-python<gh_stars>0
import sys, os
sys.path.insert(0, os.path.abspath('..'))
from refocus import Refocus
r = Refocus()
def test_post_success_sample():
data = {
'name' : 'test_subject',
'isPublished' : True
}
subject = r.subject.post(data)
subjectId = subject['id']
name = 'example'
data = {
'name' : 'test_aspect',
'timeout' : '0m',
'isPublished' : True
}
aspect = r.aspect.post(data)
aspectId = aspect['id']
print(subjectId, aspectId)
data = {
'name' : name,
'subjectId' : subjectId,
'aspectId' : aspectId
}
response = r.sample.post(data)
print(response)
assert response['name'] == 'test_subject|test_aspect'
def test_post_fail_sample():
name = 'example'
data = {
'name' : name
}
response = r.sample.post(data)
assert len(response['errors']) > 0, True
def test_get_all_success_sample():
response = r.sample.get()
print(response)
assert response[0]['name'] == 'test_subject|test_aspect'
def test_get_success_sample():
name = 'test_subject|test_aspect'
response = r.sample.get(name=name)
print(response)
assert response['name'] == name
def test_get_fail_sample():
name = 'example1'
response = r.sample.get(name=name)
assert len(response['errors']) > 0, True
def test_patch_success_sample():
name = 'test_subject|test_aspect'
data = {
'relatedLinks': [{ 'name': 'xyz', 'url': 'http://xyz.com'}]
}
response = r.sample.patch(data, name=name)
assert response['relatedLinks'][0]['url'], 'http://xyz.com'
def test_patch_fail_sample():
name = 'example1'
data = {
'name' : 'hello'
}
response = r.sample.patch(data, name=name)
assert len(response['errors']) > 0, True
def test_delete_success_sample():
name = 'test_subject|test_aspect'
response = r.sample.delete(name=name)
assert response['name'] == name
def test_delete_fail_sample():
name = 'example1'
response = r.sample.delete(name=name)
assert len(response['errors']) > 0, True
def test_upsert_success_sample():
data = {
'name' : 'test_subject|test_aspect'
}
response = r.sample.upsert(data)
assert response['name'] == 'test_subject|test_aspect'
response = r.sample.delete(name=data['name'])
def test_upsert_fail_sample():
data = {
'name' : 'example1'
}
response = r.sample.upsert(data)
assert len(response['errors']) > 0, True
r.subject.delete(absolute_path='test_subject')
r.aspect.delete(name='test_aspect')
| 2.421875
| 2
|
examples/readdir.py
|
jorge-imperial/mongo_ftdc
| 2
|
12776758
|
import pyftdc
import datetime
p = pyftdc.FTDCParser()
start = datetime.datetime.now()
p.parse_dir('/home/jorge/diagnostic.data', lazy=False)
end = datetime.datetime.now()
t = end - start
print(t)
| 2.4375
| 2
|
python/lambda-dlq-destinations/dlq/core_lambda.py
|
chejef/aws-cdk-examples-proserve
| 0
|
12776759
|
from constructs import Construct
from aws_cdk import (
Duration,
aws_sqs as sqs,
aws_sns as sns,
aws_lambda as _lambda,
aws_lambda_event_sources as events,
)
lambda_timeout = Duration.seconds(15)
visibility_timeout = lambda_timeout.plus(Duration.seconds(5))
retention_period = Duration.minutes(60)
# for lambda dlq and destinations - maximum number of times to retry when the function returns an error,
# should be between 0 and 2, default 2.
lambda_retry_attempt = 2
# for sqs dlq - number of times the failed message can be dequeued from sqs before send to dead-letter queue,
# should be between 1 and 1000, default none.
sqs_max_receives = 3
def add_sns_event_source(scope: Construct, function: _lambda.Function, topic: sns.Topic):
"""
Add SNS topic as Lambda event source.
Args:
scope (Construct): the scope object, all child constructs are defined within this scope.
function: Lambda function to add event source to.
topic: SNS topic as the Lambda event source.
"""
sns_source = events.SnsEventSource(topic)
function.add_event_source(sns_source)
def add_sqs_event_source(scope: Construct, function: _lambda.Function, queue: sqs.Queue):
"""
Add SQS as Lambda event source.
Args:
scope (Construct): the scope object, all child constructs are defined within this scope.
function: Lambda function to add event source to.
queue: SQS queue as the Lambda event source.
"""
sqs_source = events.SqsEventSource(queue, batch_size=1)
alias = _lambda.Alias(scope, "alias", alias_name="CURRENT", version=function.current_version)
alias.add_event_source(sqs_source)
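# --- Hedged usage sketch (assumed wiring, not part of the original module) ---
# Illustrates how the helpers above might be called from a Stack; the construct ids,
# runtime, and asset path are assumptions, not the example project's actual values.
#
# from aws_cdk import Stack
#
# class ExampleStack(Stack):
#     def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
#         super().__init__(scope, construct_id, **kwargs)
#         dlq = sqs.Queue(self, "Dlq", retention_period=retention_period)
#         queue = sqs.Queue(
#             self, "WorkQueue",
#             visibility_timeout=visibility_timeout,
#             dead_letter_queue=sqs.DeadLetterQueue(queue=dlq, max_receive_count=sqs_max_receives),
#         )
#         fn = _lambda.Function(
#             self, "Worker",
#             runtime=_lambda.Runtime.PYTHON_3_9,
#             handler="index.handler",
#             code=_lambda.Code.from_asset("lambda"),
#             timeout=lambda_timeout,
#         )
#         add_sqs_event_source(self, fn, queue)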
| 2.34375
| 2
|
alex/applications/PublicTransportInfoCS/slu/dailogregclassifier/download_models.py
|
oplatek/alex
| 184
|
12776760
|
<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
if __name__ == '__main__':
import autopath
from alex.utils.config import online_update
if __name__ == '__main__':
online_update("applications/PublicTransportInfoCS/slu/dailogregclassifier/dailogreg.nbl.model.all")
| 1.140625
| 1
|
MA/__main__.py
|
highvelcty/MediaArchivist
| 0
|
12776761
|
<filename>MA/__main__.py
# === Imports ======================================================================================
# Standard library
# Local library
from .gui import root
# === Main =========================================================================================
gui = root.MediaArchivistGUI()
gui.mainloop()
| 1.609375
| 2
|
tron/Nubs/deprecated/rawin.py
|
sdss/tron
| 0
|
12776762
|
<reponame>sdss/tron<filename>tron/Nubs/deprecated/rawin.py<gh_stars>0
from tron import g, hub
name = 'rawin'
listenPort = 6090
def acceptStdin(in_f, out_f, addr=None):
""" Create a command source with the given fds as input and output. """
d = Hub.RawCmdDecoder('gcam', EOL='\r\n', debug=9)
e = Hub.RawReplyEncoder(keyName='RawTxt', EOL='\n', debug=9)
c = Hub.StdinNub(g.poller, in_f, out_f, name='TC01.TC01', encoder=e, decoder=d, debug=1)
c.taster.addToFilter(['gcam', 'na2cam'], [], [])
hub.addCommander(c)
def start(poller):
stop()
l = Hub.SocketListener(poller, listenPort, name, acceptStdin)
hub.addAcceptor(l)
def stop():
l = hub.findAcceptor(name)
if l:
hub.dropAcceptor(l)
del l
| 2.34375
| 2
|
Merida/model.py
|
rahulmadanraju/Semantic-Search-Engine
| 2
|
12776763
|
from sentence_transformers import SentenceTransformer
from process import processing_combined
import pickle as pkl
# Corpus with example sentences
def model_transformer(query_data):
df_sentences_list, df = processing_combined(query_data)
embedder = SentenceTransformer('bert-base-nli-mean-tokens')
corpus = df_sentences_list
corpus_embeddings = embedder.encode(corpus,show_progress_bar = True)
filename = 'finalized_model.sav'
pkl.dump(corpus_embeddings, open(filename, 'wb'))
return embedder, corpus, df
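# --- Hedged usage sketch (assumed, not part of the original file) ---
# Shows how the returned embedder/corpus might answer a query with cosine similarity;
# the input path, query string, and top-k value are illustrative.
#
# import numpy as np
#
# embedder, corpus, df = model_transformer("query_data.csv")
# corpus_embeddings = pkl.load(open("finalized_model.sav", "rb"))
# query_embedding = embedder.encode(["example search query"])[0]
# scores = corpus_embeddings @ query_embedding / (
#     np.linalg.norm(corpus_embeddings, axis=1) * np.linalg.norm(query_embedding)
# )
# for idx in np.argsort(-scores)[:5]:
#     print(round(float(scores[idx]), 3), corpus[idx])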
| 2.5625
| 3
|
Amelie/views.py
|
HuMingqi/Amelie_S
| 0
|
12776764
|
from django.http import HttpResponse
from django.shortcuts import render_to_response
import json
from . import feature_vector
from . import dist
from . import top_k
import re
import codecs
#import sys
#import imp
# imp.reload(sys)
# sys.setdefaultencoding('utf-8')  # Python 3 does not have this method; its default encoding is already UTF-8
pl_path="D:/Clothes Search System/PL/"
kinds_dic={'0':"up_clothes",'1':"down_clothes",'2':"dress"}
def get_faq(request):
return render_to_response('faq.html', {})
def get_liscense(request):
return render_to_response('liscense.html', {})
def get_about(request):
return render_to_response('about.html', {})
def get_protocol(request):
return render_to_response('protocol.html', {})
def get_uploadImage(request):
return render_to_response('uploadImage.html', {})
def search_similar_images(request):
#print('method search_similar_images')
response_dict = {}
if request.method == 'POST':
clothes_kind = kinds_dic[request.POST["kind"]];
upload_image_path = save_file(request.FILES['upload_image'],clothes_kind) # save the uploaded image and return its path, UploadImages/
upload_image_feature_vector = feature_vector.feature_vector_of_image(upload_image_path) # feature extraction
distances = dist.dists(upload_image_feature_vector, pl_path+clothes_kind+'/'+clothes_kind+"_feature.txt")# json file; distances between the query image and every library image
k = 20 # return [(img_path,dists)...] img_path : .../kind/index
top_k_clothes = top_k.top_k_dists(distances, k) # return [(image_name)...]; the k closest images, img_name : i_j.jpg
image_size_file = open(pl_path+clothes_kind+'/'+clothes_kind+"_size.txt", 'r') # contains image width/height info
image_size_dict = json.loads(image_size_file.read()) # parse string -> dict
image_size_file.close()
clothes_info_file = open(pl_path+clothes_kind+'/'+clothes_kind+"_info.txt", 'r') # clothes info dictionary file
clothes_info = clothes_info_file.read()
clothes_info_file.close()
if clothes_info[:3] == codecs.BOM_UTF8:
clothes_info = clothes_info[3:] # all clothes info; strip the first three characters (UTF-8 BOM)
# clothes_info = clothes_info.encode('gbk')
# print clothes_info
similar_image_dict_list = []
similar_image_url_prefix = "http://172.16.58.3:8000/Images/"+clothes_kind+"/"
for image_name in top_k_clothes:
image_dict = {}
#image_name = image_path.split('/')[-1] # split out the image name; format is i_j.jpg, the j-th photo of the i-th garment
clothes_index = image_name.split('_')[0] # extract the first index i
similar_image_url = '%s%s' % (similar_image_url_prefix, image_name) # http://172.16.58.3:8000/Images/{kind}/image_name; only one example photo is given
similar_image_size = image_size_dict[image_name] # a list
image_dict['download_url'] = similar_image_url # download link for the image, hosted on this server
image_dict['width'] = similar_image_size[0] # [1:5] when the size has four digits
image_dict['height'] = similar_image_size[1] # [6:10]
info = getClotheInfo(clothes_index, clothes_info) # look up by index in the clothes info store (tuple)
image_dict['shopping_url'] = info[-1]
image_dict['other_info'] = '\n'.join(info[:-1])
# image_dict['shopping_url'] = get_shopping_url(clothes_info, clothes_index)
# image_dict['other_info'] = get_other_info(clothes_info, clothes_index)
# print image_dict['shopping_url']
# print image_dict['other_info']
# print clothes_index
similar_image_dict_list.append(image_dict) # append the image info dict to the response list
response_dict["status"] = 1
response_dict["data"] = similar_image_dict_list
return HttpResponse(json.dumps(response_dict)) # return the image metadata; the images themselves are fetched via the download links
def getClotheInfo(clothes_id, all_clothes_info):
regex_expression = r'"id":' + clothes_id +r'.*?"brand":"(.*?)".*?"productName":"(.*?)".*?"material":"(.*?)".*?"price":"(.*?)".*?"buyUrl":"(.*?)"'
pattern = re.compile(regex_expression)
match = pattern.search(all_clothes_info)
if match:
cinfo=list(match.groups()) #tuple can't be assigned!!!
cinfo[0]='品牌:' +cinfo[0]
cinfo[1]='品名:' +cinfo[1]
cinfo[2]='材质:' +cinfo[2]
cinfo[3]='价格:' +cinfo[3]
return cinfo # return the info tuple
else:
return ("Unknown", "Unknown", "Unknown", "Unknown", "http://item.jd.com/1547204870.html")
def save_file(file, clothes_kind): # save an uploaded file
''' Little helper to save a file
'''
filename = file._get_name()
# fd = open('%s/%s' % (MEDIA_ROOT, str(path) + str(filename)), 'wb')
#print(filename)
upload_image_path = pl_path+"upload_images/"+clothes_kind+"/"+str(filename)
fd = open(upload_image_path, 'wb')
for chunk in file.chunks():
fd.write(chunk)
fd.close()
return upload_image_path
# assert False
#TODO analyse image_name, get the type of the wanted image, and treat each type accordingly
def get_similar_image(request, clothes_kind, image_name): # image_name is a capture group from the request URL; returns the requested image
response_dict = {}
image_path = pl_path+clothes_kind+'/'+clothes_kind+'_src/'+ image_name
try:
image_data = open(image_path, 'rb').read() # read the image data
except Exception as e:
# raise e
print(e)
response_dict["status"] = 0
response_dict["data"] = "open image error"
return HttpResponse(json.dumps(response_dict))
# check image type
# image_type = image_name.split('.')[-1]
# print image_type
if image_name.endswith('jpeg') or image_name.endswith('jpg'):
return HttpResponse(image_data, content_type="image/jpeg")
else:
return HttpResponse(image_data, content_type="image/png")
'''
def get_clothes_info(path='D:\\clothes_info.txt'): #弃用
target = open(path, 'r')
clothes_info_str = target.read()
target.close()
clothes_info_dic = json.loads(clothes_info_str)
return clothes_info_dic
def get_shopping_url(clothes_info, clothes_index): #弃用
# regExp = r'\{.+\"id\":' + clothes_index + r',.+\"buyUrl\":\"(.+)\"\}'
regExp = r'\{[^\{\}]+\"id\":' + clothes_index + r',[^\{\}]+\"buyUrl\":\"([^\{\}]+)\"\}'
# print regExp
searchObj = re.search(regExp, clothes_info, re.I|re.M)
return searchObj.groups()[0];
def get_other_info(clothes_info, clothes_index): #弃用
regExp = r'\{[^\{\}]+\"id\":' + clothes_index + r',[^\{\}]+\"brand\":\"([^\{\}\"]+)\"[^\{\}]+\"productName\":\"([^\{\}\"]+)\"[^\{\}]+\"material\":\"([^\{\}\"]+)\"[^\{\}]+\"price\":\"([^\{\}\"]+)\"\}'
searchObj = re.search(regExp, clothes_info, re.I|re.M)
other_info_dict = {}
other_info_dict['brand'] = searchObj.groups()[0]
other_info_dict['productName'] = searchObj.groups()[1]
other_info_dict['material'] = searchObj.groups()[2]
other_info_dict['price'] = searchObj.groups()[3]
return other_info_dict;
if __name__ == '__main__': #编码
f = open('clothes_info_1000_utf8.txt')
all_clothes_info = f.read()
f.close()
if all_clothes_info[:3] == codecs.BOM_UTF8:
all_clothes_info = all_clothes_info[3:]
all_clothes_info = all_clothes_info.encode('gbk')
print(getClotheInfo('1', all_clothes_info))
print(getClotheInfo('20', all_clothes_info))
print(getClotheInfo('39', all_clothes_info))
'''
| 2.171875
| 2
|
day4.py
|
Camology/AdventOfCode2020
| 0
|
12776765
|
import re
input = open("inputs/day4.txt", "r")
#credit to themanush on r/adventofcode I was very confused how to nicely read this in
lines = [line.replace("\n", " ") for line in input.read().split("\n\n")]
#part1
requiredItems = ["byr","iyr","eyr","hgt","hcl","ecl","pid"]
acceptedPP = 0
for line in lines:
if all(item in line for item in requiredItems):
acceptedPP+=1
print(acceptedPP)
#part2
acceptedPP2 = 0
for line in lines:
if all(item in line for item in requiredItems):
print("Original:", line)
fields = []
birthYear = re.search("byr:\\d{4}",line)
if (int(birthYear.group()[4:]) >= 1920 and int(birthYear.group()[4:]) <= 2002):
fields.insert(0,birthYear.group())
print("birth year is good")
issueYear = re.search("iyr:\\d{4}",line)
if (int(issueYear.group()[4:]) >= 2010 and int(issueYear.group()[4:]) <= 2020):
fields.insert(0,issueYear.group())
print("issue year is good")
expYear = re.search("eyr:\\d{4}",line)
if (int(expYear.group()[4:]) >= 2020 and int(expYear.group()[4:]) <= 2030):
fields.insert(0,expYear.group())
print("exp year is good")
height = re.search("hgt:\\d{2,3}[a-z]{2}",line)
if height:
value = re.search("\\d{2,3}", height.group())
if value:
if height.group().find("cm") != -1:
if int(value.group()) >= 150 and int(value.group()) <= 193:
print("height is good")
fields.insert(0,height.group())
else:
if int(value.group()) >= 59 and int(value.group()) <= 76:
print("height is good")
fields.insert(0,height.group())
hairColor = re.search("hcl:#[a-f0-9]{6}", line)
if hairColor:
print("hair color is good")
fields.insert(0,hairColor.group())
eyeColor = re.search("ecl:(amb|blu|brn|gry|grn|hzl|oth)",line)
if eyeColor:
print("eye color is good")
fields.insert(0,eyeColor.group())
passportID = re.search("pid:\\d{9}",line)
if passportID:
print("ID is good")
fields.insert(0,passportID.group())
if len(fields) == 7:
print("Accepted:", line)
acceptedPP2+=1
print("------")
print(acceptedPP2)
| 3.78125
| 4
|
src/abaqus/StepOutput/OutputModel.py
|
Haiiliin/PyAbaqus
| 7
|
12776766
|
<filename>src/abaqus/StepOutput/OutputModel.py
import typing
from abaqusConstants import *
from .FieldOutputRequest import FieldOutputRequest
from .HistoryOutputRequest import HistoryOutputRequest
from .IntegratedOutputSection import IntegratedOutputSection
from .TimePoint import TimePoint
from ..Model.ModelBase import ModelBase
from ..Region.Region import Region
class OutputModel(ModelBase):
"""Abaqus creates a Model object named `Model-1` when a session is started.
Notes
-----
This object can be accessed by:
.. code-block:: python
mdb.models[name]
"""
def FieldOutputRequest(self, name: str, createStepName: str, region: SymbolicConstant = MODEL,
variables: SymbolicConstant = PRESELECT, frequency: SymbolicConstant = 1,
modes: SymbolicConstant = ALL,
timeInterval: typing.Union[SymbolicConstant, float] = EVERY_TIME_INCREMENT,
numIntervals: int = 20, timeMarks: Boolean = OFF, boltLoad: str = '',
sectionPoints: SymbolicConstant = DEFAULT, interactions: str = None,
rebar: SymbolicConstant = EXCLUDE, filter: SymbolicConstant = None,
directions: Boolean = ON, fasteners: str = '', assembledFastener: str = '',
assembledFastenerSet: str = '', exteriorOnly: Boolean = OFF, layupNames: str = '',
layupLocationMethod: str = SPECIFIED, outputAtPlyTop: Boolean = False,
outputAtPlyMid: Boolean = True, outputAtPlyBottom: Boolean = False,
position: SymbolicConstant = INTEGRATION_POINTS):
"""This method creates a FieldOutputRequest object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].FieldOutputRequest
Parameters
----------
name
A String specifying the repository key.
createStepName
A String specifying the name of the step in which the object is created.
region
The SymbolicConstant MODEL or a Region object specifying the region from which output is
requested. The SymbolicConstant MODEL represents the whole model. The default value is
MODEL.
variables
A sequence of Strings specifying output request variable or component names, or the
SymbolicConstant PRESELECT or ALL. PRESELECT represents all default output variables for
the given step. ALL represents all valid output variables. The default value is
PRESELECT.
frequency
The SymbolicConstant LAST_INCREMENT or an Int specifying the output frequency in
increments. The default value is 1.
modes
The SymbolicConstant ALL or a sequence of Ints specifying a list of eigenmodes for which
output is desired. The default value is ALL.
timeInterval
The SymbolicConstant EVERY_TIME_INCREMENT or a Float specifying the time interval at
which the output states are to be written. The default value is EVERY_TIME_INCREMENT.
numIntervals
An Int specifying the number of intervals during the step at which output database
states are to be written. The default value is 20.
timeMarks
A Boolean specifying when to write results to the output database. OFF indicates that
output is written immediately after the time dictated by the specified number of
intervals. ON indicates that output is written at the exact times dictated by the
specified number of intervals. The default value is OFF.
boltLoad
A String specifying a bolt load from which output is requested.
sectionPoints
The SymbolicConstant DEFAULT or a sequence of Ints specifying the section points for
which output is requested. The default value is DEFAULT.
interactions
None or a sequence of Strings specifying the interaction names. The default value is
None.The sequence can contain only one String.
rebar
A SymbolicConstant specifying whether output is requested for rebar. Possible values are
EXCLUDE, INCLUDE, and ONLY. The default value is EXCLUDE.
filter
The SymbolicConstant ANTIALIASING or a String specifying the name of an output filter
object. The default value is None.
directions
A Boolean specifying whether to output directions of the local material coordinate
system. The default value is ON.
fasteners
A String specifying the fastener name. The default value is an empty string.
assembledFastener
A String specifying the assembled fastener name. The default value is an empty string.
assembledFastenerSet
A String specifying the set name from the model referenced by the assembled fastener,
*assembledFastener*. The default value is an empty string.
exteriorOnly
A Boolean specifying whether the output domain is restricted to the exterior of the
model. This argument is only valid if *region*=MODEL. The default value is OFF.
layupNames
A List of Composite Layer Names.
layupLocationMethod
A Symbolic constant specifying the method used to indicate the output locations for
composite layups. Possible values are ALL_LOCATIONS, SPECIFIED and TYPED_IN. The default
value is SPECIFIED.
outputAtPlyTop
A Boolean specifying whether to output at the ply top section point. The default value
is False.
outputAtPlyMid
A Boolean specifying whether to output at the ply mid section point. The default value
is True.
outputAtPlyBottom
A Boolean specifying whether to output at the ply bottom section point. The default
value is False.
position
A SymbolicConstant specifying the position on an element where output needs to be
written. Possible values are INTEGRATION_POINTS, AVERAGED_AT_NODES, CENTROIDAL, and
NODES. The default value is INTEGRATION_POINTS.
Returns
-------
A FieldOutputRequest object.
"""
self.fieldOutputRequests[name] = FieldOutputRequest(name, createStepName, region, variables, frequency, modes,
timeInterval, numIntervals, timeMarks, boltLoad,
sectionPoints, interactions, rebar, filter, directions,
fasteners, assembledFastener, assembledFastenerSet,
exteriorOnly, layupNames, layupLocationMethod,
outputAtPlyTop, outputAtPlyMid, outputAtPlyBottom, position)
return self.fieldOutputRequests[name]
def HistoryOutputRequest(self, name: str, createStepName: str, region: SymbolicConstant = MODEL,
variables: SymbolicConstant = PRESELECT, frequency: SymbolicConstant = 1,
modes: SymbolicConstant = ALL,
timeInterval: typing.Union[SymbolicConstant, float] = EVERY_TIME_INCREMENT,
numIntervals: int = 20, boltLoad: str = '', sectionPoints: SymbolicConstant = DEFAULT,
stepName: str = '', interactions: str = None, contourIntegral: str = None,
numberOfContours: int = 0, stressInitializationStep: str = None,
contourType: SymbolicConstant = J_INTEGRAL, kFactorDirection: SymbolicConstant = MTS,
rebar: SymbolicConstant = EXCLUDE, integratedOutputSection: str = '',
springs: tuple = None, filter: SymbolicConstant = None, fasteners: str = '',
assembledFastener: str = '', assembledFastenerSet: str = '', sensor: Boolean = OFF,
useGlobal: Boolean = True):
"""This method creates a HistoryOutputRequest object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].HistoryOutputRequest
Parameters
----------
name
A String specifying the repository key.
createStepName
A String specifying the name of the step in which the object is created.
region
The SymbolicConstant MODEL or a Region object specifying the region from which output is
requested. The SymbolicConstant MODEL represents the whole model. The default value is
MODEL.If the region is a surface region, the surface must lie within the general contact
surface domain.
variables
A sequence of Strings specifying output request variable or component names, or the
SymbolicConstant PRESELECT or ALL. PRESELECT represents all default output variables for
the given step. ALL represents all valid output variables. The default value is
PRESELECT.
frequency
The SymbolicConstant LAST_INCREMENT or an Int specifying the output frequency in
increments. The default value is 1.
modes
The SymbolicConstant ALL or a sequence of Ints specifying a list of eigenmodes for which
output is desired. The default value is ALL.
timeInterval
The SymbolicConstant EVERY_TIME_INCREMENT or a Float specifying the time interval at
which the output states are to be written. The default value is EVERY_TIME_INCREMENT.
numIntervals
An Int specifying the number of intervals during the step at which output database
states are to be written. The default value is 20.
boltLoad
A String specifying a bolt load from which output is requested. The default value is an
empty string.
sectionPoints
The SymbolicConstant DEFAULT or a sequence of Ints specifying the section points for
which output is requested. The default value is DEFAULT.
stepName
A String specifying the name of the step. The default value is an empty string.
interactions
None or a sequence of Strings specifying the interaction names. The default value is
None.The sequence can contain only one String.
contourIntegral
A String specifying the contour integral name. The default value is None.
numberOfContours
An Int specifying the number of contour integrals to output for the contour integral
object. The default value is 0.
stressInitializationStep
A String specifying the name of the stress initialization step. The default value is
None.
contourType
A SymbolicConstant specifying the type of contour integral. Possible values are
J_INTEGRAL, C_INTEGRAL, T_STRESS, and K_FACTORS. The default value is J_INTEGRAL.
kFactorDirection
A SymbolicConstant specifying the stress intensity factor direction. Possible values are
MTS, MERR, and K110. The *kFactorDirection* argument is valid only if
*contourType*=K_FACTORS. The default value is MTS.
rebar
A SymbolicConstant specifying whether output is requested for rebar. Possible values are
EXCLUDE, INCLUDE, and ONLY. The default value is EXCLUDE.
integratedOutputSection
A String specifying the integrated output section. The default value is an empty string.
springs
A sequence of Strings specifying the springs/dashpots names. The default value is None.
The sequence can contain only one String.
filter
The SymbolicConstant ANTIALIASING or a String specifying the name of an output filter
object. The default value is None.
fasteners
A String specifying the fastener name. The default value is an empty string.
assembledFastener
A String specifying the assembled fastener name. The default value is an empty string.
assembledFastenerSet
A String specifying the set name from the model referenced by the assembled fastener,
*assembledFastener*. The default value is an empty string.
sensor
A Boolean specifying whether to associate the output request with a sensor definition.
The default value is OFF.
useGlobal
A Boolean specifying whether to output vector-valued nodal variables in the global
directions. The default value is True.
Returns
-------
A HistoryOutputRequest object.
"""
self.historyOutputRequests[name] = HistoryOutputRequest(name, createStepName, region, variables, frequency,
modes, timeInterval, numIntervals, boltLoad,
sectionPoints, stepName, interactions, contourIntegral,
numberOfContours, stressInitializationStep, contourType,
kFactorDirection, rebar, integratedOutputSection,
springs, filter, fasteners, assembledFastener,
assembledFastenerSet, sensor, useGlobal)
return self.historyOutputRequests[name]
def IntegratedOutputSection(self, name: str, surface: Region = Region(), refPoint: SymbolicConstant = None,
refPointAtCenter: Boolean = OFF, refPointMotion: SymbolicConstant = INDEPENDENT,
localCsys: str = None, projectOrientation: Boolean = OFF) -> IntegratedOutputSection:
"""This method creates an IntegratedOutputSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].IntegratedOutputSection
Parameters
----------
name
A String specifying the repository key.
surface
A Region object specifying the surface over which the output is based.
refPoint
None or a Region object specifying the anchor point about which the integrated moment
over the output region is computed or the SymbolicConstant None representing the global
origin. The default value is None.
refPointAtCenter
A Boolean specifying that the *refPoint* be adjusted so that it coincides with the
center of the output region in the initial configuration. This argument is valid only
when you include the *refPoint* argument. The default value is OFF.
refPointMotion
A SymbolicConstant specifying how to relate the motion of *refPoint* to the average
motion of the output region. A value of INDEPENDENT will allow the *refPoint* to move
independent of the output region. A value of AVERAGE_TRANSLATION will set the
displacement of the *refPoint* equal to the average translation of the output region. A
value of AVERAGE will set the displacement and rotation of the *refPoint* equal to the
average translation of the output region. The default value is INDEPENDENT.This argument
is valid only when you include the *refPoint* argument.
localCsys
None or a DatumCsys object specifying the local coordinate system used to express vector
output. If *localCsys*=None, the degrees of freedom are defined in the global coordinate
system. The default value is None.
projectOrientation
A Boolean specifying that the coordinate system be projected onto the *surface* such
that the 1–axis is normal to the *surface*. Projection onto a planar *surface* is such
that the 1-axis is normal to the surface, and a projection onto a nonplanar *surface* is
such that a least-squares fit surface will be used. The default value is OFF.
Returns
-------
An IntegratedOutputSection object.
"""
self.integratedOutputSections[name] = integratedOutputSection = IntegratedOutputSection(
name, surface, refPoint, refPointAtCenter, refPointMotion, localCsys, projectOrientation)
return integratedOutputSection
def TimePoint(self, name: str, points: tuple) -> TimePoint:
"""This method creates a TimePoint object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].TimePoint
Parameters
----------
name
A String specifying the repository key.
points
A sequence of sequences of Floats specifying time points at which data are written to
the output database or restart files.
Returns
-------
A TimePoint object.
Raises
------
InvalidNameError
RangeError
"""
self.timePoints[name] = timePoint = TimePoint(name, points)
return timePoint
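# --- Hedged usage sketch (assumed model/step names, not part of the original file) ---
# Typical Abaqus/CAE scripting calls for the methods defined above:
#
# mdb.models['Model-1'].FieldOutputRequest(
#     name='F-Output-1', createStepName='Step-1', variables=('S', 'U'))
# mdb.models['Model-1'].HistoryOutputRequest(
#     name='H-Output-1', createStepName='Step-1', variables=PRESELECT)
# mdb.models['Model-1'].TimePoint(name='TimePoints-1', points=((0.1,), (0.5,), (1.0,)))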
| 2.359375
| 2
|
sac/sac.py
|
iarhbahsir/rl-algorithms
| 0
|
12776767
|
import random
import numpy as np
import matplotlib.pyplot as plt
from torch import tensor
from torch import cat
from torch import clamp
from torch.distributions import normal
from torch import nn
import torch.nn.functional as F
from torch import optim
from torch.utils.tensorboard import SummaryWriter
import torch
import os
print(os.environ)
import roboschool
import gym
model_name = "SAC-RoboschoolHopper-v1"
num_iterations = 3000000
learning_rate = 0.0003
discount_rate = 0.99
replay_buffer_max_size = 1000000
target_smoothing_coefficient = 0.0005
target_update_interval = 1
num_gradient_steps = 1
num_env_steps = 1
reward_scale = 5
minibatch_size = 256
writer = SummaryWriter(log_dir="./runs/v0-1mil-iter-256-node-hidden-layers-buffer-1mil")
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
cpu_device = torch.device("cpu")
# define actor network
class SACRoboschoolHopperActorNN(nn.Module):
def __init__(self):
super(SACRoboschoolHopperActorNN, self).__init__()
self.fc1 = nn.Linear(15, 256)
self.fc2 = nn.Linear(256, 256)
self.mean = nn.Linear(256, 3)
self.log_stdev = nn.Linear(256, 3)
self.normal_dist = normal.Normal(0, 1)
def forward(self, x_state):
x_state = F.relu(self.fc1(x_state))
x_state = F.relu(self.fc2(x_state))
mean = self.mean(x_state)
log_stdev = self.log_stdev(x_state)
action = mean + self.normal_dist.sample(sample_shape=log_stdev.shape) * torch.exp(log_stdev)
squashed_action = torch.tanh(action)
action_dist = normal.Normal(mean, torch.exp(log_stdev))
log_prob_squashed_a = action_dist.log_prob(action) - torch.sum(torch.log(clamp(1 - squashed_action**2, min=1e-8)), dim=1) # TODO check dims
return action, log_prob_squashed_a
# define critic network
class SACRoboschoolHopperCriticNN(nn.Module):
def __init__(self):
super(SACRoboschoolHopperCriticNN, self).__init__()
self.fc1 = nn.Linear(18, 100)
self.fc2 = nn.Linear(100, 100)
self.fc3 = nn.Linear(100, 3)
def forward(self, x_state, x_action):
x = cat((x_state, x_action), dim=1) # concatenate inputs along 0th dimension
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
# define soft state value network
class SACRoboschoolHopperStateValueNN(nn.Module):
def __init__(self):
super(SACRoboschoolHopperStateValueNN, self).__init__()
self.fc1 = nn.Linear(15, 100)
self.fc2 = nn.Linear(100, 100)
self.fc3 = nn.Linear(100, 1)
def forward(self, x_state):
x = F.relu(self.fc1(x_state))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
# Initialize parameter vectors ψ, ψ¯, θ, φ.
state_value_net = SACRoboschoolHopperStateValueNN().to(device)
state_value_target_net = SACRoboschoolHopperStateValueNN().to(device)
critic_net_1 = SACRoboschoolHopperCriticNN().to(device)
critic_net_2 = SACRoboschoolHopperCriticNN().to(device)
actor_net = SACRoboschoolHopperActorNN().to(device)
# make the state value target net parameters the same
state_value_target_net.load_state_dict(state_value_net.state_dict())
# initialize replay buffer D
replay_buffer = []
# initialize train and test environments
env = gym.make('RoboschoolHopper-v1')
curr_state = env.reset()
curr_state = tensor(curr_state).float().to(device)
test_env = gym.make('RoboschoolHopper-v1')
curr_test_state = test_env.reset()
greatest_avg_episode_rewards = -np.inf
# initialize optimizers for each network except target (parameters updated manually)
state_value_net_optimizer = optim.Adam(state_value_net.parameters(), lr=learning_rate)
critic_net_1_optimizer = optim.Adam(critic_net_1.parameters(), lr=learning_rate)
critic_net_2_optimizer = optim.Adam(critic_net_2.parameters(), lr=learning_rate)
actor_net_optimizer = optim.Adam(actor_net.parameters(), lr=learning_rate)
# for each iteration do
for t in range(num_iterations):
# for each environment step do
# (in practice, at most one env step per gradient step)
# at ∼ πφ(at|st)
action, log_prob = actor_net(curr_state.view(1, -1).float())
action = action.detach().to(cpu_device).numpy().squeeze()
log_prob = log_prob.detach()
# st+1 ∼ p(st+1|st, at)
next_state, reward, done, _ = env.step(action)
reward = reward * reward_scale
# D ← D ∪ {(st, at, r(st, at), st+1)}
replay_buffer.append((curr_state.view(1, -1, ), tensor(action).to(device).view(1, -1, ), log_prob.to(device).view(1, -1, ),
tensor(reward).float().to(device).view(1, 1, ), tensor(next_state).to(device).view(1, -1, ),
tensor(done).to(device).view(1, 1, )))
if len(replay_buffer) > replay_buffer_max_size + 10:
replay_buffer = replay_buffer[10:]
# for each gradient step do
for gradient_step in range(num_gradient_steps):
# Sample mini-batch of N transitions (s, a, r, s') from D
transitions_minibatch = random.choices(replay_buffer, k=minibatch_size)
minibatch_states, minibatch_actions, minibatch_action_log_probs, minibatch_rewards, minibatch_next_states, minibatch_dones = [cat(mb, dim=0) for mb in zip(*transitions_minibatch)]
minibatch_states = minibatch_states.float()
# ψ ← ψ − λV ∇ˆψJV (ψ)
state_value_net.zero_grad()
# state_value_error = torch.mean(0.5 * torch.mean(state_value_net(minibatch_states) - torch.mean(torch.min(critic_net_1(minibatch_states, minibatch_actions),critic_net_2(minibatch_states, minibatch_actions)) - torch.log(actor_net(minibatch_states)))) ** 2) # TODO fix?
new_actions, new_log_probs = actor_net(minibatch_states)
state_value_net_loss = torch.mean(0.5 * (state_value_net(minibatch_states) - (torch.min(critic_net_1(minibatch_states, new_actions), critic_net_2(minibatch_states, new_actions)) - new_log_probs)) ** 2) # TODO fix?
state_value_net_loss.backward()
state_value_net_optimizer.step()
writer.add_scalar('Loss/state_value_net', state_value_net_loss.detach().to(cpu_device).numpy().squeeze(), t)
# θi ← θi − λQ∇ˆθiJQ(θi) for i ∈ {1, 2}
critic_net_1.zero_grad()
critic_net_1_loss = torch.mean(0.5 * (critic_net_1(minibatch_states, minibatch_actions) - (minibatch_rewards + discount_rate*state_value_target_net(minibatch_next_states)*(-minibatch_dones.float() + 1))) ** 2)
critic_net_1_loss.backward()
critic_net_1_optimizer.step()
writer.add_scalar('Loss/critic_net_1', critic_net_1_loss.detach().to(cpu_device).numpy().squeeze(), t)
critic_net_2.zero_grad()
critic_net_2_loss = torch.mean(0.5 * (critic_net_2(minibatch_states, minibatch_actions) - (minibatch_rewards + discount_rate * state_value_target_net(minibatch_next_states)*(-minibatch_dones.float() + 1))) ** 2)
critic_net_2_loss.backward()
critic_net_2_optimizer.step()
writer.add_scalar('Loss/critic_net_2', critic_net_2_loss.detach().to(cpu_device).numpy().squeeze(), t)
# φ ← φ − λπ∇ˆφJπ(φ)
actor_net.zero_grad()
minibatch_actions_new, minibatch_action_log_probs_new = actor_net(minibatch_states)
actor_net_loss = torch.mean(minibatch_action_log_probs_new - torch.min(critic_net_1(minibatch_states, minibatch_actions_new), critic_net_2(minibatch_states, minibatch_actions_new))) # TODO fix?
actor_net_loss.backward()
actor_net_optimizer.step()
writer.add_scalar('Loss/actor_net', actor_net_loss.detach().to(cpu_device).numpy().squeeze(), t)
# print(actor_net_loss.grad_fn())
# ψ¯ ← τψ + (1 − τ )ψ¯
for state_value_target_net_parameter, state_value_net_parameter in zip(state_value_target_net.parameters(), state_value_net.parameters()):
state_value_target_net_parameter.data = target_smoothing_coefficient*state_value_net_parameter + (1 - target_smoothing_coefficient)*state_value_target_net_parameter
# end for
if t % (num_iterations // 1000) == 0 or t == num_iterations - 1:
print("iter", t)
torch.save(state_value_net.state_dict(), 'models/current/' + model_name + '-state_value_net.pkl')
torch.save(state_value_target_net.state_dict(), 'models/current/' + model_name + '-state_value_target_net.pkl')
torch.save(critic_net_1.state_dict(), 'models/current/' + model_name + '-critic_net_1.pkl')
torch.save(critic_net_2.state_dict(), 'models/current/' + model_name + '-critic_net_2.pkl')
torch.save(actor_net.state_dict(), 'models/current/' + model_name + '-actor_net.pkl')
if not done:
curr_state = tensor(next_state).float().to(device)
else:
curr_state = env.reset()
curr_state = tensor(curr_state).float().to(device)
if t % (num_iterations // 25) == 0 or t == num_iterations - 1:
render = False
num_eval_episodes = 10
test_obs = test_env.reset()
episode_rewards = []
episode_reward = 0
while len(episode_rewards) < num_eval_episodes:
test_action, _ = actor_net(tensor(test_obs).float().to(device).view(1, -1))
test_action = test_action.detach().to(cpu_device).numpy().squeeze()
test_obs, test_reward, test_done, _ = test_env.step(test_action)
episode_reward += test_reward
if test_done:
episode_rewards.append(episode_reward)
episode_reward = 0
test_obs = test_env.reset()
if render:
test_env.render()
avg_episode_rewards = np.mean(np.asarray(episode_rewards))
writer.add_scalar('Reward/test', avg_episode_rewards, t)
if avg_episode_rewards > greatest_avg_episode_rewards:
torch.save(actor_net.state_dict(), 'models/current/best/best-' + model_name + '-actor_net.pkl')
# end for
render = True
num_eval_episodes = 10
obs = env.reset()
episode_rewards = []
episode_reward = 0
while len(episode_rewards) < num_eval_episodes:
action, _ = actor_net(tensor(obs).float().to(device).view(1, -1))
action = action.detach().to(cpu_device).numpy().squeeze()
obs, reward, done, _ = env.step(action)
episode_reward += reward
if done:
episode_rewards.append(episode_reward)
episode_reward = 0
obs = env.reset()
if render:
env.render()
episode_rewards = np.asarray(episode_rewards)
episode_length_histogram = plt.hist(episode_rewards)
plt.title("Episode Rewards")
plt.xlabel("Total Reward")
plt.ylabel("Frequency")
plt.savefig("episode_rewards_hist.png")
plt.savefig("models/current/episode_rewards_hist.png")
print("Mean total episode reward:", np.mean(episode_rewards))
| 2.46875
| 2
|
manage.py
|
dgkilolo/Blog
| 0
|
12776768
|
<gh_stars>0
from app import create_app,db
from flask_script import Manager,Server
from app.models import Quotes, Writer, Posts, Comments
from flask_migrate import Migrate, MigrateCommand
# Creating app instance
app = create_app('production')
manager = Manager(app)
migrate = Migrate(app,db)
manager.add_command('server',Server)
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Run the Unit Test"""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.shell
def make_shell_context():
return dict(app = app, db = db, Quotes=Quotes, Writer=Writer, Posts=Posts, Comments=Comments )
if __name__ == '__main__':
manager.run()
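# Usage sketch (illustrative; not part of the original file). With Flask-Script and
# Flask-Migrate wired up as above, the registered commands are invoked from a shell:
#
#   python manage.py server        # start the development server
#   python manage.py test          # run the unit tests discovered under tests/
#   python manage.py db init       # create the migrations folder (first run only)
#   python manage.py db migrate    # generate a migration from model changes
#   python manage.py db upgrade    # apply pending migrations
#   python manage.py shell         # open a shell with app, db and the models preloaded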
| 2.4375
| 2
|
src/devicegroup.py
|
nlitz88/ipmifan
| 0
|
12776769
|
class DeviceGroup:
def __init__(self, name):
self.name = name
self.devices = []
def addDevice(self, newDevice):
self.devices.append(newDevice)
# Just thinking through how I want the program to work.
# A diskgroup should be initialized once every time the service is started. This way it doesn't have to keep
# reading from the json file.
# If the user adds/modifies disks.json, the simple way to update the ipmifan instance would be to just restart
# the systemd service.
# Diskgroups will basically be defined in the disks.json file. A diskgroup json object will be created, and all
# of its child disks will be defined under it.
# Will need a constructor function that goes through the disks.json file to extract all disks from their groups and add them to an instance.
# OR, could define a static factory method that reads through the file and returns an array of DiskGroup objects.
# Maybe a factory method that grabs the json, and for each json diskgroup object defined, create a new diskgroup object,
# this constructor would just accept the json, and these instances would just read the serial numbers from the json objects,
# rather than using raw data structures to avoid complexity.
# Would it be practical to instead generalize this?
# I.e., make a devices.json file where all devices are defined.
# A disk group is defined with temperature thresholds, etc.
# Here's another idea:
# While I think that recording temperature data should be easy enough, it would be nice to have these endpoints made available to me in
# case I'd ever want to record them in something like prometheus. Soooo, while I could build out a little flask application that would serve up temperature data,
# and build adapters for different data providers (that work via the command line), might just be easier to set that up with netdata.
# THEN, just build out a fan controller script that uses that data to control the fans.
# The only reason that this is a little scary is because it depends on a large application like netdata running. Now granted, if I'm building up my own
# shaky service, you could make that same argument, but the controller script could always just default to MAX fan values if it can't contact its data provider.
# Maybe that's what I can do: I can build out a data provider that collects data for all of the devices that you want to keep track of. So, here's how it's laid out:
# Data Provider Microservice
# - Data provider flask application that serves up json sensor data for various devices.
# - Data provider will have no knowledge of the actual machine it's on, it'll just execute commands to get sensor data.
# - The Data provider will NOT have hard-coded cli commands to get sensor data, but will rather have various PROVIDER-ADAPTERS (json files)
# that will specify a command to be run in order to get the type of data that they're meant for.
# - In a more advanced application, they could also provide meta-data about the data they're returning. These could be interpreted by the controller/receiver
# based on how they're being used.
# This way, when the controller script requests data, it will send the provider service a json request that will specify the type of provider to use.
# This endpoint will then grab the command from the corresponding provider-adapter specified in the GET request and will return the data provided by the specified command.
# - In a more (advanced) netdata-style implementation, it would also be cool to have an endpoint that enables tracking of certain devices (those that a consumer program like ipmifan requests), storing their
# data in something like a mongo database. This way, there would be more uniform access to both current and past readings.
# ------------------------------------
# OR, MOST SIMPLY: DataProviderSource object entries added to a "source.json" file. Basically, these entries would just maintain the commands needed to return the desired data.
# Then, the controller just has to hit a generic endpoint on the provider where they specify the name of the Source entry, and they get the data from the commands of that entry back.
# OR, the controller requests data (with the commands as the GET json), and then the provider returns the results. Again, not sure which is better.
# ------------------------------------
# THIS COULD ALSO BE IMPLEMENTED AS AN MQTT application where these values are published on topics. OR some data-providers could just constantly broadcast their responses over mqtt as well.
# IPMI-FAN controller Microservice
# - This service will just implement the logic outlined in my notebook regarding how to set fan speeds, but ultimately at that point will just be
# a primitive consumer of the data from the Data Provider Microservice.
# - This could just be one of many services that get their data from the provider microservice.
# - Long term, it would be nice to implement modules that define the same functions for retrieving the sensor data that they need, but just from different sources.
# - In other words, define a "DataSource" interface that is unique to this controller application that requires the classes to implement methods for retrieving
# (in the same format) hdd temps, cpu temps, ambient temps, etc., etc.
# - Based on some configurable value (maybe in a yaml file along with other options), this controller can just instantiate a data-provider according to where
# it's getting its data from.
#
# - Additionally, this program will have a devices.json file that specifies all of the different disk groups, cpus, sensors, etc. that the user wishes to monitor
# temperatures of--in this case to control fan speeds.
# device_types.json will contain various types of devices and the general form of the commands needed to extract the data from them.
# Will also include a device type "custom" that will accept custom commands in case a device requires specialized commands due to some issue.
# I may also include
# devices.json will contain actual device definitions that allow the user to specify the actual devices (and any relevant details needed).
# Upon initialization, the data provider will take the devices specified in devices.json and load them into a dictionary.
# Subsequently, it will match the serial numbers of the drives to their current /dev/ location and store that in a new dictionary field it creates.
# Some vocabulary for the system:
# device_group: A device group defines the group of devices whose temperatures are all factored in to calculate the overall temperature / state of that group.
# - This is useful, for instance, for a group of hard drives, as you might not want to just look at the temperature of each drive all the time,
# (assuming no extreme conditions for one device) but rather just the overall state of that group.
#
# zone: A zone is what device_groups and devices are assigned to that fans control the temperature of. Fans, for instance, are assigned to a particular zone.
# The fans that are assigned to a particular zone then are controlled according to the temperatures/states of the device_groups that have been
# assigned to that zone.
# Zones exist because, while you could assign fans to multiple device groups, you don't necessarily want the desired fan speed required for one
# device group (say CPUs) to fight over the fan control.
# While I could implement some logic to just take the highest fan speed calculated across the device_groups, I think it would be cleaner to loop through
# zones and set the fans in that zone.
# Still more efficient though to calculate all of the device_groups temps/state all in a row, as if you do it by zone, in theory a device_group could
# be assigned to multiple zones, so you'd be calculating it unnecessarily more than once.
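# Illustrative sketch only (not part of the original design notes): one possible shape for
# the devices.json / zones configuration described above, plus a minimal Zone class that
# aggregates DeviceGroup temperatures. All names and fields here are assumptions made for
# illustration, not an implemented ipmifan API.
#
# devices.json (hypothetical):
# {
#   "device_groups": [
#     {"name": "hdd_pool", "type": "disk", "serials": ["WD-1234", "WD-5678"],
#      "target_temp_c": 38, "max_temp_c": 45},
#     {"name": "cpus", "type": "cpu", "sensors": ["coretemp-isa-0000"],
#      "target_temp_c": 60, "max_temp_c": 80}
#   ],
#   "zones": [
#     {"id": 0, "device_groups": ["cpus"]},
#     {"id": 1, "device_groups": ["hdd_pool"]}
#   ]
# }


class Zone:
    """Hypothetical zone: a set of fans cools it, and it watches one or more device groups."""

    def __init__(self, zone_id, device_groups):
        self.zone_id = zone_id
        self.device_groups = device_groups  # list of DeviceGroup instances (defined above)

    def hottest_group_temp(self, read_temp):
        # read_temp is a callable(device) -> float supplied by whichever data provider is
        # in use; the zone only cares about the worst-case group temperature when deciding
        # what fan speed to request.
        group_max_temps = []
        for group in self.device_groups:
            temps = [read_temp(device) for device in group.devices]
            if temps:
                group_max_temps.append(max(temps))
        return max(group_max_temps) if group_max_temps else None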
| 3.53125
| 4
|
tests/test_repo/test_estacionamento_crud_repo/base.py
|
BoaVaga/boavaga_server
| 0
|
12776770
|
<reponame>BoaVaga/boavaga_server
import pathlib
import unittest
from unittest.mock import Mock
from src.container import create_container
from src.enums import UploadStatus
from src.classes import MemoryFileStream
from src.models import AdminSistema, AdminEstacio, Estacionamento, Veiculo, Upload
from src.repo import EstacionamentoCrudRepo
from tests.factories import set_session, EstacionamentoFactory, VeiculoFactory, UploadFactory
from tests.utils import make_engine, make_general_db_setup, make_savepoint, get_adm_sistema, get_adm_estacio, \
general_db_teardown, singleton_provider
_DIAS = ('segunda', 'terca', 'quarta', 'quinta', 'sexta', 'sabado', 'domingo')
class BaseTestEstacioCrudRepo(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
config_path = str(pathlib.Path(__file__).parents[3] / 'test.ini')
cls.container = create_container(config_path)
conn_string = str(cls.container.config.get('db')['conn_string'])
cls.engine = make_engine(conn_string)
@classmethod
def tearDownClass(cls) -> None:
cls.engine.dispose()
def setUp(self) -> None:
self.maxDiff = 3000
self.crypto = self.container.crypto()
self.conn, self.outer_trans, self.session = make_general_db_setup(self.engine)
set_session(self.session) # Factories
self.estacios = EstacionamentoFactory.create_batch(10, cadastro_terminado=False)
self.veiculos = VeiculoFactory.create_batch(10)
self.session.commit()
make_savepoint(self.conn, self.session)
self.adm_sis, self.adm_sis_sess = get_adm_sistema(self.crypto, self.session)
self.adm_estacio, self.adm_estacio_sess = get_adm_estacio(self.crypto, self.session)
self.adm_estacio.estacionamento = self.estacios[0]
self.adm_estacio.admin_mestre = True
self.adm_estacio_edit, self.adm_estacio_edit_sess = get_adm_estacio(self.crypto, self.session, n=9854)
self.adm_estacio_edit.estacionamento = self.estacios[1]
self.adm_estacio_edit.admin_mestre = False
self.estacios[1].cadastro_terminado = True
self.base_upload = UploadFactory(sub_dir='foto_estacio', status=UploadStatus.CONCLUIDO)
self.uploader = Mock()
self.container.uploader.override(singleton_provider(self.uploader))
self.image_processor = Mock()
self.container.image_processor.override(singleton_provider(self.image_processor))
self.ret_fstream = Mock()
self.image_processor.compress.return_value = self.ret_fstream
self.image_processor.get_default_image_format.return_value = 'png'
self.uploader.upload.return_value = self.base_upload
self.file_data = b'abc'
self.fstream = MemoryFileStream(self.file_data)
cfg = self.container.config.get('pedido_cadastro')
width, height = int(cfg['width_foto']), int(cfg['height_foto'])
self.repo = EstacionamentoCrudRepo(width, height, uploader=self.uploader, image_proc=self.image_processor)
def tearDown(self) -> None:
general_db_teardown(self.conn, self.outer_trans, self.session)
def test_setup(self):
admin_sis = self.session.query(AdminSistema).all()
admin_estacio = self.session.query(AdminEstacio).all()
estacios = self.session.query(Estacionamento).all()
veiculos = self.session.query(Veiculo).all()
self.assertEqual([self.adm_sis], admin_sis)
self.assertIn(self.adm_estacio, admin_estacio)
self.assertEqual(self.estacios, estacios)
self.assertEqual(self.veiculos, veiculos)
@staticmethod
def copy_upload(base_upload: Upload) -> Upload:
return Upload(id=int(base_upload.id), nome_arquivo=base_upload.nome_arquivo,
sub_dir=str(base_upload.sub_dir), status=UploadStatus(base_upload.status.value))
if __name__ == '__main__':
unittest.main()
| 2.03125
| 2
|
youtube_dl/downloader/external.py
|
builder07/ytdl
| 5
|
12776771
|
<gh_stars>1-10
from __future__ import unicode_literals
import os.path
import subprocess
from .common import FileDownloader
from ..utils import (
cli_option,
cli_valueless_option,
cli_bool_option,
cli_configuration_args,
encodeFilename,
encodeArgument,
)
class ExternalFD(FileDownloader):
def real_download(self, filename, info_dict):
self.report_destination(filename)
tmpfilename = self.temp_name(filename)
retval = self._call_downloader(tmpfilename, info_dict)
if retval == 0:
fsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen('\r[%s] Downloaded %s bytes' % (self.get_basename(), fsize))
self.try_rename(tmpfilename, filename)
self._hook_progress({
'downloaded_bytes': fsize,
'total_bytes': fsize,
'filename': filename,
'status': 'finished',
})
return True
else:
self.to_stderr('\n')
self.report_error('%s exited with code %d' % (
self.get_basename(), retval))
return False
@classmethod
def get_basename(cls):
return cls.__name__[:-2].lower()
@property
def exe(self):
return self.params.get('external_downloader')
@classmethod
def supports(cls, info_dict):
return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps')
def _option(self, command_option, param):
return cli_option(self.params, command_option, param)
def _bool_option(self, command_option, param, true_value='true', false_value='false', separator=None):
return cli_bool_option(self.params, command_option, param, true_value, false_value, separator)
def _valueless_option(self, command_option, param, expected_value=True):
return cli_valueless_option(self.params, command_option, param, expected_value)
def _configuration_args(self, default=[]):
return cli_configuration_args(self.params, 'external_downloader_args', default)
def _call_downloader(self, tmpfilename, info_dict):
""" Either overwrite this or implement _make_cmd """
cmd = [encodeArgument(a) for a in self._make_cmd(tmpfilename, info_dict)]
self._debug_cmd(cmd)
p = subprocess.Popen(
cmd, stderr=subprocess.PIPE)
_, stderr = p.communicate()
if p.returncode != 0:
self.to_stderr(stderr)
return p.returncode
class CurlFD(ExternalFD):
def _make_cmd(self, tmpfilename, info_dict):
cmd = [self.exe, '--location', '-o', tmpfilename]
for key, val in info_dict['http_headers'].items():
cmd += ['--header', '%s: %s' % (key, val)]
cmd += self._option('--interface', 'source_address')
cmd += self._option('--proxy', 'proxy')
cmd += self._valueless_option('--insecure', 'nocheckcertificate')
cmd += self._configuration_args()
cmd += ['--', info_dict['url']]
return cmd
class AxelFD(ExternalFD):
def _make_cmd(self, tmpfilename, info_dict):
cmd = [self.exe, '-o', tmpfilename]
for key, val in info_dict['http_headers'].items():
cmd += ['-H', '%s: %s' % (key, val)]
cmd += self._configuration_args()
cmd += ['--', info_dict['url']]
return cmd
class WgetFD(ExternalFD):
def _make_cmd(self, tmpfilename, info_dict):
cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies']
for key, val in info_dict['http_headers'].items():
cmd += ['--header', '%s: %s' % (key, val)]
cmd += self._option('--bind-address', 'source_address')
cmd += self._option('--proxy', 'proxy')
cmd += self._valueless_option('--no-check-certificate', 'nocheckcertificate')
cmd += self._configuration_args()
cmd += ['--', info_dict['url']]
return cmd
class Aria2cFD(ExternalFD):
def _make_cmd(self, tmpfilename, info_dict):
cmd = [self.exe, '-c']
cmd += self._configuration_args([
'--min-split-size', '1M', '--max-connection-per-server', '4'])
dn = os.path.dirname(tmpfilename)
if dn:
cmd += ['--dir', dn]
cmd += ['--out', os.path.basename(tmpfilename)]
for key, val in info_dict['http_headers'].items():
cmd += ['--header', '%s: %s' % (key, val)]
cmd += self._option('--interface', 'source_address')
cmd += self._option('--all-proxy', 'proxy')
cmd += self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
cmd += ['--', info_dict['url']]
return cmd
class HttpieFD(ExternalFD):
def _make_cmd(self, tmpfilename, info_dict):
cmd = ['http', '--download', '--output', tmpfilename, info_dict['url']]
for key, val in info_dict['http_headers'].items():
cmd += ['%s:%s' % (key, val)]
return cmd
_BY_NAME = dict(
(klass.get_basename(), klass)
for name, klass in globals().items()
if name.endswith('FD') and name != 'ExternalFD'
)
def list_external_downloaders():
return sorted(_BY_NAME.keys())
def get_external_downloader(external_downloader):
""" Given the name of the executable, see whether we support the given
downloader . """
# Drop .exe extension on Windows
bn = os.path.splitext(os.path.basename(external_downloader))[0]
return _BY_NAME[bn]
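# Usage sketch (illustrative only; not part of the original module): resolving a downloader
# class by executable name with the helpers defined above. The params/YoutubeDL wiring
# needed to actually perform a download is omitted.
#
#   >>> list_external_downloaders()
#   ['aria2c', 'axel', 'curl', 'httpie', 'wget']
#   >>> get_external_downloader('/usr/bin/aria2c')   # a Windows .exe suffix is also stripped
#   <class '...external.Aria2cFD'>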
| 2.34375
| 2
|
ormar/queryset/__init__.py
|
smorokin/ormar
| 0
|
12776772
|
from ormar.queryset.filter_query import FilterQuery
from ormar.queryset.limit_query import LimitQuery
from ormar.queryset.offset_query import OffsetQuery
from ormar.queryset.order_query import OrderQuery
from ormar.queryset.queryset import QuerySet
__all__ = ["QuerySet", "FilterQuery", "LimitQuery", "OffsetQuery", "OrderQuery"]
| 1.34375
| 1
|
unimport/statement.py
|
abdulniyaspm/unimport
| 1
|
12776773
|
import operator
from typing import List, NamedTuple, Union
class Import(NamedTuple):
lineno: int
column: int
name: str
package: str
def __len__(self) -> int:
return operator.length_hint(self.name.split("."))
class ImportFrom(NamedTuple):
lineno: int
column: int
name: str
package: str
star: bool
suggestions: List[str]
def __len__(self) -> int:
return operator.length_hint(self.name.split("."))
class Name(NamedTuple):
lineno: int
name: str
def match(self, imp: Union[Import, ImportFrom]) -> bool:
return (
imp.lineno < self.lineno
and ".".join(self.name.split(".")[: len(imp)]) == imp.name
)
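# Illustrative example (not part of the original module) of how Name.match pairs a usage
# with an earlier import: the import must appear on an earlier line and its dotted name
# must equal the used name truncated to the import's length.
#
#   >>> imp = Import(lineno=1, column=0, name="os.path", package="os")
#   >>> Name(lineno=5, name="os.path.join").match(imp)
#   True
#   >>> Name(lineno=5, name="sys.path").match(imp)
#   False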
| 3.125
| 3
|
examples/other/export_x3d.py
|
evanphilip/vedo
| 0
|
12776774
|
"""Embed a 3D scene
in a webpage with x3d"""
from vedo import dataurl, Plotter, Volume, Text3D
plt = Plotter(size=(800,600), bg='GhostWhite')
embryo = Volume(dataurl+'embryo.tif').isosurface().decimate(0.5)
coords = embryo.points()
embryo.cmap('PRGn', coords[:,1]) # add dummy colors along y
txt = Text3D(__doc__, font='Bongas', s=350, c='red2', depth=0.05)
txt.pos(2500, 300, 500)
plt.show(embryo, txt, txt.box(pad=250), axes=1, viewup='z', zoom=1.2)
# This exports the scene and generates 2 files:
# embryo.x3d and an example embryo.html to inspect in the browser
plt.export('embryo.x3d', binary=False)
print("Type: \n firefox embryo.html")
| 2.984375
| 3
|
project/apps/keller/inlines.py
|
barberscore/archive-api
| 0
|
12776775
|
<reponame>barberscore/archive-api<gh_stars>0
# Django
from django.contrib import admin
# Local
from .models import Flat
class FlatInline(admin.TabularInline):
model = Flat
fields = [
'selection',
'complete',
'score',
]
extra = 0
show_change_link = True
classes = [
'collapse',
]
raw_id_fields = [
'selection',
'complete',
'score',
]
| 1.625
| 2
|
cluster_2.py
|
plrlhb12/my_scanpy_modules
| 0
|
12776776
|
import h5py
import os
import argparse
import numpy as np
import pandas as pd
import scanpy as sc
def cluster(args):
"""
Clustering cells after computing PCA and neighborhood distances.
"""
input = args.input
out = args.out
dpi = args.dpi
figsize = args.figsize
figure_type = args.figure_type
show = args.show
project = args.project if (args.project == "") else ("_" + args.project)
resolution = args.resolution
n_neighbors = args.n_neighbors
n_pcs = args.n_pcs
#method = args.method
#metric = args.metric
color_gene = args.color_gene
key_added = args.key_added
# set scanpy parameters
sc.settings.verbosity = 3 # verbosity: errors (0), warnings (1), info (2), hints (3)
# in scanpy version 1.6.1 tutorial: sc.logging.print_header()
sc.logging.print_version_and_date()
sc.logging.print_versions()
# default figsize=None, means it doesn't change the seaborn defined default parameters
sc.settings.set_figure_params(dpi=dpi, facecolor='white', figsize=figsize)
adata = sc.read_h5ad(input)
### Computing, embedding, and clustering the neighborhood graph
# defaults are: n_neighbors= 15, n_pcs=None
sc.pp.neighbors(adata, n_neighbors=n_neighbors, n_pcs=n_pcs)
sc.tl.umap(adata)
# plot umap using raw data: normalized and logarithmized but not regressed out
# sc.pl.umap(adata, color=color, save="_on_raw_"+project+"."+figure_type)
# plot umap using scaled and corrected gene expression
# sc.pl.umap(adata, color=color, use_raw=False, save="_"+project+"."+figure_type)
# cluster using the leiden graph-clustering method
# default resolution=1.0
sc.tl.leiden(adata, resolution=resolution, key_added=key_added)
sc.pl.umap(adata, color=color_gene, show=show, save="_after_leiden"+project+"."+figure_type)
adata.write(out)
def main():
parser = argparse.ArgumentParser(description="Arguments for scRNA-seq Clustering")
# basic parameters
parser.add_argument("-i", "--input", type=str, help="the path of count_after_QC.h5ad file", default="count_after_QC.h5ad")
parser.add_argument("-d", "--dpi", type=int, help="the resolution of the output figure", default=80)
parser.add_argument("-f", "--figure_type", type=str, help="the export type of plots, e.g., png, pdf, or svg", default="pdf")
parser.add_argument("-p", "--project", type=str, help="the project name", default="")
parser.add_argument("-o", "--out", type=str, help="the file name to save the anndata object", default="after_leiden.h5ad")
parser.add_argument("-s", "--figsize", type=float, nargs=2, help="the size of output figure, use 2 numbers, e.g., 2 2")
parser.add_argument("-S", "--show", type=lambda x: (str(x).lower() in ['true', "1", "yes"]), help="block output figures on the screen by providing no, false, or 0")
# umap parmeters
parser.add_argument("-n", "--n_neighbors", type=int, help="the size of local neiborhood for manifold approximation", default=15)
parser.add_argument("-P", "--n_pcs", type=int, help="the number of PCs to use", default=None)
parser.add_argument("-m", "--method", type=str, help="the method for neighborhood graph, either ‘umap’, ‘gauss’, ‘rapids’", default="umap")
parser.add_argument("-M", "--metric", type=str, help="the metric for neighborhood graph, [‘cityblock’, ‘cosine’, ‘euclidean’, ‘l1’, ‘l2’, ‘manhattan’], Literal[‘braycurtis’, ‘canberra’, ‘chebyshev’, ‘correlation’, ‘dice’, ‘hamming’, ‘jaccard’, ‘kulsinski’, ‘mahalanobis’, ‘minkowski’, ‘rogerstanimoto’, ‘russellrao’, ‘seuclidean’, ‘sokalmichener’, ‘sokalsneath’, ‘sqeuclidean’, ‘yule’],", default="euclidean")
# leiden parameters
parser.add_argument("-r", "--resolution", type=float, help="the resolution for leiden", default=1.0)
# color parameters and key names to be stored in adata
parser.add_argument("-C", "--color_gene", type=str, nargs="*", help="define a list of genes (e.g., MAP2 TEME199 TMEM106B), a key of leiden (e.g., 'leiden' or other key_added like 'leiden_0.6'), or both as color hues in umap plot", default="leiden")
# parser.add_argument("-g", "--gene_list", type=str, nargs="+", action="store", dest="list", help="define a list of genes to show in umap, e.g., MAP2 TEME199 NIL", default=['leiden'])
parser.add_argument("-k", "--key_added", type=str, help="the key name of a ledien anaysis to be addeed to anndata", default='leiden')
parser.set_defaults(func=cluster)
args = parser.parse_args()
args.func(args)
print()
print(f"The arguments are {args}")
if __name__ == "__main__":
main()
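# Example invocation (illustrative only; file names and parameter values below are
# assumptions chosen for the example, not defaults shipped with any pipeline):
#
#   python cluster_2.py \
#       --input count_after_QC.h5ad \
#       --out after_leiden.h5ad \
#       --n_neighbors 15 --n_pcs 40 \
#       --resolution 0.6 --key_added leiden_0.6 \
#       --color_gene leiden_0.6 MAP2 TMEM106B \
#       --figure_type png --project sample_run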
| 2.75
| 3
|
autoclass/autoargs_.py
|
erocarrera/python-autoclass
| 33
|
12776777
|
import sys
from collections import OrderedDict
from makefun import wraps
try: # python 3+
from inspect import signature, Signature
except ImportError:
from funcsigs import signature, Signature
try: # python 3.5+
from typing import Tuple, Callable, Union, Iterable
except ImportError:
pass
from decopatch import function_decorator, DECORATED
from autoclass.utils import read_fields_from_init
@function_decorator
def autoargs(include=None, # type: Union[str, Tuple[str]]
exclude=None, # type: Union[str, Tuple[str]]
f=DECORATED
):
"""
Defines a decorator with parameters, to automatically assign the inputs of a function to self PRIOR to executing
the function. In other words:
```
@autoargs
def myfunc(self, a):
print('hello')
```
will create the equivalent of
```
def myfunc(self, a):
self.a = a
print('hello')
```
Initial code from http://stackoverflow.com/questions/3652851/what-is-the-best-way-to-do-automatic-attribute-assignment-in-python-and-is-it-a#answer-3653049
:param include: a tuple of attribute names to include in the auto-assignment. If None, all arguments will be
included by default
:param exclude: a tuple of attribute names to exclude from the auto-assignment. In such case, include should be None
:return:
"""
return autoargs_decorate(f, include=include, exclude=exclude)
def autoargs_decorate(func, # type: Callable
include=None, # type: Union[str, Tuple[str]]
exclude=None # type: Union[str, Tuple[str]]
):
# type: (...) -> Callable
"""
Defines a decorator with parameters, to automatically assign the inputs of a function to self PRIOR to executing
the function. This is the inline way to apply the decorator
```
myfunc2 = autoargs_decorate(myfunc)
```
See autoargs for details.
:param func: the function to wrap
:param include: a tuple of attribute names to include in the auto-assignment. If None, all arguments will be
included by default
:param exclude: a tuple of attribute names to exclude from the auto-assignment. In such case, include should be None
:return:
"""
# retrieve and filter the names
selected_names, func_sig = read_fields_from_init(func, include=include, exclude=exclude, caller="@autoargs")
# finally create the new function (a wrapper)
return _autoargs_decorate(func, func_sig, selected_names)
def _autoargs_decorate(func, # type: Callable
func_sig, # type: Signature
att_names # type: Iterable[str]
):
"""
Creates a wrapper around the function `func` so that all attributes in `att_names` are set to `self`
BEFORE executing the function. The original function signature may be needed in some edge cases.
:param func:
:param func_sig:
:param att_names:
:return:
"""
@wraps(func)
def init_wrapper(self, *args, **kwargs):
# bind arguments with signature: not needed anymore in nominal case since we use `makefun.wraps`
# bound_values = func_sig.bind(self, *args, **kwargs)
# apply_defaults(bound_values)
# Assign to self each of the attributes
need_introspect = False
i = -1
for i, att_name in enumerate(att_names):
try:
setattr(self, att_name, kwargs[att_name])
except KeyError:
# this may happen when the att names are BEFORE a var positional
# Switch to introspection mode
need_introspect = True
break
if need_introspect and i >= 0:
bound_values = func_sig.bind(self, *args, **kwargs)
apply_defaults(bound_values)
# noinspection PyUnboundLocalVariable
arg_dict = bound_values.arguments
for att_name in att_names[i:]:
setattr(self, att_name, arg_dict[att_name])
# finally execute the constructor function
return func(self, *args, **kwargs)
# return wrapper
return init_wrapper
if sys.version_info >= (3, 0):
# the function exists, use it
def apply_defaults(bound_values):
bound_values.apply_defaults()
else:
# the `inspect` backport (`funcsigs`) does not implement the function
# TODO when funcsigs implements PR https://github.com/aliles/funcsigs/pull/30 remove this
def apply_defaults(bound_values):
arguments = bound_values.arguments
# Creating a new one and not modifying in-place for thread safety.
new_arguments = []
for name, param in bound_values._signature.parameters.items():
try:
new_arguments.append((name, arguments[name]))
except KeyError:
if param.default is not param.empty:
val = param.default
elif param.kind is param.VAR_POSITIONAL:
val = ()
elif param.kind is param.VAR_KEYWORD:
val = {}
else:
# BoundArguments was likely created by bind_partial
continue
new_arguments.append((name, val))
bound_values.arguments = OrderedDict(new_arguments)
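# Usage sketch (illustrative, not from the original module): applying @autoargs to a
# constructor so arguments are bound to the instance before the body runs. The class
# name and fields are made up, and the top-level import assumes the autoclass package
# re-exports autoargs.
#
#   from autoclass import autoargs
#
#   class Point(object):
#       @autoargs
#       def __init__(self, x, y, label='origin'):
#           pass
#
#   p = Point(1, 2)
#   assert (p.x, p.y, p.label) == (1, 2, 'origin')
#
# Using @autoargs(exclude=('label',)) instead would skip the 'label' attribute,
# mirroring the include/exclude parameters documented above.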
| 2.96875
| 3
|
tornadoes_plot/plot_single_tornado.py
|
Shom770/data-science-projects
| 0
|
12776778
|
from operator import itemgetter
from shapefile import Reader
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy.io.img_tiles as cimgt
import matplotlib.pyplot as plt
from metpy.plots import USCOUNTIES
from statsmodels.nonparametric.smoothers_lowess import lowess
# Constants
EF_COLORS = {
"EFU": "#e0e0e0",
"EF0": "#01B0F1",
"EF1": "#92D14F",
"EF2": "#FFFF00",
"EF3": "#FFC000",
"EF4": "#C00000",
"EF5": "#CB00CC"
}
PADDING = 0.15
# Reading the shapefile of DAT points
shp_pts = Reader("nws_dat_damage_pnts.shp")
info_needed = [
{"storm_time": record[2], "rating": record[9], "windspeed": record[11], "lat": record[14], "lon": record[15]}
for record in shp_pts.records()
]
# Setting up CartoPy plot
stamen_terrain = cimgt.Stamen('terrain-background')
extent = (
min(info_needed, key=itemgetter("lon"))["lon"] - PADDING,
max(info_needed, key=itemgetter("lon"))["lon"] + PADDING,
min(info_needed, key=itemgetter("lat"))["lat"] - PADDING,
max(info_needed, key=itemgetter("lat"))["lat"] + PADDING
)
fig: plt.Figure = plt.figure(figsize=(12, 6))
ax: plt.Axes = fig.add_subplot(1, 1, 1, projection=stamen_terrain.crs)
ax.set_extent(extent)
ax.add_image(stamen_terrain, 10)
ax.add_feature(cfeature.LAND.with_scale("50m"))
ax.add_feature(cfeature.OCEAN.with_scale("50m"))
ax.add_feature(USCOUNTIES.with_scale("500k"))
# Plotting it onto the map
all_lons = []
all_lats = []
for info in info_needed:
all_lons.append(info["lon"])
all_lats.append(info["lat"])
ax.scatter(info["lon"], info["lat"], c=EF_COLORS[info["rating"]], marker="v", transform=ccrs.PlateCarree())
non_linear_fit = lowess(all_lats, all_lons)
ax.plot(non_linear_fit[:, 0], non_linear_fit[:, 1], transform=ccrs.PlateCarree())
plt.show()
| 2.203125
| 2
|
Core/Block_C/RC500_Factory.py
|
BernardoB95/Extrator_SPEDFiscal
| 1
|
12776779
|
<filename>Core/Block_C/RC500_Factory.py
from Core.IFactory import IFactory
from Regs.Block_C import RC500
class RC500Factory(IFactory):
def create_block_object(self, line):
self.rc500 = _rc500 = RC500()
_rc500.reg_list = line
return _rc500
| 2.21875
| 2
|
notebooks/cognitive-services/textanalytics-sentiment.py
|
kawo123/azure-databricks
| 0
|
12776780
|
# Databricks notebook source
# MAGIC %md
# MAGIC
# MAGIC # Spark integration with Azure Cognitive Services
# MAGIC
# MAGIC At Spark + AI Summit 2019, Microsoft announced a new set of models in the SparkML ecosystem that make it easy to leverage the Azure Cognitive Services at terabyte scales. With only a few lines of code, developers can embed cognitive services within their existing distributed machine learning pipelines in Spark ML. Additionally, these contributions allow Spark users to chain or Pipeline services together with deep networks, gradient boosted trees, and any SparkML model and apply these hybrid models in elastic and serverless distributed systems.
# MAGIC
# MAGIC From image recognition and object detection to speech recognition, translation, and text-to-speech, Azure Cognitive Services makes it easy for developers to add intelligent capabilities to their applications in any scenario. This notebook demonstrates the integration of PySpark (using Azure Databricks) with Azure Cognitive Service [Text Analytics](https://azure.microsoft.com/en-us/services/cognitive-services/text-analytics/) to extract valuable information from text data.
# MAGIC
# MAGIC ## Prerequisites
# MAGIC
# MAGIC - Spark 2.4 environment
# MAGIC - You can use Azure Databricks for an integrated Spark environment
# MAGIC - Install required libraries in Spark
# MAGIC - [MMLSpark](https://mmlspark.blob.core.windows.net/website/index.html#install)
# MAGIC - [azure-cognitiveservices-language-textanalytics](https://pypi.org/project/azure-cognitiveservices-language-textanalytics/)
# MAGIC - Create [Azure Cognitive Services multi-service resource](https://docs.microsoft.com/en-us/azure/cognitive-services/cognitive-services-apis-create-account?tabs=multiservice%2Clinux)
# MAGIC - Import [Customers sample dataset](https://github.com/kawo123/azure-databricks/blob/master/data/customers.csv) into Spark environment
# MAGIC
# MAGIC ## References
# MAGIC
# MAGIC - [Spark and Azure Cognitive Services blog](https://azure.microsoft.com/en-us/blog/dear-spark-developers-welcome-to-azure-cognitive-services/)
# COMMAND ----------
from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient
from msrest.authentication import CognitiveServicesCredentials
from mmlspark.cognitive import TextSentiment
from pyspark.sql.functions import col
# COMMAND ----------
# Obtain Azure Text Analytics endpoint and key. Replace <<TODO>> below with your endpoint and key
textanalytics_endpoint = '<<TODO>>' # TODO
textanalytics_key = '<<TODO>>' # TODO
# Initialize Azure Text Analytics client
client = TextAnalyticsClient(textanalytics_endpoint, CognitiveServicesCredentials(textanalytics_key))
# COMMAND ----------
# Create sample text documents for analysis
docs = [
{ 'id': '1', 'language': 'en', 'text': 'This is awesome!' },
{ 'id': '2', 'language': 'en', 'text': 'This was a waste of my time. The speaker put me to sleep.' },
{ 'id': '3', 'language': 'en', 'text': None },
{ 'id': '4', 'language': 'en', 'text': 'Hello World' }
]
# Submit text documents for sentiment analysis
resp = client.sentiment(documents=docs)
# Print sentiment analysis results
for document in resp.documents:
print("Document Id: ", document.id, ", Sentiment Score: ", "{:.2f}".format(document.score))
# COMMAND ----------
# MAGIC %md
# MAGIC You should observe output similar to below
# MAGIC
# MAGIC ```
# MAGIC Document Id: 1 , Sentiment Score: 1.00
# MAGIC Document Id: 2 , Sentiment Score: 0.11
# MAGIC Document Id: 4 , Sentiment Score: 0.76
# MAGIC ```
# COMMAND ----------
# Read customers csv
df_customers = spark\
.read\
.option('header', True)\
.option('inferSchema', True)\
.csv('/FileStore/tables/customers.csv')
df_customers.show(2)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC You should see a table with the following columns: `Customer grouping`, `Product ID`, `State`, `Customer category`, `Product market price`, `total market price`, `Notes`, `store comments`, `Customer Address`, `Gender`, `Discount`, `Date`, `Quantity`, `Discount_discrete`
# COMMAND ----------
# Define Sentiment Analysis pipeline
pipe_text_sentiment = (TextSentiment()
.setSubscriptionKey(textanalytics_key)
.setLocation('eastus')
.setLanguage('en')
.setTextCol('store comments')
.setOutputCol("StoreCommentSentimentObj")
.setErrorCol("Errors")
.setConcurrency(10)
)
# Process df_customers with the Sentiment Analysis pipeline
df_customers_sentiment = pipe_text_sentiment.transform(df_customers)
df_customers_sentiment.show(2)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC After the customer dataframe is processed by the sentiment analysis pipeline, you should see 2 additional columns in the table: `Errors` and `StoreCommentSentimentObj`. The `Errors` column contains any error message that the text analytics pipeline encounters. The `StoreCommentSentimentObj` column contains an array of sentiment objects returned by the Text Analytics service. The sentiment object includes the sentiment score and any error messages that the Text Analytics engine encounters.
# COMMAND ----------
# Extract sentiment score from store comment sentiment complex objects
df_customers_sentiment_numeric = (df_customers_sentiment
.select('*', col('StoreCommentSentimentObj').getItem(0).getItem('score').alias('StoreCommentSentimentScore'))
.drop('Errors', 'StoreCommentSentimentObj')
)
df_customers_sentiment_numeric.show(2)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC You should now see a new column `StoreCommentSentimentScore` which contains the numeric sentiment scores of store comments
| 2.453125
| 2
|
tasks/dataflow/dataset/create.py
|
drakesvoboda/ProGraML
| 71
|
12776781
|
<gh_stars>10-100
# Copyright 2019-2020 the ProGraML authors.
#
# Contact <NAME> <<EMAIL>>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export LLVM-IR from legacy database."""
import codecs
import itertools
import multiprocessing
import os
import pathlib
import pickle
import random
import shutil
import subprocess
from absl import app, flags, logging
from MySQLdb import _mysql
from programl.ir.llvm.py import llvm
from programl.proto import ir_pb2
from programl.util.py import pbutil, progress
from programl.util.py.init_app import init_app
from programl.util.py.runfiles_path import runfiles_path
from tasks.dataflow.dataset import pathflag
from tasks.dataflow.dataset.encode_inst2vec import Inst2vecEncodeGraphs
flags.DEFINE_string(
"classifyapp",
str(pathlib.Path("~/programl/classifyapp").expanduser()),
"Path of the classifyapp database.",
)
flags.DEFINE_string("host", None, "The database to export from.")
flags.DEFINE_string("user", None, "The database to export from.")
flags.DEFINE_string("pwd", None, "The database to export from.")
flags.DEFINE_string("db", None, "The database to export from.")
FLAGS = flags.FLAGS
CREATE_LABELS = runfiles_path("programl/tasks/dataflow/dataset/create_labels")
CREATE_VOCAB = runfiles_path("programl/tasks/dataflow/dataset/create_vocab")
UNPACK_IR_LISTS = runfiles_path("programl/tasks/dataflow/dataset/unpack_ir_lists")
def _ProcessRow(output_directory, row, file_id) -> None:
source, src_lang, ir_type, binary_ir = row
# Decode database row.
source = source.decode("utf-8")
src_lang = {
"C": "c",
"CPP": "cc",
"OPENCL": "cl",
"SWIFT": "swift",
"HASKELL": "hs",
"FORTRAN": "f90",
}[src_lang.decode("utf-8")]
ir_type = ir_type.decode("utf-8")
if source.startswith("sqlite:///"):
source = "github"
else:
source = {
"github.com/av-maramzin/SNU_NPB:NPB3.3-SER-C": "npb-3_3-ser-c",
"pact17_opencl_devmap": "opencl",
"linux-4.19": "linux-4_19",
"opencv-3.4.0": "opencv-3_4_0",
}.get(source, source)
# Output file paths.
name = f"{source}.{file_id}.{src_lang}"
ir_path = output_directory / f"ir/{name}.ll"
ir_message_path = output_directory / f"ir/{name}.Ir.pb"
# Check that the files to be generated do not already exist.
# This is a defensive measure against accidentally overwriting files during
# an export. A side effect of this is that partial exports are not supported.
assert not ir_path.is_file()
assert not ir_message_path.is_file()
ir = pickle.loads(codecs.decode(binary_ir, "zlib"))
# Write the text IR to file.
with open(ir_path, "w") as f:
f.write(ir)
compiler_version = {
"LLVM_6_0": 600,
"LLVM_3_5": 350,
}[ir_type]
ir_message = ir_pb2.Ir(
type=ir_pb2.Ir.LLVM, compiler_version=compiler_version, text=ir
)
pbutil.ToFile(ir_message, ir_message_path)
# Convert to ProgramGraph.
try:
graph = llvm.BuildProgramGraph(ir)
pbutil.ToFile(graph, output_directory / f"graphs/{name}.ProgramGraph.pb")
# Put into train/val/test bin.
r = random.random()
if r < 0.6:
dst = "train"
elif r < 0.8:
dst = "val"
else:
dst = "test"
os.symlink(
f"../graphs/{name}.ProgramGraph.pb",
output_directory / dst / f"{name}.ProgramGraph.pb",
)
except (ValueError, OSError, TimeoutError, AssertionError) as e:
pass
def _ProcessRows(job) -> int:
output_directory, rows = job
for (row, i) in rows:
_ProcessRow(output_directory, row, i)
return len(rows)
def chunkify(iterable, chunk_size: int):
"""Split an iterable into chunks of a given size.
Args:
iterable: The iterable to split into chunks.
chunk_size: The size of the chunks to return.
Returns:
An iterator over chunks of the input iterable.
"""
i = iter(iterable)
piece = list(itertools.islice(i, chunk_size))
while piece:
yield piece
piece = list(itertools.islice(i, chunk_size))
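# Example (illustrative): chunkify lazily yields fixed-size pieces, with a shorter final
# piece when the input does not divide evenly.
#
#   >>> list(chunkify(range(7), 3))
#   [[0, 1, 2], [3, 4, 5], [6]]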
class ImportIrDatabase(progress.Progress):
"""Export non-POJ104 IRs from MySQL database.
The code which populated this MySQL database is the legacy
//deeplearning/ml4pl codebase.
"""
def __init__(self, path: pathlib.Path, db):
self.path = path
db = _mysql.connect(
host=FLAGS.host, user=FLAGS.user, passwd=FLAGS.<PASSWORD>, db=FLAGS.db
)
db.query(
"""
SELECT COUNT(*) FROM intermediate_representation
WHERE compilation_succeeded=1
AND source NOT LIKE 'poj-104:%'
"""
)
n = int(db.store_result().fetch_row()[0][0].decode("utf-8"))
super(ImportIrDatabase, self).__init__("ir db", i=0, n=n, unit="irs")
def Run(self):
with multiprocessing.Pool() as pool:
# A counter used to produce a unique ID number for each exported file.
n = 0
# Run many smaller queries rather than one big query since MySQL
# connections will die if hanging around for too long.
batch_size = 512
job_size = 16
for j in range(0, self.ctx.n, batch_size):
db = _mysql.connect(
host=FLAGS.host, user=FLAGS.user, passwd=FLAGS.<PASSWORD>, db=FLAGS.db
)
db.query(
f"""\
SELECT
source,
source_language,
type,
binary_ir
FROM intermediate_representation
WHERE compilation_succeeded=1
AND source NOT LIKE 'poj-104:%'
LIMIT {batch_size}
OFFSET {j}
"""
)
results = db.store_result()
rows = [
(item, i)
for i, item in enumerate(results.fetch_row(maxrows=0), start=n)
]
# Update the exported file counter.
n += len(rows)
jobs = [(self.path, chunk) for chunk in chunkify(rows, job_size)]
for exported_count in pool.imap_unordered(_ProcessRows, jobs):
self.ctx.i += exported_count
self.ctx.i = self.ctx.n
class CopyPoj104Dir(progress.Progress):
"""Copy all files from a directory in the classifyapp dataset."""
def __init__(self, outpath, classifyapp, dirname):
self.outpath = outpath
self.paths = list((classifyapp / dirname).iterdir())
self.dirname = dirname
super(CopyPoj104Dir, self).__init__(i=0, n=len(self.paths), name=dirname)
def Run(self):
for self.ctx.i, path in enumerate(self.paths):
dst = self.outpath / self.dirname / f"poj104_{path.name}"
if not dst.is_file():
shutil.copy(path, dst)
class CopyPoj104Symlinks(progress.Progress):
"""Recreate train/val/test symlinks from the classifyapp dataset."""
def __init__(self, outpath, classifyapp, typename):
self.outpath = outpath
self.paths = list((classifyapp / typename).iterdir())
self.typename = typename
super(CopyPoj104Symlinks, self).__init__(i=0, n=len(self.paths), name=typename)
def Run(self):
for self.ctx.i, path in enumerate(self.paths):
dst = self.outpath / self.typename / f"poj104_{path.name}"
if not dst.is_symlink():
os.symlink(f"../graphs/poj104_{path.name}", dst)
def ImportClassifyAppDataset(classifyapp: pathlib.Path, path: pathlib.Path):
logging.info("Copying files from classifyapp dataset")
progress.Run(CopyPoj104Dir(path, classifyapp, "ir"))
progress.Run(CopyPoj104Dir(path, classifyapp, "graphs"))
progress.Run(CopyPoj104Symlinks(path, classifyapp, "train"))
progress.Run(CopyPoj104Symlinks(path, classifyapp, "val"))
progress.Run(CopyPoj104Symlinks(path, classifyapp, "test"))
# classifyapp uses IrList protocol buffers to store multiple IRs in a single
# file, whereas for dataflow we require one IR per file. Unpack the IrLists
# and delete them.
logging.info("Unpacking IrList protos")
subprocess.check_call([str(UNPACK_IR_LISTS), "--path", str(path)])
def main(argv):
init_app(argv)
path = pathlib.Path(pathflag.path())
db = _mysql.connect(host=FLAGS.host, user=FLAGS.user, passwd=FLAGS.<PASSWORD>, db=FLAGS.db)
# First create the output directories. Fail if they already exist.
(path / "ir").mkdir(parents=True)
(path / "graphs").mkdir()
(path / "train").mkdir()
(path / "val").mkdir()
(path / "test").mkdir()
# Export the legacy IR database.
export = ImportIrDatabase(path, db)
progress.Run(export)
# Import the classifyapp dataset.
ImportClassifyAppDataset(pathlib.Path(FLAGS.classifyapp), path)
# Add inst2vec encoding features to graphs.
logging.info("Encoding graphs with inst2vec")
progress.Run(Inst2vecEncodeGraphs(path))
logging.info("Creating vocabularies")
subprocess.check_call([str(CREATE_VOCAB), "--path", str(path)])
logging.info("Creating data flow analysis labels")
subprocess.check_call([str(CREATE_LABELS), str(path)])
if __name__ == "__main__":
app.run(main)
| 1.6875
| 2
|
betdaq/endpoints/betting.py
|
jackrhunt13/betdaq
| 13
|
12776782
|
<filename>betdaq/endpoints/betting.py<gh_stars>10-100
import datetime
from betdaq.enums import Boolean
from betdaq.utils import clean_locals, listy_mc_list
from betdaq.endpoints.baseendpoint import BaseEndpoint
from betdaq.errorparsers.betting import err_cancel_market, err_suspend_orders
from betdaq.resources.bettingresources import (
parse_orders, parse_single_order, parse_orders_receipt, parse_order_update,
parse_cancelled_order, parse_suspended_order,
)
class Betting(BaseEndpoint):
def get_orders(self, SequenceNumber=-1, wantSettledOrdersOnUnsettledMarkets=Boolean.T.value):
"""
Get the initial list of orders that need to be taken into consideration when establishing positions.
Information about the following orders will be returned:
• active orders
• fully matched orders
• cancelled orders that have a matched portion
• suspended orders
• some settled or voided orders under some conditions
:param SequenceNumber: lower bound cutoff for sequence updates to include,
-1 will set to earliest possible sequence.
:type SequenceNumber: int
:param wantSettledOrdersOnUnsettledMarkets: Flag indicating whether or not information about settled orders
on unsettled markets should be returned.
:type wantSettledOrdersOnUnsettledMarkets: bool
:return: orders that have changed.
"""
params = clean_locals(locals())
date_time_sent = datetime.datetime.utcnow()
response = self.request('ListBootstrapOrders', params, secure=True)
data = self.process_response(response, date_time_sent, 'Orders')
return [parse_orders(order) for order in data.get('data', {}).get('Order', [])] if data.get('data') else []
def get_orders_diff(self, SequenceNumber):
"""
Get a list of orders for the logged in user that have changed since a given sequence number.
Utilised to maintain position information after initial position is established with list_orders.
:param SequenceNumber: lower bound cutoff for sequence updates to include.
:type SequenceNumber: int
:return: orders that have changed.
"""
params = clean_locals(locals())
date_time_sent = datetime.datetime.utcnow()
response = self.request('ListOrdersChangedSince', params, secure=True)
data = self.process_response(response, date_time_sent, 'Orders')
return [parse_orders(order) for order in data.get('data', {}).get('Order', [])] if data.get('data') else []
def get_single_order(self, OrderId):
"""
Get full detail and history about an individual order.
:param OrderId: id of the order we wish to get history for.
:type OrderId: int
:return: single orders history and current status.
"""
params = clean_locals(locals())
date_time_sent = datetime.datetime.utcnow()
response = self.request('GetOrderDetails', params, secure=True)
data = self.process_response(response, date_time_sent, None)
return listy_mc_list(parse_single_order(data.get('data', {})) if data.get('data') else {})
def place_orders(self, order_list, WantAllOrNothingBehaviour=Boolean.T.value, receipt=True):
"""
Places one or more orders at exchange.
Receipt determines whether to wait for complete matching cycle or just return the order ID.
:param order_list: List of orders to be sent to exchange
:type order_list: list of betdaq_py.filters.create_order
:param WantAllOrNothingBehaviour: defines whether to kill all orders on any error or place orders independently.
:type WantAllOrNothingBehaviour: betdaq_py.enums.Boolean
:param receipt: whether to wait for matching cycle and return full info of order or not.
:type receipt: bool
:return: the order ID or full order information depending on receipt.
"""
date_time_sent = datetime.datetime.utcnow()
if receipt:
method = 'PlaceOrdersWithReceipt'
params = self.client.secure_types['%sRequest' % method](Orders={'Order': order_list})
else:
method = 'PlaceOrdersNoReceipt'
params = self.client.secure_types['%sRequest' % method](
WantAllOrNothingBehaviour=WantAllOrNothingBehaviour, Orders={'Order': order_list}
)
response = self.request(method, params, secure=True)
data = self.process_response(response, date_time_sent, 'Orders')
return [
parse_orders_receipt(order) for order in data.get('data', {}).get('Order', [])
] if data.get('data') else []
def update_orders(self, order_list):
"""
Update orders on exchange
:param order_list: list of order updates to be sent to exchange.
:type order_list: list of betdaq_py.filters.update_order
:return: BetID and status of update.
"""
params = self.client.secure_types['UpdateOrdersNoReceiptRequest'](Orders={'Order': order_list})
date_time_sent = datetime.datetime.utcnow()
response = self.request('UpdateOrdersNoReceipt', params, secure=True)
data = self.process_response(response, date_time_sent, 'Orders')
return [
parse_order_update(update) for update in data.get('data', {}).get('Order', [])
] if data.get('data') else []
def cancel_orders(self, order_ids):
"""
Cancel one or more orders on exchange
:param order_ids: list of order ids to be cancelled.
:type order_ids: list of ints
:return: information on the cancellation status of each order.
"""
params = self.client.secure_types['CancelOrdersRequest'](
_value_1=[{'OrderHandle': order_id} for order_id in order_ids]
)
date_time_sent = datetime.datetime.utcnow()
response = self.request('CancelOrders', params, secure=True)
data = self.process_response(response, date_time_sent, 'Orders')
return [
parse_cancelled_order(cancel) for cancel in data.get('data', {}).get('Order', [])
] if data.get('data') else []
def cancel_orders_by_market(self, market_ids):
"""
Cancel all orders on one or more markets on exchange
:param market_ids: list of market ids to be cancelled.
:type market_ids: list of ints
:return: information on the cancellation status of each order.
"""
params = self.client.secure_types['CancelAllOrdersOnMarketRequest'](
_value_1=[{'MarketIds': market} for market in market_ids]
)
date_time_sent = datetime.datetime.utcnow()
response = self.request('CancelAllOrdersOnMarket', params, secure=True)
data = self.process_response(response, date_time_sent, 'Order', error_handler=err_cancel_market)
return [parse_cancelled_order(cancel) for cancel in data.get('data', [])] if data.get('data') else []
def cancel_all_orders(self):
"""
Cancels all unmatched orders across all markets.
:return: information on the cancellation status of each order.
"""
date_time_sent = datetime.datetime.utcnow()
response = self.request('CancelAllOrders', {}, secure=True)
data = self.process_response(response, date_time_sent, 'Orders')
return [
parse_cancelled_order(cancel) for cancel in data.get('data', {}).get('Order', [])
] if data.get('data') else []
def suspend_orders(self, order_ids):
"""
Suspend one or more orders on exchange
:param order_ids: list of order ids to be suspended.
:type order_ids: list of ints
:return: information on the suspension status of each order.
"""
params = self.client.secure_types['SuspendOrdersRequest'](
_value_1=[{'OrderIds': order_id} for order_id in order_ids]
)
date_time_sent = datetime.datetime.utcnow()
response = self.request('SuspendOrders', params, secure=True)
data = self.process_response(response, date_time_sent, 'Orders', error_handler=err_suspend_orders)
return [parse_suspended_order(suspend) for suspend in data.get('data', [])] if data.get('data') else []
def suspend_orders_by_market(self, MarketId):
"""
Suspend all orders on a given market.
:param MarketId: the market id to suspend orders on.
:type MarketId: int
:return: information on the suspension status of each order.
"""
params = clean_locals(locals())
date_time_sent = datetime.datetime.utcnow()
response = self.request('SuspendAllOrdersOnMarket', params, secure=True)
data = self.process_response(response, date_time_sent, 'Orders', error_handler=err_suspend_orders)
return [parse_suspended_order(suspend) for suspend in data.get('data', [])] if data.get('data') else []
def suspend_all_orders(self):
"""
Suspend all orders across all markets.
:return: information on the suspension status of each order.
"""
date_time_sent = datetime.datetime.utcnow()
response = self.request('SuspendAllOrders', {}, secure=True)
data = self.process_response(response, date_time_sent, 'Orders', error_handler=err_suspend_orders)
return [parse_suspended_order(suspend) for suspend in data.get('data', [])] if data.get('data') else []
def unsuspend_orders(self, order_ids):
"""
Unsuspends one or more suspended orders
:param order_ids: list of order ids to unsuspend.
:type order_ids: list
:return:
"""
params = self.client.secure_types['UnsuspendOrdersRequest'](
_value_1=[{'OrderIds': order_id} for order_id in order_ids]
)
date_time_sent = datetime.datetime.utcnow()
response = self.request('UnsuspendOrders', params, secure=True)
data = self.process_response(response, date_time_sent, None)
return []
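# Usage sketch (illustrative only; the client construction is assumed from the wider betdaq
# package and is not defined in this module). It shows how the endpoint methods above would
# typically be chained:
#
#   # api_client = ...                                 # authenticated betdaq client (assumed)
#   # betting = Betting(api_client)
#   # orders = betting.get_orders(SequenceNumber=-1)   # bootstrap current positions
#   # changed = betting.get_orders_diff(SequenceNumber=last_sequence_number)
#   # betting.cancel_all_orders()                      # flatten everything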
| 2.453125
| 2
|
analysis/paper_plots.py
|
Achilleas/aqua-py-analysis
| 0
|
12776783
|
import h5py
import os, sys, glob
import numpy as np
import plotly.offline as offline
from preprocessing import analysis_pp
from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils
from scipy.stats.stats import power_divergence
from scipy.stats import ttest_ind_from_stats
import csv
import scipy.signal as ss
import math
import time
from pandas import DataFrame
from scipy import optimize
import pandas as pd
import matplotlib.pyplot as plt
from collections import deque
import powerlaw
import pylab
from matplotlib.font_manager import FontProperties
from matplotlib import rc
from scipy import stats
from scipy.stats import skewnorm
import plotly.graph_objs as go
def generate_astro_single_plots(astro_plotter, astroA, output_folder):
output_experiment_path = astro_plotter.get_output_experiment_path(astroA, output_folder)
print('Plotting behaviours basic...')
#Behaviour basic
figs_basic_plots = astro_plotter.get_behaviour_basic_plots(astroA)
for fig_k in figs_basic_plots.keys():
saving_utils.save_plotly_fig(figs_basic_plots[fig_k], os.path.join(output_experiment_path, 'plots', 'behaviours_basic', '{}'.format(fig_k)), width=1000, height=400)
print('Plotting behaviour heatmaps...')
#Behaviour heatmaps
fig_heatmap_grids, fig_heatmap_dff_grids = astro_plotter.get_behaviour_contour_plots(astroA)
heatmap_grid_base_path = os.path.join(output_experiment_path, 'plots', 'behaviour_heatmaps')
for k in fig_heatmap_grids.keys():
saving_utils.save_plotly_fig(fig_heatmap_grids[k], os.path.join(heatmap_grid_base_path, k))
saving_utils.save_plotly_fig(fig_heatmap_dff_grids[k], os.path.join(heatmap_grid_base_path, k + 'dff'))
print('Plotting behaviour heatmaps (saturation)...')
fig_heatmap_grids, fig_heatmap_dff_grids = astro_plotter.get_behaviour_contour_threshold_plots(astroA, threshold=0.5)
heatmap_grid_base_path = os.path.join(output_experiment_path, 'plots', 'behaviour_heatmaps_saturation')
for k in fig_heatmap_grids.keys():
saving_utils.save_plotly_fig(fig_heatmap_grids[k], os.path.join(heatmap_grid_base_path, k))
saving_utils.save_plotly_fig(fig_heatmap_dff_grids[k], os.path.join(heatmap_grid_base_path, k + 'dff'))
print('Plotting borders...')
#Borders plot
fig_border = astro_plotter.get_border_plot(astroA)
saving_utils.save_plotly_fig(fig_border, os.path.join(output_experiment_path, 'plots' , 'borders', 'border'))
print('Plotting behaviour activity bar plot...')
behaviour_activity_path = os.path.join(output_experiment_path, 'plots', 'behaviour_activity', 'activity')
fig_behaviour_activity = astro_plotter.get_behaviour_activity_plot(astroA)
saving_utils.save_plotly_fig(fig_behaviour_activity, behaviour_activity_path, width=1200, height=800)
print('Plotting behaviour event size bar plot...')
behaviour_area_path = os.path.join(output_experiment_path, 'plots', 'behaviour_areas', 'areas')
fig_behaviour_area = astro_plotter.get_behaviour_area_plot(astroA)
saving_utils.save_plotly_fig(fig_behaviour_area, behaviour_area_path)
print('Plotting behaviour amplitude size bar plot...')
behaviour_amplitude_path = os.path.join(output_experiment_path, 'plots', 'signal_amplitudes', 'amplitudes')
fig_behaviour_amplitude = astro_plotter.get_behaviour_amplitude_bar_plot(astroA)
saving_utils.save_plotly_fig(fig_behaviour_amplitude, behaviour_amplitude_path)
def generate_astro_comparison_plots(astro_plotter, astroA_l, output_folder, name_tag, astroA_l_pairs=None, astroA_long_l=None, n_chunks=3):
output_experiment_path_all_comparison, _, _, astroA_l_s = astro_plotter.setup_comparison_all_vars(astroA_l, os.path.join(output_folder, name_tag))
print('Plotting sizes histogram dataset comparison for each behaviour')
bh_l = ['rest', 'stick_rest', 'running', 'stick_run_ind_15']
astroA_l_filt = []
bh_l_test = ['rest', 'running', 'stick_run_ind_15', 'stick_rest']
for astroA in astroA_l:
include = True
for bh in bh_l_test:
if bh not in astroA.indices_d.keys() or bh not in astroA.activity_ratios.keys():
include = False
print(':(', astroA.print_id, bh)
if include:
astroA_l_filt.append(astroA)
day_0_1_pairs = []
if astroA_l_pairs is not None:
for astroA_l_pair in astroA_l_pairs:
if astroA_l_pair[1].day == 1:
day_0_1_pairs.append(astroA_l_pair)
print('Comparing behaviour distribution plots...')
configs = [
{'measure': 'area', 'range': [None, 60], 'nbins' : 20, 'bh_l' : ['rest', 'stick_rest', 'running', 'stick_run_ind_15'], 'mode' : 'MOE'},
{'measure': 'dffMax2', 'range': [0.6, 5], 'nbins' : 20, 'bh_l' : ['rest', 'stick_rest', 'running', 'stick_run_ind_15'], 'mode' : 'MOE'},
{'measure': 'duration', 'range' : [None, 30], 'nbins' : 10, 'bh_l' : ['rest', 'stick_rest', 'running', 'stick_run_ind_15'], 'mode' : 'MOA'}
]
for config in configs:
behaviour_l = config['bh_l']
measure = config['measure']
min_measure, max_measure = config['range']
mode = config['mode']
n_bins = config['nbins']
confidence = True
try:
measure_name = aqua_utils.get_measure_names(measure)
path = os.path.join(output_experiment_path_all_comparison, 'plots', '{}_histogram_bh_comparison'.format(measure_name), 'behaviours-{}-nbins={}-min={}-max={}-conf={}-mode={}'.format('_'.join(behaviour_l), n_bins, min_measure, max_measure, confidence, mode))
plot, stats_d = astro_plotter.measure_distribution_bh_compare_plot(astroA_l, behaviour_l, measure=measure, num_bins=n_bins, min_measure=min_measure, max_measure=max_measure, measure_name=measure_name, confidence=confidence, with_stats=True, mode=mode)
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA_l[0].fr, axis='x')
if measure == 'area':
saving_utils.save_pth_plt_l_log([plot], [path], axis='x')
else:
saving_utils.save_plotly_fig(plot, path)
#saving_utils.save_pth_plt_l_log([plot], [path], axis='y')
#Save results in text file
for i, name in enumerate(stats_d['names']):
#Create folder
data_folder_path = path
try:
os.makedirs(path)
except:
pass
temp_d = {k : stats_d[k][i] for k in stats_d.keys()}
saving_utils.save_csv_dict(temp_d, os.path.join(data_folder_path, '{}.csv'.format(name)), key_order=['names', 'x', 'mean', 'conf_95', 'std'])
np.savetxt(os.path.join(data_folder_path, '{}-data.csv'.format(name)), np.array(temp_d['data']).transpose(), delimiter=",")
except Exception as e:
print('Exception: {}'.format(e))
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
delay_ranges_pairs = [[3*astroA_l[0].fr, 6*astroA_l[0].fr], [2*astroA_l[0].fr, 4*astroA_l[0].fr]]
delay_ranges_pairs = [[int(v[0]), int(v[1])] for v in delay_ranges_pairs]
before_range_3, after_range_6 = delay_ranges_pairs[0]
before_range_2, after_range_4 = delay_ranges_pairs[1]
print('Alt Proportion plots...')
# Rest to run plots
rest_to_run_setting = {
'before_bh':'rest_semi_exact',
'inds_bh':'running_exact_start',
'after_bh':'running_semi_exact',
'before_range' : before_range_3,
'after_range' : after_range_6,
'fit': False,
'delay_step_size': 10,
'confidence': True}
# Rest to run - PROPORTIONS
__save_astro_transition_plot(astro_plotter, astroA_l, setting=rest_to_run_setting, plot_type='proportions',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_proportions'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=rest_to_run_setting, plot_type='measure', measure='dffMax2default',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_amplitudes'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=rest_to_run_setting, plot_type='measure', measure='time_s',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_durations'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=rest_to_run_setting, plot_type='measure', measure='area',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_sizes'))
rest_to_run_setting['delay_step_size'] = 5
__save_astro_transition_plot(astro_plotter, astroA_l, setting=rest_to_run_setting, plot_type='behaviour', bh_measure='speed',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_speed'))
# Run to rest plots
run_to_rest_setting = {
'before_bh':'running_semi_exact',
'inds_bh':'rest_start',
'after_bh':'rest_semi_exact',
'before_range' : before_range_3,
'after_range' : after_range_6,
'fit': False,
'delay_step_size': 10,
'confidence': True}
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_to_rest_setting, plot_type='proportions',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_proportions'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_to_rest_setting, plot_type='measure', measure='dffMax2default',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_amplitudes'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_to_rest_setting, plot_type='measure', measure='time_s',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_durations'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_to_rest_setting, plot_type='measure', measure='area',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_sizes'))
run_to_rest_setting['delay_step_size'] = 5
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_to_rest_setting, plot_type='behaviour', bh_measure='speed',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_speed'))
# Run-stick-run plots
run_stick_run_setting = {
'before_bh':'running_semi_exact',
'inds_bh':'stick_exact_start',
'after_bh':'running_semi_exact',
'before_range' : before_range_2,
'after_range' : after_range_4,
'fit': False,
'delay_step_size': 10,
'confidence': True}
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_stick_run_setting, plot_type='proportions',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_proportions'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_stick_run_setting, plot_type='measure', measure='dffMax2default',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_amplitudes'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_stick_run_setting, plot_type='measure', measure='time_s',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_durations'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_stick_run_setting, plot_type='measure', measure='area',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_sizes'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_stick_run_setting, plot_type='behaviour', bh_measure='speed',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_speed'))
#------------------------------------------------------------------------------------------------------------------
print('--------------------------------------------------------------------------------------------------')
print('Distribution of pixel values real vs fake...')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'pixel_distribution')
x_l = []
y_l = []
name_l = [astroA.print_id for astroA in astroA_l]
for astroA in astroA_l:
grid = astroA.event_grids_1min['default']
grid = np.interp(grid, (grid.min(), grid.max()), (0, 1))
grid_flat = grid.flatten()
grid_flat_nz = grid_flat[grid_flat != 0]
hist, bin_edges = np.histogram(grid_flat_nz, bins=20, range=(0,1), density=True)
hist = hist * (bin_edges[1] - bin_edges[0])
print('HIST SUM', np.sum(hist))
x_l = bin_edges[:-1]
y_l.append(hist)
y_l_fmt = []
for i in range(len(y_l[0])):
y_l_fmt.append([y[i] for y in y_l])
plot_path = os.path.join(path, 'real')
fig, stats_d = plotly_utils.plot_scatter_error(x_l, y_l_fmt, x_title='Pixel intensity percentile', y_title='Frequency (Density)', exp_fit=True, with_details=True)
saving_utils.save_plotly_fig(fig, plot_path)
df_data = DataFrame(np.array(stats_d['data']).T, columns=x_l, index=name_l)
df_stats = DataFrame([stats_d['mean'], stats_d['conf_95'], stats_d['fit']], columns=x_l, index=['mean', 'conf_95', 'fit'])
df_data.to_csv(plot_path + '-data.csv')
df_stats.to_csv(plot_path +'-stats.csv')
sample_l_all = []
for astroA in astroA_l:
d = astro_plotter.get_individual_heatmaps_threshold_scaled(astroA, bh='default', threshold=1, num_samples=1, dff_mode=False, with_arr=True)
sample_l_all.append(d['arrs_d']['arr_r'][0])
x_l = []
y_l = []
for grid in sample_l_all:
grid = np.interp(grid, (grid.min(), grid.max()), (0, 1))
grid_flat = grid.flatten()
grid_flat_nz = grid_flat[grid_flat != 0]
#Normalize values to 1
grid_flat_nz /= np.max(grid_flat_nz)
hist, bin_edges = np.histogram(grid_flat_nz, bins=20, range=(0,1), density=True)
hist = hist * (bin_edges[1] - bin_edges[0])
print('HIST SUM', np.sum(hist))
x_l = bin_edges[:-1]
y_l.append(hist)
y_l_fmt = []
for i in range(len(y_l[0])):
y_l_fmt.append([y[i] for y in y_l])
plot_path = os.path.join(path, 'fake')
fig, stats_d = plotly_utils.plot_scatter_error(x_l, y_l_fmt, x_title='Pixel intensity percentile', y_title='Frequency (Density)', exp_fit=False, with_details=True)
saving_utils.save_plotly_fig(fig, plot_path)
df_data = DataFrame(np.array(stats_d['data']).T, columns=x_l)
df_stats = DataFrame([stats_d['mean'], stats_d['conf_95']], columns=x_l, index=['mean', 'conf_95'])
df_data.to_csv(plot_path + '-data.csv')
df_stats.to_csv(plot_path +'-stats.csv')
#------------------------------------------------------------------------------------------------------------------
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'power_law_fit_sizes_distribution')
path = path +'/'
saving_utils.generate_directory_path(path)
pylab.rcParams['xtick.major.pad']='8'
pylab.rcParams['ytick.major.pad']='8'
rc('font', family='sans-serif')
rc('font', size=10.0)
rc('text', usetex=False)
panel_label_font = FontProperties().copy()
panel_label_font.set_weight("bold")
panel_label_font.set_size(12.0)
panel_label_font.set_family("sans-serif")
fig, x, y_l, all_events_measure_l = astro_plotter.measure_distribution_plot(astroA_l, 'default', 'area', num_bins=10, min_measure=None, max_measure=None, measure_name='area', mode='MOE', with_measure_values=True)
xmin=5
data_np = np.array(all_events_measure_l)
fit = powerlaw.Fit(data_np, discrete=True, xmin=xmin)
####
fig = fit.plot_ccdf(linewidth=3, label='Empirical Data')
fit.power_law.plot_ccdf(ax=fig, color='r', linestyle='--', label='Power law fit')
fit.lognormal.plot_ccdf(ax=fig, color='g', linestyle='--', label='Lognormal fit')
fit.exponential.plot_ccdf(ax=fig, color='b', linestyle='--', label='Exponential fit')
####
fig.set_ylabel(u"p(X≥x)")
fig.set_xlabel("Size µm^2")
handles, labels = fig.get_legend_handles_labels()
fig.legend(handles, labels, loc=3)
figname = 'EmpiricalvsFits'
plt.savefig(os.path.join(path, figname+'.svg'), bbox_inches='tight')
plt.savefig(os.path.join(path, figname+'.png'), bbox_inches='tight')
#print('POWER LAW VS LOG NORMAL', fit.distribution_compare('power_law', 'lognormal'))
#print('POWER LAW VS EXPONENTIAL cutoff at {}µm**2'.format(xmin), fit.distribution_compare('power_law', 'exponential'))
#print('POWERLAW FUNCTION: ~x**(-{})'.format(fit.power_law.alpha))
#------------------------------------------------------------------------------------------------------------------
plt.ioff()
print('Plotting Size vs Time correlation plot...')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'size_v_time_corr_ALL')
path = path+'/'
    print('Generating directory path', path)
saving_utils.generate_directory_path(path)
areas_all = []
times_all = []
for astroA in astroA_l:
areas_all.extend(np.log(astroA.res_d['area']))
times_all.extend(astroA.res_d['time_s'])
areas_all = np.array(areas_all)
times_all = np.array(times_all)
r, p = stat_utils.get_pearsonr(times_all, areas_all)
df = pd.DataFrame({'Size': areas_all, 'Time': times_all})
title ='Size vs Time correlation plot'
text = 'r = {}, p < {}'.format(general_utils.truncate(r, 2), p)
for kind in ['reg', 'hex', 'kde']:
plotly_utils.seaborn_joint_grid(df, 'Size', 'Time', kind=kind, text=text)
plt.savefig(os.path.join(path, '{}.svg'.format(kind)))
plt.savefig(os.path.join(path, '{}.png'.format(kind)))
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
print('Plotting correlation of splitted plots in 3 parts...')
save_folder = os.path.join(output_experiment_path_all_comparison, 'data', 'split_correlation_all')
plot_folder = os.path.join(output_experiment_path_all_comparison, 'plots', 'split_correlation_all')
save_splits_pkl_path = os.path.join(save_folder, 'between_splits.pkl')
save_day_splits_pkl_path = os.path.join(save_folder, 'between_days.pkl')
save_random_pkl_path = os.path.join(save_folder, 'random.pkl')
save_bh_splits_pkl_path = os.path.join(save_folder, 'between_rest_run.pkl')
    # 1) random simulations
    # 2) within-day split correlations: day 0 and day 1 are each split into 3 chunks and the chunks are correlated with each other (only day 0 and day 1)
    # 3) between-day split correlations: day 0 and day 1 are split and the splits are compared across the two days
    #    (saved under 'split_correlation_all')
    # 4) between-behaviour split correlations: splits are compared across the rest-run behaviour pair
for bh in ['rest']:
#2
fig, res_splits_l = astro_plotter.get_between_split_split_xcorr(astroA_long_l, bh=bh, save_pkl_path=save_splits_pkl_path, n_chunks=n_chunks)
#3
fig_2, res_day_splits_l = astro_plotter.get_between_day_split_xcorr(day_0_1_pairs, bh=bh, save_pkl_path=save_day_splits_pkl_path, n_chunks=n_chunks)
#4
fig_3, res_bh_splits_l = astro_plotter.get_between_bh_split_xcorr(astroA_long_l, bh_pair=['rest','running'], save_pkl_path=save_bh_splits_pkl_path, n_chunks=n_chunks)
#1
if os.path.isfile(save_random_pkl_path):
random_l = saving_utils.load_pickle(save_random_pkl_path)
else:
random_l = []
for astroA in astroA_long_l:
random_l.extend(astro_plotter.get_random_corrs_self(astroA, bh, n_fake_samples=3))
if save_random_pkl_path is not None:
saving_utils.save_pickle(random_l, save_random_pkl_path)
x = ['Random', 'Self splits', 'Rest-Run splits', 'Day 0-1 Splits']
y = [random_l, res_splits_l, res_bh_splits_l, res_day_splits_l]
fig, stats_d = plotly_utils.plot_point_box_revised(x, y, title='Split correlations (between splits)- {}'.format(bh), x_title='', y_title='Xcorr value', with_stats=True)
saving_utils.save_plotly_fig(fig, os.path.join(plot_folder, 'splits'))
saving_utils.dict_to_csv(stats_d, os.path.join(plot_folder, 'splits' + '.csv'))
#saving_utils.save_csv_dict(stats_d, os.path.join(plot_folder, 'splits' + '.csv'), key_order=['x', 'mean', 'conf_95'])
results_dict = {x[i] : y[i] for i in range(len(x))}
saving_utils.dict_to_csv(results_dict, os.path.join(plot_folder, 'splits-data' + '.csv'))
#results_dict['x'] = x
#key_order = ['x']
#key_order.extend(x)
#saving_utils.save_csv_dict(results_dict, os.path.join(plot_folder, 'splits_data' + '.csv'), key_order=key_order)
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
print('Plotting correlation of self splitted plots...')
#STEP 1
#Take only long duration astrocytes
#Set maximum length of astrocyte duration to be 70min
#Then apply splits with xcorr
data_save_path = os.path.join(output_experiment_path_all_comparison, 'data', 'splits_self_all')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'splits_self_all', 'self_all')
y_l_l = []
x_l = []
minute_frame_splits_l = [35, 30, 25, 20, 15, 10, 5, 2]
cut_duration = 70
param_str = 'cut_{}-'.format(cut_duration) + 'splits_{}-'.format('_'.join([str(m) for m in minute_frame_splits_l]))
name_l = []
for i, astroA in enumerate(astroA_long_l):
curr_save_path = os.path.join(data_save_path, 'id_{}-{}.pkl'.format(astroA.print_id, param_str))
res_d = astro_plotter.get_compare_full_self_results_alt(astroA, cut_duration_min=cut_duration, minute_frame_splits_l=minute_frame_splits_l, save_pkl_path=curr_save_path)
y_l_l.append(res_d['y'])
x_l.append(res_d['x'])
name_l.append(astroA.print_id)
fig, stats_d = plotly_utils.plot_scatter_mult_with_avg(x_l[0], y_l_l, None, name_l, mode='lines', title='Splits self', x_title='Splits (minutes)', y_title='Correlation',
xrange=None, yrange=None, confidence=True, with_stats=True, point_box=True, exclude_non_avg_conf=True)
print(path)
saving_utils.save_plotly_fig(fig, path)
df_data_m = DataFrame(stats_d['mean_l_l'], columns=stats_d['x'], index=stats_d['names'])
df_ci = DataFrame(stats_d['conf_95'], columns=stats_d['x'], index=stats_d['names'])
df_mean = DataFrame([stats_d['mean'], stats_d['mean_conf']], columns=stats_d['x'], index=['mean', 'conf_95'])
df_data_m.to_csv(path + '-data_means.csv')
df_ci.to_csv(path + '-data_ci.csv')
df_mean.to_csv(path + '-mean_and_CI.csv')
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
names_l = ['amplitude', 'size', 'duration']
measure_l = ['dffMax2', 'area', 'time_s' ]
names_l = ['Event number (per minute)', 'amplitude', 'size', 'duration']
measure_l = [None, 'dffMax2', 'area', 'time_s']
bh_list_pairs = [['rest', 'running'], ['rest', 'stick_rest'], ['running', 'stick_run_ind_15']]
bh_list_pairs_names = ['rest_run', 'rest_rest_stick', 'run_run_stick']
for j, bh_list_pair in enumerate(bh_list_pairs):
for i, measure in enumerate(measure_l):
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'transition_dots_{}'.format(bh_list_pairs_names[j]), '{}'.format('dots_'+names_l[i]))
if 'stick_rest' in bh_list_pair:
plot, stats_d = astro_plotter.get_measure_all_dot_plot(astroA_l_filt, measure, bh_list=bh_list_pair)
else:
plot, stats_d = astro_plotter.get_measure_all_dot_plot(astroA_l, measure, bh_list=bh_list_pair)
saving_utils.save_plotly_fig(plot, plot_path)
with open(os.path.join(plot_path + '-data.csv'), mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
l = ['']
l.extend(stats_d['x'])
l.extend(['conf_0', 'conf_1'])
writer.writerow(l)
for i in range(len(stats_d['names'])):
l = [stats_d['names'][i]]
l.extend(stats_d['mean_l_l'][i])
if 'conf_95' in stats_d:
l.extend(stats_d['conf_95'][i])
writer.writerow(l)
with open(os.path.join(plot_path + '.csv'), mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow('')
writer.writerow(['mean_0', 'mean_1', 'mean_conf_0', 'mean_conf_1'])
l = []
l.extend(stats_d['mean'])
l.extend(stats_d['mean_conf'])
writer.writerow(l)
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'pdf_norm_fit')
estimates_d = {}
all_event_values = {}
for measure in ['dffMax2' , 'time_s']:
if measure == 'dffMax2':
num_bins = 200
max_filter_val = 3
elif measure == 'time_s':
num_bins = 30
max_filter_val = 2.91
estimates_d[measure] = {}
all_event_values[measure] = {}
for bh in ['rest', 'running']:
fig, x, y_l, all_events_measure_l = astro_plotter.measure_distribution_plot(astroA_l, bh, measure, num_bins=10, min_measure=None, max_measure=None, measure_name=aqua_utils.get_measure_names([measure]), mode='MOE', with_measure_values=True)
all_events_measure_l = np.array(all_events_measure_l)
all_events_measure_l = all_events_measure_l[all_events_measure_l < max_filter_val]
a_estimate, loc_estimate, scale_estimate = skewnorm.fit(all_events_measure_l)
x = np.linspace(np.min(all_events_measure_l), np.max(all_events_measure_l), 100)
p = skewnorm.pdf(x, a_estimate, loc_estimate, scale_estimate)
estimates_d[measure][bh] = [a_estimate, loc_estimate, scale_estimate, np.min(x), np.max(x)]
all_event_values[measure][bh] = np.copy(np.array(all_events_measure_l))
fig = plotly_utils.plot_scatter_histogram(x=x, y_hist=all_events_measure_l, y_scatter=p, num_bins=num_bins)
mean, var, skew, kurt = skewnorm.stats(a=a_estimate, loc=loc_estimate, scale=scale_estimate, moments='mvsk')
a, b = np.histogram(all_events_measure_l, bins=num_bins, range=(0, np.max(x)), density=True)
id_ = measure + '_' + bh
temp_d = {}
temp_d['Parameters'] = ["a={}".format(a_estimate), "loc={}".format(loc_estimate), "scale={}".format(scale_estimate)]
temp_d['Properties'] = ["MEAN={}".format(mean), "VAR={}".format(var), "SKEW={}".format(skew),"KURT={}".format(kurt)]
#print(temp_d)
saving_utils.save_csv_dict(temp_d, os.path.join(path, id_ + '.csv'), key_order=['Parameters', 'Properties'])
saving_utils.save_plotly_fig(fig, os.path.join(path, id_))
#print('skewnorm.pdf(x, a) = 2 * norm.pdf(x) * norm.cdf(a*x)')
#print('skewnorm.pdf(x, a, loc, scale) is identically equivalent to skewnorm.pdf(y, a) / scale with y = (x - loc) / scale')
with_values = True
for measure in ['dffMax2', 'time_s']:
est_rest = estimates_d[measure]['rest']
est_running = estimates_d[measure]['running']
if measure == 'dffMax2':
x_min = 0.6
x_max = 3
nbins = 100
elif measure == 'time_s':
x_min = 0
x_max = 2.91
else:
raise NotImplementedError()
x = np.linspace(x_min, x_max, 500)
if measure == 'duration' or measure == 'time_s':
tempset = set(list(all_event_values[measure]['rest'])).union(set(list(all_event_values[measure]['running'])))
tempset.add(0)
x_val_bins = np.sort(np.array(list(tempset)))
x_val_bins = x_val_bins[x_val_bins <= x_max]
x_val_bins = x_val_bins[x_val_bins >= x_min]
else:
x_val_bins = np.linspace(x_min, x_max, nbins)
#Add bin size / 2 to align better
x_val_diff = 0
if measure == 'duration' or measure == 'time_s':
x_val_diff = (x_val_bins[1] - x_val_bins[0]) / 2
p_rest = skewnorm.pdf(x, est_rest[0], est_rest[1], est_rest[2])
p_running = skewnorm.pdf(x, est_running[0], est_running[1], est_running[2])
if with_values:
vals_running, vals_x_running = np.histogram(all_event_values[measure]['running'][all_event_values[measure]['running'] < x_max], bins=x_val_bins, density=True)
vals_rest, vals_x_rest = np.histogram(all_event_values[measure]['rest'][all_event_values[measure]['rest'] < x_max], bins=x_val_bins, density=True)
#Shift by 1 so they look more aligned(due to large bin sizes)
#e.g. value at 0 is values between 0-(0+bin_size)
#We are essentially moving the point of values lets say [0, 1] to 0 and then with diff to 0.5
vals_running = vals_running[1:]
vals_rest = vals_rest[1:]
measure_name = aqua_utils.get_measure_names([measure])
fig = plotly_utils.plot_scatter_mult(x_l=[x, x, vals_x_rest + x_val_diff, vals_x_running + x_val_diff], y_l_l=[p_rest, p_running, vals_rest, vals_running], mode_l=['lines','lines', 'markers','markers'], name_l=['rest','running', 'rest-true', 'running-true'], confidence=False, with_stats=False, title='Skewed distribution: {}'.format(measure_name), x_title=measure_name, y_title='p(X)')
else:
measure_name = aqua_utils.get_measure_names([measure])
fig = plotly_utils.plot_scatter_mult(x_l=[x, x], y_l_l=[p_rest, p_running], name_l=['rest','running'], confidence=False, with_stats=False, title='Skewed distribution: {}'.format(measure_name), x_title=measure_name, y_title='p(X)')
id_ = 'measure={}-withvalues={}'.format(measure_name, with_values)
saving_utils.save_plotly_fig(fig, os.path.join(path, id_))
def generate_axon_plots(axon_plotter, AA_l, output_folder):
print('---TRANSITION PROPORTION DELAYS PLOT ALL---')
output_experiment_path_all_comparison = os.path.join(output_folder, 'axon_all')
delay_ranges_pairs = [[3*AA_l[0].fr, 6*AA_l[0].fr], [2*AA_l[0].fr, 4*AA_l[0].fr]]
delay_ranges_pairs = [[int(v[0]), int(v[1])] for v in delay_ranges_pairs]
before_range_3, after_range_6 = delay_ranges_pairs[0]
before_range_2, after_range_4 = delay_ranges_pairs[1]
print('Alt Proportion plots...')
rest_to_run_setting = {
'before_bh':'rest_semi_exact',
'inds_bh':'running_exact_start',
'after_bh':'running_semi_exact',
'before_range' : before_range_3,
'after_range' : after_range_6,
'fit': True,
'delay_step_size': 10,
'confidence': True}
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=rest_to_run_setting,
plot_type='behaviour',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'rest_to_run_speed'),
bh_measure='speed')
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=rest_to_run_setting,
plot_type='proportions_stick_filter',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'rest_to_run_vibrisastimtiming'),
bh_measure=None)
for aa_setting in ['axon']:
rest_to_run_setting['aa_setting'] = aa_setting
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=rest_to_run_setting,
plot_type='proportions',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'rest_to_run_{aa_setting}_proportions'),
bh_measure=None)
run_to_rest_setting = {
'before_bh':'running_semi_exact',
'inds_bh':'rest_start',
'after_bh':'rest_semi_exact',
'before_range' : before_range_3,
'after_range' : after_range_6,
'fit': True,
'delay_step_size': 10,
'confidence': True
}
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=run_to_rest_setting,
plot_type='behaviour',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'run_to_rest_speed'),
bh_measure='speed')
for aa_setting in ['axon']:
run_to_rest_setting['aa_setting'] = aa_setting
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=run_to_rest_setting,
plot_type='proportions',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'run_to_rest_{aa_setting}_proportions'),
bh_measure=None)
run_stick_run_setting = {
'before_bh':'running_semi_exact',
'inds_bh':'stick_exact_start',
'after_bh':'running_semi_exact',
'before_range' : before_range_2,
'after_range' : after_range_4,
'fit': True,
'delay_step_size': 10,
'confidence': True
}
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=run_stick_run_setting,
plot_type='behaviour',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'run_stick_run_speed'),
bh_measure='speed')
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=run_stick_run_setting,
plot_type='proportions_stick_filter',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'run_stick_run_vibrisastimtiming'),
bh_measure=None)
for aa_setting in ['axon', 'astro']:
run_stick_run_setting['aa_setting'] = aa_setting
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=run_stick_run_setting,
plot_type='proportions',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'run_stick_run_{aa_setting}_proportions'),
bh_measure=None)
def __save_astro_transition_plot(astro_plotter, astroA_l, setting, plot_type, path, measure=None, bh_measure=None):
measure_y_titles = {'dffMax2default' : 'Amplitude',
'time_s' : 'Duration (s)',
'area' : 'Size'}
bh_measure_y_titles = {'speed' : 'Speed (cm/s)'}
before_bh=setting['before_bh']
inds_bh = setting['inds_bh']
after_bh = setting['after_bh']
before_range = setting['before_range']
after_range = setting['after_range']
fit = setting['fit']
delay_step_size = setting['delay_step_size']
confidence = setting['confidence']
p = {'fit' : fit, 'delay_step_size' : delay_step_size, 'confidence' : confidence}
if plot_type == 'proportions':
fig_d, bin_stats = astro_plotter.get_transition_proportion_delays_plot_all_alt(astroA_l,
before_bh=before_bh, inds_bh=inds_bh, after_bh=after_bh,
before_range=before_range, after_range=after_range,
**p)
elif plot_type == 'measure':
assert measure is not None
fig_d, bin_stats = astro_plotter.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh=before_bh, inds_bh=inds_bh, after_bh=after_bh,
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[measure],
**p)
elif plot_type == 'behaviour':
assert bh_measure is not None
fig_d, bin_stats = astro_plotter.get_transition_bh_values_plot_all_alt(astroA_l,
before_bh=before_bh, inds_bh=inds_bh, after_bh=after_bh,
bh_measure=bh_measure,
before_range=before_range, after_range=after_range,
y_title=bh_measure_y_titles[bh_measure],
**p)
else:
        raise ValueError('Plot type must be "proportions", "measure" or "behaviour"')
fig_v = fig_d['event_avg_no_mult']
fig_id = os.path.join(path, 'range_{}_{}-step_{}'.format(before_range, after_range, delay_step_size))
saving_utils.save_plotly_fig(fig_v, fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
all_data_dict = {bin_stats['x'][i]:bin_stats['y_all'][:, i] for i in range(len(bin_stats['x']))}
saving_utils.dict_to_csv(all_data_dict, name=fig_id + 'range_{}_{}-step_{}-data.csv'.format(before_range, after_range, delay_step_size), base_folder=path)
#DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
def __save_axon_transition_plot(axon_plotter, AA_l, setting, plot_type, path, bh_measure=None):
bh_measure_y_titles = {'speed' : 'Speed (cm/s)'}
before_bh = setting['before_bh']
inds_bh = setting['inds_bh']
after_bh = setting['after_bh']
before_range = setting['before_range']
after_range = setting['after_range']
fit = setting['fit']
delay_step_size = setting['delay_step_size']
confidence = setting['confidence']
if 'aa_setting' in setting:
aa_setting = setting['aa_setting']
p = {'fit' : fit, 'delay_step_size' : delay_step_size, 'confidence' : confidence, 'setting' : aa_setting}
else:
p = {'fit' : fit, 'delay_step_size' : delay_step_size, 'confidence' : confidence}
if plot_type == 'proportions':
fig_d, bin_stats = axon_plotter.get_axon_transition_proportion_delays_plot_all(AA_l, before_bh=before_bh, inds_bh=inds_bh, after_bh=after_bh,
before_range=before_range, after_range=after_range,
**p)
elif plot_type == 'behaviour':
assert bh_measure is not None
fig_d, bin_stats = axon_plotter.get_transition_bh_values_plot_all_alt(AA_l,
before_bh=before_bh, inds_bh=inds_bh, after_bh=after_bh,
bh_measure=bh_measure,
before_range=before_range, after_range=after_range,
y_title=bh_measure_y_titles[bh_measure],
**p)
elif plot_type == 'proportions_stick_filter':
fig_d, bin_stats = axon_plotter.get_axon_transition_proportion_delays_STICK_FILTER_plot_all(AA_l, before_bh=before_bh, inds_bh=inds_bh, after_bh=after_bh,
before_range=before_range, after_range=after_range,
**p)
else:
raise ValueError('Invalid plot type')
fig_v = fig_d['event_avg_no_mult']
fig_id = os.path.join(path, 'range_{}_{}-step_{}'.format(before_range, after_range, delay_step_size))
saving_utils.save_plotly_fig(fig_v, fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
| 2.046875
| 2
|
src/chat/train.py
|
lingeen/lingeen-Ying
| 0
|
12776784
|
# -*- coding: utf-8 -*-
# @Time : 2020/12/24 3:48 PM
# @Author : Kevin
from src.chat import dataset
from src.chat.seq2seq import ChatSeq2Seq
from torch.optim import Adam
import torch.nn.functional as F
import torch
from src import config
from tqdm import tqdm
from src.lib import device,chat_answer_word_sequence_model
from torch import nn
def train(epoch):
    # 1. Prepare the data
dataloader = dataset.get_dataloader()
    # 2. Build the model
seq2seq = ChatSeq2Seq().to(device)
optimizer=Adam(seq2seq.parameters(),lr=0.001)
former_acc=0.
seq2seq.train()
bar=tqdm(enumerate(dataloader),ascii=True,desc="training...")
    # 3. Train
for index, (asks, answers, ask_lens, answer_lens) in bar:
asks=asks.to(device)
answers=answers.to(device)
optimizer.zero_grad()
decoder_outputs_softmax, decoder_hidden_state = seq2seq(asks, answers,ask_lens,answer_lens)
# [batch size,seq len]>[batch size*seq len]
answers=answers.view(-1)
# decoder_outputs[batch size,seq len,dict size]>[batch size*seq len,dict size]
        # -1 keeps the remaining (dictionary-size) dimension
decoder_outputs_softmax=decoder_outputs_softmax.view(decoder_outputs_softmax.size(0)*decoder_outputs_softmax.size(1),-1)
        # keep the hidden-size dimension
        # loss: outputs are 2-D, labels are 1-D
loss=F.cross_entropy(decoder_outputs_softmax,answers,ignore_index=chat_answer_word_sequence_model.PAD)
loss.backward()
        # Gradient clipping: clip overly large gradients to avoid exploding gradients
        # the trailing underscore means the parameters are modified in place
nn.utils.clip_grad_norm_(seq2seq.parameters(),config.caht_train_grad_clip_max)
optimizer.step()
        # Compute the accuracy
acc = decoder_outputs_softmax.max(dim=-1)[-1]
acc = acc.eq(answers).float().mean()
bar.set_description(f"eporch:{epoch}\tindex:{index}\tloss:{loss.item()}\t正确率:{acc}")
if acc>former_acc:
torch.save(seq2seq.state_dict(), config.chat_seq_2_seq_model_path)
torch.save(optimizer.state_dict(), config.chat_seq_optimizer_model_path)
if epoch%10==0:
torch.save(seq2seq.state_dict(), config.chat_seq_2_seq_model_path+str(epoch))
torch.save(optimizer.state_dict(), config.chat_seq_optimizer_model_path+str(epoch))
former_acc=acc
return former_acc
if __name__ == '__main__':
epoch=30
acc=[]
    for i in range(epoch):
        former_acc = train(i)
        acc.append(former_acc)
print(acc)
# eval()
| 2.40625
| 2
|
gammapy/spectrum/tests/test_cosmic_ray.py
|
grburgess/gammapy
| 3
|
12776785
|
<reponame>grburgess/gammapy<gh_stars>1-10
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
from astropy.units import Quantity
from astropy.tests.helper import assert_quantity_allclose
from ...spectrum import cosmic_ray_flux
def test_cosmic_ray_flux():
energy = Quantity(1, 'TeV')
actual = cosmic_ray_flux(energy, 'proton')
desired = Quantity(0.096, '1 / (m2 s sr TeV)')
assert_quantity_allclose(actual, desired)
# TODO: test array quantities and other particles
| 2
| 2
|
2. Programming Fundamentals With Python (May 2021)/18. Mid Exam Preparation/More Exercises/02_muOnline.py
|
kzborisov/SoftUni
| 1
|
12776786
|
<filename>2. Programming Fundamentals With Python (May 2021)/18. Mid Exam Preparation/More Exercises/02_muOnline.py
"""
You have initial health 100 and initial bitcoins 0.
You will be given a string, representing the dungeons rooms.
Each room is separated with '|' (vertical bar): "room1|room2|room3…"
Each room contains a command and a number, separated by space.
The command can be:
• "potion"
◦ You are healed with the number in the second part.
But your health cannot exceed your initial health (100).
◦ First print: "You healed for {amount} hp.".
◦ After that, print your current health: "Current health: {health} hp.".
• "chest"
◦ You've found some bitcoins, the number in the second part.
◦ Print: "You found {amount} bitcoins."
• In any other case you are facing a monster, you are going to fight.
The second part of the room, contains the attack of the monster.
You should remove the monster's attack from your health.
◦ If you are not dead (health <= 0) you've slain the monster, and you should print ("You slayed {monster}.")
◦ If you've died, print "You died! Killed by {monster}." and your quest is over.
Print the best room you've managed to reach: "Best room: {room}".
If you managed to go through all the rooms in the dungeon, print on the next three lines:
"You've made it!", "Bitcoins: {bitcoins}", "Health: {health}".
Input / Constraints
You receive a string, representing the dungeons rooms,
separated with '|' (vertical bar): "room1|room2|room3…".
Test Input
rat 10|bat 20|potion 10|rat 10|chest 100|boss 70|chest 1000
cat 10|potion 30|orc 10|chest 10|snake 25|chest 110
Output
Print the corresponding messages, described above.
"""
import sys
def potion_cmd(hp, healing_pts):
if hp + healing_pts > 100:
healing_pts = 100 - hp
hp += healing_pts
print(f"You healed for {healing_pts} hp.")
return hp
def chest_cmd(initial_bitcoins, bitcoins_found):
print(f"You found {bitcoins_found} bitcoins.")
return initial_bitcoins + bitcoins_found
def fight(hp, room_num, monster, attack_power):
hp -= attack_power
if hp > 0:
print(f"You slayed {monster}.")
return hp
print(f"You died! Killed by {monster}.")
print(f"Best room: {room_num+1}")
sys.exit()
dungeon_rooms = input().split("|")
health = 100
bitcoins = 0
for idx, room in enumerate(dungeon_rooms):
command = room.split()[0]
number = int(room.split()[1])
if command == "potion":
health = potion_cmd(health, number)
print(f"Current health: {health} hp.")
elif command == "chest":
bitcoins = chest_cmd(bitcoins, number)
else:
health = fight(health, idx, command, number)
if health > 0:
final_msg = f"You've made it!\n"\
f"Bitcoins: {bitcoins}\n"\
f"Health: {health}"
print(final_msg)
| 4.3125
| 4
|
source/yorm/mixins.py
|
faraazkhan/aws-control-tower-customizations
| 20
|
12776787
|
<gh_stars>10-100
import warnings
from yorm import utilities
class ModelMixin:
"""Adds ORM methods to a mapped class."""
@classmethod
def create(cls, *args, **kwargs):
return utilities.create(cls, *args, **kwargs)
@classmethod
def new(cls, *args, **kwargs):
msg = "ModelMixin.new() has been renamed to ModelMixin.create()"
warnings.warn(msg, DeprecationWarning)
return utilities.create(cls, *args, **kwargs)
@classmethod
def find(cls, *args, **kwargs):
return utilities.find(cls, *args, **kwargs)
@classmethod
def match(cls, *args, **kwargs):
return utilities.match(cls, *args, **kwargs)
def load(self):
return utilities.load(self)
def save(self):
return utilities.save(self)
def delete(self):
return utilities.delete(self)
| 2.328125
| 2
|
AD18-flask-admin-image-demo/app/extensions.py
|
AngelLiang/Flask-Demos
| 3
|
12776788
|
from flask_sqlalchemy import SQLAlchemy
from flask_admin import Admin
db = SQLAlchemy()
admin = Admin(template_mode='bootstrap3')
def register_extensions(app):
db.init_app(app)
admin.init_app(app)
from app.admin_ import register_modelviews
register_modelviews(admin, app)
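
# Illustrative usage sketch (not part of the original module): an application
# factory would typically create the Flask app and then wire the extensions
# above. The create_app() name below is an assumption for illustration only.
def create_app():
    from flask import Flask
    app = Flask(__name__)
    register_extensions(app)
    return app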
| 1.71875
| 2
|
utils/graphUtils/graphML.py
|
VishnuDuttSharma/gnn_pathplanning
| 86
|
12776789
|
# 2018/11/01~2018/07/12
# <NAME>, <EMAIL>.
"""
graphML.py Module for basic GSP and graph machine learning functions.
Functionals
LSIGF: Applies a linear shift-invariant graph filter
spectralGF: Applies a linear shift-invariant graph filter in spectral form
NVGF: Applies a node-variant graph filter
EVGF: Applies an edge-variant graph filter
learnAttentionGSO: Computes the GSO following the attention mechanism
graphAttention: Applies a graph attention layer
Filtering Layers (nn.Module)
GraphFilter: Creates a graph convolutional layer using LSI graph filters
SpectralGF: Creates a graph convolutional layer using LSI graph filters in
spectral form
NodeVariantGF: Creates a graph filtering layer using node-variant graph filters
EdgeVariantGF: Creates a graph filtering layer using edge-variant graph filters
GraphAttentional: Creates a layer using graph attention mechanisms
Activation Functions - Nonlinearities (nn.Module)
MaxLocalActivation: Creates a localized max activation function layer
MedianLocalActivation: Creates a localized median activation function layer
NoActivation: Creates a layer for no activation function
Summarizing Functions - Pooling (nn.Module)
NoPool: No summarizing function.
MaxPoolLocal: Max-summarizing function
"""
import math
import numpy as np
import torch
import torch.nn as nn
import utils.graphUtils.graphTools as graphTools
zeroTolerance = 1e-9 # Values below this number are considered zero.
infiniteNumber = 1e12 # infinity equals this number
# WARNING: Only scalar bias.
def LSIGF(h, S, x, b=None):
"""
LSIGF(filter_taps, GSO, input, bias=None) Computes the output of a linear
shift-invariant graph filter on input and then adds bias.
Denote as G the number of input features, F the number of output features,
E the number of edge features, K the number of filter taps, N the number of
nodes, S_{e} in R^{N x N} the GSO for edge feature e, x in R^{G x N} the
input data where x_{g} in R^{N} is the graph signal representing feature
g, and b in R^{F x N} the bias vector, with b_{f} in R^{N} representing the
bias for feature f.
Then, the LSI-GF is computed as
y_{f} = \sum_{e=1}^{E}
\sum_{k=0}^{K-1}
\sum_{g=1}^{G}
[h_{f,g,e}]_{k} S_{e}^{k} x_{g}
+ b_{f}
for f = 1, ..., F.
Inputs:
filter_taps (torch.tensor): array of filter taps; shape:
output_features x edge_features x filter_taps x input_features
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
input (torch.tensor): input signal; shape:
batch_size x input_features x number_nodes
bias (torch.tensor): shape: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; shape:
batch_size x output_features x number_nodes
"""
# The basic idea of what follows is to start reshaping the input and the
# GSO so the filter coefficients go just as a very plain and simple
# linear operation, so that all the derivatives and stuff on them can be
# easily computed.
# h is output_features x edge_weights x filter_taps x input_features
    # S is edge_weights x number_nodes x number_nodes
# x is batch_size x input_features x number_nodes
# b is output_features x number_nodes
# Output:
# y is batch_size x output_features x number_nodes
# Get the parameter numbers:
F = h.shape[0]
E = h.shape[1]
K = h.shape[2]
G = h.shape[3]
assert S.shape[0] == E
N = S.shape[1]
assert S.shape[2] == N
B = x.shape[0]
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation we've been using:
# h in F x E x K x G
# S in E x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
# Now, we have x in B x G x N and S in E x N x N, and we want to come up
# with matrix multiplication that yields z = x * S with shape
# B x E x K x G x N.
# For this, we first add the corresponding dimensions
x = x.reshape([B, 1, G, N])
S = S.reshape([1, E, N, N])
z = x.reshape([B, 1, 1, G, N]).repeat(1, E, 1, 1, 1) # This is for k = 0
# We need to repeat along the E dimension, because for k=0, S_{e} = I for
# all e, and therefore, the same signal values have to be used along all
# edge feature dimensions.
for k in range(1,K):
x = torch.matmul(x, S) # B x E x G x N
xS = x.reshape([B, E, 1, G, N]) # B x E x 1 x G x N
z = torch.cat((z, xS), dim = 2) # B x E x k x G x N
# This output z is of size B x E x K x G x N
# Now we have the x*S_{e}^{k} product, and we need to multiply with the
# filter taps.
# We multiply z on the left, and h on the right, the output is to be
# B x N x F (the multiplication is not along the N dimension), so we reshape
# z to be B x N x E x K x G and reshape it to B x N x EKG (remember we
# always reshape the last dimensions), and then make h be E x K x G x F and
# reshape it to EKG x F, and then multiply
y = torch.matmul(z.permute(0, 4, 1, 2, 3).reshape([B, N, E*K*G]),
h.reshape([F, E*K*G]).permute(1, 0)).permute(0, 2, 1)
    # And permute again to bring it from B x N x F to B x F x N.
# Finally, add the bias
if b is not None:
y = y + b
return y
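
# Illustrative shape check for LSIGF (not part of the original module). The
# dimension values below are arbitrary; the point is the expected tensor layout:
# filter taps F x E x K x G, GSO E x N x N, input B x G x N, output B x F x N.
# Call it manually to sanity-check the shapes.
def _example_LSIGF():
    F, E, K, G, N, B = 4, 1, 3, 2, 10, 5
    h = torch.randn(F, E, K, G)   # filter taps
    S = torch.randn(E, N, N)      # graph shift operator
    x = torch.randn(B, G, N)      # batch of graph signals
    y = LSIGF(h, S, x)
    assert y.shape == (B, F, N)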
def spectralGF(h, V, VH, x, b=None):
"""
spectralGF(filter_coeff, eigenbasis, eigenbasis_hermitian, input, bias=None)
Computes the output of a linear shift-invariant graph filter in spectral
form applying filter_coefficients on the graph fourier transform of the
input .
Denote as G the number of input features, F the number of output features,
E the number of edge features, N the number of nodes, S_{e} in R^{N x N}
the GSO for edge feature e with S_{e} = V_{e} Lambda_{e} V_{e}^{H} as
eigendecomposition, x in R^{G x N} the input data where x_{g} in R^{N} is
the graph signal representing feature g, and b in R^{F x N} the bias vector,
with b_{f} in R^{N} representing the bias for feature f.
Then, the LSI-GF in spectral form is computed as
y_{f} = \sum_{e=1}^{E}
\sum_{g=1}^{G}
V_{e} diag(h_{f,g,e}) V_{e}^{H} x_{g}
+ b_{f}
for f = 1, ..., F, with h_{f,g,e} in R^{N} the filter coefficients for
output feature f, input feature g and edge feature e.
Inputs:
filter_coeff (torch.tensor): array of filter coefficients; shape:
output_features x edge_features x input_features x number_nodes
eigenbasis (torch.tensor): eigenbasis of the graph shift operator;shape:
edge_features x number_nodes x number_nodes
eigenbasis_hermitian (torch.tensor): hermitian of the eigenbasis; shape:
edge_features x number_nodes x number_nodes
input (torch.tensor): input signal; shape:
batch_size x input_features x number_nodes
bias (torch.tensor): shape: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; shape:
batch_size x output_features x number_nodes
Obs.: While we consider most GSOs to be normal (so that the eigenbasis is
an orthonormal basis), this function would also work if V^{-1} is used as
input instead of V^{H}
"""
# The decision to input both V and V_H is to avoid any time spent in
# permuting/inverting the matrix. Because this depends on the graph and not
# the data, it can be done faster if we just input it.
# h is output_features x edge_weights x input_features x number_nodes
    # V is edge_weights x number_nodes x number_nodes
    # VH is edge_weights x number_nodes x number_nodes
# x is batch_size x input_features x number_nodes
# b is output_features x number_nodes
# Output:
# y is batch_size x output_features x number_nodes
# Get the parameter numbers:
F = h.shape[0]
E = h.shape[1]
G = h.shape[2]
N = h.shape[3]
assert V.shape[0] == VH.shape[0] == E
assert V.shape[1] == VH.shape[1] == V.shape[2] == VH.shape[2] == N
B = x.shape[0]
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation I've been using:
# h in F x E x G x N
# V in E x N x N
# VH in E x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
# We will do proper matrix multiplication in this case (algebraic
# multiplication using column vectors instead of CS notation using row
# vectors).
# We will multiply separate VH with x, and V with diag(h).
# First, to multiply VH with x, we need to add one dimension for each one
# of them (dimension E for x and dimension B for VH)
x = x.reshape([B, 1, G, N]).permute(0, 1, 3, 2) # B x 1 x N x G
VH = VH.reshape([1, E, N, N]) # 1 x E x N x N
# Now we multiply. Note that we also permute to make it B x E x G x N
# instead of B x E x N x G because we want to multiply for a specific e and
    # g, since we do not want to sum (yet) over G.
VHx = torch.matmul(VH, x).permute(0, 1, 3, 2) # B x E x G x N
# Now we want to multiply V * diag(h), both are matrices. So first, we
# add the necessary dimensions (B and G for V and an extra N for h to make
# it a matrix from a vector)
V = V.reshape([1, E, 1, N, N]) # 1 x E x 1 x N x N
# We note that multiplying by a diagonal matrix to the right is equivalent
# to an elementwise multiplication in which each column is multiplied by
# a different number, so we will do this to make it faster (elementwise
# multiplication is faster than matrix multiplication). We need to repeat
# the vector we have columnwise.
diagh = h.reshape([F, E, G, 1, N]).repeat(1, 1, 1, N, 1) # F x E x G x N x N
# And now we do elementwise multiplication
Vdiagh = V * diagh # F x E x G x N x N
# Finally, we make the multiplication of these two matrices. First, we add
# the corresponding dimensions
Vdiagh = Vdiagh.reshape([1, F, E, G, N, N]) # 1 x F x E x G x N x N
VHx = VHx.reshape([B, 1, E, G, N, 1]) # B x 1 x E x G x N x 1
# And do matrix multiplication to get all the corresponding B,F,E,G vectors
VdiaghVHx = torch.matmul(Vdiagh, VHx) # B x F x E x G x N x 1
# Get rid of the last dimension which we do not need anymore
y = VdiaghVHx.squeeze(5) # B x F x E x G x N
# Sum over G
y = torch.sum(y, dim = 3) # B x F x E x N
# Sum over E
y = torch.sum(y, dim = 2) # B x F x N
# Finally, add the bias
if b is not None:
y = y + b
return y
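
# Illustrative shape check for spectralGF (not part of the original module),
# using the identity as a toy eigenbasis so that V equals V^H trivially.
def _example_spectralGF():
    F, E, G, N, B = 4, 1, 2, 10, 5
    V = torch.eye(N).reshape(1, N, N).repeat(E, 1, 1)   # toy eigenbasis
    VH = V.clone()                                      # its Hermitian (here: itself)
    h = torch.randn(F, E, G, N)                         # spectral filter coefficients
    x = torch.randn(B, G, N)
    y = spectralGF(h, V, VH, x)
    assert y.shape == (B, F, N)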
def NVGF(h, S, x, b=None):
"""
NVGF(filter_taps, GSO, input, bias=None) Computes the output of a
node-variant graph filter on input and then adds bias.
Denote as G the number of input features, F the number of output features,
E the number of edge features, K the number of shifts, N the number of
nodes, S_{e} in R^{N x N} the GSO for edge feature e, x in R^{G x N} the
input data where x_{g} in R^{N} is the graph signal representing feature
g, and b in R^{F x N} the bias vector, with b_{f} in R^{N} representing the
bias for feature f. Denote as h_{k}^{efg} in R^{N} the vector with the N
filter taps corresponding to the efg filter for shift k.
Then, the NV-GF is computed as
y_{f} = \sum_{e=1}^{E}
\sum_{k=0}^{K-1}
\sum_{g=1}^{G}
diag(h_{k}^{efg}) S_{e}^{k} x_{g}
+ b_{f}
for f = 1, ..., F.
Inputs:
filter_taps (torch.tensor): array of filter taps; shape:
output_features x edge_features x filter_taps x input_features
x number_nodes
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
input (torch.tensor): input signal; shape:
batch_size x input_features x number_nodes
bias (torch.tensor): shape: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; shape:
batch_size x output_features x number_nodes
"""
# h is output_features x edge_weights x filter_taps x input_features
# x number_nodes
    # S is edge_weights x number_nodes x number_nodes
# x is batch_size x input_features x number_nodes
# b is output_features x number_nodes
# Output:
# y is batch_size x output_features x number_nodes
# Get the parameter numbers:
F = h.shape[0]
E = h.shape[1]
K = h.shape[2]
G = h.shape[3]
N = h.shape[4]
assert S.shape[0] == E
assert S.shape[1] == S.shape[2] == N
B = x.shape[0]
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation I've been using:
# h in F x E x K x G x N
# S in E x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
# Now, we have x in B x G x N and S in E x N x N, and we want to come up
# with matrix multiplication that yields z = x * S with shape
# B x E x K x G x N.
# For this, we first add the corresponding dimensions
xr = x.reshape([B, 1, G, N])
Sr = S.reshape([1, E, N, N])
z = xr.reshape([B, 1, 1, G, N]).repeat(1, E, 1, 1, 1) # This is for k = 0
# We need to repeat along the E dimension, because for k=0, S_{e} = I for
# all e, and therefore, the same signal values have to be used along all
# edge feature dimensions.
for k in range(1,K):
xr = torch.matmul(xr, Sr) # B x E x G x N
xS = xr.reshape([B, E, 1, G, N]) # B x E x 1 x G x N
z = torch.cat((z, xS), dim = 2) # B x E x k x G x N
# This output z is of size B x E x K x G x N
# Now we have the x*S_{e}^{k} product, and we need to multiply with the
# filter taps.
# This multiplication with filter taps is ``element wise'' on N since for
# each node we have a different element
# First, add the extra dimension (F for z, and B for h)
z = z.reshape([B, 1, E, K, G, N])
h = h.reshape([1, F, E, K, G, N])
# Now let's do elementwise multiplication
zh = z * h
# And sum over the dimensions E, K, G to get B x F x N
y = torch.sum(zh, dim = 4) # Sum over G
y = torch.sum(y, dim = 3) # Sum over K
y = torch.sum(y, dim = 2) # Sum over E
# Finally, add the bias
if b is not None:
y = y + b
return y
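
# Illustrative shape check for NVGF (not part of the original module): the
# filter taps now carry an extra node dimension, F x E x K x G x N.
def _example_NVGF():
    F, E, K, G, N, B = 4, 1, 3, 2, 10, 5
    h = torch.randn(F, E, K, G, N)
    S = torch.randn(E, N, N)
    x = torch.randn(B, G, N)
    y = NVGF(h, S, x)
    assert y.shape == (B, F, N)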
def EVGF(S, x, b=None):
"""
EVGF(filter_matrices, input, bias=None) Computes the output of an
edge-variant graph filter on input and then adds bias.
Denote as G the number of input features, F the number of output features,
E the number of edge features, K the number of shifts, N the number of
nodes, Phi_{efg} in R^{N x N} the filter matrix for edge feature e, output
feature f and input feature g (recall that Phi_{efg}^{k} has the same
sparsity pattern as the graph, except for Phi_{efg}^{0} which is expected to
be a diagonal matrix), x in R^{G x N} the input data where x_{g} in R^{N} is
the graph signal representing feature g, and b in R^{F x N} the bias vector,
with b_{f} in R^{N} representing the bias for feature f.
Then, the EV-GF is computed as
y_{f} = \sum_{e=1}^{E}
\sum_{k=0}^{K-1}
\sum_{g=1}^{G}
Phi_{efg}^{k:0} x_{g}
+ b_{f}
for f = 1, ..., F, with Phi_{efg}^{k:0} = Phi_{efg}^{k} Phi_{efg}^{k-1} ...
Phi_{efg}^{0}.
Inputs:
filter_matrices (torch.tensor): array of filter matrices; shape:
output_features x edge_features x filter_taps x input_features
x number_nodes x number_nodes
input (torch.tensor): input signal; shape:
batch_size x input_features x number_nodes
bias (torch.tensor): shape: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; shape:
batch_size x output_features x number_nodes
"""
# We just need to multiply by the filter_matrix recursively, and then
# add for all E, G, and K features.
# S is output_features x edge_features x filter_taps x input_features
# x number_nodes x number_nodes
# x is batch_size x input_features x number_nodes
# b is output_features x number_nodes
# Output:
# y is batch_size x output_features x number_nodes
# Get the parameter numbers:
F = S.shape[0]
E = S.shape[1]
K = S.shape[2]
G = S.shape[3]
N = S.shape[4]
assert S.shape[5] == N
B = x.shape[0]
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation I've been using:
# S in F x E x K x G x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
# We will be doing matrix multiplications in the algebraic way, trying to
# multiply the N x N matrix corresponding to the appropriate e, f, k and g
# dimensions, with the respective x vector (N x 1 column vector)
# For this, we first add the corresponding dimensions (for x we add
# dimensions F, E and the last dimension for column vector)
x = x.reshape([B, 1, 1, G, N, 1])
# When we do index_select along dimension K we get rid of this dimension
Sk = torch.index_select(S, 2, torch.tensor(0).to(S.device)).squeeze(2)
# Sk in F x E x G x N x N
# And we add one further dimension for the batch size B
Sk = Sk.unsqueeze(0) # 1 x F x E x G x N x N
# Matrix multiplication
x = torch.matmul(Sk, x) # B x F x E x G x N x 1
# And we collect this for every k in a vector z, along the K dimension
z = x.reshape([B, F, E, 1, G, N, 1]).squeeze(6) # B x F x E x 1 x G x N
# Now we do all the matrix multiplication
for k in range(1,K):
# Extract the following k
Sk = torch.index_select(S, 2, torch.tensor(k).to(S.device)).squeeze(2)
# Sk in F x E x G x N x N
# Give space for the batch dimension B
Sk = Sk.unsqueeze(0) # 1 x F x E x G x N x N
# Multiply with the previously cumulative Sk * x
x = torch.matmul(Sk, x) # B x F x E x G x N x 1
# Get rid of the last dimension (of a column vector)
Sx = x.reshape([B, F, E, 1, G, N, 1]).squeeze(6) # B x F x E x 1 x G x N
# Add to the z
z = torch.cat((z, Sx), dim = 2) # B x F x E x k x G x N
# Sum over G
z = torch.sum(z, dim = 4)
# Sum over K
z = torch.sum(z, dim = 3)
# Sum over E
y = torch.sum(z, dim = 2)
if b is not None:
y = y + b
return y
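
# Illustrative shape check for EVGF (not part of the original module): the
# filter is a full set of matrices Phi, of shape F x E x K x G x N x N.
def _example_EVGF():
    F, E, K, G, N, B = 2, 1, 3, 2, 8, 4
    Phi = torch.randn(F, E, K, G, N, N)
    x = torch.randn(B, G, N)
    y = EVGF(Phi, x)
    assert y.shape == (B, F, N)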
def learnAttentionGSO(x, a, W, S, negative_slope=0.2):
"""
learnAttentionGSO(x, a, W, S) Computes the GSO following the attention
mechanism
Denote as G the number of input features, F the number of output features,
E the number of edge features, P the number of attention heads, Ji the
number of nodes in N_{i}, the neighborhood of node i, and N the number of
nodes. Let x_{i} in R^{G} be the feature associated to node i,
    W^{ep} in R^{F x G} the weight matrix associated to edge feature e and
attention head p, and a^{ep} in R^{2F} the mixing vector. Let
alpha_{ij}^{ep} in R the attention coefficient between nodes i and j, for
edge feature e and attention head p, and let s_{ij}^{e} be the value of
feature e of the edge connecting nodes i and j.
Each elements of the new GSO is alpha_{ij}^{ep} computed as
alpha_{ij}^{ep} = softmax_{j} ( LeakyReLU_{beta} (
(a^{ep})^T [cat(W^{ep}x_{i}, W^{ep} x_{j})]
))
for all j in N_{i}, and where beta is the negative slope of the leaky ReLU.
Inputs:
x (torch.tensor): input;
shape: batch_size x input_features x number_nodes
a (torch.tensor): mixing parameter; shape:
number_heads x edge_features x 2 * output_features
W (torch.tensor): linear parameter; shape:
number_heads x edge_features x output_features x input_features
S (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
negative_slope (float): negative slope of the leaky relu (default: 0.2)
Outputs:
aij: output GSO; shape:
batch_size x number_heads x edge_features x number_nodes x number_nodes
"""
B = x.shape[0] # batch_size
G = x.shape[1] # input_features
N = x.shape[2] # number_nodes
P = a.shape[0] # number_heads
E = a.shape[1] # edge_features
assert W.shape[0] == P
assert W.shape[1] == E
F = W.shape[2] # output_features
assert a.shape[2] == int(2*F)
G = W.shape[3] # input_features
assert S.shape[0] == E
assert S.shape[1] == S.shape[2] == N
# Add ones of the GSO at all edge feature levels so that the node always
# has access to itself. The fact that it's one is not so relevant, because
# the attention coefficient that is learned would compensate for this
S = S + torch.eye(N).reshape([1,N,N]).repeat(E,1,1).to(S.device)
# WARNING:
# (If the GSOs already have self-connections, then these will be added a 1,
# which might be a problem if the self-connection is a -1. I will have to
# think of this more carefully)
# W is of size P x E x F x G
# a is of size P x E x 2F
# Compute Wx for all nodes
x = x.reshape([B, 1, 1, G, N])
W = W.reshape([1, P, E, F, G])
Wx = torch.matmul(W, x) # B x P x E x F x N
# Now, do a_1^T Wx, and a_2^T Wx to get a tensor of shape B x P x E x 1 x N
# because we're applying the inner product on the F dimension.
    a1 = torch.index_select(a, 2, torch.arange(F).to(x.device)) # P x E x F
    a2 = torch.index_select(a, 2, torch.arange(F, 2*F).to(x.device)) # P x E x F
a1Wx = torch.matmul(a1.reshape([1, P, E, 1, F]), Wx) # B x P x E x 1 x N
a2Wx = torch.matmul(a2.reshape([1, P, E, 1, F]), Wx) # B x P x E x 1 x N
# And then, use this to sum them accordingly and create a B x P x E x N x N
# matrix.
aWx = a1Wx + a2Wx.permute(0, 1, 2, 4, 3) # B x P x E x N x N
# Obs.: In this case, we have one column vector and one row vector; then,
# what the sum does, is to repeat the column and the row, respectively,
# until both matrices are of the same size, and then adds up, which is
# precisely what we want to do
# Apply the LeakyRelu
eij = nn.functional.leaky_relu(aWx, negative_slope = negative_slope)
# B x P x E x N x N
# Each element of this N x N matrix is, precisely, e_ij (eq. 1) in the GAT
# paper.
# And apply the softmax. For the softmax, we do not want to consider
# the places where there are no neighbors, so we need to set them to -infty
# so that they will be assigned a zero.
# First, get places where we have edges
maskEdges = torch.sum(torch.abs(S.data), dim = 0)
# Make it a binary matrix
maskEdges = (maskEdges > zeroTolerance).type(x.dtype)
# Make it -infinity where there are zeros
infinityMask = (1-maskEdges) * infiniteNumber
# Compute the softmax plus the -infinity (we first force the places where
# there is no edge to be zero, and then we add -infinity to them)
aij = nn.functional.softmax(eij*maskEdges - infinityMask, dim = 4)
# B x P x E x N x N
# This will give me a matrix of all the alpha_ij coefficients.
# Re-inforce the zeros just to be sure
return aij * maskEdges # B x P x E x N x N
def graphAttention(x, a, W, S, negative_slope=0.2):
"""
graphAttention(x, a, W, S) Computes attention following GAT layer taking
into account multiple edge features.
Denote as G the number of input features, F the number of output features,
E the number of edge features, P the number of attention heads, Ji the
number of nodes in N_{i}, the neighborhood of node i, and N the number of
nodes. Let x_{i} in R^{G} be the feature associated to node i,
    W^{ep} in R^{F x G} the weight matrix associated to edge feature e and
    attention head p, and a^{ep} in R^{2F} the mixing vector. Let
    alpha_{ij}^{ep} in R be the attention coefficient between nodes i and j, for
edge feature e and attention head p, and let s_{ij}^{e} be the value of
feature e of the edge connecting nodes i and j.
Let y_{i}^{p} in R^{F} be the output of the graph attention at node i for
attention head p. It is computed as
y_{i}^{p} = \sum_{e=1}^{E}
\sum_{j in N_{i}}
s_{ij}^{e} alpha_{ij}^{ep} W^{ep} x_{j}
with
alpha_{ij}^{ep} = softmax_{j} ( LeakyReLU_{beta} (
(a^{ep})^T [cat(W^{ep}x_{i}, W^{ep} x_{j})]
))
for all j in N_{i}, and where beta is the negative slope of the leaky ReLU.
Inputs:
x (torch.tensor): input;
shape: batch_size x input_features x number_nodes
a (torch.tensor): mixing parameter; shape:
number_heads x edge_features x 2 * output_features
W (torch.tensor): linear parameter; shape:
number_heads x edge_features x output_features x input_features
S (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
negative_slope (float): negative slope of the leaky relu (default: 0.2)
Outputs:
y: output; shape:
batch_size x number_heads x output_features x number_nodes
"""
B = x.shape[0] # batch_size
G = x.shape[1] # input_features
N = x.shape[2] # number_nodes
P = a.shape[0] # number_heads
E = a.shape[1] # edge_features
assert W.shape[0] == P
assert W.shape[1] == E
F = W.shape[2] # output_features
assert a.shape[2] == int(2*F)
G = W.shape[3] # input_features
assert S.shape[0] == E
assert S.shape[1] == S.shape[2] == N
# First, we need to learn the attention GSO
aij = learnAttentionGSO(x, a, W, S, negative_slope = negative_slope)
# B x P x E x N x N
# Then, we need to compute the high-level features
# W is of size P x E x F x G
# a is of size P x E x 2F
# Compute Wx for all nodes
x = x.reshape([B, 1, 1, G, N])
W = W.reshape([1, P, E, F, G])
Wx = torch.matmul(W, x) # B x P x E x F x N
# Finally, we just need to apply this matrix to the Wx which we have already
# computed, and done.
y = torch.matmul(Wx, S.reshape([1, 1, E, N, N]) * aij) # B x P x E x F x N
# And sum over all edges
return torch.sum(y, dim = 2) # B x P x F x N
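# A minimal usage sketch (not part of the original library) of learnAttentionGSO
# and graphAttention. It relies only on the module-level imports of this file;
# the helper name _exampleGraphAttention and all dimensions are illustrative
# assumptions.
def _exampleGraphAttention():
    B, G, F, P, E, N = 5, 3, 4, 2, 1, 10      # batch, in/out features, heads, edge feats, nodes
    x = torch.rand(B, G, N)                   # input graph signals
    a = torch.rand(P, E, 2 * F)               # mixing vectors a^{ep}
    W = torch.rand(P, E, F, G)                # linear weights W^{ep}
    S = (torch.rand(E, N, N) > 0.5).float()   # a random binary GSO
    aij = learnAttentionGSO(x, a, W, S)       # B x P x E x N x N attention coefficients
    y = graphAttention(x, a, W, S)            # B x P x F x N attended features
    assert aij.shape == (B, P, E, N, N)
    assert y.shape == (B, P, F, N)
    return y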
class MaxLocalActivation(nn.Module):
# <NAME>, <EMAIL>, 2019/03/15
"""
MaxLocalActivation creates a localized activation function layer on graphs
Initialization:
MaxLocalActivation(K)
Inputs:
K (int): number of hops (>0)
Output:
torch.nn.Module for a localized max activation function layer
Add graph shift operator:
MaxLocalActivation.addGSO(GSO) Before applying the filter, we need to
define the GSO that we are going to use. This allows to change the GSO
while using the same filtering coefficients (as long as the number of
edge features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = MaxLocalActivation(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x dim_features x number_nodes
Outputs:
y (torch.tensor): activated data; shape:
batch_size x dim_features x number_nodes
"""
def __init__(self, K):
super().__init__()
assert K > 0 # range has to be greater than 0
self.K = K
self.S = None # no GSO assigned yet
self.N = None # no GSO assigned yet (N learned from the GSO)
        self.neighborhood = None # no neighborhoods calculated yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(1,self.K+1))
# Initialize parameters
self.reset_parameters()
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
# The neighborhood matrix has to be a tensor of shape
# nOutputNodes x maxNeighborhoodSize
neighborhood = []
maxNeighborhoodSizes = []
for k in range(1,self.K+1):
# For each hop (0,1,...) in the range K
thisNeighborhood = graphTools.computeNeighborhood(
np.array(self.S), k, outputType='matrix')
# compute the k-hop neighborhood
neighborhood.append(torch.tensor(thisNeighborhood))
maxNeighborhoodSizes.append(thisNeighborhood.shape[1])
self.maxNeighborhoodSizes = maxNeighborhoodSizes
self.neighborhood = neighborhood
def forward(self, x):
# x should be of shape batchSize x dimNodeSignals x N
batchSize = x.shape[0]
dimNodeSignals = x.shape[1]
assert x.shape[2] == self.N
# And given that the self.neighborhood is already a torch.tensor matrix
# we can just go ahead and get it.
# So, x is of shape B x F x N. But we need it to be of shape
# B x F x N x maxNeighbor. Why? Well, because we need to compute the
# maximum between the value of each node and those of its neighbors.
# And we do this by applying a torch.max across the rows (dim = 3) so
# that we end up again with a B x F x N, but having computed the max.
        # How to fill those extra dimensions? Well, we have the neighborhood
        # matrix, and we are going to use torch.gather to bring the right
# values (torch.index_select, while more straightforward, only works
# along a single dimension).
# Each row of the matrix neighborhood determines all the neighbors of
# each node: the first row contains all the neighbors of the first node,
# etc.
# The values of the signal at those nodes are contained in the dim = 2
# of x. So, just for now, let's ignore the batch and feature dimensions
# and imagine we have a column vector: N x 1. We have to pick some of
# the elements of this vector and line them up alongside each row
# so that then we can compute the maximum along these rows.
# When we torch.gather along dimension 0, we are selecting which row to
# pick according to each column. Thus, if we have that the first row
# of the neighborhood matrix is [1, 2, 0] means that we want to pick
# the value at row 1 of x, at row 2 of x in the next column, and at row
# 0 of the last column. For these values to be the appropriate ones, we
        # have to repeat x as columns to build our B x F x N x maxNeighbor
# matrix.
xK = x # xK is a tensor aggregating the 0-hop (x), 1-hop, ..., K-hop
# max's it is initialized with the 0-hop neigh. (x itself)
xK = xK.unsqueeze(3) # extra dimension added for concatenation ahead
x = x.unsqueeze(3) # B x F x N x 1
# And the neighbors that we need to gather are the same across the batch
# and feature dimensions, so we need to repeat the matrix along those
# dimensions
for k in range(1,self.K+1):
x_aux = x.repeat([1, 1, 1, self.maxNeighborhoodSizes[k-1]])
gatherNeighbor = self.neighborhood[k-1].reshape(
[1,
1,
self.N,
self.maxNeighborhoodSizes[k-1]]
)
gatherNeighbor = gatherNeighbor.repeat([batchSize,
dimNodeSignals,
1,
1])
# And finally we're in position of getting all the neighbors in line
xNeighbors = torch.gather(x_aux, 2, gatherNeighbor.long())
# B x F x nOutput x maxNeighbor
# Note that this gather function already reduces the dimension to
# nOutputNodes.
# And proceed to compute the maximum along this dimension
v, _ = torch.max(xNeighbors, dim = 3)
v = v.unsqueeze(3) # to concatenate with xK
xK = torch.cat((xK,v),3)
out = torch.matmul(xK,self.weight.unsqueeze(2))
# multiply each k-hop max by corresponding weight
out = out.reshape([batchSize,dimNodeSignals,self.N])
return out
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.K)
self.weight.data.uniform_(-stdv, stdv)
def extra_repr(self):
if self.neighborhood is not None:
reprString = "neighborhood stored"
else:
reprString = "NO neighborhood stored"
return reprString
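# A hedged usage sketch (not part of the original library) of MaxLocalActivation.
# The helper name _exampleMaxLocalActivation and the dimensions are illustrative
# assumptions; the neighborhoods are computed internally by addGSO() through
# graphTools.
def _exampleMaxLocalActivation():
    B, Fin, N = 5, 3, 10
    S = (torch.rand(1, N, N) > 0.5).float()   # E x N x N binary GSO
    sigma = MaxLocalActivation(K=2)           # localized max over 1- and 2-hop neighborhoods
    sigma.addGSO(S)                           # computes and stores the neighborhoods
    x = torch.rand(B, Fin, N)
    y = sigma(x)                              # B x Fin x N activated signal
    assert y.shape == (B, Fin, N)
    return y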
class MedianLocalActivation(nn.Module):
# <NAME>, <EMAIL>, 2019/03/27
"""
MedianLocalActivation creates a localized activation function layer on
graphs
Initialization:
MedianLocalActivation(K)
Inputs:
K (int): number of hops (>0)
Output:
torch.nn.Module for a localized median activation function layer
Add graph shift operator:
MedianLocalActivation.addGSO(GSO) Before applying the filter, we need
to define the GSO that we are going to use. This allows to change the
GSO while using the same filtering coefficients (as long as the number
of edge features is the same; but the number of nodes can change).
This function also calculates the 0-,1-,...,K-hop neighborhoods of every
node
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = MedianLocalActivation(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x dim_features x number_nodes
Outputs:
y (torch.tensor): activated data; shape:
batch_size x dim_features x number_nodes
"""
def __init__(self, K):
super().__init__()
assert K > 0 # range has to be greater than 0
self.K = K
self.S = None # no GSO assigned yet
self.N = None # no GSO assigned yet (N learned from the GSO)
        self.neighborhood = None # no neighborhoods calculated yet
        self.masks = None # no mask yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(1,self.K+1))
# Initialize parameters
self.reset_parameters()
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
# The neighborhood matrix has to be a tensor of shape
# nOutputNodes x maxNeighborhoodSize
neighborhood = []
for k in range(1,self.K+1):
# For each hop (0,1,...) in the range K
thisNeighborhood = graphTools.computeNeighborhood(
np.array(self.S), k, outputType='list')
# compute the k-hop neighborhood
neighborhood.append(thisNeighborhood)
self.neighborhood = neighborhood
def forward(self, x):
# x should be of shape batchSize x dimNodeSignals x N
batchSize = x.shape[0]
dimNodeSignals = x.shape[1]
assert x.shape[2] == self.N
xK = x # xK is a tensor aggregating the 0-hop (x), 1-hop, ..., K-hop
        # medians
# It is initialized with the 0-hop neigh. (x itself)
xK = xK.unsqueeze(3) # extra dimension added for concatenation ahead
#x = x.unsqueeze(3) # B x F x N x 1
for k in range(1,self.K+1):
kHopNeighborhood = self.neighborhood[k-1]
# Fetching k-hop neighborhoods of all nodes
kHopMedian = torch.empty(0)
# Initializing the vector that will contain the k-hop median for
# every node
for n in range(self.N):
# Iterating over the nodes
# This step is necessary because here the neighborhoods are
# lists of lists. It is impossible to pad them and feed them as
# a matrix, as this would impact the outcome of the median
# operation
nodeNeighborhood = torch.tensor(np.array(kHopNeighborhood[n]))
neighborhoodLen = len(nodeNeighborhood)
gatherNode = nodeNeighborhood.reshape([1, 1, neighborhoodLen])
gatherNode = gatherNode.repeat([batchSize, dimNodeSignals, 1])
# Reshaping the node neighborhood for the gather operation
xNodeNeighbors = torch.gather(x, 2, gatherNode.long())
# Gathering signal values in the node neighborhood
nodeMedian,_ = torch.median(xNodeNeighbors, dim = 2,
keepdim=True)
# Computing the median in the neighborhood
kHopMedian = torch.cat([kHopMedian,nodeMedian],2)
# Concatenating k-hop medians node by node
kHopMedian = kHopMedian.unsqueeze(3) # Extra dimension for
# concatenation with the previous (k-1)-hop median tensor
xK = torch.cat([xK,kHopMedian],3)
out = torch.matmul(xK,self.weight.unsqueeze(2))
# Multiplying each k-hop median by corresponding trainable weight
out = out.reshape([batchSize,dimNodeSignals,self.N])
return out
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.K)
self.weight.data.uniform_(-stdv, stdv)
def extra_repr(self):
if self.neighborhood is not None:
reprString = "neighborhood stored"
else:
reprString = "NO neighborhood stored"
return reprString
class NoActivation(nn.Module):
"""
NoActivation creates an activation layer that does nothing
It is for completeness, to be able to switch between linear models
and nonlinear models, without altering the entire architecture model
Initialization:
NoActivation()
Output:
torch.nn.Module for an empty activation layer
Forward call:
y = NoActivation(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x dim_features x number_nodes
Outputs:
y (torch.tensor): activated data; shape:
batch_size x dim_features x number_nodes
"""
def __init__(self):
super().__init__()
def forward(self, x):
return x
def extra_repr(self):
reprString = "No Activation Function"
return reprString
class NoPool(nn.Module):
"""
This is a pooling layer that actually does no pooling. It has the same input
structure and methods of MaxPoolLocal() for consistency. Basically, this
allows us to change from pooling to no pooling without necessarily creating
a new architecture.
    In any case, this layer is not really meant for practical use, since the
    same effect is obtained by simply omitting pooling when defining the
    architecture.
"""
def __init__(self, nInputNodes, nOutputNodes, nHops):
super().__init__()
self.nInputNodes = nInputNodes
self.nOutputNodes = nOutputNodes
self.nHops = nHops
self.neighborhood = None
def addGSO(self, GSO):
# This is necessary to keep the form of the other pooling strategies
# within the SelectionGNN framework. But we do not care about any GSO.
pass
def forward(self, x):
# x should be of shape batchSize x dimNodeSignals x nInputNodes
assert x.shape[2] == self.nInputNodes
# Check that there are at least the same number of nodes that
# we will keep (otherwise, it would be unpooling, instead of
# pooling)
assert x.shape[2] >= self.nOutputNodes
# And do not do anything
return x
def extra_repr(self):
reprString = "in_dim=%d, out_dim=%d, number_hops = %d, " % (
self.nInputNodes, self.nOutputNodes, self.nHops)
reprString += "no neighborhood needed"
return reprString
class MaxPoolLocal(nn.Module):
"""
MaxPoolLocal Creates a pooling layer on graphs by selecting nodes
Initialization:
MaxPoolLocal(in_dim, out_dim, number_hops)
Inputs:
in_dim (int): number of nodes at the input
out_dim (int): number of nodes at the output
number_hops (int): number of hops to pool information
Output:
torch.nn.Module for a local max-pooling layer.
Observation: The selected nodes for the output are always the top ones.
    Add graph shift operator:
        MaxPoolLocal.addGSO(GSO) Before being used, we need to define the GSO
that will determine the neighborhood that we are going to pool.
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
        y = MaxPoolLocal(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x dim_features x in_dim
Outputs:
y (torch.tensor): pooled data; shape:
batch_size x dim_features x out_dim
"""
def __init__(self, nInputNodes, nOutputNodes, nHops):
super().__init__()
self.nInputNodes = nInputNodes
self.nOutputNodes = nOutputNodes
self.nHops = nHops
self.neighborhood = None
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N (And I don't care about E, because the
# computeNeighborhood function takes care of it)
self.N = S.shape[1]
assert S.shape[2] == self.N
# Get the device (before operating with S and losing it, it's cheaper
# to store the device now, than to duplicate S -i.e. keep a numpy and a
# tensor copy of S)
device = S.device
# Move the GSO to cpu and to np.array so it can be handled by the
# computeNeighborhood function
S = np.array(S.cpu())
# Compute neighborhood
neighborhood = graphTools.computeNeighborhood(S, self.nHops,
self.nOutputNodes,
self.nInputNodes,'matrix')
# And move the neighborhood back to a tensor
neighborhood = torch.tensor(neighborhood).to(device)
# The neighborhood matrix has to be a tensor of shape
# nOutputNodes x maxNeighborhoodSize
assert neighborhood.shape[0] == self.nOutputNodes
assert neighborhood.max() <= self.nInputNodes
# Store all the relevant information
self.maxNeighborhoodSize = neighborhood.shape[1]
self.neighborhood = neighborhood
def forward(self, x):
# x should be of shape batchSize x dimNodeSignals x nInputNodes
batchSize = x.shape[0]
dimNodeSignals = x.shape[1]
assert x.shape[2] == self.nInputNodes
# Check that there are at least the same number of nodes that
# we will keep (otherwise, it would be unpooling, instead of
# pooling)
assert x.shape[2] >= self.nOutputNodes
# And given that the self.neighborhood is already a torch.tensor matrix
# we can just go ahead and get it.
# So, x is of shape B x F x N. But we need it to be of shape
# B x F x N x maxNeighbor. Why? Well, because we need to compute the
# maximum between the value of each node and those of its neighbors.
# And we do this by applying a torch.max across the rows (dim = 3) so
# that we end up again with a B x F x N, but having computed the max.
        # How to fill those extra dimensions? Well, we have the neighborhood
        # matrix, and we are going to use torch.gather to bring the right
# values (torch.index_select, while more straightforward, only works
# along a single dimension).
# Each row of the matrix neighborhood determines all the neighbors of
# each node: the first row contains all the neighbors of the first node,
# etc.
# The values of the signal at those nodes are contained in the dim = 2
# of x. So, just for now, let's ignore the batch and feature dimensions
# and imagine we have a column vector: N x 1. We have to pick some of
# the elements of this vector and line them up alongside each row
# so that then we can compute the maximum along these rows.
# When we torch.gather along dimension 0, we are selecting which row to
# pick according to each column. Thus, if we have that the first row
# of the neighborhood matrix is [1, 2, 0] means that we want to pick
# the value at row 1 of x, at row 2 of x in the next column, and at row
# 0 of the last column. For these values to be the appropriate ones, we
        # have to repeat x as columns to build our B x F x N x maxNeighbor
# matrix.
x = x.unsqueeze(3) # B x F x N x 1
x = x.repeat([1, 1, 1, self.maxNeighborhoodSize]) # BxFxNxmaxNeighbor
# And the neighbors that we need to gather are the same across the batch
# and feature dimensions, so we need to repeat the matrix along those
# dimensions
gatherNeighbor = self.neighborhood.reshape([1, 1,
self.nOutputNodes,
self.maxNeighborhoodSize])
gatherNeighbor = gatherNeighbor.repeat([batchSize, dimNodeSignals, 1,1])
# And finally we're in position of getting all the neighbors in line
xNeighbors = torch.gather(x, 2, gatherNeighbor)
# B x F x nOutput x maxNeighbor
# Note that this gather function already reduces the dimension to
# nOutputNodes.
# And proceed to compute the maximum along this dimension
v, _ = torch.max(xNeighbors, dim = 3)
return v
def extra_repr(self):
reprString = "in_dim=%d, out_dim=%d, number_hops = %d, " % (
self.nInputNodes, self.nOutputNodes, self.nHops)
if self.neighborhood is not None:
reprString += "neighborhood stored"
else:
reprString += "NO neighborhood stored"
return reprString
class GraphFilter(nn.Module):
"""
GraphFilter Creates a (linear) layer that applies a graph filter
Initialization:
GraphFilter(in_features, out_features, filter_taps,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilter.addGSO(GSO) Before applying the filter, we need to define
the GSO that we are going to use. This allows to change the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilter(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, F, K, E = 1, bias = True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.K = K
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(F, E, K, G))
if bias:
self.bias = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
self.register_parameter('bias', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.G * self.K)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
assert S.shape[0] == self.E
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N-Nin)\
.type(x.dtype).to(x.device)
), dim = 2)
# Compute the filter output
u = LSIGF(self.weight, self.S, x, self.bias)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
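# A minimal usage sketch (not part of the original library) of GraphFilter.
# The helper name _exampleGraphFilter and the dimensions are illustrative
# assumptions; note that addGSO() has to be called before forward().
def _exampleGraphFilter():
    B, N = 20, 10
    S = torch.rand(1, N, N)          # E x N x N GSO (E = 1)
    gfl = GraphFilter(G=3, F=5, K=4) # 3 input features, 5 output features, 4 taps
    gfl.addGSO(S)                    # set the GSO before filtering
    x = torch.rand(B, 3, N)          # B x G x N input signal
    y = gfl(x)                       # B x F x N output signal
    assert y.shape == (B, 5, N)
    return y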
class GraphFilterRNN(nn.Module):
"""
GraphFilterRNN Creates a (linear) layer that applies a graph filter
with Hidden Markov Model
Initialization:
        GraphFilterRNN(in_features, hidden_features, out_features, filter_taps,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
hidden_features (int): number of hidden features (each feature is a
graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilter.addGSO(GSO) Before applying the filter, we need to define
the GSO that we are going to use. This allows to change the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
        y = GraphFilterRNN(x, h)
    Inputs:
        x (torch.tensor): input data; shape:
            batch_size x in_features x number_nodes
        h (torch.tensor): hidden state; shape:
            batch_size x hidden_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, H, F, K, E=1, bias=True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G # in_features
self.F = F # out_features
self.H = H # hidden_features
self.K = K # filter_taps
self.E = E # edge_features
self.S = None # No GSO assigned yet
# Create parameters:
self.weight_A = nn.parameter.Parameter(torch.Tensor(H, E, K, G))
self.weight_B = nn.parameter.Parameter(torch.Tensor(H, E, K, H))
self.weight_U = nn.parameter.Parameter(torch.Tensor(F, E, K, H))
if bias:
self.bias_A = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_B = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_U = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
            self.register_parameter('bias_A', None)
            self.register_parameter('bias_B', None)
            self.register_parameter('bias_U', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv_a = 1. / math.sqrt(self.G * self.K)
self.weight_A.data.uniform_(-stdv_a, stdv_a)
if self.bias_A is not None:
self.bias_A.data.uniform_(-stdv_a, stdv_a)
stdv_b = 1. / math.sqrt(self.H * self.K)
self.weight_B.data.uniform_(-stdv_b, stdv_b)
if self.bias_B is not None:
self.bias_B.data.uniform_(-stdv_b, stdv_b)
stdv_u = 1. / math.sqrt(self.H * self.K)
self.weight_U.data.uniform_(-stdv_u, stdv_u)
if self.bias_U is not None:
self.bias_U.data.uniform_(-stdv_u, stdv_u)
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
assert S.shape[0] == self.E
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
def forward(self, x, h):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N - Nin) \
.type(x.dtype).to(x.device)
), dim=2)
# Compute the filter output
u_a = LSIGF(self.weight_A, self.S, x, self.bias_A)
u_b = LSIGF(self.weight_B, self.S, h, self.bias_B)
h = u_a + u_b
u = LSIGF(self.weight_U, self.S, h, self.bias_U)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, hidden_features=%d" % (
self.G, self.F, self.H) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) + \
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
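# A hedged usage sketch (not part of the original library) of GraphFilterRNN:
# one recurrent step mapping an input signal and a hidden state to the output
# features. The helper name _exampleGraphFilterRNN, the all-zeros initial
# hidden state and the dimensions are illustrative assumptions.
def _exampleGraphFilterRNN():
    B, N = 20, 10
    G, H, F, K = 3, 6, 5, 4                 # in, hidden, out features and taps
    S = torch.rand(1, N, N)                 # E x N x N GSO
    gfRNN = GraphFilterRNN(G, H, F, K)
    gfRNN.addGSO(S)
    x = torch.rand(B, G, N)                 # input graph signal
    h = torch.zeros(B, H, N)                # initial hidden state
    y = gfRNN(x, h)                         # B x F x N output
    assert y.shape == (B, F, N)
    return y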
class SpectralGF(nn.Module):
"""
SpectralGF Creates a (linear) layer that applies a LSI graph filter in the
spectral domain using a cubic spline if needed.
Initialization:
        SpectralGF(in_features, out_features, filter_coeff,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
filter_coeff (int): number of filter spectral coefficients
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer) implemented in the spectral domain.
Observation: Filter taps have shape
out_features x edge_features x in_features x filter_coeff
Add graph shift operator:
SpectralGF.addGSO(GSO) Before applying the filter, we need to define
the GSO that we are going to use. This allows to change the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = SpectralGF(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, F, M, E = 1, bias = True):
# GSOs will be added later.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.M = M
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(F, E, G, M))
if bias:
self.bias = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
self.register_parameter('bias', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.G * self.M)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def addGSO(self, S):
# Every S has to have 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
assert S.shape[0] == self.E
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S # Save S
# Now we need to compute the eigendecomposition and save it
# To compute the eigendecomposition, we use numpy.
# So, first, get S in numpy format.
Snp = np.array(S.data.cpu())
# We will compute the eigendecomposition for each edge feature, so we
# create the E x N x N space for V, VH and Lambda (we need lambda for
# the spline kernel)
V = np.zeros([self.E, self.N, self.N])
VH = np.zeros([self.E, self.N, self.N])
Lambda = np.zeros([self.E, self.N])
# Here we save the resulting spline kernel matrix
splineKernel = np.zeros([self.E, self.N, self.M])
for e in range(self.E):
# Compute the eigendecomposition
Lambda[e,:], V[e,:,:] = np.linalg.eig(Snp[e,:,:])
# Compute the hermitian
VH[e,:,:] = V[e,:,:].conj().T
# Compute the splineKernel basis matrix
splineKernel[e,:,:] = graphTools.splineBasis(self.M, Lambda[e,:])
# Transform everything to tensors of appropriate type on appropriate
# device, and store them.
self.V = torch.tensor(V).type(S.dtype).to(S.device) # E x N x N
self.VH = torch.tensor(VH).type(S.dtype).to(S.device) # E x N x N
self.splineKernel = torch.tensor(splineKernel)\
.type(S.dtype).to(S.device)
# E x N x M
# Once we have computed the splineKernel, we do not need to save the
# eigenvalues.
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# Check if we have enough spectral filter coefficients as needed, or if
# we need to fill out the rest using the spline kernel.
if self.M == self.N:
self.h = self.weight # F x E x G x N (because N = M)
else:
# Adjust dimensions for proper algebraic matrix multiplication
splineKernel = self.splineKernel.reshape([1,self.E,self.N,self.M])
# We will multiply a 1 x E x N x M matrix with a F x E x M x G
# matrix to get the proper F x E x N x G coefficients
self.h = torch.matmul(splineKernel, self.weight.permute(0,1,3,2))
# And now we rearrange it to the same shape that the function takes
self.h = self.h.permute(0,1,3,2) # F x E x G x N
# And now we add the zero padding (if this comes from a pooling
# operation)
if Nin < self.N:
zeroPad = torch.zeros(B, F, self.N-Nin).type(x.dtype).to(x.device)
x = torch.cat((x, zeroPad), dim = 2)
# Compute the filter output
u = spectralGF(self.h, self.V, self.VH, x, self.bias)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
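# A hedged usage sketch (not part of the original library) of SpectralGF.
# A symmetric GSO is used so the eigendecomposition stays real, and M = N is
# chosen so one spectral coefficient per eigenvalue is learned. The helper
# name _exampleSpectralGF and the dimensions are illustrative assumptions.
def _exampleSpectralGF():
    B, N = 20, 10
    A = torch.rand(N, N)
    S = (0.5 * (A + A.t())).reshape(1, N, N)   # symmetric E x N x N GSO
    sgf = SpectralGF(G=3, F=5, M=N)            # one spectral coefficient per eigenvalue
    sgf.addGSO(S)                              # computes and stores the eigendecomposition
    x = torch.rand(B, 3, N)
    y = sgf(x)                                 # B x F x N output
    assert y.shape == (B, 5, N)
    return y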
class NodeVariantGF(nn.Module):
"""
NodeVariantGF Creates a filtering layer that applies a node-variant graph
filter
Initialization:
        NodeVariantGF(in_features, out_features, shift_taps, node_taps,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
shift_taps (int): number of filter taps for shifts
node_taps (int): number of filter taps for nodes
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer using node-variant graph
filters.
Observation: Filter taps have shape
out_features x edge_features x shift_taps x in_features x node_taps
Add graph shift operator:
NodeVariantGF.addGSO(GSO) Before applying the filter, we need to define
the GSO that we are going to use. This allows to change the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = NodeVariantGF(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, F, K, M, E = 1, bias = True):
# G: Number of input features
# F: Number of output features
# K: Number of filter shift taps
# M: Number of filter node taps
# GSOs will be added later.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.K = K
self.M = M
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(F, E, K, G, M))
if bias:
self.bias = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
self.register_parameter('bias', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.G * self.K * self.M)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
assert S.shape[0] == self.E
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
npS = np.array(S.data.cpu()) # Save the GSO as a numpy array because we
# are going to compute the neighbors.
# And now we have to fill up the parameter vector, from M to N
if self.M < self.N:
# The first elements of M (ordered with whatever order we want)
# are the ones associated to independent node taps.
copyNodes = [m for m in range(self.M)]
# The rest of the nodes will copy one of these M node taps.
            # The way we do this is: if they are connected to one of the M
            # independent nodes, just copy it. If they are not connected,
            # look at the neighbors of the neighbors, and so on, until we reach
            # one of the independent nodes.
# Ties are broken by selecting the node with the smallest index
# (which, due to the ordering, is the most important node of all
# the available ones)
neighborList = graphTools.computeNeighborhood(npS, 1,
nb = self.M)
# This gets the list of 1-hop neighbors for all nodes.
# Find the nodes that have no neighbors
nodesWithNoNeighbors = [n for n in range(self.N) \
if len(neighborList[n]) == 0]
# If there are still nodes that didn't find a neighbor
K = 1 # K-hop neighbor we have looked so far
while len(nodesWithNoNeighbors) > 0:
# Looks for the next hop
K += 1
                # Get the neighbors one further hop away
thisNeighborList = graphTools.computeNeighborhood(npS,
K,
nb = self.M)
# Check if we now have neighbors for those that didn't have
# before
for n in nodesWithNoNeighbors:
# Get the neighbors of the node
thisNodeList = thisNeighborList[n]
# If there are neighbors
if len(thisNodeList) > 0:
# Add them to the list
neighborList[n] = thisNodeList
# Recheck if all nodes have non-empty neighbors
nodesWithNoNeighbors = [n for n in range(self.N) \
if len(neighborList[n]) == 0]
# Now we have obtained the list of independent nodes connected to
# all nodes, we keep the one with highest score. And since the
# matrix is already properly ordered, this means keeping the
# smallest index in the neighborList.
for m in range(self.M, self.N):
copyNodes.append(min(neighborList[m]))
# And, finally create the indices of nodes to copy
self.copyNodes = torch.tensor(copyNodes).to(S.device)
elif self.M == self.N:
# In this case, all parameters go into the vector h
self.copyNodes = torch.arange(self.M).to(S.device)
else:
            # This is the rare case in which self.M > self.N, for example, if
# we train in a larger network and deploy in a smaller one. Since
# the matrix is ordered by score, we just keep the first N
# weights
self.copyNodes = torch.arange(self.N).to(S.device)
# OBS.: self.weight is updated on each training step, so we cannot
# define the self.h vector (i.e. the vector with N elements) here,
        # because otherwise it wouldn't be updated every time. So we need, in
        # the forward pass, to use index_select on the actual weights, to
        # create the vector h that is later fed into the NVGF computation.
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# If we have less filter coefficients than the required ones, we need
# to use the copying scheme
if self.M == self.N:
self.h = self.weight
else:
self.h = torch.index_select(self.weight, 4, self.copyNodes)
# And now we add the zero padding
if Nin < self.N:
zeroPad = torch.zeros(B, F, self.N-Nin).type(x.dtype).to(x.device)
x = torch.cat((x, zeroPad), dim = 2)
# Compute the filter output
u = NVGF(self.h, self.S, x, self.bias)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "shift_taps=%d, node_taps=%d, " % (
self.K, self.M) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
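# A hedged usage sketch (not part of the original library) of NodeVariantGF.
# Here M = N, so every node gets its own tap and the tap-copying scheme
# described in addGSO() is not triggered (choosing M < N would exercise it).
# The helper name _exampleNodeVariantGF and the dimensions are illustrative
# assumptions.
def _exampleNodeVariantGF():
    B, N = 20, 10
    S = torch.rand(1, N, N)                    # E x N x N GSO
    nvgf = NodeVariantGF(G=3, F=5, K=4, M=N)   # 4 shift taps, N node taps
    nvgf.addGSO(S)
    x = torch.rand(B, 3, N)
    y = nvgf(x)                                # B x F x N output
    assert y.shape == (B, 5, N)
    return y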
class EdgeVariantGF(nn.Module):
"""
EdgeVariantGF Creates a (linear) layer that applies an edge-variant graph
    filter using the masking approach. If fewer nodes than the total number
    of nodes are selected, then the remaining nodes adopt an LSI filter
    (i.e. it becomes a hybrid edge-variant graph filter)
Initialization:
EdgeVariantGF(in_features, out_features, shift_taps,
selected_nodes, number_nodes,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
shift_taps (int): number of shifts to consider
selected_nodes (int): number of selected nodes to implement the EV
part of the filter
number_nodes (int): number of nodes
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer using hybrid
edge-variant graph filters.
Observation: Filter taps have shape
out_features x edge_features x shift_taps x in_features
x number_nodes x number_nodes
These weights are masked by the corresponding sparsity pattern of
the graph and the desired number of selected nodes, so only weights
        in the nonzero edges of these nodes will be trained; the rest of the
        parameters are never used. Therefore, the reported number of
        parameters will not reflect the actual number of parameters being
        trained.
Add graph shift operator:
EdgeVariantGF.addGSO(GSO) Before applying the filter, we need to define
the GSO that we are going to use. This allows to change the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = EdgeVariantGF(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, F, K, M, N, E=1, bias = True):
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.K = K
self.E = E
self.M = M # Number of selected nodes
self.N = N # Total number of nodes
self.S = None
# Create parameters for the Edge-Variant part:
self.weightEV = nn.parameter.Parameter(torch.Tensor(F, E, K, G, N, N))
# If we want a hybrid, create parameters
if self.M < self.N:
self.weightLSI = nn.parameter.Parameter(torch.Tensor(F, E, K, G))
else:
self.register_parameter('weightLSI', None)
if bias:
self.bias = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
self.register_parameter('bias', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.G * self.K * self.N)
self.weightEV.data.uniform_(-stdv, stdv)
if self.weightLSI is not None:
self.weightLSI.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
assert S.shape[0] == self.E
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S # Save the GSO
# Get the identity matrix across all edge features
multipleIdentity = torch.eye(self.N).reshape([1, self.N, self.N])\
.repeat(self.E, 1, 1).to(S.device)
# Compute the nonzero elements of S+I_{N}
sparsityPattern = ((torch.abs(S) + multipleIdentity) > zeroTolerance)
# Change from byte tensors to float tensors (or the same type of data as
# the GSO)
sparsityPattern = sparsityPattern.type(S.dtype)
# But now we need to kill everything that is between elements M and N
# (only if M < N)
if self.M < self.N:
# Create the ones in the row
hybridMaskOnesRows = torch.ones([self.M, self.N])
            # Create the ones in the columns
hybridMaskOnesCols = torch.ones([self.N - self.M, self.M])
# Create the zeros
hybridMaskZeros = torch.zeros([self.N - self.M, self.N - self.M])
# Concatenate the columns
hybridMask = torch.cat((hybridMaskOnesCols,hybridMaskZeros), dim=1)
# Concatenate the rows
hybridMask = torch.cat((hybridMaskOnesRows,hybridMask), dim=0)
else:
hybridMask = torch.ones([self.N, self.N])
# Now that we have the hybrid mask, we need to mask the sparsityPattern
# we got so far
hybridMask = hybridMask.reshape([1, self.N, self.N]).to(S.device)
# 1 x N x N
sparsityPattern = sparsityPattern * hybridMask
self.sparsityPattern = sparsityPattern.to(S.device)
# E x N x N
# This gives the sparsity pattern for each edge feature
# Now, let's create it of the right shape, so we do not have to go
# around wasting time with reshapes when called in the forward
# The weights have shape F x E x K x G x N x N
# The sparsity pattern has shape E x N x N. And we want to make it
# 1 x E x K x 1 x N x N. The K dimension is to guarantee that for k=0
# we have the identity
multipleIdentity = (multipleIdentity * hybridMask)\
.reshape([1, self.E, 1, 1, self.N, self.N])
# This gives a 1 x E x 1 x 1 x N x N identity matrix
sparsityPattern = sparsityPattern\
.reshape([1, self.E, 1, 1, self.N, self.N])
# This gives a 1 x E x 1 x 1 x N x N sparsity pattern matrix
sparsityPattern = sparsityPattern.repeat(1, 1, self.K-1, 1, 1, 1)
# This repeats the sparsity pattern K-1 times giving a matrix of shape
# 1 x E x (K-1) x 1 x N x N
sparsityPattern = torch.cat((multipleIdentity,sparsityPattern), dim = 2)
        # This should give us a 1 x E x K x 1 x N x N matrix with the identity
        # in the first element
self.sparsityPatternFull = sparsityPattern.type(S.dtype).to(S.device)
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# Mask the parameters
self.Phi = self.weightEV * self.sparsityPatternFull
# And now we add the zero padding
if Nin < self.N:
zeroPad = torch.zeros(B, F, self.N-Nin).type(x.dtype).to(x.device)
x = torch.cat((x, zeroPad), dim = 2)
# Compute the filter output for the EV part
uEV = EVGF(self.Phi, x, self.bias)
# Check if we need an LSI part
if self.M < self.N:
# Compute the filter output for the LSI part
uLSI = LSIGF(self.weightLSI, self.S, x, self.bias)
else:
# If we don't, just add zero
uLSI = torch.tensor(0., dtype = uEV.dtype).to(uEV.device)
# Add both
u = uEV + uLSI
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "shift_taps=%d, " % (
self.K) + \
"selected_nodes=%d, " % (self.M) +\
"number_nodes=%d, " % (self.N) +\
"edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
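# A hedged usage sketch (not part of the original library) of EdgeVariantGF.
# With M = N the filter is purely edge-variant (no hybrid LSI part). The
# helper name _exampleEdgeVariantGF and the dimensions are illustrative
# assumptions.
def _exampleEdgeVariantGF():
    B, N = 20, 10
    S = (torch.rand(1, N, N) > 0.5).float()       # binary GSO fixing the sparsity pattern
    evgf = EdgeVariantGF(G=3, F=5, K=4, M=N, N=N) # all N nodes are selected
    evgf.addGSO(S)
    x = torch.rand(B, 3, N)
    y = evgf(x)                                   # B x F x N output
    assert y.shape == (B, 5, N)
    return y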
class GraphAttentional(nn.Module):
"""
GraphAttentional Creates a graph attentional layer
Initialization:
GraphAttentional(in_features, out_features, attention_heads,
edge_features=1, nonlinearity=nn.functional.relu,
concatenate=True)
Inputs:
in_features (int): number of input features on top of each node
out_features (int): number of output features on top of each node
        attention_heads (int): number of attention heads
edge_features (int): number of features on top of each edge
(default: 1)
nonlinearity (nn.functional): nonlinearity applied after features
have been updated through attention (default:nn.functional.relu)
concatenate (bool): If True, the output of the attention_heads
attention heads are concatenated to form the output features, if
False, they are averaged (default: True)
Output:
torch.nn.Module for a graph attentional layer.
Add graph shift operator:
GraphAttentional.addGSO(GSO) Before applying the filter, we need to
define the GSO that we are going to use. This allows to change the GSO
while using the same filtering coefficients (as long as the number of
edge features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = GraphAttentional(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
        y (torch.tensor): output; shape:
            batch_size x (attention_heads * out_features) x number_nodes
            if concatenate = True, or batch_size x out_features x number_nodes
            if concatenate = False
"""
def __init__(self, G, F, K, E = 1,
nonlinearity = nn.functional.relu, concatenate = True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.K = K
self.E = E
self.S = None # No GSO assigned yet
self.nonlinearity = nonlinearity
self.concatenate = concatenate
# Create parameters:
self.mixer = nn.parameter.Parameter(torch.Tensor(K, E, 2*F))
self.weight = nn.parameter.Parameter(torch.Tensor(K, E, F, G))
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.G * self.K)
self.weight.data.uniform_(-stdv, stdv)
self.mixer.data.uniform_(-stdv, stdv)
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
assert S.shape[0] == self.E
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N-Nin)\
.type(x.dtype).to(x.device)
), dim = 2)
# And get the graph attention output
y = graphAttention(x, self.mixer, self.weight, self.S)
# This output is of size B x K x F x N. Now, we can either concatenate
# them (inner layers) or average them (outer layer)
if self.concatenate:
# When we concatenate we first apply the nonlinearity
y = self.nonlinearity(y)
# Concatenate: Make it B x KF x N such that first iterates over f
# and then over k: (k=0,f=0), (k=0,f=1), ..., (k=0,f=F-1), (k=1,f=0),
# (k=1,f=1), ..., etc.
y = y.permute(0, 3, 1, 2)\
.reshape([B, self.N, self.K*self.F])\
.permute(0, 2, 1)
else:
# When we don't, we first average
y = torch.mean(y, dim = 1) # B x F x N
# And then we apply the nonlinearity
y = self.nonlinearity(y)
if Nin < self.N:
y = torch.index_select(y, 2, torch.arange(Nin).to(y.device))
return y
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "attention_heads=%d, " % (
self.K) + "edge_features=%d, " % (self.E)
if self.S is not None:
reprString += "GSO stored: number_nodes=%d" % (self.N)
else:
reprString += "no GSO stored"
return reprString
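# A hedged usage sketch (not part of the original library) of the
# GraphAttentional layer. With the default concatenate=True the K attention
# heads are concatenated, so the output has K*F features per node. The helper
# name _exampleGraphAttentional and the dimensions are illustrative
# assumptions.
def _exampleGraphAttentional():
    B, N = 20, 10
    S = (torch.rand(1, N, N) > 0.5).float()   # E x N x N binary GSO
    gat = GraphAttentional(G=3, F=4, K=2)     # 3 in features, 4 out features, 2 heads
    gat.addGSO(S)
    x = torch.rand(B, 3, N)
    y = gat(x)                                # B x (K*F) x N with heads concatenated
    assert y.shape == (B, 2 * 4, N)
    return y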
def matrixPowersBatch(S, K):
"""
    matrixPowersBatch(S, K) Computes the matrix powers S_b^k for k = 0, ..., K-1
    for each matrix S_b in the batch, b = 1, ..., B.
    Inputs:
        S (tensor): matrices to compute the powers of. It can be either a
            single matrix per batch element: shape
            batch_size x number_nodes x number_nodes
            or contain edge features: shape
            batch_size x edge_features x number_nodes x number_nodes
        K (int): number of powers to compute (from S_b^0 up to S_b^{K-1})
    Outputs:
        SK: either a collection of K matrices B x K x N x N (if the input was a
            single matrix per batch element) or a collection B x E x K x N x N
            (if the input had E edge features).
"""
# S can be either a single GSO (N x N) or a collection of GSOs (E x N x N)
if len(S.shape) == 3:
B = S.shape[0]
N = S.shape[1]
assert S.shape[2] == N
E = 1
S = S.unsqueeze(1)
scalarWeights = True
elif len(S.shape) == 4:
B = S.shape[0]
E = S.shape[1]
N = S.shape[2]
assert S.shape[3] == N
scalarWeights = False
# Now, let's build the powers of S:
thisSK = torch.eye(N).repeat([B, E, 1, 1]).to(S.device)
SK = thisSK.unsqueeze(2)
for k in range(1, K):
thisSK = torch.matmul(thisSK, S)
SK = torch.cat((SK, thisSK.unsqueeze(2)), dim=2)
# Take out the first dimension if it was a single GSO
if scalarWeights:
SK = SK.squeeze(1)
return SK
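# A hedged usage sketch (not part of the original library) of matrixPowersBatch,
# checking that the k = 0 slice is the identity. The helper name
# _exampleMatrixPowersBatch and the dimensions are illustrative assumptions.
def _exampleMatrixPowersBatch():
    B, N, K = 5, 8, 3
    S = torch.rand(B, N, N)          # one GSO per batch element (no edge features)
    SK = matrixPowersBatch(S, K)     # B x K x N x N: identity, S, S^2
    assert SK.shape == (B, K, N, N)
    assert torch.allclose(SK[:, 0, :, :], torch.eye(N).expand(B, N, N))
    return SK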
def batchLSIGF(h, SK, x, bias=None):
"""
batchLSIGF(filter_taps, GSO_K, input, bias=None) Computes the output of a
linear shift-invariant graph filter on input and then adds bias.
In this case, we consider that there is a separate GSO to be used for each
of the signals in the batch. In other words, SK[b] is applied when filtering
x[b] as opposed to applying the same SK to all the graph signals in the
batch.
Inputs:
filter_taps: vector of filter taps; size:
output_features x edge_features x filter_taps x input_features
GSO_K: collection of matrices; size:
batch_size x edge_features x filter_taps x number_nodes x number_nodes
input: input signal; size:
batch_size x input_features x number_nodes
bias: size: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; size:
batch_size x output_features x number_nodes
"""
# Get the parameter numbers:
F = h.shape[0]
E = h.shape[1]
K = h.shape[2]
G = h.shape[3]
B = SK.shape[0]
assert SK.shape[1] == E
assert SK.shape[2] == K
N = SK.shape[3]
assert SK.shape[4] == N
assert x.shape[0] == B
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation I've been using:
# h in F x E x K x G
# SK in B x E x K x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
SK = SK.permute(1, 2, 0, 3, 4)
# Now, SK is of shape E x K x B x N x N so that we can multiply by x of
# size B x G x N to get
z = torch.matmul(x, SK)
# which is of size E x K x B x G x N.
# Now, we have already carried out the multiplication across the dimension
# of the nodes. Now we need to focus on the K, F, G.
# Let's start by putting B and N in the front
z = z.permute(2, 4, 0, 1, 3).reshape([B, N, E * K * G])
# so that we get z in B x N x EKG.
# Now adjust the filter taps so they are of the form EKG x F
h = h.reshape([F, G * E * K]).permute(1, 0)
# Multiply
y = torch.matmul(z, h)
# to get a result of size B x N x F. And permute
y = y.permute(0, 2, 1)
# to get it back in the right order: B x F x N.
# Now, in this case, each element x[b,:,:] has adequately been filtered by
# the GSO S[b,:,:,:]
if bias is not None:
y = y + bias
return y
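# A hedged usage sketch (not part of the original library) combining
# matrixPowersBatch and batchLSIGF: each signal in the batch is filtered with
# its own GSO, while the filter taps are shared. The helper name
# _exampleBatchLSIGF and the dimensions are illustrative assumptions.
def _exampleBatchLSIGF():
    B, E, K, N = 5, 1, 3, 8
    G, F = 3, 4
    S = torch.rand(B, E, N, N)            # a different GSO per batch element
    SK = matrixPowersBatch(S, K)          # B x E x K x N x N powers of each GSO
    h = torch.rand(F, E, K, G)            # shared filter taps
    x = torch.rand(B, G, N)               # batch of graph signals
    y = batchLSIGF(h, SK, x)              # B x F x N filtered output
    assert y.shape == (B, F, N)
    return y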
class GraphFilterBatchGSO(GraphFilter):
"""
GraphFilterBatchGSO Creates a (linear) layer that applies a graph filter
with a different GSO for each signal in the batch.
This function is typically useful when not only the graph signal is changed
during training, but also the GSO. That is, each data point in the batch is
of the form (x_b,S_b) for b = 1,...,B instead of just x_b. The filter
coefficients are still the same being applied to all graph filters, but both
the GSO and the graph signal are different for each datapoint in the batch.
Initialization:
GraphFilterBatchGSO(in_features, out_features, filter_taps,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilterBatchGSO.addGSO(GSO) Before applying the filter, we need to
define the GSOs that we are going to use for each element of the batch.
Each GSO has to have the same number of edges, but the number of nodes
can change.
Inputs:
GSO (tensor): collection of graph shift operators; size can be
batch_size x number_nodes x number_nodes, or
batch_size x edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilterBatchGSO(x)
Inputs:
x (tensor): input data; size: batch_size x in_features x number_nodes
Outputs:
y (tensor): output; size: batch_size x out_features x number_nodes
"""
def __init__(self, G, F, K, E=1, bias=True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__(G, F, K, E, bias)
def addGSO(self, S):
# So, we have to take into account the situation where S is either
# B x N x N or B x E x N x N. No matter what, we're always handling,
# internally the dimension E. So if the input is B x N x N, we have to
# unsqueeze it so it becomes B x 1 x N x N.
if len(S.shape) == 3 and S.shape[1] == S.shape[2]:
self.S = S.unsqueeze(1)
elif len(S.shape) == 4 and S.shape[1] == self.E \
and S.shape[2] == S.shape[3]:
self.S = S
else:
# TODO: print error
pass
self.N = self.S.shape[2]
self.B = self.S.shape[0]
self.SK = matrixPowersBatch(self.S, self.K)
def forward(self, x):
# TODO: If S (and consequently SK) hasn't been defined, print an error.
return batchLSIGF(self.weight, self.SK, x, self.bias)
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) + \
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored: number_nodes=%d, batch_size=%d" % (
self.N, self.B)
else:
reprString += "no GSO stored"
return reprString
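# Usage sketch for GraphFilterBatchGSO (illustrative only; the sizes are
# arbitrary assumptions, and GraphFilter / matrixPowersBatch are assumed to be
# defined earlier in this module, as referenced above).
def _demo_graph_filter_batch_gso():
    B, N, G, F, K = 4, 10, 5, 8, 3
    layer = GraphFilterBatchGSO(G, F, K)
    layer.addGSO(torch.rand(B, N, N))   # B x N x N is unsqueezed to B x 1 x N x N
    x = torch.rand(B, G, N)             # batch of graph signals
    y = layer(x)                        # B x F x N
    return y.shape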
def BatchLSIGF(h, S, x, b=None):
"""
    BatchLSIGF(filter_taps, GSO, input, bias=None) Computes the output of a linear
shift-invariant graph filter on input and then adds bias.
Denote as G the number of input features, F the number of output features,
E the number of edge features, K the number of filter taps, N the number of
nodes, S_{e} in R^{N x N} the GSO for edge feature e, x in R^{G x N} the
input data where x_{g} in R^{N} is the graph signal representing feature
g, and b in R^{F x N} the bias vector, with b_{f} in R^{N} representing the
bias for feature f.
Then, the LSI-GF is computed as
y_{f} = \sum_{e=1}^{E}
\sum_{k=0}^{K-1}
\sum_{g=1}^{G}
[h_{f,g,e}]_{k} S_{e}^{k} x_{g}
+ b_{f}
for f = 1, ..., F.
Inputs:
filter_taps (torch.tensor): array of filter taps; shape:
output_features x edge_features x filter_taps x input_features
        GSO (torch.tensor): graph shift operator; shape:
            batch_size x edge_features x number_nodes x number_nodes
input (torch.tensor): input signal; shape:
batch_size x input_features x number_nodes
bias (torch.tensor): shape: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; shape:
batch_size x output_features x number_nodes
"""
# The basic idea of what follows is to start reshaping the input and the
# GSO so the filter coefficients go just as a very plain and simple
# linear operation, so that all the derivatives and stuff on them can be
# easily computed.
    # h is output_features x edge_features x filter_taps x input_features
    # S is batch_size x edge_features x number_nodes x number_nodes
# x is batch_size x input_features x number_nodes
# b is output_features x number_nodes
# Output:
# y is batch_size x output_features x number_nodes
# Get the parameter numbers:
F = h.shape[0]
E = h.shape[1]
K = h.shape[2]
G = h.shape[3]
assert S.shape[1] == E
N = S.shape[2]
assert S.shape[3] == N
B = x.shape[0]
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation we've been using:
# h in F x E x K x G
# S in B x E x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
# Now, we have x in B x G x N and S in B x E x N x N, and we want to come up
# with matrix multiplication that yields z = x * S with shape
# B x E x K x G x N.
# For this, we first add the corresponding dimensions
x = x.reshape([B, 1, G, N])
# print(S)
S = S.reshape([B, E, N, N])
z = x.reshape([B, 1, 1, G, N]).repeat(1, E, 1, 1, 1) # This is for k = 0
# We need to repeat along the E dimension, because for k=0, S_{e} = I for
# all e, and therefore, the same signal values have to be used along all
# edge feature dimensions.
for k in range(1,K):
x = torch.matmul(x, S.float()) # B x E x G x N
xS = x.reshape([B, E, 1, G, N]) # B x E x 1 x G x N
z = torch.cat((z, xS), dim = 2) # B x E x k x G x N
# This output z is of size B x E x K x G x N
# Now we have the x*S_{e}^{k} product, and we need to multiply with the
# filter taps.
# We multiply z on the left, and h on the right, the output is to be
# B x N x F (the multiplication is not along the N dimension), so we reshape
# z to be B x N x E x K x G and reshape it to B x N x EKG (remember we
# always reshape the last dimensions), and then make h be E x K x G x F and
# reshape it to EKG x F, and then multiply
y = torch.matmul(z.permute(0, 4, 1, 2, 3).reshape([B, N, E*K*G]),
h.reshape([F, E*K*G]).permute(1, 0)).permute(0, 2, 1)
    # And permute again to bring it from B x N x F to B x F x N.
# Finally, add the bias
if b is not None:
y = y + b
return y
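# Direct-call sketch for BatchLSIGF (illustrative only; arbitrary sizes).
# Unlike batchLSIGF above, which takes precomputed GSO powers SK of shape
# B x E x K x N x N, this variant takes the GSO S of shape B x E x N x N and
# builds the powers internally.
def _demo_BatchLSIGF():
    B, E, K, N, G, F = 4, 1, 3, 10, 5, 8
    h = torch.rand(F, E, K, G)
    S = torch.rand(B, E, N, N)
    x = torch.rand(B, G, N)
    y = BatchLSIGF(h, S, x)             # B x F x N
    assert y.shape == (B, F, N)
    return y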
class GraphFilterBatch(nn.Module):
"""
GraphFilter Creates a (linear) layer that applies a graph filter
Initialization:
GraphFilter(in_features, out_features, filter_taps,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilter.addGSO(GSO) Before applying the filter, we need to define
the GSO that we are going to use. This allows to change the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
        GSO (torch.tensor): graph shift operator; shape:
            batch_size x edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilter(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, F, K, E = 1, bias = True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.K = K
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(F, E, K, G))
if bias:
self.bias = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
self.register_parameter('bias', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.G * self.K)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def addGSO(self, S):
# Every S has 4 dimensions.
assert len(S.shape) == 4
# S is of shape B x E x N x N
assert S.shape[1] == self.E
self.N = S.shape[2]
assert S.shape[3] == self.N
self.S = S
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N-Nin)\
.type(x.dtype).to(x.device)
), dim = 2)
# Compute the filter output
u = BatchLSIGF(self.weight, self.S, x, self.bias)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
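# Usage sketch for GraphFilterBatch (illustrative only; the sizes are arbitrary
# assumptions, and torch / nn are assumed to be imported at the top of this
# module).
def _demo_graph_filter_batch():
    B, E, N, G, F, K = 4, 1, 10, 5, 8, 3
    layer = GraphFilterBatch(G, F, K, E)
    layer.addGSO(torch.rand(B, E, N, N))    # one GSO per batch element
    x = torch.rand(B, G, N)
    y = layer(x)                            # B x F x N
    # Signals defined on fewer nodes than the GSO are zero padded internally
    # and truncated back to their original size on output.
    x_small = torch.rand(B, G, N - 2)
    y_small = layer(x_small)                # B x F x (N - 2)
    return y.shape, y_small.shape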
class GraphFilterRNNBatch(nn.Module):
"""
GraphFilter Creates a (linear) layer that applies a graph filter
Initialization:
GraphFilter(in_features, out_features, filter_taps,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilter.addGSO(GSO) Before applying the filter, we need to define
the GSO that we are going to use. This allows to change the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
        GSO (torch.tensor): graph shift operator; shape:
            batch_size x edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilter(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, H, F, K, E=1, bias=True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.H = H # hidden_features
self.K = K
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight_A = nn.parameter.Parameter(torch.Tensor(H, E, K, G))
self.weight_B = nn.parameter.Parameter(torch.Tensor(H, E, K, H))
self.weight_D = nn.parameter.Parameter(torch.Tensor(F, E, K, H))
if bias:
self.bias_A = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_B = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_D = nn.parameter.Parameter(torch.Tensor(F, 1))
        else:
            # When bias=False, still register the named bias attributes so that
            # reset_parameters() and forward() can check them safely.
            self.register_parameter('bias_A', None)
            self.register_parameter('bias_B', None)
            self.register_parameter('bias_D', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv_a = 1. / math.sqrt(self.G * self.K)
self.weight_A.data.uniform_(-stdv_a, stdv_a)
if self.bias_A is not None:
self.bias_A.data.uniform_(-stdv_a, stdv_a)
stdv_b = 1. / math.sqrt(self.H * self.K)
self.weight_B.data.uniform_(-stdv_b, stdv_b)
if self.bias_B is not None:
self.bias_B.data.uniform_(-stdv_b, stdv_b)
stdv_d = 1. / math.sqrt(self.H * self.K)
self.weight_D.data.uniform_(-stdv_d, stdv_d)
if self.bias_D is not None:
self.bias_D.data.uniform_(-stdv_d, stdv_d)
def addGSO(self, S):
# Every S has 4 dimensions.
assert len(S.shape) == 4
# S is of shape B x E x N x N
assert S.shape[1] == self.E
self.N = S.shape[2]
assert S.shape[3] == self.N
self.S = S
# def initialize_hidden(self):
# # the weights are of the form (nb_layers, batch_size, nb_lstm_units)
# hidden = torch.zeros(self.config.batch_size, self.F, self.numAgents)
#
# self.hiddenstateGPU = hidden.to(self.config.device)
def updateHiddenState(self, hiddenState):
self.hiddenState = hiddenState
def detachHiddenState(self):
        # tensor.detach() creates a tensor that shares storage with the original
        # tensor but does not require grad; use detach()/detach_() to remove a
        # tensor from the computation graph.
        # https://discuss.pytorch.org/t/clone-and-detach-in-v0-4-0/16861/4
        self.hiddenState.detach_()
        self.hiddenStateNext.detach_()
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N-Nin)\
.type(x.dtype).to(x.device)
), dim = 2)
# Compute the filter output
u_a = BatchLSIGF(self.weight_A, self.S, x, self.bias_A)
u_b = BatchLSIGF(self.weight_B, self.S, self.hiddenState, self.bias_B)
sigma = nn.ReLU(inplace=True)
# sigma = nn.Tanh()
self.hiddenStateNext = sigma(u_a + u_b)
u = BatchLSIGF(self.weight_D, self.S, self.hiddenStateNext, self.bias_D)
self.updateHiddenState(self.hiddenStateNext)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, hidden_features=%d, " % (
self.G, self.F, self.H) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias_D is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
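# Usage sketch for GraphFilterRNNBatch (illustrative only; arbitrary sizes).
# The layer reads self.hiddenState in forward(), so the hidden state has to be
# seeded with updateHiddenState() before the first call.
def _demo_graph_filter_rnn_batch():
    B, E, N, G, H, F, K = 4, 1, 10, 5, 6, 8, 3
    layer = GraphFilterRNNBatch(G, H, F, K, E)
    layer.addGSO(torch.rand(B, E, N, N))
    layer.updateHiddenState(torch.zeros(B, H, N))   # initial hidden state
    y = layer(torch.rand(B, G, N))                  # B x F x N
    return y.shape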
def torchpermul(h, x, b=None):
    # h is output_features x hidden_features (a pointwise, per-node weight)
    # x is batch_size x hidden_features x number_nodes
    # b is output_features x number_nodes
    # Output:
    #     y is batch_size x output_features x number_nodes
    # In the notation we've been using:
    # h in G x H
    # x in B x H x N
    # b in G x N
    # y in B x G x N
    # We have x in B x H x N and h in G x H, so permute x to B x N x H and
    # multiply by h^T (H x G): B x N x H with H x G -> B x N x G -> B x G x N
    y = torch.matmul(x.permute(0, 2, 1), h.permute(1, 0)).permute(0, 2, 1) # B x G x N
# Finally, add the bias
if b is not None:
y = y + b
return y
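# Shape sketch for torchpermul (illustrative only; arbitrary sizes). It applies
# the same H -> G linear map at every node of every sample in the batch.
def _demo_torchpermul():
    B, H, G, N = 4, 6, 8, 10
    h = torch.rand(G, H)
    x = torch.rand(B, H, N)
    b = torch.rand(G, 1)                # broadcast over nodes
    y = torchpermul(h, x, b)            # B x G x N
    assert y.shape == (B, G, N)
    return y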
class GraphFilterMoRNNBatch(nn.Module):
"""
GraphFilter Creates a (linear) layer that applies a graph filter
Initialization:
GraphFilter(in_features, out_features, filter_taps,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilter.addGSO(GSO) Before applying the filter, we need to define
the GSO that we are going to use. This allows to change the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
        GSO (torch.tensor): graph shift operator; shape:
            batch_size x edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilter(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, H, F, K, E=1, bias=True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.H = H # hidden_features
self.K = K
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight_A = nn.parameter.Parameter(torch.Tensor(H, E, K, G))
self.weight_B = nn.parameter.Parameter(torch.Tensor(H, H))
self.weight_D = nn.parameter.Parameter(torch.Tensor(F, H))
if bias:
self.bias_A = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_B = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_D = nn.parameter.Parameter(torch.Tensor(F, 1))
        else:
            # When bias=False, still register the named bias attributes so that
            # reset_parameters() and forward() can check them safely.
            self.register_parameter('bias_A', None)
            self.register_parameter('bias_B', None)
            self.register_parameter('bias_D', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# todo: check initialize weight
# Taken from _ConvNd initialization of parameters:
stdv_a = 1. / math.sqrt(self.G * self.K)
self.weight_A.data.uniform_(-stdv_a, stdv_a)
if self.bias_A is not None:
self.bias_A.data.uniform_(-stdv_a, stdv_a)
stdv_b = 1. / math.sqrt(self.H)
self.weight_B.data.uniform_(-stdv_b, stdv_b)
if self.bias_B is not None:
self.bias_B.data.uniform_(-stdv_b, stdv_b)
stdv_d = 1. / math.sqrt(self.H )
self.weight_D.data.uniform_(-stdv_d, stdv_d)
if self.bias_D is not None:
self.bias_D.data.uniform_(-stdv_d, stdv_d)
def addGSO(self, S):
# Every S has 4 dimensions.
assert len(S.shape) == 4
# S is of shape B x E x N x N
assert S.shape[1] == self.E
self.N = S.shape[2]
assert S.shape[3] == self.N
self.S = S
def updateHiddenState(self, hiddenState):
self.hiddenState = hiddenState
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N-Nin)\
.type(x.dtype).to(x.device)
), dim = 2)
# Compute the filter output
u_a = BatchLSIGF(self.weight_A, self.S, x, self.bias_A) # B x H x n
# u_b = torch.mul(self.hiddenState.permute(0,2,1), self.weight_B.permute(1, 0)).permute(0,2,1) + self.bias_B # B x H x n
u_b = torchpermul(self.weight_B,self.hiddenState,self.bias_B)
sigma = nn.ReLU(inplace=True)
# sigma = nn.Tanh()
self.hiddenStateNext = sigma(u_a + u_b)
# v1
# u = torch.mul(self.weight_D, self.hiddenState) + self.bias_D
# v2
# u = torch.mul(u_a.permute(0,2,1), self.weight_D.permute(1, 0)).permute(0,2,1) + self.bias_D
u = torchpermul(self.weight_D, self.hiddenStateNext, self.bias_D)
self.updateHiddenState(self.hiddenStateNext)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, hidden_features=%d, " % (
self.G, self.F, self.H) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias_D is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
class GraphFilterL2ShareBatch(nn.Module):
"""
GraphFilter Creates a (linear) layer that applies a graph filter
Initialization:
GraphFilter(in_features, out_features, filter_taps,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilter.addGSO(GSO) Before applying the filter, we need to define
the GSO that we are going to use. This allows to change the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
        GSO (torch.tensor): graph shift operator; shape:
            batch_size x edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilter(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, H, F, K, E=1, bias=True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.H = H # hidden_features
self.K = K
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight_A = nn.parameter.Parameter(torch.Tensor(H, E, K, G))
self.weight_B = nn.parameter.Parameter(torch.Tensor(H, H))
self.weight_D = nn.parameter.Parameter(torch.Tensor(F, H))
if bias:
self.bias_A = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_B = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_D = nn.parameter.Parameter(torch.Tensor(F, 1))
        else:
            # When bias=False, still register the named bias attributes so that
            # reset_parameters() and forward() can check them safely.
            self.register_parameter('bias_A', None)
            self.register_parameter('bias_B', None)
            self.register_parameter('bias_D', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# todo: check initialize weight
# Taken from _ConvNd initialization of parameters:
stdv_a = 1. / math.sqrt(self.G * self.K)
self.weight_A.data.uniform_(-stdv_a, stdv_a)
if self.bias_A is not None:
self.bias_A.data.uniform_(-stdv_a, stdv_a)
stdv_b = 1. / math.sqrt(self.H)
self.weight_B.data.uniform_(-stdv_b, stdv_b)
if self.bias_B is not None:
self.bias_B.data.uniform_(-stdv_b, stdv_b)
stdv_d = 1. / math.sqrt(self.H )
self.weight_D.data.uniform_(-stdv_d, stdv_d)
if self.bias_D is not None:
self.bias_D.data.uniform_(-stdv_d, stdv_d)
def addGSO(self, S):
# Every S has 4 dimensions.
assert len(S.shape) == 4
# S is of shape B x E x N x N
assert S.shape[1] == self.E
self.N = S.shape[2]
assert S.shape[3] == self.N
self.S = S
def updateHiddenState(self, hiddenState):
self.hiddenState = hiddenState
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N-Nin)\
.type(x.dtype).to(x.device)
), dim = 2)
# Compute the filter output
u_a = BatchLSIGF(self.weight_A, self.S, x, self.bias_A) # B x H x n
u_b = torchpermul(self.weight_B, self.hiddenState, self.bias_B)
sigma = nn.ReLU(inplace=True)
# sigma = nn.Tanh()
self.hiddenStateNext = sigma(u_a + u_b)
# u = torch.mul(u_a.permute(0,2,1), self.weight_D.permute(1, 0)).permute(0,2,1) + self.bias_D
u = torchpermul(self.weight_D, self.hiddenStateNext, self.bias_D)
self.updateHiddenState(self.hiddenStateNext)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, hidden_features=%d, " % (
self.G, self.F, self.H) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias_D is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
| 2.609375
| 3
|
winter/core/utils/__init__.py
|
DmitryKhursevich/winter
| 9
|
12776790
|
<reponame>DmitryKhursevich/winter<gh_stars>1-10
from .beautify_string import camel_to_human
from .cached_property import cached_property
from .nested_types import TypeWrapper
from .nested_types import has_nested_type
from .positive_integer import PositiveInteger
| 1.3125
| 1
|
ml/train/evaluation/model_scoring.py
|
ishaanjain/video-annotation-tool
| 12
|
12776791
|
<filename>ml/train/evaluation/model_scoring.py<gh_stars>10-100
import keras
import numpy as np
import os
from keras_retinanet.utils.eval import _get_detections
from keras_retinanet.utils.eval import _get_annotations
from keras_retinanet.utils.anchors import compute_overlap
""" Evaluate a given dataset using a given model.
# Arguments
generator : The generator that represents the dataset to evaluate.
model : The model to evaluate.
iou_threshold : The threshold used to consider when a detection is positive or negative.
score_threshold : The score confidence threshold to use for detections.
max_detections : The maximum number of detections to use per image.
save_path : The path to save images with visualized detections to.
# Returns
        best_f1: the best achievable F1 score for each class.
        best_thresh: the corresponding score thresholds that achieve those maximum F1 scores.
"""
def f1_evaluation(generator,model,iou_threshold=0.5,score_threshold=0.05,max_detections=100,save_path=None):
all_detections = _get_detections(generator, model, score_threshold=score_threshold, max_detections=max_detections, save_path=save_path)
all_annotations = _get_annotations(generator)
best_thresh = {}
best_f1 = {}
# process detections and annotations
for label in range(generator.num_classes()):
if not generator.has_label(label):
continue
true_positives, false_positives, scores, num_annotations = compute_measures(
generator, label, iou_threshold, all_detections, all_annotations)
if num_annotations == 0:
continue
f1_points, scores = compute_f1(true_positives, false_positives, scores, num_annotations)
best_f1[label] = np.max(f1_points) if f1_points.size else 0
best_thresh[label] = scores[np.argmax(f1_points)] if f1_points.size else 0
return best_f1, best_thresh
def compute_measures(generator, label, iou_threshold, all_detections, all_annotations):
false_positives = np.zeros((0,))
true_positives = np.zeros((0,))
scores = np.zeros((0,))
num_annotations = 0.0
for i in range(generator.size()):
detections = all_detections[i][label]
annotations = all_annotations[i][label]
num_annotations += annotations.shape[0]
detected_annotations = []
sort_by_score_index = detections[:,4].argsort()[::-1]
for d in detections[sort_by_score_index]:
scores = np.append(scores, d[4])
if num_annotations == 0:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
continue
overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
assigned_annotation = np.argmax(overlaps, axis=1) if overlaps.size else 0
max_overlap = overlaps[0, assigned_annotation] if overlaps.size else 0
if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
false_positives = np.append(false_positives, 0)
true_positives = np.append(true_positives, 1)
detected_annotations.append(assigned_annotation)
else:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
return true_positives, false_positives, scores, num_annotations
def compute_f1(true_positives, false_positives, scores, num_annotations):
# sort by score
indices = np.argsort(-scores)
scores = scores[indices]
true_positives = true_positives[indices]
false_positives = false_positives[indices]
# Compute cumulative sums for true and false positives
true_positives = np.cumsum(true_positives)
false_positives = np.cumsum(false_positives)
# compute recall and precision curves
recall = true_positives / num_annotations
precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
# Compute f1 scores
f1_points = [(2*r*p) / (r+p) if (r + p) else 0 for r,p in zip(recall, precision)]
return np.array(f1_points), scores
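# Toy sanity check for compute_f1 (illustrative only; the detections below are
# made up). With 2 annotations, one true positive (score 0.9) and one false
# positive (score 0.4), the best F1 over the score-sorted sweep is reached by
# keeping only the first detection: recall 0.5, precision 1.0, F1 = 2/3.
def _demo_compute_f1():
    true_positives = np.array([1.0, 0.0])
    false_positives = np.array([0.0, 1.0])
    scores = np.array([0.9, 0.4])
    num_annotations = 2.0
    f1_points, sorted_scores = compute_f1(true_positives, false_positives,
                                          scores, num_annotations)
    best_f1 = np.max(f1_points)
    best_thresh = sorted_scores[np.argmax(f1_points)]
    return best_f1, best_thresh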
| 2.671875
| 3
|
sshr/_compat.py
|
zhengxiaowai/sshm
| 0
|
12776792
|
<filename>sshr/_compat.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import six
if six.PY3:
from io import IOBase
file = (IOBase, six.StringIO)
else:
file = (file, six.StringIO)
| 2.03125
| 2
|
Modules/mystuff.py
|
Keerti-Gautam/PythonLearning
| 0
|
12776793
|
def apple():
print "I AM APPLES!"
# this is just a variable
tangerine = "Living reflection of a dream"
def addition():
x = int(raw_input("Please enter the first number to be added: "))
y = int(raw_input("Please enter the second number to be added: "))
#return x+y
z = x+y
return z
| 3.84375
| 4
|
lucene/build_index.py
|
lavizhao/keyword
| 3
|
12776794
|
#coding: utf-8
import lucene
import csv
print "预处理"
INDEX_DIR = '../index'
lucene.initVM()
directory = lucene.SimpleFSDirectory(lucene.File(INDEX_DIR))
analyzer = lucene.StandardAnalyzer(lucene.Version.LUCENE_CURRENT)
def get_data():
"""
"""
f = open("../data/new_train.csv")
reader = csv.reader(f)
data = []
for row in reader:
data.append((row[0]+" "+row[1],row[2]))
return data
def build_index():
"""
"""
print "开始创建索引"
writer = lucene.IndexWriter(directory,analyzer,True,
lucene.IndexWriter.MaxFieldLength.UNLIMITED)
data = get_data()
print "数据个数:",len(data)
a = 0
for content,tag in data:
doc = lucene.Document()
doc.add(lucene.Field('content',content,lucene.Field.Store.YES,
lucene.Field.Index.ANALYZED))
doc.add(lucene.Field('tag',tag,lucene.Field.Store.YES,
lucene.Field.Index.NOT_ANALYZED))
writer.addDocument(doc)
if a % 10000 == 0:
print "%s已完成"%(1.0*a/len(data))
a += 1
print "写引擎优化"
writer.optimize()
writer.close()
if __name__ == '__main__':
print "hello world"
build_index()
| 2.859375
| 3
|
chuong_3.py
|
ngcngmnh/lt_he_thong_dien
| 1
|
12776795
|
import array
import math
import chuong_1
import chuong_2
def bien_ap_t1():
s_max=abs(chuong_2.s_a)
s_dm_B=s_max/1.4
d_p_n=260
d_p_0=100
u_n=14
i_0=0.045
r_b1=d_p_n*110**2/20000**2*10**3
z_b1=u_n*110**2/20000*10
x_b1=math.sqrt(z_b1**2-r_b1**2)
d_q_FE=i_0*20000/100
print('S_ptmax1:',s_max,'\nS_dm_B>=',s_dm_B)
    print('==> Choose two 20000/110 kV transformers')
print('R_B1:',r_b1)
print('Z_B1:',z_b1)
print('X_B1:',x_b1)
print('ΔQ_FE:',d_q_FE)
print()
def bien_ap_t2():
s_max=abs(chuong_2.s_b)
s_dm_B=s_max/1.4
d_p_n=260
d_p_0=100
u_n=14
i_0=0.045
r_b1=d_p_n*110**2/20000**2*10**3
z_b1=u_n*110**2/20000*10
x_b1=math.sqrt(z_b1**2-r_b1**2)
d_q_FE=i_0*20000/100
print('S_ptmax2:',s_max,'\nS_dm_B>=',s_dm_B)
    print('==> Choose two 20000/110 kV transformers')
print('R_B2:',r_b1)
print('Z_B2:',z_b1)
print('X_B2:',x_b1)
print('ΔQ_FE:',d_q_FE)
print()
def bien_ap_t3():
s_max=chuong_1.load_3[0]/chuong_1.load_3[2]
d_p_n=220
d_p_0=115
u_n=14
i_0=0.042
r_b1=d_p_n*110**2/31500**2*10**3
z_b1=u_n*110**2/31500*10
x_b1=math.sqrt(z_b1**2-r_b1**2)
d_q_FE=i_0*31500/100
print('S_ptmax3:',s_max,'\nS_dm_B>=',s_max)
    print('==> Choose one 31500/110 kV transformer')
print('R_B3:',r_b1)
print('Z_B3:',z_b1)
print('X_B3:',x_b1)
print('ΔQ_FE:',d_q_FE)
print()
| 2.46875
| 2
|
labs/ex04/template/ex04.py
|
kcyu1993/ML_course_kyu
| 0
|
12776796
|
# Useful starting lines
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# %load_ext autoreload
# %autoreload 2
from sklearn import linear_model
# from __future__ import absolute_import
from labs.ex03.template import helpers
from labs.ex04.template.costs import compute_rmse, compute_mse
from labs.ex04.template.costs import compute_mse_for_ridge
from labs.ex04.template.ridge_regression import ridge_regression
from labs.ex04.template.build_polynomial import build_poly
from labs.ex04.template.plots import cross_validation_visualization
from labs.ex04.template.plots import cross_validation_visualization_for_degree
from labs.ex04.template.least_squares import least_squares
from labs.ex04.template.split_data import split_data
from labs.ex04.template.plots import bias_variance_decomposition_visualization
# load dataset
def data_load():
''' Return x, y '''
return helpers.load_data()
def build_k_indices(y, k_fold, seed):
"""build k indices for k-fold."""
num_row = y.shape[0]
interval = int(num_row / k_fold)
np.random.seed(seed)
indices = np.random.permutation(num_row)
k_indices = [indices[k * interval: (k + 1) * interval]
for k in range(k_fold)]
return np.array(k_indices)
def cross_validation(y, x, k_indices, k, lamb, degree, rmse=False):
"""return the loss of ridge regression."""
# ***************************************************
# Split data into K groups according to indices
# get k'th subgroup in test, others in train:
# ***************************************************
x = np.array(x)
y = np.array(y)
train_ind = np.concatenate((k_indices[:k], k_indices[k+1:]), axis=0)
train_ind = np.reshape(train_ind, (train_ind.size,))
test_ind = k_indices[k]
# Note: different from np.ndarray, tuple is name[index,]
# ndarray is name[index,:]
train_x = x[train_ind,]
train_y = y[train_ind,]
test_x = x[test_ind,]
test_y = y[test_ind,]
# ***************************************************
# INSERT YOUR CODE HERE
# form data with polynomial degree:
# ***************************************************
train_x = build_poly(train_x, degree)
test_x = build_poly(test_x, degree)
# ***************************************************
# INSERT YOUR CODE HERE
# ridge regression:
# ***************************************************
loss_tr, weight = ridge_regression(train_y, train_x, lamb)
# Test with sklearn ridge solve.
clf = linear_model.ridge_regression(train_x, train_y, alpha=lamb)
# weight = clf
# ***************************************************
# INSERT YOUR CODE HERE
# calculate the loss for train and test data: TODO
# ***************************************************
''' Compute MSE by ridge weights '''
loss_tr = compute_mse_for_ridge(train_y, train_x, weight,lamb)
loss_te = compute_mse_for_ridge(test_y, test_x, weight, lamb)
# loss_tr = compute_mse(train_y, train_x, weight)
# loss_te = compute_mse(test_y, test_x, weight)
if rmse is True:
loss_tr = compute_rmse(loss_tr)
loss_te = compute_rmse(loss_te)
return loss_tr, loss_te
def cross_validation_demo():
seed = 1
degree = 7
k_fold = 4
lambdas = np.logspace(-4, 2, 30)
y,x = data_load()
# split data in k fold
k_indices = build_k_indices(y, k_fold, seed)
# define lists to store the loss of training data and test data
mse_tr = []
mse_te = []
# ***************************************************
# INSERT YOUR CODE HERE
# cross validation:
# ***************************************************
for lamb in lambdas:
_mse_tr = []
_mse_te = []
for k in range(k_fold):
loss_tr, loss_te = cross_validation(y,x,k_indices,k,lamb,degree, rmse=True)
_mse_tr += [loss_tr]
_mse_te += [loss_te]
avg_tr = np.average(_mse_tr)
avg_te = np.average(_mse_te)
mse_tr += [avg_tr]
mse_te += [avg_te]
cross_validation_visualization(lambdas, mse_tr, mse_te)
print(mse_tr, mse_te)
def cross_validation_demo_degree():
seed = 1
degrees = range(2,11)
k_fold = 4
lamb = 0.5
y,x = data_load()
# split data in k fold
k_indices = build_k_indices(y, k_fold, seed)
# define lists to store the loss of training data and test data
mse_tr = []
mse_te = []
# ***************************************************
# INSERT YOUR CODE HERE
# cross validation:
# ***************************************************
for degree in degrees:
_mse_tr = []
_mse_te = []
for k in range(k_fold):
loss_tr, loss_te = cross_validation(y,x,k_indices,k,lamb, degree, rmse=True)
_mse_tr += [loss_tr]
_mse_te += [loss_te]
avg_tr = np.average(_mse_tr)
avg_te = np.average(_mse_te)
mse_tr += [avg_tr]
mse_te += [avg_te]
cross_validation_visualization_for_degree(degrees, mse_tr, mse_te)
print(mse_tr, mse_te)
def bias_variance2(y, x, weight, variance_e):
'''
For linear model bias-variance calculation. The dimension is len(weight)
:param y:
:param x:
:param weight: beta of linear model
:param function:
:param variance_e:
:return:
'''
# N = len(x)
# res = np.dot(x, weight)
# error = variance_e * (len(weight) / N) + np.sum( (y - np.dot(x, weight)) **2 )/ N
# return compute_rmse(error)
return compute_rmse(compute_mse(y,x,weight) + 1 + len(weight)/ len(x))
def bias_variance(function, x, weight, variance_e):
'''
For linear model bias-variance calculation. The dimension is len(weight)
:param y:
:param x:
:param weight: beta of linear model
:param function:
:param variance_e:
:return:
'''
y = function(x[:,1])
# N = len(x)
# res = np.dot(x, weight)
# error = variance_e * (len(weight) / N) + np.sum( (y - np.dot(x, weight)) **2 )/ N
# return compute_rmse(error)
return compute_rmse(compute_mse(y,x,weight))
def bias_variance_demo():
"""The entry."""
# define parameters
seeds = range(100)
num_data = 10000
ratio_train = 0.005
degrees = range(1, 10)
# define list to store the variable
rmse_tr = np.empty((len(seeds), len(degrees)))
rmse_te = np.empty((len(seeds), len(degrees)))
for index_seed, seed in enumerate(seeds):
np.random.seed(seed)
x = np.linspace(0.1, 2 * np.pi, num_data)
y = np.sin(x) + 0.3 * np.random.randn(num_data).T
# ***************************************************
# INSERT YOUR CODE HERE
# split data with a specific seed: TODO
# ***************************************************
train_x, train_y, test_x, test_y = split_data(x,y,ratio_train,seed)
# ***************************************************
# INSERT YOUR CODE HERE
# bias_variance_decomposition: TODO
# ***************************************************
for ind_degree, degree in enumerate(degrees):
# Use least square
x_tr = build_poly(train_x, degree)
x_te = build_poly(test_x, degree)
mse, weight = least_squares(train_y, x_tr)
rmse_tr[index_seed][ind_degree] = bias_variance(np.sin, x_tr, weight, 1)
rmse_te[index_seed][ind_degree] = bias_variance(np.sin, x_te, weight, 1)
# rmse_tr[index_seed][ind_degree] = bias_variance2(train_y, x_tr, weight, 1)
# rmse_te[index_seed][ind_degree] = bias_variance2(test_y, x_te, weight, 1)
bias_variance_decomposition_visualization(degrees, rmse_tr, rmse_te)
# cross_validation_demo()
# degree = 5.
# cross_validation_demo_degree()
bias_variance_demo()
print()
| 2.578125
| 3
|
virtualmother_app/module/database.py
|
guralin/virtual_mother
| 0
|
12776797
|
<reponame>guralin/virtual_mother
#!/bin/env python
# coding: utf-8
from virtualmother_app import db
from virtualmother_app.models import Table,TodoTable
from flask_sqlalchemy import SQLAlchemy
import datetime
# views.py (/register)
class SendData(Table):
    # Assign the values to the columns
def __init__(self, user_id, get_up_time):
self.user_id = user_id
self.get_up_time = get_up_time
class SendDate(Table):
def __init__(self, user_id, date):
self.user_id = user_id
self.date = date
class DBOperation():
def __init__(self,db):
self.db = db
    # Register a new wake-up time
def insert_get_up_time(self, user_id, get_up_time):
user_data = SendData(user_id, get_up_time)
db.session.add(user_data)
db.session.commit()
    # Register the date
def insert_date(self, user_id):
user_data = SendDate(user_id, datetime.date.today())
db.session.add(user_data)
db.session.commit()
    # Change the wake-up time
def update_get_up_time(self, user_id, get_up_time):
user_data = db.session.query(Table).filter(Table.user_id == user_id).first()
user_data.get_up_time = get_up_time
db.session.commit()
    # Update the date (when the DM link is clicked)
def update_date(self, user_id):
user_data = db.session.query(Table).filter(Table.user_id == user_id).first()
user_data.date = datetime.date.today() #(1600, 2, 4)
print(user_data.date)
db.session.commit()
    # Disable the alarm
def delete_get_up_time(self, user_id):
user_data = db.session.query(Table).filter(Table.user_id == user_id).first()
user_data.get_up_time = None
db.session.commit()
# morning.py
class GetData(Table):  # Fetch data from specific columns
def id_and_get_up(self):
self.users_data = db.session.query(Table.user_id, Table.get_up_time, Table.date).all()
return self.users_data
def get_up_time(self,search_user_id):
users = db.session.query(Table).filter(Table.user_id==search_user_id).first()
return users.get_up_time
####################################
# Table for the todo app
class AddTodo(TodoTable):
def __init__(self,user_id,todo):
self.user_id = user_id
self.todo = todo
class TodoData(TodoTable):
def get_todo_from_all_user(self):
users_data = db.session.query(TodoTable.user_id,TodoTable.todo).all()
return users_data
def get_todolist_from_single_user(self,user_id):
users_data = db.session.query(TodoTable.user_id,TodoTable.todo).filter(TodoTable.user_id == user_id).all()
todolist = []
for todo_row in users_data:
            todo = todo_row[1]  # index [1] is the todo column in the database
todolist.append(todo)
return todolist
def add_todo(self,user_id,todo):
todo_data=AddTodo(user_id,todo)
db.session.add(todo_data)
db.session.commit()
def delete_todo(self,user_id,todo):
user_data = db.session.query(TodoTable).filter(TodoTable.user_id== user_id).filter(TodoTable.todo == todo).delete()
db.session.commit()
| 2.578125
| 3
|
GUI.py
|
Kr0nox/LaunchpadTool
| 1
|
12776798
|
<reponame>Kr0nox/LaunchpadTool
import tkinter as tk
import webbrowser
from LaunchpadListener import LaunchpadListener
import time
import threading
import math
from typing import List
from KeyProfile import KeyProfile
import Actions
launchpad = None
profiles: List[KeyProfile] = []
UI = None
UNCLICKED = "#bbb"
BACKGROUND = "#111"
BUTTON_SETTINGS_SELECTION_WIDTH = 26
DEFAULT_COLORS = {'fg': "#fff", 'bg': BACKGROUND}
INTERACTABLE_COLORS = {'fg': "#fff", 'bg': "#222"}
SELECTION_SETTINGS = {**DEFAULT_COLORS, 'bd': 0, 'highlightthickness': 1, 'highlightbackground': "#000"}
COLOR_SLIDER_SETTINGS = {'from_': 0, 'to': 3, 'orient': "horizontal", **DEFAULT_COLORS,
'highlightthickness': 1,
'highlightbackground': "#000", 'showvalue': False, 'activebackground': BACKGROUND,
'troughcolor': "#222", 'bd': 0, 'length': 195}
ATTRIBUTEROW = 6
LBL_INFO_PACK = {'fill': "x", 'expand': True, 'padx': 5, 'pady': 2, 'side': "top", 'anchor': "n"}
HEIGHT = 470
class GUI:
uIButtons = []
root = None
buttonCanvas = None
currentlyWorkingOnButton = [None, None]
quiting = False
def __init__(self, lp, p):
global launchpad
global profiles
profiles = p
launchpad = lp
self.root = tk.Tk()
self.root.title("Launchpad Shortcuts")
canvas = tk.Canvas(self.root, bg="#222", bd=0, highlightthickness=0)
canvas.pack()
self.root.resizable(False, False)
self.selected = 0
# region Launchpad Display Matrix as Buttons
buttonCanvas = tk.Canvas(canvas, width=450, height=450, bg=BACKGROUND, highlightthickness=2,
highlightbackground="#000")
buttonCanvas.pack(padx=10, pady=10, side="left", ipadx=1, ipady=1)
for y in range(9):
for x in range(9):
if x == 8 and y == 0:
self.uIButtons.append("None")
continue
tag = "clickable" + str(len(self.uIButtons))
if x == 8 or y == 0:
c = buttonCanvas.create_oval(5 + x * 50, 5 + y * 50, 50 + x * 50, 50 + y * 50, fill=UNCLICKED,
width=0, tags=tag)
# self.uIButtons.append(c)
else:
c = self.round_rectangle(buttonCanvas, tag, 5 + x * 50, 5 + y * 50, 50 + x * 50, 50 + y * 50, 15)
buttonCanvas.tag_bind(tag, "<Button-1>",
lambda event, pos=x + y * 9: self.pressedUiLpKey(event, pos))
self.uIButtons.append(c)
# endregion
settingsCanvas = tk.Canvas(canvas, width=270, height=HEIGHT, bg="#222", highlightthickness=0)
settingsCanvas.pack(side="left")
# region Settings for File
fileSettingsCanvas = tk.Canvas(settingsCanvas, width=250, height=100, bg=BACKGROUND, highlightthickness=2,
highlightbackground="#000")
fileSettingsCanvas.pack_propagate(0)
fileSettingsCanvas.pack(padx=10, pady=10, side="top", ipadx=5, ipady=5, fill="both")
self.profilesChoices = ["Display Current"]
for p in profiles:
self.profilesChoices.append(p.name)
# Drop-Down Selection
self.profilesChoice = tk.StringVar(fileSettingsCanvas)
self.profilesChoice.set(self.profilesChoices[0])
self.profilesSelection = tk.OptionMenu(fileSettingsCanvas, self.profilesChoice, *self.profilesChoices)
self.profilesSelection.configure(**SELECTION_SETTINGS)
self.profilesSelection.pack(fill="x", expand=True, padx=5, pady=5, side="top", anchor="n")
self.profilesChoice.trace_variable("w", self.profileSelectionChanged)
# Name Changing
self.changeNameCanvas = tk.Canvas(fileSettingsCanvas, bg=BACKGROUND, highlightthickness=0)
self.changeNameCanvasPackSettings = {'fill': "x", 'expand': True, 'padx': 2}
self.changeNameCanvas.pack(**self.changeNameCanvasPackSettings)
txtName = tk.Label(self.changeNameCanvas, text="New Name:", **DEFAULT_COLORS)
txtName.pack(side="left", padx=3, pady=3)
self.inputText = tk.StringVar(self.changeNameCanvas, "asdasd")
self.inputName = tk.Entry(self.changeNameCanvas, textvariable=self.inputText, **DEFAULT_COLORS,
borderwidth=0, highlightthickness=1,
highlightbackground="#000")
self.inputName.pack(fill="x", expand=True, padx=3, pady=3, side="right")
# Buttons
self.fileButtonCanvas = tk.Canvas(fileSettingsCanvas, bg=BACKGROUND, highlightthickness=0)
self.fileButtonCanvasPackSettings = {'fill': "x", 'expand': True, 'padx': 2, 'pady': 2, 'anchor': "s"}
self.fileButtonCanvas.pack(**self.fileButtonCanvasPackSettings)
saveButton = tk.Button(self.fileButtonCanvas, text="Save", **INTERACTABLE_COLORS, borderwidth=0,
command=self.saveNameChange)
saveButton.pack(padx=3, pady=3, expand=True, fill="x", side="left")
cancelButton = tk.Button(self.fileButtonCanvas, text="Cancel", **INTERACTABLE_COLORS, borderwidth=0,
command=self.cancelNameChange)
cancelButton.pack(padx=3, pady=3, expand=True, fill="x", side="right")
# endregion
# region Settings for Key
# Canvas Structure
buttonSettings = tk.Canvas(settingsCanvas, width=250, height=210, bg=BACKGROUND, highlightthickness=2,
highlightbackground="#000")
buttonSettings.pack_propagate(0)
buttonSettings.pack(padx=10, pady=10, side="top", ipadx=5, ipady=5)
self.buttonSettingsCanvasOuter = tk.Canvas(buttonSettings, bg=BACKGROUND, highlightthickness=2,
highlightbackground="#000")
self.buttonSettingsCanvasOuterPackSettings = {'fill': "both", 'expand': True}
self.buttonSettingsCanvasOuter.pack(**self.buttonSettingsCanvasOuterPackSettings)
buttonSettingsCanvas = tk.Canvas(self.buttonSettingsCanvasOuter, bg=BACKGROUND, highlightthickness=0, width=240)
buttonSettingsCanvas.pack(padx=5, pady=5, fill="both", expand=True)
# X und Y Coordinates
self.lblX = tk.Label(buttonSettingsCanvas, text="X: ", **DEFAULT_COLORS)
self.lblX.grid(column=0, row=0)
self.lblY = tk.Label(buttonSettingsCanvas, text="Y: ", **DEFAULT_COLORS)
self.lblY.grid(column=1, row=0, sticky="w")
# Red Slider
lblRed = tk.Label(buttonSettingsCanvas, text="Red:", **DEFAULT_COLORS)
lblRed.grid(column=0, row=1)
self.sliderRed = tk.Scale(buttonSettingsCanvas, **COLOR_SLIDER_SETTINGS, command=self.colorSliderChange)
self.sliderRed.grid(column=1, row=1)
# Green Slider
lblGreen = tk.Label(buttonSettingsCanvas, text="Green:", **DEFAULT_COLORS)
lblGreen.grid(column=0, row=2, sticky="ew")
self.sliderGreen = tk.Scale(buttonSettingsCanvas, **COLOR_SLIDER_SETTINGS, command=self.colorSliderChange)
self.sliderGreen.grid(column=1, row=2)
# Display Color
lblCol = tk.Label(buttonSettingsCanvas, text="Color:", **DEFAULT_COLORS)
lblCol.grid(column=0, row=3)
self.lblColor = tk.Label(buttonSettingsCanvas, **DEFAULT_COLORS, width=BUTTON_SETTINGS_SELECTION_WIDTH)
self.lblColor.grid(column=1, row=3, sticky="w")
# Function
lblFunc = tk.Label(buttonSettingsCanvas, text="Function:", **DEFAULT_COLORS)
lblFunc.grid(column=0, row=4)
Actions.checkMethods()
self.funcCatChoices = Actions.methodCategories
self.funcCatChoices.append("none")
self.funcCatValue = tk.StringVar(buttonSettingsCanvas)
self.funcCatValue.set("none")
self.funcCatSelection = tk.OptionMenu(buttonSettingsCanvas, self.funcCatValue, *self.funcCatChoices)
self.funcCatSelection.configure(**SELECTION_SETTINGS, width=BUTTON_SETTINGS_SELECTION_WIDTH)
self.funcCatSelection.grid(column=1, row=4)
self.funcCatValue.trace_variable("w", self.funcCatSelectionChanged)
self.funcChoices = ["none"]
self.funcValue = tk.StringVar(buttonSettingsCanvas)
self.funcValue.set("none")
self.funcSelection = tk.OptionMenu(buttonSettingsCanvas, self.funcValue, *self.funcChoices)
self.funcSelection.configure(**SELECTION_SETTINGS, width=BUTTON_SETTINGS_SELECTION_WIDTH)
self.funcSelection.grid(column=1, row=5)
self.funcValue.trace_variable("w", self.funcSelectionChanged)
self.txtAtt = tk.Label(buttonSettingsCanvas, text="Params:", **DEFAULT_COLORS)
self.txtAtt.grid(column=0, row=ATTRIBUTEROW)
self.attString = tk.StringVar(buttonSettingsCanvas, "")
self.inputAtt = tk.Entry(buttonSettingsCanvas, textvariable=self.attString, **DEFAULT_COLORS,
borderwidth=0, highlightthickness=1, width=32,
highlightbackground="#000")
self.inputAtt.grid(column=1, row=ATTRIBUTEROW)
buttonSettingsCanvas.columnconfigure(1, weight=1)
for i in range(5):
buttonSettingsCanvas.rowconfigure(i, pad=5)
# Buttons
saveButtonB = tk.Button(self.buttonSettingsCanvasOuter, text="Save", **INTERACTABLE_COLORS, borderwidth=0,
command=self.saveKeyChange)
saveButtonB.pack(padx=5, pady=5, expand=True, fill="x", side="left")
cancelButtonB = tk.Button(self.buttonSettingsCanvasOuter, text="Cancel", **INTERACTABLE_COLORS, borderwidth=0,
command=self.cancelKeyChange)
cancelButtonB.pack(padx=5, pady=5, expand=True, fill="x", side="right")
# endregion
# Spacer
spacerCanvas = tk.Canvas(settingsCanvas, width=250, height=64, bg=BACKGROUND, highlightthickness=2,
highlightbackground="#000")
spacerCanvas.pack_propagate(0)
spacerCanvas.pack(padx=10, pady=10, side="bottom", ipadx=5, ipady=5)
lblInfo1 = tk.Label(spacerCanvas, text="Open Source Project on ", **DEFAULT_COLORS)
lblInfo2 = tk.Label(spacerCanvas, text="GitHub", bg=BACKGROUND, fg="#5af")
lblInfo3 = tk.Label(spacerCanvas, text="Tested with Novation Launchpad Mini", **DEFAULT_COLORS)
lblInfo1.pack(**LBL_INFO_PACK)
lblInfo2.pack(**LBL_INFO_PACK)
lblInfo2.bind("<Button-1>", lambda e: webbrowser.open_new("https://github.com/AlexV-KX/LaunchpadTool"))
lblInfo3.pack(**LBL_INFO_PACK)
self.buttonSettingsCanvasOuter.pack_forget()
self.changeNameCanvas.pack_forget()
self.fileButtonCanvas.pack_forget()
self.inputAtt.grid_forget()
self.txtAtt.grid_forget()
self.buttonCanvas = buttonCanvas
self.listener = LaunchpadListener(launchpad, self, profiles)
global UI
UI = self
t = threading.Thread(target=self.startPad)
t.start()
self.root.protocol("WM_DELETE_WINDOW", self.closing)
self.root.mainloop()
# UI Changes
def profileSelectionChanged(self, *args):
value = self.profilesChoice.get()
self.selected = self.profilesChoices.index(value) - 1
if value == self.profilesChoices[0]:
self.buttonSettingsCanvasOuter.pack_forget()
self.changeNameCanvas.pack_forget()
self.fileButtonCanvas.pack_forget()
self.colorUIButtons(self.listener.buttons)
else:
self.buttonSettingsCanvasOuter.pack(**self.buttonSettingsCanvasOuterPackSettings)
self.changeNameCanvas.pack(**self.changeNameCanvasPackSettings)
self.fileButtonCanvas.pack(**self.fileButtonCanvasPackSettings)
self.inputText.set(profiles[self.selected].name)
self.colorUIButtons(profiles[self.selected].buttons, True)
def funcCatSelectionChanged(self, *args):
index = self.funcCatChoices.index(self.funcCatValue.get())
menu = self.funcSelection.children['menu']
for i in range(len(self.funcChoices)):
menu.delete(0)
if self.funcCatValue.get() != 'none':
for t in Actions.methodNames[index]:
menu.add_command(label=t, command=lambda v=self.funcValue, l=t: v.set(l))
self.funcChoices = Actions.methodNames[index]
else:
self.attString.set("")
self.inputAtt.grid_forget()
self.txtAtt.grid_forget()
self.funcChoices = ['none']
self.funcValue.set(self.funcChoices[0])
def funcSelectionChanged(self, *args):
if self.funcCatValue.get() == 'none':
return
catIndex = Actions.methodCategories.index(self.funcCatValue.get())
nameIndex = Actions.methodNames[catIndex].index(self.funcValue.get())
if Actions.attributeCount[catIndex][nameIndex] > 0:
self.txtAtt.grid(column=0, row=ATTRIBUTEROW)
self.inputAtt.grid(column=1, row=ATTRIBUTEROW)
else:
self.txtAtt.grid_forget()
self.inputAtt.grid_forget()
def colorSliderChange(self, event):
self.lblColor.config(bg=self.returnHexColor(self.sliderRed.get(), self.sliderGreen.get()))
@staticmethod
def onEnter(e, o):
o['background'] = "#000"
@staticmethod
def onLeave(e, o):
o['background'] = "#222"
# Called if a key in the UI gets pressed
def pressedUiLpKey(self, event, i):
if self.selected != -1:
self.resetButtonSettings()
x = (i % 9)
y = math.floor(i / 9)
self.lblX.config(text="X: " + str(x))
self.lblY.config(text="Y: " + str(y))
self.currentlyWorkingOnButton = [x, y]
button = profiles[self.selected].buttons[i]
if button is not None:
self.sliderRed.set(button.uRed)
self.sliderGreen.set(button.uGreen)
methodName = profiles[self.selected].methodNames[i]
self.funcCatValue.set(Actions.getParentClass(methodName))
self.funcValue.set(methodName)
self.attString.set(profiles[self.selected].methodArguments[i])
else:
self.sliderRed.set(0)
self.sliderGreen.set(0)
# Save functions
def saveNameChange(self):
profiles[self.selected].name = self.inputText.get()
profiles[self.selected].saveToFile()
def cancelNameChange(self):
self.inputText.set(profiles[self.selected].name)
def saveKeyChange(self):
if self.currentlyWorkingOnButton[0] is None:
return
index = self.currentlyWorkingOnButton[0] + self.currentlyWorkingOnButton[1] * 9
attr = None
if self.funcCatValue.get() != 'none':
catIndex = Actions.methodCategories.index(self.funcCatValue.get())
nameIndex = Actions.methodNames[catIndex].index(self.funcValue.get())
attr = self.attString.get() if Actions.attributeCount[catIndex][nameIndex] > 0 else None
profiles[self.selected].changeKey(index,
self.sliderRed.get(), self.sliderGreen.get(), self.funcValue.get(), attr)
profiles[self.selected].saveToFile()
self.setButtonColor(index, self.returnHexColor(self.sliderRed.get(), self.sliderGreen.get()))
self.resetButtonSettings()
self.listener.setButtons()
self.currentlyWorkingOnButton = [None, None]
# Cancel functions
def cancelKeyChange(self):
self.resetButtonSettings()
self.currentlyWorkingOnButton = [None, None]
def resetButtonSettings(self):
self.lblX.config(text="X: ")
self.lblY.config(text="Y: ")
self.sliderRed.set(0)
self.sliderGreen.set(0)
self.funcCatValue.set("none")
menu = self.funcSelection.children['menu']
for i in range(len(self.funcChoices)):
menu.delete(0)
self.funcValue.set("none")
self.txtAtt.grid_forget()
self.inputAtt.grid_forget()
self.attString.set("")
# Makes square with rounded corners
@staticmethod
def round_rectangle(holder, tags, x1, y1, x2, y2, radius=25):
points = [x1 + radius, y1,
x1 + radius, y1,
x2 - radius, y1,
x2 - radius, y1,
x2, y1,
x2, y1 + radius,
x2, y1 + radius,
x2, y2 - radius,
x2, y2 - radius,
x2, y2,
x2 - radius, y2,
x2 - radius, y2,
x1 + radius, y2,
x1 + radius, y2,
x1, y2,
x1, y2 - radius,
x1, y2 - radius,
x1, y1 + radius,
x1, y1 + radius,
x1, y1]
return holder.create_polygon(points, smooth=True, fill=UNCLICKED, tags=tags)
# Gives the UI the corresponding colors
def colorUIButtons(self, lpButtons, ignore=False):
if (self.selected < 0) or ignore:
self.resetUIPad()
for i in range(81):
b = lpButtons[i]
if b is not None:
self.setButtonColor(i, self.returnHexColor(b.uRed, b.uGreen))
# Sets the color of a single button on the UI
def setButtonColor(self, n, c):
self.buttonCanvas.itemconfig(self.uIButtons[n], fill=c)
    # creates a hex color code from red and green values in the range 0 to 3
@staticmethod
def returnHexColor(r, g):
if r == 0 and g == 0:
return UNCLICKED
col = ["0", "9", "c", "f"]
return "#" + col[r] + col[g] + "0"
# Makes the UI buttons blank
def resetUIPad(self):
for i in range(81):
self.setButtonColor(i, UNCLICKED)
# Reacts to a pressed key on the Launchpad
def pressedOnLaunchpad(self, p, button):
if button is not None and self.selected < 0:
self.setButtonColor(p, self.returnHexColor(button.pRed, button.pGreen))
time.sleep(0.5)
self.setButtonColor(p, self.returnHexColor(button.uRed, button.uGreen))
def startPad(self):
self.listener.start()
def closing(self):
self.quiting = True
self.root.destroy()
def none(self, *args):
pass
| 2.390625
| 2
|
lecture_04/302_ros_hello_world_listener.py
|
farzanehesk/COMPAS-II-FS2022
| 11
|
12776799
|
import time
from roslibpy import Topic
from compas_fab.backends import RosClient
def receive_message(message):
print("Received: " + message["data"])
with RosClient("localhost") as client:
print("Waiting for messages...")
listener = Topic(client, "/messages", "std_msgs/String")
listener.subscribe(receive_message)
while client.is_connected:
time.sleep(1)
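# Companion publisher sketch (illustrative assumption, not part of the original
# script): a helper that writes to the same "/messages" topic so the listener
# above has something to print. Call it from a second script with its own
# RosClient connection.
def publish_once(client, text):
    from roslibpy import Message
    talker = Topic(client, "/messages", "std_msgs/String")
    talker.publish(Message({"data": text}))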
| 2.5
| 2
|
tests/show_text_input.py
|
royqh1979/easygui_qt
| 53
|
12776800
|
import os
import sys
sys.path.insert(0, os.getcwd())
try:
from easygui_qt import easygui_qt
except ImportError:
print("problem with import")
name = easygui_qt.text_input(message="What is your name?",
title="Mine is Reeborg.")
print(name, end='')
| 2.390625
| 2
|
studies/handwriting-all-digits/NPQC/JOB_SPECIFICATION.py
|
chris-n-self/large-scale-qml
| 6
|
12776801
|
<reponame>chris-n-self/large-scale-qml<filename>studies/handwriting-all-digits/NPQC/JOB_SPECIFICATION.py
"""
"""
import numpy as np
#
# Set all run arguments
#
BACKEND_NAME = 'aer_simulator'
N_QUBITS = 8
DEPTH = 8
TYPE_CIRCUIT = 1
TYPE_DATASET = 4
APPLY_STRATIFY = True
RESCALE_FACTOR = 1.
N_PCA_FEATURES = 36
N_BOOTSTRAPS = 0
CIRCUIT_RANDOM_SEED = None
DATA_RANDOM_SEED = 0
CROSSFID_RANDOM_SEED = None
CIRCUIT_INITIAL_ANGLES = 'natural'
N_SHOTS = 8192
N_UNITARIES = 8
DATA_BATCH_SIZE = 10
# user's IBMQ access
HUB = 'ibm-q'
GROUP = 'open'
PROJECT = 'main'
# used to name log files
JOB_TAG = 'allhandwriting_NPQC'
# custom measurement angles
CROSSFID_MODE = np.array([
[
[2.09914325, 2.00705957, 0. ],
[1.28350721, 5.41722865, 0. ],
[2.11177026, 2.55739671, 0. ],
[0.43537802, 0.93103415, 0. ],
[2.6945566 , 0.06899472, 0. ],
[0.94732866, 4.01614502, 0. ],
[1.52719778, 1.45538482, 0. ],
[2.57135093, 6.00713708, 0. ]
],
[
[1.05521506, 1.22524914, 0. ],
[0.99413113, 1.98661381, 0. ],
[2.90135481, 6.18348174, 0. ],
[2.51300765, 3.26634291, 0. ],
[1.63533351, 5.79325141, 0. ],
[0.49922029, 4.90991771, 0. ],
[1.78169364, 0.7474835 , 0. ],
[1.0508384 , 1.35706396, 0. ]
],
[
[0.96187981, 2.43270834, 0. ],
[2.61452563, 5.53521137, 0. ],
[1.81197411, 2.09851027, 0. ],
[1.45864039, 2.90963026, 0. ],
[0.5060164 , 3.02073861, 0. ],
[1.31113091, 2.0404974 , 0. ],
[2.38614734, 4.94163278, 0. ],
[1.16511222, 0.34448649, 0. ]
],
[
[1.20851174, 1.41943541, 0. ],
[1.78616964, 5.18416325, 0. ],
[1.05213422, 3.87258914, 0. ],
[1.68491338, 3.21589025, 0. ],
[2.13810595, 2.32168701, 0. ],
[1.13000814, 0.81769108, 0. ],
[1.51627106, 2.87006046, 0. ],
[1.72392878, 6.20209751, 0. ]
],
[
[1.29393566, 3.73905116, 0. ],
[0.98511984, 3.62275491, 0. ],
[0.69007815, 6.22109855, 0. ],
[1.70214123, 5.56315351, 0. ],
[2.63593981, 6.27815448, 0. ],
[0.27484382, 5.99185499, 0. ],
[2.39106181, 4.19400641, 0. ],
[2.75169041, 1.91833309, 0. ]
],
[
[1.65555076, 3.94673999, 0. ],
[2.78122407, 2.18786692, 0. ],
[1.99277337, 5.0942152 , 0. ],
[1.66137053, 0.86268424, 0. ],
[1.4980823 , 2.90626433, 0. ],
[1.50354366, 5.28275155, 0. ],
[1.94334822, 2.85578366, 0. ],
[1.01102875, 3.77630652, 0. ]
],
[
[2.01617789, 0.61752991, 0. ],
[1.55277483, 1.79136744, 0. ],
[1.92764635, 5.99720756, 0. ],
[2.65029944, 4.1276729 , 0. ],
[1.0085446 , 2.29298042, 0. ],
[1.3760181 , 3.24766021, 0. ],
[1.10438958, 4.86864287, 0. ],
[0.46315548, 4.94782213, 0. ]
],
[
[1.81746973, 5.20227599, 0. ],
[1.8307544 , 1.81987862, 0. ],
[3.09304307, 1.49732037, 0. ],
[1.77425086, 4.01186712, 0. ],
[1.36148421, 3.31249936, 0. ],
[2.50196395, 3.54928814, 0. ],
[0.2487076 , 1.3484653 , 0. ],
[1.16517901, 2.00197444, 0. ]
]
])
# make output filename
JOB_FILENAME = (
','.join([
BACKEND_NAME,
'n_qubits'+f'{N_QUBITS}',
'depth'+f'{DEPTH}',
'n_shots'+f'{N_SHOTS}',
'n_unitaries'+f'{N_UNITARIES}',
'crossfid_mode'+'RzRy_custom' # +f'{CROSSFID_MODE}',
])
)
| 1.84375
| 2
|
layers/__init__.py
|
MSU-MLSys-Lab/CATE
| 15
|
12776802
|
from .graphEncoder import PairWiseLearning
from .graphEncoder import GraphEncoder
from .loss import KLDivLoss
__all__ = ["PairWiseLearning", "KLDivLoss", "GraphEncoder"]
| 1.046875
| 1
|
open_relation/hex/hex_setup.py
|
sx14/hierarchical-relationship
| 1
|
12776803
|
def hed_setup(E_h, E_e):
"""
Build the HEX (hierarchy-and-exclusion) graph from the given edge matrices.
:param E_h: H-edge-mat
:param E_e: E-edge-mat
:return: G
"""
| 2.015625
| 2
|
imagr_users/forms.py
|
cewing/cfpydev-imagr
| 0
|
12776804
|
from django import forms
from imagr_users.models import ImagrUser
from registration.forms import RegistrationForm
class ImagrUserRegistrationForm(RegistrationForm):
def clean_username(self):
"""Validate that the username is alphanumeric and is not already in use.
"""
existing = ImagrUser.objects.filter(
username__iexact=self.cleaned_data['username']
)
if existing.exists():
raise forms.ValidationError(
"A user with that username already exists."
)
else:
return self.cleaned_data['username']
| 2.5625
| 3
|
chatclient.py
|
charchitdahal/Chat-Server
| 0
|
12776805
|
#!/usr/bin/env python
import socket
import os
import sys
import thread
import time
import json
server_ip = sys.argv[1]
server_port = 1134
username = sys.argv[2]
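# Usage (illustrative): python chatclient.py <server_ip> <username>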
sock = socket.socket( socket.AF_INET,socket.SOCK_DGRAM )
def get_messages():
global sock, username
while True:
data = None
try:
data, addr = sock.recvfrom( 1024, socket.MSG_DONTWAIT )
except socket.error:
time.sleep(0.01)
if data:
try:
message = json.loads(data)
if message['username'] == username:
    continue  # our own message echoed back; skip it
msg_str = message['message']
#else:
# print "DUPLICATE"
#if(message['username'] == username):
# print "DUPLICATE USERNAME"
if(message['username']):
msg_str = message['username'] + ": " + msg_str
# print the message
if len(message['message']) > 0:
print msg_str
except ValueError:
print "error: can not retrive message"
def get_input():
global sock, username
try:
while True:
message = { "username" : username, "message" : raw_input().strip()}
sock.sendto( json.dumps(message), (server_ip, int(server_port)) )
except KeyboardInterrupt:
print "bye"
thread.start_new_thread(get_input, ())
thread.start_new_thread(get_messages, ())
# upon "connecting", send /join and /who to announce our arrival and get a list
# of other people in the room
message = { "username" : username, "message" : "/join"}
sock.sendto( json.dumps(message), (server_ip, int(server_port)) )
message = { "username" : username, "message" : "/who"}
sock.sendto( json.dumps(message), (server_ip, int(server_port)) )
try:
while 1:
time.sleep(0.01)
except KeyboardInterrupt:
print "bye"
message = { "username" : username, "message" : "/bye"}
sock.sendto( json.dumps(message), (server_ip, int(server_port)) )
sys.exit(0)
| 3.125
| 3
|
init_db.py
|
zhy0216-collection/Gather
| 0
|
12776806
|
<gh_stars>0
# coding=utf-8
import settings
import pymongo
db = pymongo.Connection(host=settings.mongodb_host,
port=settings.mongodb_port)[settings.database_name]
db.members.create_index([('created', -1)])
db.topics.create_index([('last_reply_time', -1), ('node', 1)])
db.replies.create_index([('topic', 1), ('index', 1)])
db.notifications.create_index([('to', 1), ('created', 1)])
db.links.create_index([('priority', -1)])
db.histories.create_index([('target_id', 1), ('created', 1)])
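# Example query served by the ('last_reply_time', 'node') index above (node_id is hypothetical):
# db.topics.find({'node': node_id}).sort('last_reply_time', -1)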
| 2.25
| 2
|
sdk/python/generated/azuremarketplace/saas/models/subscription_summary_py3.py
|
Ercenk/AMPSaaSSpecs
| 0
|
12776807
|
<filename>sdk/python/generated/azuremarketplace/saas/models/subscription_summary_py3.py
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SubscriptionSummary(Model):
"""SubscriptionSummary.
:param id:
:type id: str
:param subscription_name:
:type subscription_name: str
:param offer_id:
:type offer_id: str
:param plan_id:
:type plan_id: str
:param quantity:
:type quantity: long
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'subscription_name': {'key': 'subscriptionName', 'type': 'str'},
'offer_id': {'key': 'offerId', 'type': 'str'},
'plan_id': {'key': 'planId', 'type': 'str'},
'quantity': {'key': 'quantity', 'type': 'long'},
}
def __init__(self, *, id: str=None, subscription_name: str=None, offer_id: str=None, plan_id: str=None, quantity: int=None, **kwargs) -> None:
super(SubscriptionSummary, self).__init__(**kwargs)
self.id = id
self.subscription_name = subscription_name
self.offer_id = offer_id
self.plan_id = plan_id
self.quantity = quantity
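# Illustrative construction (all values below are hypothetical):
# summary = SubscriptionSummary(id="sub-123", subscription_name="Contoso SaaS",
#                               offer_id="contoso-offer", plan_id="gold", quantity=5)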
| 1.929688
| 2
|
main/edit-distance-hr/edit-distance-hr.py
|
EliahKagan/old-practice-snapshot
| 0
|
12776808
|
#!/usr/bin/env python3
def read_text():
"""Reads a line of text, stripping whitespace."""
return input().strip()
def distance(s, t):
"""Wagner-Fischer algorithm"""
if len(s) < len(t):
s, t = t, s
pre = [None] * (len(t) + 1)
cur = list(range(len(pre)))
for i, sc in enumerate(s, 1):
pre, cur = cur, pre
cur[0] = i
for j, tc in enumerate(t, 1):
if sc == tc:
cur[j] = pre[j - 1]
else:
cur[j] = 1 + min(cur[j - 1], pre[j], pre[j - 1])
return cur[-1]
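# Quick sanity checks for distance() (illustrative; run separately from the stdin loop below):
# >>> distance("kitten", "sitting")
# 3
# >>> distance("", "abc")
# 3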
for _ in range(int(input())):
print(distance(read_text(), read_text()))
| 3.875
| 4
|
library/__init__.py
|
DanielLevy0705/algosec-ansible-role
| 13
|
12776809
|
<reponame>DanielLevy0705/algosec-ansible-role
__author__ = '<NAME> (@AlmogCohen)'
__version__ = '0.0.1'
| 0.726563
| 1
|
tareas/4/JimenezRodrigo/Tarea4.py
|
EnriqueAlbores03/sistop-2022-1
| 6
|
12776810
|
<reponame>EnriqueAlbores03/sistop-2022-1<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 27 21:06:27 2021
@author: RGJG
"""
global A
global posicion
A=[]
posicion=0
def modos(archivo,identificador,modo,A,posicion):
A.append([archivo,identificador,modo,posicion,False])
return A
print("""dir → Muestra el directorio
open <arch> <modo> → Especifica que operaremos con el archivo de
nombre "arch", empleando el modo especificado. Entrega un
descriptor de archivo numérico.
close <descr> → Termina una sesión de trabajo con el archivo
referido por el descriptor indicado. Después de un close,
cualquier intento por usar ese archivo entregará error.
read <descr> <longitud> → Lee la cantidad de bytes especificada
write <descr> <longitud> <datos» → Escribe la cantidad de
bytes especificada, guardando los datos indicados
como parámetro.
seek <descr> <ubicacion> → Salta a la ubicación especificada del
archivo.
quit → Detiene la ejecución de la simulación
""")
arch1="También puede escribir una palabra clave para buscar en línea el vídeo que mejor se adapte a su documento.Para otorgar a su documento un aspecto profesional, Word proporciona encabezados, pies de página, páginas de portada y diseños de cuadro de texto que se complementan entre sí."
arch2="El vídeo proporciona una manera eficaz para ayudarle a demostrar el punto. Cuando haga clic en Vídeo en línea, puede pegar el código para insertar del vídeo que desea agregar. También puede escribir una palabra clave para buscar en línea el vídeo que mejor se adapte a su documento.Para otorgar a su documento un aspecto profesional, Word proporciona encabezados, pies de página, páginas de portada y diseños de cuadro de texto que se complementan entre sí."
arch3="440a93fe-45d7-4ccc-a6ee-baf10ce7388a"
directorio = ['arch1 [{}]'.format(str(len(arch1))+" bytes"),'arch2 [{}]'.format(str(len(arch2))+" bytes"),'arch3 [{}]'.format(str(len(arch3))+" bytes")]
while True:
comando = str(input("→ "))
comando = comando.split(' ')
if comando[0] =="dir":
print(' '.join(directorio))
elif comando[0] == "open":
if comando[2]=='R':
if comando[1]=="arch1":
identificador = 1
print('Archivo abierto ({}) → {}'.format(comando[2],identificador))
A=modos(comando[1],identificador,comando[2],A,posicion)
elif comando[1]=="arch2":
identificador = 2
print('Archivo abierto ({}) → {}'.format(comando[2],identificador))
A=modos(comando[1],identificador,comando[2],A,posicion)
elif comando[1]=="arch3":
identificador = 3
print('Archivo abierto ({}) → {}'.format(comando[2],identificador))
A=modos(comando[1],identificador,comando[2],A,posicion)
else:
print('Lo siento, no se puede abrir el archivo {}'.format(comando[1]))
elif comando[2]=='A':
if comando[1]=="arch1":
identificador = 1
print('Archivo abierto ({}) → {}'.format(comando[2],identificador))
A=modos(comando[1],identificador,comando[2],A,posicion)
elif comando[1]=="arch2":
identificador = 2
print('Archivo abierto ({}) → {}'.format(comando[2],identificador))
A=modos(comando[1],identificador,comando[2],A,posicion)
elif comando[1]=="arch3":
identificador = 3
print('Archivo abierto ({}) → {}'.format(comando[2],identificador))
A=modos(comando[1],identificador,comando[2],A,posicion)
else:
print('Lo siento, no se puede abrir el archivo {}'.format(comando[1]))
elif comando[2]=='W':
if comando[1]=="arch1":
identificador = 1
print('Archivo abierto ({}) → {}'.format(comando[2],identificador))
A=modos(comando[1],identificador,comando[2],A,posicion)
elif comando[1]=="arch2":
identificador = 2
print('Archivo abierto ({}) → {}'.format(comando[2],identificador))
A=modos(comando[1],identificador,comando[2],A,posicion)
elif comando[1]=="arch3":
identificador = 3
print('Archivo abierto ({}) → {}'.format(comando[2],identificador))
A=modos(comando[1],identificador,comando[2],A,posicion)
else:
print('Lo siento, no se puede abrir el archivo {}'.format(comando[1]))
else:
print('Error en el modo')
elif comando[0] == 'read':
print(A)
print('Tiene {} archivos'.format(len(A)))
if comando[1] == '1':
if A[0][4]==False:
encontrado = A[0].index(1)
if A[0][encontrado+1]=='R' or A[0][encontrado+1]=='A':
print(arch1[A[0][3]:A[0][3]+int(comando[2])])
else:
print('Accion ilegal')
else:
print("Error: el identificador #1 ya está cerrado")
elif comando[1] == '2':
if A[1][4]==False:
encontrado = A[1].index(2)
if A[1][encontrado+1]=='R' or A[1][encontrado+1]=='A':
print(arch2[A[1][3]:A[1][3]+int(comando[2])])
else:
print('Accion ilegal')
else:
print("Error: el identificador #2 ya está cerrado")
elif comando[1] == '3':
if A[2][4]==False:
encontrado = A[2].index(3)
if A[2][encontrado+1]=='R' or A[2][encontrado+1]=='A':
print(arch3[A[2][3]:A[2][3]+int(comando[2])])
else:
print('Accion ilegal')
else:
print("Error: el identificador #1 ya está cerrado")
else:
print('ERROR: No tienes permisos de realizar esta accion')
elif comando[0] == 'seek':
if comando[1] == '1':
if A[0][4]==False:
encontrado = A[0].index(1)
if A[0][encontrado+1]=='R' or A[0][encontrado+1]=='A':
posicion = int(comando[2])
A[0][3] = posicion
else:
print("Error: El identificador #1 ya está cerrado")
elif comando[1] == '2':
if A[1][4]==False:
encontrado = A[1].index(2)
if A[1][encontrado+1]=='R'or A[1][encontrado+1]=='A':
posicion = int(comando[2])
A[1][3] = posicion
else:
print("Error: El identificador #2 ya está cerrado")
elif comando[1] == '3':
if A[2][4]==False:
encontrado = A[2].index(3)
if A[2][encontrado+1]=='R' or A[2][encontrado+1]=='A' :
posicion = int(comando[2])
A[2][3] = posicion
else:
print("Error: El identificador #3 ya está cerrado")
else:
print('ERROR: No tienes permisos de realizar esta accion')
print(A)
elif comando[0] == 'write':
if comando[1] == '1':
if A[0][4]==False:
encontrado = A[0].index(1)
if A[0][encontrado+1]=='W' or A[0][encontrado+1]=='A':
cadenaarem=arch1[posicion:posicion+int(comando[2])+1]
arch1 = arch1.replace(cadenaarem,comando[3])
else:
print('Accion ilegal')
else:
print("Error: El identificador #1 ya está cerrado")
elif comando[1] == '2':
if A[1][4]==False:
encontrado = A[1].index(2)
print("Encontrado en {} y dos posiciones adelantadas esta en modo {}".format(encontrado,A[1][encontrado+1]))
if A[1][encontrado+1]=='W':
cadenaarem=arch2[posicion:posicion+int(comando[2])+1]
arch2 = arch2.replace(cadenaarem,comando[3])
elif A[1][encontrado+1]=='A':
cadenaarem=arch2[posicion:posicion+int(comando[2])+1]
arch2 = arch2.replace(cadenaarem,comando[3])
print('Se modificó con exito')
else:
print('Accion ilegal')
else:
print("Error: El identificador #2 ya está cerrado")
elif comando[1] == '3':
if A[2][4]==False:
encontrado = A[2].index(3)
if A[2][encontrado+1]=='W' or A[2][encontrado+1]=='A':
cadenaarem=arch3[posicion:posicion+int(comando[2])+1]
arch3 = arch3.replace(cadenaarem,comando[3])
else:
print('Accion ilegal')
else:
print("Error: El identificador #3 ya está cerrado")
elif comando[0] == 'close':
if comando[1] =='1':
A[0][4] = True
print(0)
elif comando[1] =='2':
A[1][4]=True
print(0)
elif comando[1] =='3':
A[2][4]=True
print(0)
elif comando[0] == 'quit':
break
else:
print("No hay comando asociado a {}".format(comando))
| 2.421875
| 2
|
engine/test/unit/app/test_app_api.py
|
pcanto-hopeit/hopeit.engine
| 15
|
12776811
|
from typing import Optional
import pytest
import hopeit.app.api as api
from hopeit.app.config import EventType
from hopeit.server.api import APIError
from mock_app import mock_app_api_get, MockData, mock_app_api_post, mock_app_api_get_list
from mock_app import mock_api_app_config, mock_api_spec # noqa: F401
def test_api_from_config(monkeypatch, mock_api_spec, mock_api_app_config): # noqa: F811
monkeypatch.setattr(api, 'spec', mock_api_spec)
spec = api.api_from_config(
mock_app_api_get, app_config=mock_api_app_config, event_name='mock-app-api-get', plugin=None)
assert spec == mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['get']
def test_api_from_config_missing(monkeypatch, mock_api_spec, mock_api_app_config): # noqa: F811
monkeypatch.setattr(api, 'spec', mock_api_spec)
with pytest.raises(APIError):
api.api_from_config(
mock_app_api_get, app_config=mock_api_app_config, event_name='mock-app-noapi', plugin=None)
mock_api_app_config.events['mock-app-api-get-list'].type = EventType.POST
with pytest.raises(APIError):
api.api_from_config(
mock_app_api_get_list, app_config=mock_api_app_config, event_name='mock-app-api-get-list', plugin=None)
def test_event_api(monkeypatch, mock_api_spec, mock_api_app_config): # noqa: F811
monkeypatch.setattr(api, 'spec', mock_api_spec)
spec = api.event_api(
description="Test app api",
payload=(str, "Payload"),
query_args=[('arg1', Optional[int], "Argument 1")],
responses={200: (MockData, "MockData result")}
)(mock_app_api_get, app_config=mock_api_app_config, event_name='mock-app-api-get', plugin=None)
assert spec['description'] == \
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['get']['description']
assert spec['parameters'][0] == \
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['get']['parameters'][0]
assert spec['responses'] == \
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['get']['responses']
def test_event_api_post(monkeypatch, mock_api_spec, mock_api_app_config): # noqa: F811
monkeypatch.setattr(api, 'spec', mock_api_spec)
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['post']['parameters'][0]['description'] = \
'arg1'
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['post']['requestBody']['description'] = \
'MockData'
spec = api.event_api(
description="Description Test app api part 2",
payload=MockData,
query_args=['arg1'],
responses={200: int}
)(mock_app_api_post, app_config=mock_api_app_config, event_name='mock-app-api-post', plugin=None)
assert spec['summary'] == \
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['post']['summary']
assert spec['description'] == \
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['post']['description']
assert spec['parameters'][0] == \
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['post']['parameters'][0]
assert spec['requestBody'] == \
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['post']['requestBody']
assert spec['responses'] == \
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['post']['responses']
def test_app_base_route_name(mock_api_app_config): # noqa: F811
assert api.app_base_route_name(mock_api_app_config.app) == "/api/mock-app-api/test"
assert api.app_base_route_name(mock_api_app_config.app, plugin=mock_api_app_config.app) == \
"/api/mock-app-api/test/mock-app-api/test"
| 1.828125
| 2
|
tests/test_client/test_init.py
|
Flickswitch/phx_events
| 0
|
12776812
|
import asyncio
from urllib.parse import urlencode
import pytest
from phx_events.client import PHXChannelsClient
pytestmark = pytest.mark.asyncio
class TestPHXChannelsClientInit:
def setup(self):
self.socket_url = 'ws://test.socket/url/'
self.channel_auth_token = '<PASSWORD>'
self.phx_channels_client = PHXChannelsClient(self.socket_url, self.channel_auth_token)
def test_async_logger_child_set_as_logger_on_client(self):
from phx_events.async_logger import async_logger
assert self.phx_channels_client.logger.parent == async_logger
def test_channel_socket_url_has_token_if_specified(self):
no_token_client = PHXChannelsClient(self.socket_url)
assert no_token_client.channel_socket_url == self.socket_url
assert self.phx_channels_client.channel_socket_url == f'{self.socket_url}?token={self.channel_auth_token}'
def test_channel_socket_url_token_is_made_url_safe(self):
unsafe_token = '==??=='
safe_token_client = PHXChannelsClient(self.socket_url, channel_auth_token=unsafe_token)
assert safe_token_client.channel_socket_url != f'{self.socket_url}?token={unsafe_token}'
assert safe_token_client.channel_socket_url == f'{self.socket_url}?{urlencode({"token": unsafe_token})}'
def test_event_loop_set_by_default_if_not_specified(self):
no_loop_specified_client = PHXChannelsClient(self.socket_url)
assert isinstance(no_loop_specified_client._loop, asyncio.BaseEventLoop)
def test_event_loop_set_to_argument_if_specified(self):
event_loop = asyncio.get_event_loop()
specified_loop_client = PHXChannelsClient(self.socket_url, event_loop=event_loop)
assert specified_loop_client._loop == event_loop
| 2.109375
| 2
|
apps/stats/urls.py
|
puertoricanDev/horas
| 10
|
12776813
|
from django.urls import re_path
from .views import StatsView
urlpatterns = [re_path("^$", StatsView.as_view(), name="stats")]
| 1.507813
| 2
|
newbeginning/network/test-analysis/distributionplots.py
|
arnavkapoor/fsmresults
| 0
|
12776814
|
<gh_stars>0
# import plotly.plotly as py
# import plotly.graph_objs as go
# import plotly.figure_factory as FF
import math
import numpy as np
import pandas as pd
import matplotlib as mplt
import itertools
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
neededfiles = ['aim.fsm','battlefield2.fsm','counterstrike-source.fsm','halflife2-deathmatch.fsm','dns.fsm','h323.fsm','hotline.fsm','ntp.fsm','rtp.fsm','ssl.fsm','tsp.fsm','yahoo.fsm']
for filename in neededfiles:
filename=filename.split('.')[0]
df = pd.read_csv('./individual-stats/'+filename+'.csv')
freq=(df['Percentage'].values.tolist())
vals = df['Length'].values.tolist()
bmk = filename
print(freq)
#create handle combinations
# linestyles = ['-', ':', '-.', '--']
# markers = ['x', '^', 'o', '*']
# handlestyles = itertools.product(linestyles, markers)
gaussian = sns.kdeplot(freq,shade=True,bw=2)
#plt.show()
# layout ={
# 'title':filename,
# 'yaxis': {
# 'title' : 'Testcases/second'
# },
# 'xaxis': {
# 'title' : 'Log Number of Testcases'
# },
# }
plt.xticks(np.arange(vals[0], vals[-1], step=1))
fig = gaussian.get_figure()
fig.savefig('./individualgraphspercent/'+bmk+"gaussian.svg",dpi=1000)
# fig = dict(data=dataPanda,layout=layout)
# py.image.save_as(fig, './individualgraphs/'+filename+'2.png')
| 2.3125
| 2
|
vice/toolkit/hydrodisk/data/download.py
|
rcooke-ast/VICE
| 22
|
12776815
|
<reponame>rcooke-ast/VICE
import urllib.request
import sys
import os
PATH = os.path.dirname(os.path.abspath(__file__))
NSUBS = int(30) # hard coded into VICE
def download(verbose = True):
r"""
Downloads the h277 supplementary data from VICE's source tree on GitHub
"""
if not os.path.exists("%s/h277" % (PATH)): os.mkdir("%s/h277" % (PATH))
for sub in range(NSUBS):
url = "https://raw.githubusercontent.com/giganano/VICE/v1.3.x/vice/"
url += "toolkit/hydrodisk/data/h277/sub%d.dat" % (sub)
urllib.request.urlretrieve(url, "%s/h277/sub%d.dat" % (PATH, sub))
if verbose: sys.stdout.write("\rDownloading subsample: %d of %d" % (
sub + 1, NSUBS))
if verbose: sys.stdout.write("\n")
def _h277_exists():
r"""
Determines if the h277 supplementary data has been downloaded.
"""
status = True
for sub in range(NSUBS):
status &= os.path.exists("%s/h277/sub%d.dat" % (PATH, sub))
if not status: break
return status
def _h277_remove():
r"""
Removes the h277 supplementary data.
"""
try:
for sub in range(NSUBS):
filename = "%s/h277/sub%d.dat" % (PATH, sub)
if os.path.exists(filename): os.remove(filename)
os.rmdir("%s/h277" % (PATH))
except (FileNotFoundError, OSError):
raise FileNotFoundError("Supplementary data not found.")
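# Typical use (a sketch; download() needs network access to the GitHub source tree):
# if not _h277_exists():
#     download(verbose=True)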
| 2.765625
| 3
|
xroms0/depth.py
|
bjornaa/xroms0
| 0
|
12776816
|
"""Vertical structure functions for ROMS
:func:`sdepth`
Depth of s-levels
:func:`zslice`
Slice a 3D field in s-coordinates to fixed depth
:func:`multi_zslice`
Slice a 3D field to several depth levels
:func:`z_average`
Vertical average of a 3D field
:func:`s_stretch`
Compute vertical stretching arrays Cs_r or Cs_w
"""
# -----------------------------------
# <NAME> <<EMAIL>>
# Institute of Marine Research
# Bergen, Norway
# 2010-09-30
# -----------------------------------
from typing import Union, List
import numpy as np
import xarray as xr
Surface = Union[float, np.ndarray] # Surface z = ....
def sdepth(H, Hc, C, stagger="rho", Vtransform=1):
"""Depth of s-levels
*H* : arraylike
Bottom depths [meter, positive]
*Hc* : scalar
Critical depth
*cs_r* : 1D array
s-level stretching curve
*stagger* : [ 'rho' | 'w' ]
*Vtransform* : [ 1 | 2 ]
defines the transform used, defaults 1 = Song-Haidvogel
Returns an array with ndim = H.ndim + 1 and
shape = cs_r.shape + H.shape with the depths of the
mid-points in the s-levels.
Typical usage::
>>> fid = Dataset(roms_file)
>>> H = fid.variables['h'][:, :]
>>> C = fid.variables['Cs_r'][:]
>>> Hc = fid.variables['hc'].getValue()
>>> z_rho = sdepth(H, Hc, C)
"""
H = np.asarray(H)
Hshape = H.shape # Save the shape of H
H = H.ravel() # and make H 1D for easy shape manipulation
C = np.asarray(C)
N = len(C)
outshape = (N,) + Hshape # Shape of output
if stagger == "rho":
S = -1.0 + (0.5 + np.arange(N)) / N # Unstretched coordinates
elif stagger == "w":
S = np.linspace(-1.0, 0.0, N)
else:
raise ValueError("stagger must be 'rho' or 'w'")
if Vtransform == 1: # Default transform by Song and Haidvogel
A = Hc * (S - C)[:, None]
B = np.outer(C, H)
return (A + B).reshape(outshape)
elif Vtransform == 2: # New transform by Shchepetkin
N = Hc * S[:, None] + np.outer(C, H)
D = 1.0 + Hc / H
return (N / D).reshape(outshape)
else:
raise ValueError("Unknown Vtransform")
# ------------------------------------
def sdepth_w(H, Hc, cs_w):
"""Return depth of w-points in s-levels
Kept for backwards compatibility
use *sdepth(H, Hc, cs_w, stagger='w')* instead
"""
return sdepth(H, Hc, cs_w, stagger="w")
# ------------------------------------------
# Vertical slicing e.t.c.
# ------------------------------------------
def zslice2(F, S, z):
"""Vertical slice of a 3D ROMS field
Vertical interpolation of a field in s-coordinates to
(possibly varying) depth level
*F* : array with vertical profiles, first dimension is vertical
*S* : array with depths of the F-values,
*z* : Depth level(s) for output, scalar or ``shape = F.shape[1:]``
The z values should be negative
Return value : array, `shape = F.shape[1:]`, the vertical slice
Example:
H is an array of depths (positive values)
Hc is the critical depth
C is 1D containing the s-coordinate stretching at rho-points
returns F50, interpolated values at 50 meter with F50.shape = H.shape
>>> z_rho = sdepth(H, Hc, C)
>>> F50 = zslice(F, z_rho, -50.0)
"""
# TODO:
# Option to Save A, D, Dm
# => faster interpolate more fields to same depth
F = np.asarray(F)
S = np.asarray(S)
z = np.asarray(z, dtype="float")
Fshape = F.shape # Save original shape
if S.shape != Fshape:
raise ValueError("F and z_r must have same shape")
if z.shape and z.shape != Fshape[1:]:
raise ValueError("z must be scalar or have shape = F.shape[1:]")
# Flatten all non-vertical dimensions
N = F.shape[0] # Length of vertical dimension
M = F.size // N # Combined length of horizontal dimension(s)
F = F.reshape((N, M))
S = S.reshape((N, M))
if z.shape:
z = z.reshape((M,))
# Find integer array C with shape (M,)
# with S[C[i]-1, i] < z <= S[C[i], i]
# C = np.apply_along_axis(np.searchsorted, 0, S, z)
# but the following is much faster
C = np.sum(S < z, axis=0)
C = C.clip(1, N - 1)
# For vectorization
# construct index array tuples D and Dm such that
# F[D][i] = F[C[i], i]
# F[Dm][i] = F[C[i]-1, i]
I = np.arange(M, dtype="int")
D = (C, I)
Dm = (C - 1, I)
# Compute interpolation weights
A = (z - S[Dm]) / (S[D] - S[Dm])
A = A.clip(0.0, 1.0) # Control the extrapolation
# Do the linear interpolation
R = (1 - A) * F[Dm] + A * F[D]
# Give the result the correct shape
R = R.reshape(Fshape[1:])
return R
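# Illustrative use of zslice2 together with sdepth (array shapes are hypothetical):
#   z_rho = sdepth(H, Hc, C)              # (N, Mp, Lp) depths of the s-levels
#   temp50 = zslice2(temp, z_rho, -50.0)  # temp: (N, Mp, Lp) -> temp50: (Mp, Lp)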
# -----------------------------------------------
def s_stretch(N, theta_s, theta_b, stagger="rho", Vstretching=1):
"""Compute a s-level stretching array
*N* : Number of vertical levels
*theta_s* : Surface stretching factor
*theta_b* : Bottom stretching factor
*stagger* : "rho"|"w"
*Vstretching* : 1|2|4
"""
if stagger == "rho":
S = -1.0 + (0.5 + np.arange(N)) / N
elif stagger == "w":
S = np.linspace(-1.0, 0.0, N + 1)
else:
raise ValueError("stagger must be 'rho' or 'w'")
if Vstretching == 1:
cff1 = 1.0 / np.sinh(theta_s)
cff2 = 0.5 / np.tanh(0.5 * theta_s)
return (1.0 - theta_b) * cff1 * np.sinh(theta_s * S) + theta_b * (
cff2 * np.tanh(theta_s * (S + 0.5)) - 0.5
)
elif Vstretching == 2:
a, b = 1.0, 1.0
Csur = (1 - np.cosh(theta_s * S)) / (np.cosh(theta_s) - 1)
Cbot = np.sinh(theta_b * (S + 1)) / np.sinh(theta_b) - 1
mu = (S + 1) ** a * (1 + (a / b) * (1 - (S + 1) ** b))
return mu * Csur + (1 - mu) * Cbot
elif Vstretching == 4:
C = (1 - np.cosh(theta_s * S)) / (np.cosh(theta_s) - 1)
C = (np.exp(theta_b * C) - 1) / (1 - np.exp(-theta_b))
return C
elif Vstretching == 5:
if stagger == "w":
K = np.arange(N + 1)
if stagger == "rho":
K = np.arange(0.5, N + 1)
S1 = -(K * K - 2 * K * N + K + N * N - N) / (N * N - N)
S2 = -0.01 * (K * K - K * N) / (1 - N)
S = S1 + S2
C = (1 - np.cosh(theta_s * S)) / (np.cosh(theta_s) - 1)
C = (np.exp(theta_b * C) - 1) / (1 - np.exp(-theta_b))
return C
else:
raise ValueError("Unknown Vstretching")
def invert_s(F: xr.DataArray, value: Surface):
"""Return highest (shallowest) s-value such that F(s,...) = value
F = DataArray with z_rho as coordinate
The vertical dimension in F must be first, axis=0
F must not have a time dimension
Returns D, Dm, a
F[Dm] <= value <= F[D] (or opposite inequalities)
and a is the interpolation weight:
value = (1-a)*F(K-1) + a*F(K)
a = nan if this is not possible
"""
val = value
# Work on numpy arrays
F0 = F.values
# z_rho = F.z_rho.values
# s_rho = F.s_rho.values
val = np.asarray(val, dtype="float")
# Fshape = F.shape # Save original shape
# if val.shape and val.shape != Fshape[1:]:
# raise ValueError("z must be scalar or have shape = F.shape[1:]")
# Flatten all non-vertical dimensions
N = F.shape[0] # Length of vertical dimension
M = F0.size // N # Combined length of horizontal dimensions
F0 = F0.reshape((N, M))
if val.shape: # Value may be space dependent
val = val.reshape((M,))
# Look for highest s-value where G is negative
G = (F0[1:, :] - val) * (F0[:-1, :] - val)
G = G[::-1, :] # Reverse
K = N - 1 - (G <= 0).argmax(axis=0)
# Define D such that F[D][i] = F[K[i], i]
I = np.arange(M)
D = (K, I)
Dm = (K - 1, I)
# Compute interpolation weights
a = (val - F0[Dm]) / (F0[D] - F0[Dm] + 1e-30)
# Only use 0 <= a <= 1
a[np.abs(a - 0.5) > 0.5] = np.nan #
return D, Dm, a
class HorizontalSlicer:
"""Reduce to horizontal view by slicing
F = DataArray, time-independent, first dimension is vertical
value = slice value
If F is not monotonous, returns the shallowest depth where F = value
"""
def __init__(self, F: xr.DataArray, value: Surface) -> None:
self.D, self.Dm, self.a = invert_s(F, value)
self.M = len(self.a)
# self.dims = F.dims
def __call__(self, G: xr.DataArray) -> xr.DataArray:
"""G must have same vertical and horizontal dimensions as F"""
if "ocean_time" in G.dims:
ntimes = G.shape[0]
kmax = G.shape[1]
R: List[np.ndarray] = []
for t in range(ntimes):
G0 = G.isel(ocean_time=t).values
G0 = G0.reshape((kmax, self.M))
R0 = (1 - self.a) * G0[self.Dm] + self.a * G0[self.D]
R0 = R0.reshape(G.shape[2:])
R.append(R0)
R1 = np.array(R)
else:
kmax = G.shape[0]
G0 = G.values
G0 = G0.reshape((kmax, self.M))
R1 = (1 - self.a) * G0[self.Dm] + self.a * G0[self.D]
R1 = R1.reshape(G.shape[1:])
# Return a DataArray
# Should have something on z_rho?
dims = list(G.dims)
dims.remove("s_rho")
coords = {dim: G.coords[dim] for dim in dims}
coords["lon_rho"] = G.coords["lon_rho"]
coords["lat_rho"] = G.coords["lat_rho"]
return xr.DataArray(R1, dims=dims, coords=coords, attrs=G.attrs)
| 2.75
| 3
|
tests/test_table_input.py
|
abcnishant007/sklearn-evaluation
| 351
|
12776817
|
<filename>tests/test_table_input.py
from unittest import TestCase
from sklearn_evaluation import table
class TestMissingInput(TestCase):
def test_feature_importances(self):
with self.assertRaisesRegex(ValueError, "needed to tabulate"):
table.feature_importances(None)
| 2.640625
| 3
|
src/api.py
|
edgartan/slack_movie_app
| 0
|
12776818
|
import sys
sys.path.insert(1, "lib/")
# This goes against PEP-8; we will refactor once we have a build pipeline
import os
import json
import logging
import requests
import cachetools.func
class MovieApis:
api_key = os.environ.get("API_KEY")
# called on the class directly (no self; effectively a static method)
@cachetools.func.ttl_cache(maxsize=20, ttl=300)
def get_movie_details(movie_id: str, region: str) -> dict:
params = {
"api_key": MovieApis.api_key,
"region": region
}
try:
r = requests.get(
f"https://api.themoviedb.org/3/movie/{movie_id}", params)
r.raise_for_status()
except Exception as e:
logging.exception(f"Error getting movie details: {e}")
data = json.loads(r.text)
return data
# called on the class directly (no self; effectively a static method)
@cachetools.func.ttl_cache(maxsize=5, ttl=300)
def get_list_of_movies(pages: int) -> list:
movie_list = []
params = {
"api_key": MovieApis.api_key,
"region": "US",
"page": []
}
for pages in range(pages):
params["page"].append(pages + 1)
try:
r = requests.get(
"https://api.themoviedb.org/3/movie/now_playing", params)
r.raise_for_status()
except requests.exceptions.RequestException as e:
logging.exception(f"Error getting list of movies: {e}")
data = json.loads(r.text)
for item in data["results"]:
movie = {
"text": {
"type": "plain_text",
"text": item["original_title"]
},
"value": str(item["id"])
}
movie_list.append(movie)
return movie_list
# Start your app
if __name__ == "__main__":
MovieApis.get_list_of_movies(3)
| 2.4375
| 2
|
src/checker/httpstatusmonitor.py
|
red-lever-solutions/system-checker
| 1
|
12776819
|
<filename>src/checker/httpstatusmonitor.py
from .mylog import log
import requests
def monitor(url, method="GET", data=None, headers=None, verify_ssl=True):
log.debug("Checking http status code of %s", url)
try:
if method.lower() == "get":
response = requests.get(url, data=data, headers=headers, verify=verify_ssl)
elif method.lower() == "post":
response = requests.post(url, data=data, headers=headers, verify=verify_ssl)
else:
raise Exception("Method not supported {:s}".format(method.upper()))
except Exception as e:
return {"success": False, "message": str(e)}
log.debug("Got status code %s", response.status_code)
if response.status_code == requests.codes.ok:
success = True
message = "{0:s} ({1:d})".format(url, response.status_code)
else:
success = False
message = "{0:s} ({1:d})".format(url, response.status_code)
return {
"success": success,
"message": message
}
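# Example call (the URL is hypothetical; assumes `log` is a standard logging.Logger):
# result = monitor("https://example.com/health", method="GET")
# if not result["success"]:
#     log.warning("health check failed: %s", result["message"])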
| 2.8125
| 3
|
update_data.py
|
claire9501/d3-group-project
| 0
|
12776820
|
# Dependencies
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
# Go to existing database with automap_base
from sqlalchemy.ext.automap import automap_base
# Work through mapper to use python code
from sqlalchemy.orm import Session, relationship
# Inspect with python
from sqlalchemy import create_engine, inspect
# Allow us to declare column types
from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc
from sqlalchemy.ext.declarative import declarative_base
import datetime
import pandas as pd
import numpy as np
import json
def update_weather(lat_search):
# Sets an object to utilize the default declarative base in SQL Alchemy
Base = declarative_base()
## Class base template to upload to sqlite
class WeatherSeries(Base):
__tablename__ = 'weatherSeries'
id = Column(Integer, primary_key=True)
city = Column(String(50))
country = Column(String(200))
region = Column(String(80))
avgtemp = Column(Float)
date = Column(String(12))
date_epoch = Column(Float)
maxtemp = Column(Float)
mintemp = Column(Float)
sunhour = Column(Float)
totalsnow = Column(Float)
uv_index = Column(Float)
magnitude = Column(Float)
place = Column(String(80))
lat = Column(String(12))
long = Column(String(12))
# Create Database Connection
# ----------------------------------
# Creates a connection to our DB
# Engine opens the door. Conn is the walk through sign
engine = create_engine("sqlite:///earthquake_weather.sqlite")
conn = engine.connect()
# Create a "Metadata" Layer That Abstracts our SQL Database
# ----------------------------------
# Create (if not already in existence) the tables associated with our classes.
Base.metadata.create_all(engine)
# Create a Session Object to Connect to DB
# ----------------------------------
session = Session(bind=engine)
def weatherTimeSeries(query_call):
Base = automap_base()
Base.prepare(engine, reflect=True)
# Check db table names
# Base.classes.keys()
weather_table = Base.classes.weatherSeries
weather_container = session.query(weather_table).filter(weather_table.lat == query_call).all()
weather_data = []
def spellDate(datestring):
date_time_obj = datetime.datetime.strptime(datestring, '%Y-%m-%d')
month_name = date_time_obj.strftime("%B")
day = date_time_obj.strftime("%d")
year = date_time_obj.strftime("%Y")
month_day = month_name + " " + day
month_day_year = month_name + " " + day + ", " + year
date = {
"month_day": month_day,
"month_day_year": month_day_year,
}
return date
for data in weather_container:
date_date = data.date
date_to_pass = spellDate(date_date)
container = {
"city": data.city,
"country": data.country,
"region": data.region,
"avgtemp": data.avgtemp,
"date": date_to_pass,
"date_epoch": data.date_epoch,
"maxtemp": data.maxtemp,
"mintemp": data.mintemp,
"sunhour": data.sunhour,
"totalsnow": data.totalsnow,
"uv_index": data.uv_index,
"magnitude": data.magnitude,
"place": data.place,
"lat": data.lat,
"long": data.long
}
weather_data.append(container)
return weather_data
latitude = lat_search
weather_data = weatherTimeSeries(latitude)
# Return results
return weather_data
#################################################################
## Facts
##################################################################
def aboveSixQuakeCall():
# Sets an object to utilize the default declarative base in SQL Alchemy
Base = declarative_base()
## Class base template to upload to sqlite
class WeatherSeries(Base):
__tablename__ = 'weatherSeries'
id = Column(Integer, primary_key=True)
city = Column(String(50))
country = Column(String(200))
region = Column(String(80))
avgtemp = Column(Float)
date = Column(String(12))
date_epoch = Column(Float)
maxtemp = Column(Float)
mintemp = Column(Float)
sunhour = Column(Float)
totalsnow = Column(Float)
uv_index = Column(Float)
magnitude = Column(Float)
place = Column(String(80))
lat = Column(String(12))
long = Column(String(12))
# Create Database Connection
# ----------------------------------
# Creates a connection to our DB
# Engine opens the door. Conn is the walk through sign
engine = create_engine("sqlite:///earthquake_weather.sqlite")
conn = engine.connect()
# Create a "Metadata" Layer That Abstracts our SQL Database
# ----------------------------------
# Create (if not already in existence) the tables associated with our classes.
Base.metadata.create_all(engine)
# Create a Session Object to Connect to DB
# ----------------------------------
session = Session(bind=engine)
def aboveSixQuake():
Base = automap_base()
Base.prepare(engine, reflect=True)
# Check db table names
# Base.classes.keys()
weather_table = Base.classes.weatherSeries
weather_container = session.query(weather_table).filter(weather_table.magnitude > 6).all()
weather_highesteq = session.query(weather_table).order_by(desc(weather_table.magnitude)).order_by(desc(weather_table.date)).limit(4).all()
weather_facts = []
magnitude_list = []
count = 0
magnitude_keep = 6
for data in weather_highesteq:
magnitude = data.magnitude
# Get highest recorded earthquake
if data.magnitude > magnitude_keep:
magnitude_keep = data.magnitude
location = data.country
city = data.city
temp_low = data.mintemp
temp_high = data.maxtemp
avg_temp_at_time = data.avgtemp
date = data.date
magnitude = magnitude_keep
else:
continue
# Counter
for data in weather_container:
count += 1
def spellDate(datestring):
date_time_obj = datetime.datetime.strptime(datestring, '%Y-%m-%d')
month_name = date_time_obj.strftime("%B")
day = date_time_obj.strftime("%d")
year = date_time_obj.strftime("%Y")
month_day = month_name + " " + day
month_day_year = month_name + " " + day + ", " + year
date = {
"month_day": month_day,
"month_day_year": month_day_year,
}
return date
# Get avgtemp from list
# def Average(lst):
# return sum(lst) / len(lst)
# quake_avg = Average(magnitude_list)
spell_dates = spellDate(date)
container = {
"count": count,
# "avgtemp": quake_avg,
"highest_magnitude": magnitude_keep,
"highest_city": city,
"highest_location": location,
"temp_low": temp_low,
"temp_high": temp_high,
"avg_temp_at_time": avg_temp_at_time,
"date": spell_dates,
}
weather_facts.append(container)
return weather_facts
weather_facts = aboveSixQuake()
# Return results
return weather_facts
#################################################################
## Facts - Latest Quake
##################################################################
def latestQuakesCall():
# Sets an object to utilize the default declarative base in SQL Alchemy
Base = declarative_base()
## Class base template to upload to sqlite
class WeatherSeries(Base):
__tablename__ = 'weatherSeries'
id = Column(Integer, primary_key=True)
city = Column(String(50))
country = Column(String(200))
region = Column(String(80))
avgtemp = Column(Float)
date = Column(String(12))
date_epoch = Column(Float)
maxtemp = Column(Float)
mintemp = Column(Float)
sunhour = Column(Float)
totalsnow = Column(Float)
uv_index = Column(Float)
magnitude = Column(Float)
place = Column(String(80))
lat = Column(String(12))
long = Column(String(12))
# Create Database Connection
# ----------------------------------
# Creates a connection to our DB
# Engine opens the door. Conn is the walk through sign
engine = create_engine("sqlite:///earthquake_weather.sqlite")
conn = engine.connect()
# Create a "Metadata" Layer That Abstracts our SQL Database
# ----------------------------------
# Create (if not already in existence) the tables associated with our classes.
Base.metadata.create_all(engine)
# Create a Session Object to Connect to DB
# ----------------------------------
session = Session(bind=engine)
def latestQuakes():
Base = automap_base()
Base.prepare(engine, reflect=True)
weather_table = Base.classes.weatherSeries
weather_container = session.query(weather_table).order_by(desc(weather_table.date)).limit(5).all()
weather_facts5 = []
weather_facts5_done = []
def spellDate(datestring):
date_time_obj = datetime.datetime.strptime(datestring, '%Y-%m-%d')
month_name = date_time_obj.strftime("%B")
day = date_time_obj.strftime("%d")
year = date_time_obj.strftime("%Y")
month_day = month_name + " " + day
month_day_year = month_name + " " + day + ", " + year
date = {
"month_day": month_day,
"month_day_year": month_day_year,
}
return date
for data in weather_container:
spell_dates = spellDate( data.date)
container = {
"date": spell_dates,
"country": data.country,
"region": data.region,
"magnitude": data.magnitude,
"maxtemp": data.maxtemp,
"mintemp": data.mintemp,
"avgtemp": data.avgtemp,
}
weather_facts5.append(container)
return weather_facts5
weather_facts5 = latestQuakes()
# Return results
return weather_facts5
#################################################################
## Analysis Chart
##################################################################
def analysisChartCall():
# Sets an object to utilize the default declarative base in SQL Alchemy
Base = declarative_base()
## Class base template to upload to sqlite
class WeatherSeries(Base):
__tablename__ = 'weatherSeries'
id = Column(Integer, primary_key=True)
city = Column(String(50))
country = Column(String(200))
region = Column(String(80))
avgtemp = Column(Float)
date = Column(String(12))
date_epoch = Column(Float)
maxtemp = Column(Float)
mintemp = Column(Float)
sunhour = Column(Float)
totalsnow = Column(Float)
uv_index = Column(Float)
magnitude = Column(Float)
place = Column(String(80))
lat = Column(String(12))
long = Column(String(12))
# Create Database Connection
# ----------------------------------
# Creates a connection to our DB
# Engine opens the door. Conn is the walk through sign
engine = create_engine("sqlite:///earthquake_weather.sqlite")
conn = engine.connect()
# Create a "Metadata" Layer That Abstracts our SQL Database
# ----------------------------------
# Create (if not already in existence) the tables associated with our classes.
Base.metadata.create_all(engine)
# Create a Session Object to Connect to DB
# ----------------------------------
session = Session(bind=engine)
def analysisChart():
Base = automap_base()
Base.prepare(engine, reflect=True)
weather_table = Base.classes.weatherSeries
analysis_container = session.query(weather_table).order_by(desc(weather_table.date)).all()
analysis_list_temp = []
x=1
for data in analysis_container:
# get specific data from db
container = {
"date": data.date,
"magnitude": data.magnitude,
"maxtemp": data.maxtemp,
"mintemp": data.mintemp,
# "avgtemp": data.avgtemp,
"lat": data.lat,
}
analysis_list_temp.append(container)
# Create df for parsing
temp_df = pd.DataFrame(analysis_list_temp)
# Sort by lat and date, reset index
temp_df = temp_df.sort_values(by=['lat', 'date'], ascending=False).reset_index(drop=True)
# Make copy of df, remove 2nd and 3rd log keeping 1st and 4th log of one eq entry.
run_df = temp_df.copy()
while x < len(temp_df.index):
run_df=run_df.drop(x)
x+=1
run_df=run_df.drop(x)
x+=3
# Reset index
run_df = run_df.reset_index(drop=True)
# get difference of weather change from day of eq and few days before
i = 0
new_col = []
# Icon list will tell style which icon to display
icon_list = []
while i < len(run_df.index):
# for data in run_df.index:
first = run_df.iloc[i,2]
second = run_df.iloc[i+1, 2]
difference = first - second
new_col.append(difference)
new_col.append(difference)
i+=2
# Add new list to df as a new column
run_df['difference'] = new_col
# Remove duplicates
run_df2 = run_df.copy()
v = 1
while v < len(run_df.index):
run_df2=run_df2.drop(v)
v+=2
# Count up, nochange, down
up_count = 0
nochange_count = 0
down_count = 0
for x in run_df2['difference']:
if x > 0:
icon = "up"
up_count+=1
icon_list.append(icon)
elif x == 0:
icon = "nochange"
nochange_count+=1
icon_list.append(icon)
else:
icon = "down"
down_count+=1
icon_list.append(icon)
# Add new list to df as a new column
run_df2['icon'] = icon_list
# select only the columns we need
run_df2 = run_df2[['date','magnitude','lat','difference','icon']]
# # Turn df into list of tuples
records = run_df2.to_records(index=False)
analysis_chart = list(records)
# Create list of tuple
analysis_list = []
for data in analysis_chart:
container2 = {
"date": data.date,
"magnitude": data.magnitude,
"lat": data.lat,
"difference": data.difference,
"icon": data.icon,
}
analysis_list.append(container2)
diff_count = len(run_df2['difference'])
above_percentage = "{:.0%}".format(up_count / diff_count)
atzero_percentage = "{:.0%}".format(nochange_count / diff_count)
belowzero_percentage = "{:.0%}".format(down_count / diff_count)
container3 = {
"abovezero": up_count,
"abovezeropercent": above_percentage,
"atzero": nochange_count,
"atzeropercent": atzero_percentage,
"belowzero": down_count,
"belowzeropercent": belowzero_percentage,
}
analysis_list.append(container3)
return analysis_list
analysis_list = analysisChart()
return analysis_list
| 2.96875
| 3
|
CDSB_series/cumul/main.py
|
WFDetector/WFDetection
| 0
|
12776821
|
import numpy as np
import sys
#for calculate the loss
from sklearn.metrics import log_loss
from sklearn.metrics import make_scorer
#import three machine learning models
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
#for standardizing the data
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV
import os
from os import mkdir, listdir
from os.path import join, isdir, dirname
from time import strftime
import constants as ct
import configparser
import argparse
import logging
import random
import pandas
import pickle
import joblib
logger = logging.getLogger('cumul')
random.seed(1123)
np.random.seed(1123)
'''params'''
r = 10
def score_func(ground_truths, predictions):
global MON_SITE_NUM, tps, wps, fps, ps, ns, flag
tp, wp, fp, p, n = 0, 0, 0, 0 ,0
for truth,prediction in zip(ground_truths, predictions):
if truth != MON_SITE_NUM:
p += 1
else:
n += 1
if prediction != MON_SITE_NUM:
if truth == prediction:
tp += 1
else:
if truth != MON_SITE_NUM:
wp += 1
# logger.info('Wrong positive:%d %d'%(truth, prediction))
else:
fp += 1
# logger.info('False positive:%d %d'%(truth, prediction))
# logger.info('%4d %4d %4d %4d %4d'%(tp, wp, fp, p, n))
if flag:
tps += tp
wps += wp
fps += fp
ps += p
ns += n
try:
r_precision = tp*n / (tp*n+wp*n+r*p*fp)
except:
r_precision = 0.0
# logger.info('r-precision:%.4f',r_precision)
# return r_precision
return tp/p
def read_conf(file):
cf = configparser.ConfigParser()
cf.read(file)
return dict(cf['default'])
def parse_arguments():
parser = argparse.ArgumentParser(description='It simulates adaptive padding on a set of web traffic traces.')
parser.add_argument('fp',
metavar='<feature path>',
help='Path to the directory of the extracted features')
parser.add_argument('type',
metavar='<model type>',
help='train a clean or dirty model',
default="None")
parser.add_argument('--log',
type=str,
dest="log",
metavar='<log path>',
default='stdout',
help='path to the log file. It will print to stdout by default.')
# Parse arguments
args = parser.parse_args()
config_logger(args)
return args
def config_logger(args):
# Set file
log_file = sys.stdout
if args.log != 'stdout':
log_file = open(args.log, 'w')
ch = logging.StreamHandler(log_file)
# Set logging format
ch.setFormatter(logging.Formatter(ct.LOG_FORMAT))
logger.addHandler(ch)
# Set level format
logger.setLevel(logging.INFO)
#SVM with RBF kernel for open world!!
def GridSearch(train_X,train_Y):
global OPEN_WORLD
#find the optimal gamma
param_grid = [
{
'C': [2**11,2**13,2**15,2**17],
'gamma' : [2**-3,2**-1,2**1,2**3]
}
]
if OPEN_WORLD:
my_scorer = make_scorer(score_func, greater_is_better=True)
else:
my_scorer = "accuracy"
# clf = GridSearchCV(estimator = SVC(kernel = 'rbf'), param_grid = param_grid, \
# scoring = 'accuracy', cv = 10, verbose = 2, n_jobs = -1)
clf = GridSearchCV(estimator = SVC(kernel = 'rbf'), param_grid = param_grid, \
scoring = my_scorer, cv = 5, verbose = 0, n_jobs = -1)
clf.fit(train_X, train_Y)
# logger.info('Best estimator:%s'%clf.best_estimator_)
# logger.info('Best_score_:%s'%clf.best_score_)
return clf
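# Minimal illustration of the search above (hypothetical data; note that GridSearch and
# score_func rely on the module-level globals OPEN_WORLD and MON_SITE_NUM being set first):
# X_demo = preprocessing.MinMaxScaler((-1, 1)).fit_transform(np.random.rand(60, 100))
# y_demo = np.random.randint(0, MON_SITE_NUM, 60)
# clf_demo = GridSearch(X_demo, y_demo)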
if __name__ == '__main__':
global MON_SITE_NUM, tps, wps, fps, ps, ns, flag, OPEN_WORLD
tps, wps, fps, ps, ns = 0,0,0,0,0
flag = 0
args = parse_arguments()
# logger.info("Arguments: %s" % (args))
cf = read_conf(ct.confdir)
MON_SITE_NUM = int(cf['monitored_site_num'])
if cf['open_world'] == '1':
UNMON_SITE_NUM = int(cf['unmonitored_site_num'])
OPEN_WORLD = 1
else:
OPEN_WORLD = 0
# logger.info('loading data...')
dic = np.load(args.fp,allow_pickle=True).item()
X = np.array(dic['feature'])
y = np.array(dic['label'])
if not OPEN_WORLD:
X = X[y<MON_SITE_NUM]
y = y[y<MON_SITE_NUM]
# print(X.shape, y.shape)
#normalize the data
scaler = preprocessing.MinMaxScaler((-1,1))
X = scaler.fit_transform(X)
# logger.info('data are transformed into [-1,1]')
# find the optimal params
# logger.info('GridSearchCV...')
clf = GridSearch(X,y)
C = clf.best_params_['C']
gamma = clf.best_params_['gamma']
#C, gamma = 131072, 8.000000
# C, gamma = 8192, 8.00
# logger.info('Best params are: %d %f'%(C,gamma))
# sss = StratifiedShuffleSplit(n_splits=10, test_size=0.1, random_state=0)
# folder_num = 0
# flag = 1
# for train_index, test_index in sss.split(X,y):
# # logger.info('Testing fold %d'%folder_num)
# folder_num += 1
# # print("TRAIN:", train_index, "TEST:", test_index)
# X_train, X_test = X[train_index], X[test_index]
# y_train, y_test = y[train_index], y[test_index]
# model = SVC(C = C, gamma = gamma, kernel = 'rbf')
# model.fit(X_train, y_train)
# y_pred = model.predict(X_test)
# r_precision = score_func(y_test, y_pred)
# # logger.info('%d-presicion is %.4f'%(r, r_precision))
# print("%d %d %d %d %d"%(tps,wps,fps,ps,ns))
model = SVC(C = C, gamma = gamma, kernel = 'rbf')
model.fit(X, y)
joblib.dump(model, join(ct.modeldir,args.fp.split("/")[-1][:-4]+'.pkl'))
print('model have been saved')
| 2.09375
| 2
|
text_analysis_tools/api/sentiment/sentiment.py
|
yu3peng/text_analysis_tools
| 149
|
12776822
|
# -*- coding: utf-8 -*-
import os
import json
import jieba.analyse
import jieba
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
sentiment_path = os.path.join(CURRENT_PATH, 'data', 'sentimentDict.json')
stopwords_path = os.path.join(CURRENT_PATH, 'data', 'stopwords.txt.json')
degree_path = os.path.join(CURRENT_PATH, 'data', 'degreeDict.json')
not_path = os.path.join(CURRENT_PATH, 'data', 'notDict.json')
jieba_dic_path = os.path.join(CURRENT_PATH, 'data', 'jieba.dic')
# 加载情感词典
jieba.load_userdict(jieba_dic_path)
class SentimentAnalysis():
def __init__(self):
self.sentiment_score_dic = self.load_json(sentiment_path)
self.degree_score = self.load_json(degree_path)
self.notwords = self.load_json(not_path)
def load_json(self, json_file_path):
with open(json_file_path, 'r', encoding='utf-8') as f:
return json.loads(f.read())
def analysis(self, sentence):
words = jieba.lcut(sentence)
score = self.sentiment_score_dic.get(words[0], 0)
if len(words) > 1:
score += self.sentiment_score_dic.get(words[1], 0) * self.notwords.get(words[0], 1) * self.degree_score.get(words[0], 1)
if len(words) > 2:
for i in range(2, len(words)):
score += self.sentiment_score_dic.get(words[i], 0) * self.notwords.get(words[i-1], 1) * \
self.degree_score.get(words[i-1], 1) * self.degree_score.get(words[i-2], 1) * \
self.notwords.get(words[i-2], 1)
if score < 0:
return {'negative': score}
if score > 0:
return {'positive': score}
return {'middle': score}
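if __name__ == "__main__":
    # Illustrative usage; the sentence is a hypothetical Chinese example
    # ("this product is really good"), since the bundled dictionaries are Chinese.
    sa = SentimentAnalysis()
    print(sa.analysis("这个产品真的很好"))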
| 2.921875
| 3
|
infra_macros/macro_lib/convert/sphinx.py
|
martarozek/buckit
| 0
|
12776823
|
#!/usr/bin/env python2
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""
Rules for building documentation with Sphinx (sphinx-doc.org)
This provides two new targets:
* sphinx_wiki
* sphinx_manpage
Common Attributes:
name: str
Name of the buck target
python_binary_deps: List[target]
python_library_deps: List[target]
list of python_binary dependencies to include in the link-tree
Sphinx ``autodoc`` allows documents to reference doc-blocks from
modules, classes, etc. For python it does this by importing the
modules. This means the dependencies need to be assembled in the
same PYTHONPATH, with all native library dependencies built, etc.
It is important to differentiate between python_binary_deps and
python_library_deps because we cannot do introspection on the targets
themselves. For ``python_binary`` we actually depend on the
"{name}-library" target rather than the binary itself.
apidoc_modules: Dict[module_path, destination_dir]
``sphinx-apidoc`` is a command many run to auto-generate ".rst" files
for a Python package. ``sphinx-apidoc`` runs and outputs a document
tree, with ``.. automodule::`` and ``.. autoclass::`` references, which
is used by the subsequent Sphinx run to build out docs for those
modules, classes, functions, etc.
The output of ``sphinx-apidoc`` is a directory tree of its own, which
will be merged in with the directory tree in ``srcs`` using ``rsync``.
The destination directory will be the name of ``destination_dir``
provided.
Keep in mind ``sphinx-apidoc`` runs at the root of ``PYTHONPATH``.
A rule like::
apidoc_modules = {
"mypackage.mymodule": "mymodule",
}
Will run ``sphinx-apidoc`` with the argument mypackage/mymodule,
and merge the output into the "mymodule" subdirectory with the
rest of ``srcs``.
genrule_srcs: Dict[binary_target, destination_dir]
Similar to ``apidoc_modules``, ``genrule_srcs`` provides a way to
generate source files during the build. The target needs to be a
binary target (runnable with "$(exe {target}) $OUT"), and needs to
accept a single argument "$OUT": the directory to write files to.
The ``destination_dir`` is the sub-directory to merge the files
into, alongside the declared ``srcs``.
config: Dict[str, Dict[str, Union[bool, int, str, List, Dict]]
This provides a way to override or add settings to conf.py,
sphinx-build and others
Section headers:
conf.py
sphinx-build
sphinx-apidoc
These need to serialize to JSON
label: List[str]
This provides a way to add one or more labels to the target, similar
to ``label`` for ``genrule``
sphinx_wiki
----------
This utilizes the Sphinx "xml" builder to generate a document
compliant with the Docutils DTD
Attributes:
srcs: List[Path]
list of document source files (usually .rst or .md)
wiki_root_path
Base URI location for documents to reside
This gets added to the conf.py, but typically is not used by Sphinx
in the build process. It is included here as metadata which can
be used by other tools via ``buck query``.
sphinx_manpage
--------------
This utilizes the Sphinx "man" builder to generate a Unix `Manual Page`
Attributes:
src: Path
The path to the source file (usually .rst or .md)
description: str
A one-line description of the program suitable for the NAME section
author: str
The program author
section: int
The manpage ``section``, defaults to ``1`` which is reserved for
programs
manpage_name: str [Optional]
The name of the manpage to use. The default is to use the target name
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
with allow_unsafe_import(): # noqa: magic
import collections
import json
import os
FBSPHINX_WRAPPER = '//fbsphinx:bin'
SPHINX_WRAPPER = '//fbsphinx:sphinx'
SPHINXCONFIG_TGT = '//:.sphinxconfig'
if False:
# avoid flake8 warnings for some things
from . import (
load,
read_config,
include_defs,
)
def import_macro_lib(path):
global _import_macro_lib__imported
include_defs('{}/{}.py'.format(
read_config('fbcode', 'macro_lib', '//macro_lib'), path
), '_import_macro_lib__imported')
ret = _import_macro_lib__imported
del _import_macro_lib__imported # Keep the global namespace clean
return ret
base = import_macro_lib('convert/base')
Rule = import_macro_lib('rule').Rule
python = import_macro_lib('convert/python')
fbcode_target = import_macro_lib('fbcode_target')
load("@fbcode_macros//build_defs:python_typing.bzl",
"get_typing_config_target")
SPHINX_SECTION = 'sphinx'
class _SphinxConverter(base.Converter):
"""
Produces a RuleTarget named after the base_path that points to the
correct platform default as defined in data
"""
def __init__(self, context):
super(_SphinxConverter, self).__init__(context)
self._converters = {
'python_binary': python.PythonConverter(context, 'python_binary'),
}
def get_allowed_args(self):
return {
'name',
'python_binary_deps',
'python_library_deps',
'apidoc_modules',
'genrule_srcs',
'config',
}
def get_buck_rule_type(self):
return 'genrule'
def _gen_genrule_srcs_rules(
self,
base_path,
name,
genrule_srcs,
):
"""
A simple genrule wrapper for running some target which generates rst
"""
if not genrule_srcs:
return
for target, outdir in genrule_srcs.items():
rule = fbcode_target.parse_target(target, base_path)
if '/' in outdir:
root, rest = outdir.split('/', 1)
else:
root = outdir
rest = '.'
yield Rule('genrule', collections.OrderedDict((
('name', name + '-genrule_srcs-' + rule.name),
('out', root),
('bash', 'mkdir -p $OUT/{rest} && $(exe {target}) $OUT/{rest}'.format(
target=target,
rest=rest,
)),
)))
def _gen_apidoc_rules(
self,
base_path,
name,
fbsphinx_wrapper_target,
apidoc_modules,
):
"""
A simple genrule wrapper for running sphinx-apidoc
"""
if not apidoc_modules:
return
for module, outdir in apidoc_modules.items():
command = ' '.join((
'mkdir -p $OUT && $(exe :{fbsphinx_wrapper_target})',
'buck apidoc',
module,
'$OUT',
)).format(
fbsphinx_wrapper_target=fbsphinx_wrapper_target,
)
yield Rule('genrule', collections.OrderedDict((
('name', name + '-apidoc-' + module),
('out', outdir),
('bash', command),
)))
def convert(
self,
base_path,
name,
apidoc_modules=None,
config=None,
genrule_srcs=None,
python_binary_deps=(),
python_library_deps=(),
src=None,
srcs=None,
visibility=None,
**kwargs
):
"""
Entry point for converting sphinx rules
"""
if srcs is None:
srcs = [src]
python_deps = tuple(python_library_deps) + tuple((
_dep + '-library'
for _dep
in tuple(python_binary_deps)
)) + (FBSPHINX_WRAPPER,)
fbsphinx_wrapper_target = '%s-fbsphinx-wrapper' % name
for rule in self._converters['python_binary'].convert(
base_path,
name=fbsphinx_wrapper_target,
par_style='xar',
py_version='>=3.6',
main_module='fbsphinx.bin.fbsphinx_wrapper',
deps=python_deps,
):
yield rule
additional_doc_rules = []
for rule in self._gen_apidoc_rules(
base_path,
name,
fbsphinx_wrapper_target,
apidoc_modules,
):
additional_doc_rules.append(rule)
yield rule
for rule in self._gen_genrule_srcs_rules(
base_path,
name,
genrule_srcs,
):
additional_doc_rules.append(rule)
yield rule
command = ' '.join((
'echo {BUCK_NONCE} >/dev/null &&',
'$(exe :{fbsphinx_wrapper_target})',
'buck run',
'--target {target}',
'--builder {builder}',
'--sphinxconfig $(location {SPHINXCONFIG_TGT})',
"--config '{config}'",
"--generated-sources '{generated_sources}'",
'.', # source dir
'$OUT',
)).format(
BUCK_NONCE=os.environ.get('BUCK_NONCE', ''),
fbsphinx_wrapper_target=fbsphinx_wrapper_target,
target='//{}:{}'.format(base_path, name),
builder=self.get_builder(),
SPHINXCONFIG_TGT=SPHINXCONFIG_TGT,
config=json.dumps(config or {}),
generated_sources=json.dumps([
'$(location {})'.format(rule.target_name)
for rule
in additional_doc_rules
]),
)
yield Rule('genrule', collections.OrderedDict((
('name', name),
('type', self.get_fbconfig_rule_type()),
('out', 'builder=%s' % self.get_builder()),
('bash', command),
('srcs', srcs),
('labels', self.get_labels(name, **kwargs)),
)))
def get_labels(self, name, **kwargs):
return ()
def get_extra_confpy_assignments(self, name, **kwargs):
return collections.OrderedDict()
class SphinxWikiConverter(_SphinxConverter):
"""
Concrete class for converting sphinx_wiki rules
"""
def get_allowed_args(self):
allowed_args = super(SphinxWikiConverter, self).get_allowed_args()
allowed_args.update({
'srcs',
'wiki_root_path',
})
return allowed_args
def get_fbconfig_rule_type(self):
return 'sphinx_wiki'
def get_builder(self):
return 'wiki'
def get_labels(self, name, **kwargs):
return (
'wiki_root_path:%s' % kwargs.get('wiki_root_path'),
)
class SphinxManpageConverter(_SphinxConverter):
"""
Concrete class for converting sphinx_manpage rules
"""
def get_allowed_args(self):
allowed_args = super(SphinxManpageConverter, self).get_allowed_args()
allowed_args.update({
'src',
'author',
'description',
'section',
'manpage_name',
})
return allowed_args
def get_fbconfig_rule_type(self):
return 'sphinx_manpage'
def get_builder(self):
return 'manpage'
def get_labels(self, name, **kwargs):
return (
'description:%s' % kwargs.get('description'),
'author:%s' % kwargs.get('author'),
'section:%d' % kwargs.get('section', 1),
'manpage_name:%s' % kwargs.get('manpage_name', name),
)
def get_extra_confpy_assignments(self, name, **kwargs):
return {
'man_pages': [{
'doc': 'master_doc',
'name': kwargs.get('manpage_name', name),
'description': kwargs.get('description'),
'author': kwargs.get('author'),
'section': kwargs.get('section', 1),
}],
}
| 1.804688
| 2
|
Factorial.py
|
holtjma/pyku
| 0
|
12776824
|
'''
Factorial int
Recurse Implementation
Auth.: holtjma
'''
def dec(Input):
'''
Pyku for decrement:
y equals input
y equals y minus two
return y plus one
'''
y = Input
y = y-2
return y+1
def fac(x):
'''
Pyku for factorial:
if x and not false
return x times fac dec x
return 1 and 1
'''
if x and not False:
return x*fac(dec(x))
return 1 & 1
if __name__ == '__main__':
print 'x\tfac(x)'
for x in xrange(1, 10):
print str(x)+'\t'+str(fac(x))
| 3.6875
| 4
|
tintest.py
|
SaxonWang99/CMPE273-WeWin
| 0
|
12776825
|
import requests
import json
print("register node 5000, 5001, 5002, 5003, 5004, 5005")
m_node = {
"nodes" : ["http://127.0.0.1:5000","http://127.0.0.1:5001",
"http://127.0.0.1:5002","http://127.0.0.1:5003","http://127.0.0.1:5004","http://127.0.0.1:5005"]
}
r = requests.post('http://127.0.0.1:5000/nodes/register',json = m_node)
print(r.text)
print("---------------------------------------------------------------")
print("get all info from chain")
r = requests.get('http://127.0.0.1:5000/chain')
print(r.text)
print("---------------------------------------------------------------")
block = {
"upc" : "300871365612",
"item_no":1,
"owner": "TinVu",
}
print("mine 1000 products to blockchain")
for a in range(1, 5):
block['item_no'] = a
#print(block)
r = requests.post('http://127.0.0.1:5000/register',json = block)
r = requests.get('http://127.0.0.1:5000/chain')
print(r.text)
print("---------------------------------------------------------------")
trans_block = {
"upc" : "300871365612",
"item_no":1,
"current_owner": "TinVu",
"new_owner": "A",
}
#r = requests.post('http://127.0.0.1:5000/transaction',json = trans_block)
print("do transaction 10 products to blockchain")
for a in range(1, 5):
trans_block['new_owner'] = trans_block['new_owner']+str(a)
trans_block['item_no'] = a
#print(trans_block)
r = requests.post('http://127.0.0.1:5000/transaction',json = trans_block)
r = requests.get('http://127.0.0.1:5000/chain')
print(r.text)
print("---------------------------------------------------------------")
print("validate 300871365612,1,TinVu ")
murl ='http://127.0.0.1:5000/validate/300871365612,1,TinVu'
r = requests.get(murl)
print(r.text)
print("---------------------------------------------------------------")
print("validate 300871365612,1,A1")
murl ='http://127.0.0.1:5000/validate/300871365612,1,A1'
r = requests.get(murl)
print(r.text)
print("---------------------------------------------------------------")
| 2.78125
| 3
|
scripts/NoIp/noIp.py
|
tatan8425/hal-9000-webserver
| 0
|
12776826
|
<gh_stars>0
import requests, socket
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except Exception:
IP = '127.0.0.1'
finally:
s.close()
return IP
username = "USERNAME"
password = "PASSWORD"
hostname = "HOSTNAME" # your domain name hosted in no-ip.com
# Gets the current public IP of the host machine.
myip = get_ip()
# Gets the existing dns ip pointing to the hostname.
old_ip = socket.gethostbyname(hostname)
# Noip API - dynamic DNS update.
# https://www.noip.com/integrate/request.
def update_dns(config):
r = requests.get("http://{}:{}@dynupdate.no-ip.com/nic/update?hostname={}&myip={}".format(*config))
if r.status_code != requests.codes.ok:
print(r.content)
pass
# Update only when ip is different.
if myip != old_ip:
update_dns( (username, password, hostname, myip) )
pass
| 2.96875
| 3
|
python/hijack/sacrifice.py
|
mum-chen/funny
| 1
|
12776827
|
<gh_stars>1-10
from kidnapper import Kidnapper, Human
@Kidnapper.ransom
class Sacrifice(Human):
def say_name(self):
print("I'm Sacrifice")
| 2.640625
| 3
|
src/keras_exp/distrib/cluster_parsers/slurm.py
|
avolkov1/keras_experiments
| 92
|
12776828
|
# Taken from: https://github.com/jhollowayj/tensorflow_slurm_manager
# ref:
# https://github.com/jhollowayj/tensorflow_slurm_manager/blob/master/slurm_manager.py
# @IgnorePep8
'''
'''
from __future__ import print_function
import os
import re
# import socket
# depends on hostlist: pip install python-hostlist
import hostlist
from .base import ClusterParser
__all__ = ('SlurmClusterParser',)
# It may be useful to know that slurm_nodeid tells you which node you are on
# (in case there is more than one task on any given node...)
# Perhaps you could better assign parameter servers to be distributed across all
# nodes before doubling up on one.
class SlurmClusterParser(ClusterParser):
'''
:param num_param_servers: Default -1 meaning one parameter server per
node. The remaining processes on the node are workers. The
num_parameter_servers must be less than or equal to the number of
individual physical nodes
:param starting_port: Starting port for setting up jobs. Default: 2300
TODO: Maybe use SLURM_STEP_RESV_PORTS environment if available.
https://stackoverflow.com/a/36803148/3457624
:param str network: Use a specific network cluster.
Ex. network='ib.cluster' The hosts are then specified as:
'{}.{}'.format(hostname, network)
'''
def __init__(self, num_param_servers=-1, starting_port=2300,
network=None):
num_workers = None
# Check Environment for all needed SLURM variables
# SLURM_NODELIST for backwards compatibility if needed.
assert 'SLURM_JOB_NODELIST' in os.environ
assert 'SLURM_TASKS_PER_NODE' in os.environ
assert 'SLURM_PROCID' in os.environ
assert 'SLURM_NPROCS' in os.environ
assert 'SLURM_NNODES' in os.environ
# Grab SLURM variables
# expands 'NAME1(x2),NAME2' -> 'NAME1,NAME1,NAME2'
self._hostnames = hostlist.expand_hostlist(
os.environ['SLURM_JOB_NODELIST'])
if network is not None:
self._hostnames = [
# socket.gethostbyname('{}.{}'.format(hname, network))
'{}.{}'.format(hname, network)
for hname in self._hostnames]
# expands '1,2(x2)' -> '1,2,2'
self._num_tasks_per_host = self._parse_slurm_tasks_per_node(
os.environ['SLURM_TASKS_PER_NODE'])
# index into hostnames/num_tasks_per_host lists
self._my_proc_id = int(os.environ['SLURM_PROCID'])
self.num_processes = int(os.environ['SLURM_NPROCS'])
self.nnodes = int(os.environ['SLURM_NNODES'])
# Sanity check that everything has been parsed correctly
nhosts = len(self.hostnames)
assert nhosts == len(self.num_tasks_per_host)
assert nhosts == self.nnodes
assert self.num_processes == sum(self.num_tasks_per_host)
# Number of PS/Workers
# Note: I'm making the assumption that having more than one PS/node
# doesn't add any benefit. It makes code simpler in
# self.build_cluster_spec()
self._num_parameter_servers = min(num_param_servers, nhosts) \
if num_param_servers > 0 else nhosts
if num_workers is None:
# Currently I'm not using num_workers
# TODO: What happens to num_workers once I allocate less PS than
# they requested?
# default to all other nodes doing something
self.num_workers = self.num_processes - self.num_parameter_servers
# Default port to use
self._starting_port = starting_port # use user specified port
def _parse_slurm_tasks_per_node(self, num_tasks_per_nodes):
'''
SLURM_TASKS_PER_NODE Comes in compressed, so we need to uncompress it:
e.g: if slurm gave us the following setup:
Host 1: 1 process
Host 2: 3 processes
Host 3: 3 processes
Host 4: 4 processes
Then the environment variable SLURM_TASKS_PER_NODE = '1,3(x2),4'
But we need it to become this => [1, 3, 3, 4]
'''
final_list = []
num_tasks_per_nodes = num_tasks_per_nodes.split(',')
for node in num_tasks_per_nodes:
if 'x' in node: # "n(xN)"; n=tasks, N=repeats
n_tasks, n_nodes = [int(n) for n in re.findall(r'\d+', node)]
final_list += [n_tasks] * n_nodes
else:
final_list.append(int(node))
return final_list
@property
def num_tasks_per_host(self):
'''List of integers with each element specifying number of tasks on a
host. This list and hostnames list must be in the same order.'''
return self._num_tasks_per_host
@property
def hostnames(self):
'''List of hosts with each element specifying the host name.'''
return self._hostnames
@property
def num_parameter_servers(self):
'''Number of parameter servers to create/use in the cluster.'''
return self._num_parameter_servers
@property
def my_proc_id(self):
'''Current process's id or rank.'''
return self._my_proc_id
@property
def starting_port(self):
'''Starting port used when assigning ports for cluster jobs.'''
return self._starting_port
if __name__ == '__main__':
# run test via: srun -l python -m keras_exp.distrib.cluster_parsers.slurm
from ._test import test
scpar = SlurmClusterParser()
test(scpar)
# sys.exit(0)
| 2.265625
| 2
|
solutions/python3/problem665.py
|
tjyiiuan/LeetCode
| 0
|
12776829
|
# -*- coding: utf-8 -*-
"""
665. Non-decreasing Array
Given an array nums with n integers, your task is to check if it could become non-decreasing
by modifying at most 1 element.
We define an array is non-decreasing if nums[i] <= nums[i + 1] holds for every i (0-based) such that (0 <= i <= n - 2).
Constraints:
1 <= n <= 10 ^ 4
- 10 ^ 5 <= nums[i] <= 10 ^ 5
"""
class Solution:
def checkPossibility(self, nums):
ind_set = set()
for ind in range(1, len(nums)):
if nums[ind] - nums[ind - 1] < 0:
ind_set.add(ind)
if not ind_set:
return True
elif len(ind_set) > 1:
return False
else:
wrong_ind = ind_set.pop()
if wrong_ind == 1 or wrong_ind == len(nums) - 1:
return True
else:
return nums[wrong_ind] >= nums[wrong_ind - 2] or nums[wrong_ind + 1] >= nums[wrong_ind - 1]
| 3.671875
| 4
|
Backend/src/flask_app.py
|
BSAkash/ChatBot
| 0
|
12776830
|
from flask import Flask, render_template, request, jsonify
import conversation
# import traceback
app = Flask(__name__)
app.config["DEBUG"] = True
conversation.initBrain()
@app.route('/')
def index():
return render_template('main_page.html')
@app.route('/api/', methods=["GET","POST"])
def api():
try:
if request.method == "POST":
data = request.get_json()
query = data['query']
reply = conversation.botAnswer(query)
# dict can also be used as param for jsonify
return jsonify(
response=reply,
mode="reply"
)
except Exception as e:
return jsonify(
response="Error: " + str(e) # + '\n>> Traceback <<\n' + str(traceback.print_exc())
)
@app.route('/quote', methods=["GET"])
def quote():
from apis import quotes
try:
return quotes.getQuote()
except Exception as e:
return "Error: " + str(e)
@app.route('/test', methods=["GET"])
def test():
from apis import quotes
try:
return "Test Successful!"
except Exception as e:
return "Error: " + str(e)
if __name__ == "__main__":
app.run()
| 2.515625
| 3
|
tools/w3af/w3af/core/controllers/core_helpers/consumers/tests/test_base_consumer.py
|
sravani-m/Web-Application-Security-Framework
| 3
|
12776831
|
"""
test_base_consumer.py
Copyright 2011 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import unittest
from mock import Mock
from nose.plugins.attrib import attr
from w3af.core.controllers.core_helpers.consumers.base_consumer import BaseConsumer
from w3af.core.controllers.w3afCore import w3afCore
from w3af.core.data.request.fuzzable_request import FuzzableRequest
from w3af.core.data.parsers.doc.url import URL
class TestBaseConsumer(unittest.TestCase):
def setUp(self):
self.bc = BaseConsumer([], w3afCore(), 'TestConsumer')
def test_handle_exception(self):
url = URL('http://moth/')
fr = FuzzableRequest(url)
try:
raise Exception()
except Exception, e:
self.bc.handle_exception('audit', 'sqli', fr, e)
exception_data = self.bc.out_queue.get()
self.assertTrue(exception_data.traceback is not None)
self.assertEqual(exception_data.phase, 'audit')
self.assertEqual(exception_data.plugin, 'sqli')
self.assertEqual(exception_data.exception, e)
def test_terminate(self):
self.bc.start()
self.bc._teardown = Mock()
self.bc.terminate()
self.assertEqual(self.bc._teardown.call_count, 1)
def test_terminate_terminate(self):
self.bc.start()
self.bc._teardown = Mock()
self.bc.terminate()
self.assertEqual(self.bc._teardown.call_count, 1)
| 1.78125
| 2
|
11/11.py
|
pemcconnell/euler
| 0
|
12776832
|
<reponame>pemcconnell/euler<gh_stars>0
import re
SIZE = 4
GREATESTPRODUCT = 0
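# find_biggest scans a flat list of grid cells and updates the global
# GREATESTPRODUCT with the largest product of SIZE adjacent values.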
def find_biggest(cells):
global GREATESTPRODUCT
gp = 0
l = len(cells)
for i,num in enumerate(cells):
if i > (l - SIZE):
break
x = SIZE-1
p = 1
while x >= 0:
p *= int(cells[i+x])
x -= 1
if p > gp:
gp = p
if gp > GREATESTPRODUCT:
GREATESTPRODUCT = gp
data = []
cl = 0
lines = open('11.txt', 'r').read().split('\n')
for line in lines:
cells = line.split(' ')
if cl == 0:
cl = len(cells)
data.append(cells)
# left/right
find_biggest(cells)
diagonals = []
for i,line in enumerate(lines):
x = 0
v = []
dr = []
dl = []
while x < cl:
v.append(data[x][i])
if not dr == False:
try:
dr.append(data[x][i+x])
except IndexError:
if len(dr) < SIZE:
dr = False
if not dl == False:
try:
dl.append(data[x][i-x])
except IndexError:
if len(dl) < SIZE:
dl = False
x += 1
if not dr == False:
diagonals += dr
if not dl == False:
diagonals += dl
#up/down
find_biggest(v)
#diagonals
find_biggest(diagonals)
print GREATESTPRODUCT
| 2.953125
| 3
|
utils.py
|
Air-Fighter/KnowledgeEnhancedTE
| 1
|
12776833
|
<gh_stars>1-10
"""
Utility functions.
"""
import argparse
# list comprehension operator
# COMP = require("pl.comprehension").new()
def printerr(msg):
print '\033[1;31;40m',
print msg,
print '\033[0m'
def get_args():
parser = argparse.ArgumentParser(description='Structured Attention PyTorch Version')
parser.add_argument('-t', '--train-size', type=int, default=0,
help='Number of samples used in training (default: 0)')
parser.add_argument('--dim', type=int, default=150,
help='LSTM memory dimension')
parser.add_argument('-e', '--epoches', type=int, default=30,
help='Number of training epoches')
parser.add_argument('-lr', '--learning_rate', type=float, default=1.0,
help='Learning rate')
parser.add_argument('-b', '--batch_size', type=int, default=32,
help='Batch size')
parser.add_argument('--hidden-dim', type=int, default=200,
help='Number of hidden units')
parser.add_argument('--dataset_prefix', type=str, default='./sampledata/',
help='Prefix of path to dataset')
parser.add_argument('-d', '--drop_out', type=float, default=0.2,
help='Dropout rate')
parser.add_argument('-w', '--word-embedding', type=str, default='./sampledata/wordembedding',
help='Path to word embedding')
parser.add_argument('--gpu-id', type=int, default=-1,
help='The gpu device to use. -1 means use only CPU.')
parser.add_argument('--interactive', type=bool, default=True,
help='Show progress interactively')
parser.add_argument('--dump', default=None, help='Weights dump')
parser.add_argument('--eval', default=None, help='Evaluate weights')
parser.add_argument('--oovonly', type=bool, default=True,
help='Update OOV embeddings only')
parser.add_argument('-vfq', '--valid-freq', type=int, default=5,
help='Frequency of Validating model')
parser.add_argument('--snli-file', default=None, type=str,
help='Path and name of dumped SNLI object')
args = parser.parse_args()
return args
def get_tensor_size(tensor, separator):
sep = separator or " "
ret = []
for i in xrange(0, tensor.dim()):
ret.append(tensor.size(i))
return sep + str(ret)
# share module parameters
def share_params(cell, src):
for c_param, s_param in zip(cell.parameters(), src.parameters()):
c_param.data.copy_(s_param.data)
"""
def share_params(cell, src)
if torch.type(cell) == 'nn.gModule' then
for i = 1, #cell.forwardnodes do
local node = cell.forwardnodes[i]
if node.data.module then
node.data.module:share(src.forwardnodes[i].data.module,
'weight', 'bias', 'gradWeight', 'gradBias')
end
end
elseif torch.isTypeOf(cell, 'nn.Module') then
cell:share(src, 'weight', 'bias', 'gradWeight', 'gradBias')
else
error('parameters cannot be shared for this input')
end
end
"""
def get_tensor_data_address(x):
pass
"""
function getTensorDataAddress(x)
return string.format("%x+%d", torch.pointer(x:storage():data()), x:storageOffset())
end
"""
def get_tensor_table_norm(t):
pass
"""
function getTensorTableNorm(t)
local ret = 0
for i, v in ipairs(t) do
ret = ret + v:norm()^2
end
return math.sqrt(ret)
end
"""
def inc_counts(counter, key):
if counter.get(key) is None:
counter[key] = 1
else:
counter[key] += 1
def table_length(tab):
pass
"""
function tableLength(tab)
local count = 0
for _ in pairs(tab) do count = count + 1 end
return count
end
"""
def repeat_tensor_as_table(tensor, count):
pass
"""
function repeatTensorAsTable(tensor, count)
local ret = {}
for i = 1, count do ret[i] = tensor end
return ret
end
"""
def flatten_table(tab):
pass
"""
function flattenTable(tab)
local ret = {}
for _, t in ipairs(tab) do
if torch.type(t) == "table" then
for _, s in ipairs(flattenTable(t)) do
ret[#ret + 1] = s
end
else
ret[#ret + 1] = t
end
end
return ret
end
"""
def get_tensor_table_size(tab, separator):
pass
"""
function getTensorTableSize(tab, separator)
local sep = separator or " "
local ret = {}
for i, t in ipairs(tab) do
ret[i] = getTensorSize(t, "x")
end
return stringx.join(sep, ret)
end
"""
def vector_string_compact(vec, separator):
pass
"""
function vectorStringCompact(vec, separator)
local sep = separator or " "
local ret = {}
for i = 1, vec:size(1) do
ret[i] = string.format("%d:%.4f", i, vec[i])
end
return stringx.join(sep, ret)
end
"""
def tensor_size(tensor):
pass
"""
function tensorSize(tensor)
local size = 1
for i=1, tensor:dim() do size = size * tensor:size(i) end
return size
end
"""
# http://nlp.stanford.edu/IR-book/html/htmledition/dropping-common-terms-stop-words-1.html
StopWords = ["a", "an", "and", "are", "as", "at", "be", "by",
"for", "from", "has", "in", "is", "of", "on", "that",
"the", "to", "was", "were", "will", "with", "."]
def is_stop_word(word):
return word in StopWords
| 2.6875
| 3
|
data/hr/csv2json.py
|
rbt-lang/rbt-proto
| 9
|
12776834
|
import sys
import csv
import json
icsv = csv.DictReader(sys.stdin)
ojson = { "departments": [] }
dept_data = ojson["departments"]
dept_name_idx = {}
for line in icsv:
dept_name = line["dept_name"]
dept = dept_name_idx.get(dept_name)
if dept is None:
dept = dept_name_idx[dept_name] = { "name": dept_name, "employees": [] }
dept_data.append(dept)
dept["employees"].append({
"name": line["empl_name"],
"surname": line["empl_surname"],
"position": line["empl_position"],
"salary": int(line["empl_salary"]) })
json.dump(ojson, sys.stdout, indent=2, sort_keys=True, separators=(',', ': '))
| 3.078125
| 3
|
src/cobald/daemon/runners/base_runner.py
|
thoto/cobald
| 7
|
12776835
|
import logging
import threading
from typing import Any
from cobald.daemon.debug import NameRepr
class BaseRunner(object):
flavour = None # type: Any
def __init__(self):
self._logger = logging.getLogger(
"cobald.runtime.runner.%s" % NameRepr(self.flavour)
)
self._payloads = []
self._lock = threading.Lock()
#: signal that the runner should keep running
self.running = threading.Event()
#: signal that runner has stopped
self._stopped = threading.Event()
self.running.clear()
self._stopped.set()
def __bool__(self):
with self._lock:
return bool(self._payloads) or self.running.is_set()
def register_payload(self, payload):
"""
Register ``payload`` for asynchronous execution
This runs ``payload`` as an orphaned background task as soon as possible.
It is an error for ``payload`` to return or raise anything without handling it.
"""
with self._lock:
self._payloads.append(payload)
def run_payload(self, payload):
"""
Register ``payload`` for synchronous execution
This runs ``payload`` as soon as possible, blocking until completion.
Should ``payload`` return or raise anything, it is propagated to the caller.
"""
raise NotImplementedError
def run(self):
"""
Execute all current and future payloads
Blocks and executes payloads until :py:meth:`stop` is called.
It is an error for any orphaned payload to return or raise.
"""
self._logger.info("runner started: %s", self)
try:
with self._lock:
assert not self.running.is_set() and self._stopped.is_set(), (
"cannot re-run: %s" % self
)
self.running.set()
self._stopped.clear()
self._run()
except Exception:
self._logger.exception("runner aborted: %s", self)
raise
else:
self._logger.info("runner stopped: %s", self)
finally:
with self._lock:
self.running.clear()
self._stopped.set()
def _run(self):
raise NotImplementedError
def stop(self):
"""Stop execution of all current and future payloads"""
if not self.running.wait(0.2):
return
self._logger.debug("runner disabled: %s", self)
with self._lock:
self.running.clear()
self._stopped.wait()
class OrphanedReturn(Exception):
"""A runnable returned a value without anyone to receive it"""
def __init__(self, who, value):
super().__init__("no caller to receive %s from %s" % (value, who))
self.who = who
self.value = value
| 2.171875
| 2
|
discrete_pole.py
|
yashpatel5400/ot_experiments
| 0
|
12776836
|
<filename>discrete_pole.py
import gym
import math
import numpy as np
import sklearn
from sklearn.linear_model import SGDRegressor
from sklearn.pipeline import Pipeline
from sklearn.kernel_approximation import RBFSampler
import matplotlib.pyplot as plt
env = gym.make('CartPole-v0')
env.reset()
episodes = 1000
gamma = 1.0
eps = .9
alpha = 1.0
# replace technically legal [-inf,inf] with reasonable bounds on values
max_state = env.observation_space.high
max_state[1] = 0.5
max_state[-1] = math.radians(50) / 1.
min_state = env.observation_space.low
min_state[1] = -0.5
min_state[-1] = -math.radians(50) / 1.
num_state_buckets = [1,1,6,12]
state_deltas = [(max_state[i] - min_state[i]) / num_state_buckets[i] for i in range(len(max_state))]
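# get_discrete_state maps a continuous observation to per-dimension bucket
# indices, clamping each index into [0, num_state_buckets[i] - 1].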
def get_discrete_state(state):
return [
max(
min(
int((state[i] - min_state[i]) / state_deltas[i]),
int((max_state[i] - min_state[i]) / state_deltas[i]) - 1
),
0) for i in range(len(state))]
def eps_greedy(epsilon, state, Q):
if np.random.random() < epsilon:
return env.action_space.sample()
s1, s2, s3, s4 = get_discrete_state(state)
return np.argmax(Q[s1, s2, s3, s4, :])
Q = np.zeros((num_state_buckets + [env.action_space.n]))
episode_lens = []
for episode in range(episodes):
if episode > 0 and episode % 200 == 0:
eps /= 2
alpha /= 2
print(f"episode : {episode}")
done = False
state = env.reset()
t = 0
while not done:
action = eps_greedy(eps, state, Q)
new_state, reward, done, _ = env.step(action)
s1, s2, s3, s4 = get_discrete_state(state)
new_s1, new_s2, new_s3, new_s4 = get_discrete_state(new_state)
td_delta = gamma * np.max(Q[new_s1, new_s2, new_s3, new_s4, :]) + reward \
- Q[s1, s2, s3, s4, action]
Q[s1, s2, s3, s4, action] += alpha * td_delta
state = new_state
t += 1
episode_lens.append(t)
# final test
state = env.reset()
done = False
while not done:
env.render()
state, _, done, _ = env.step(eps_greedy(0, state, Q))
env.close()
plt.plot(range(len(episode_lens)), episode_lens)
plt.show()
| 2.40625
| 2
|
app/modules/assets/controllers.py
|
systemaker/Flask-Easy-Template
| 11
|
12776837
|
<reponame>systemaker/Flask-Easy-Template
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ------- IMPORT DEPENDENCIES -------
import datetime
import sendgrid
import os
import json
from werkzeug.utils import secure_filename
from werkzeug.datastructures import CombinedMultiDict
from flask import request, render_template, flash, current_app, redirect, abort, jsonify, url_for
from forms import *
from time import time
from flask_login import login_required, current_user
from PIL import Image
# ------- IMPORT LOCAL DEPENDENCIES -------
from app import app, logger
from . import assets_page
from models import Asset
from app.modules.users.models import User
from app.helpers import *
from app.modules.localization.controllers import get_locale, get_timezone
from app import config_name
from constants import *
from app.modules.items.models import Item, AssetItem
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS']
def resize_image_to_max(image_path, max_size):
max_size = max_size
image = Image.open(image_path)
original_size = max(image.size[0], image.size[1])
if original_size >= max_size:
# resized_file = open(image_path.split('.')[0] + '_resized.jpg', "w")
if (image.size[0] > image.size[1]):
resized_width = max_size
resized_height = int(round((max_size/float(image.size[0]))*image.size[1]))
else:
resized_height = max_size
resized_width = int(round((max_size/float(image.size[1]))*image.size[0]))
image = image.resize((resized_width, resized_height), Image.ANTIALIAS)
image.save(image_path, 'JPEG')
def get_asset_type(filetype):
asset_type = ''
if 'image' in filetype:
asset_type = 'image'
if 'application' in filetype:
asset_type = 'application'
if 'video' in filetype:
asset_type = 'video'
if 'audio' in filetype:
asset_type = 'audio'
if 'text' in filetype:
asset_type = 'text'
return asset_type
# ------- ROUTINGS AND METHODS -------
# All assets
@assets_page.route('/')
@assets_page.route('/<int:page>')
def index(page=1):
try:
m_assets = Asset()
list_assets = m_assets.all_data(page, app.config['LISTINGS_PER_PAGE'])
# html or Json response
if request_wants_json():
return jsonify([{'id' : d.id, 'title_en_US' : d.title_en_US, 'description_en_US' : d.description_en_US, 'title_fr_FR' : d.title_fr_FR, 'description_fr_FR' : d.description_fr_FR} for d in list_assets.items])
else:
return render_template("assets/index.html", list_assets=list_assets, app = app)
except Exception, ex:
print("------------ ERROR ------------\n" + str(ex.message))
#abort(404)
# Show asset
@assets_page.route('/<int:id>/show')
def show(id=1):
try:
m_assets = Asset()
m_asset = m_assets.read_data(id)
# html or Json response
if request_wants_json():
return jsonify(data = m_asset)
else:
return render_template("assets/show.html", asset=m_asset, app = app)
except Exception, ex:
print("------------ ERROR ------------\n" + str(ex.message))
flash(str(ex.message), category="warning")
abort(404)
# New asset
@assets_page.route('/new', methods=['GET', 'POST'])
@login_required
def new():
try :
# request.form only contains form input data. request.files contains file upload data.
# You need to pass the combination of both to the form.
form = Form_Record_Add(CombinedMultiDict((request.files, request.form)))
users = User.query.filter(User.is_active == True).all()
items = Item.query.filter(Item.is_active == True).all()
if request.method == 'POST':
if form.validate():
# check if the post request has the file part
if 'data_file_name' not in request.files :
# redirect to the form page or Json response
if request_wants_json() :
return jsonify(data = {message : "No file part", form : form}), 422, {'Content-Type': 'application/json'}
else:
flash("No file part", category="danger")
return redirect(request.url)
# file = request.files['data_file_name']
file = form.data_file_name.data
# if the user does not select a file, the browser also submits an empty part without a filename
if file.filename == '' :
# redirect to the form page or Json response
if request_wants_json() :
return jsonify(data = {message : "No selected file", form : form}), 422, {'Content-Type': 'application/json'}
else:
flash("No selected file", category="danger")
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
filename = filename.encode('utf-8')
target_dir = os.path.abspath(app.config['UPLOAD_FOLDER'])
target = target_dir + '/' + filename
print("------------ FILE ------------\n" + str(target))
# if the target directory does not exist
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
file.save(target)
filetype = file.content_type
# guess asset type
asset_type = get_asset_type(filetype)
if asset_type == "image" :
# resize if too high
resize_image_to_max(target, app.config['MAX_SIZE'])
filesize = os.stat(target).st_size
# image processing thumbnail
infilename, ext = os.path.splitext(target)
filewidth = 0
fileheight = 0
if asset_type == "image" :
im = Image.open(target)
filewidth, fileheight = im.size
im.thumbnail(app.config['THUMBNAIL_SIZE'])
im.save(infilename + ".thumbnail" + ext)
assets = Asset()
sanitize_form = {
'data_file_name': filename,
'data_content_type': filetype,
'data_file_size': filesize,
'asset_type' : asset_type,
'width': filewidth,
'height': fileheight,
'description_en_US' : form.description_en_US.data,
'description_fr_FR' : form.description_fr_FR.data,
'user' : form.user.data,
'items' : form.items.data,
'is_active' : form.is_active.data
}
assets.create_data(sanitize_form)
logger.info("Adding a new record.")
if request_wants_json():
return jsonify(data = { message :"Record added successfully.", form: form }), 200, {'Content-Type': 'application/json'}
else :
flash("Record added successfully.", category="success")
return redirect("/assets")
form.action = url_for('assets_page.new')
# html or Json response
if request_wants_json():
return jsonify(data = form), 200, {'Content-Type': 'application/json'}
else:
return render_template("assets/edit.html", form=form, users = users, items = items, title_en_US='New', app = app)
except Exception, ex:
print("------------ ERROR ------------\n" + str(ex.message))
flash(str(ex.message), category="warning")
abort(404)
# Edit asset
@assets_page.route('/<int:id>/edit', methods=['GET', 'POST'])
@login_required
def edit(id=1):
try :
# check_admin()
assets = Asset()
asset = assets.query.get_or_404(id)
# users = User.query.all()
users = User.query.filter(User.is_active == True).all()
items = Item.query.filter(Item.is_active == True).all()
# request.form only contains form input data. request.files contains file upload data.
# You need to pass the combination of both to the form.
form = Form_Record_Add(CombinedMultiDict((request.files, request.form)))
if request.method == 'POST':
if form.validate():
# file = request.files['data_file_name']
file = form.data_file_name.data
# if the user does not select a file, the browser also submits an empty part without a filename
# the value is not required in edit mode
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
filename = filename.encode('utf-8')
target_dir = os.path.abspath(app.config['UPLOAD_FOLDER'])
target = target_dir + '/' + filename
# Remove previous image
prev_target = target_dir + '/' + asset.data_file_name
print("------------ PREV FILE ------------\n" + str(prev_target))
# remove previous thumbnail first
prev_infilename, prev_ext = os.path.splitext(prev_target)
os.remove(prev_infilename + '.thumbnail' + prev_ext) if os.path.isfile(prev_infilename + '.thumbnail' + prev_ext) else None
# remove previous file last
os.remove(prev_target) if os.path.isfile(prev_target) else None
print("------------ FILE ------------\n" + str(target))
# if the target directory does not exist
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
file.save(target)
filetype = file.content_type
# guess asset type
asset_type = get_asset_type(filetype)
if asset_type == "image" :
# resize if too high
resize_image_to_max(target, app.config['MAX_SIZE'])
filesize = os.stat(target).st_size
# image processing thumbnail
infilename, ext = os.path.splitext(target)
filewidth = 0
fileheight = 0
if asset_type == "image" :
im = Image.open(target)
filewidth, fileheight = im.size
im.thumbnail(app.config['THUMBNAIL_SIZE'])
im.save(infilename + ".thumbnail" + ext)
if not file :
filename = asset.data_file_name
filetype = asset.data_content_type
filesize = asset.data_file_size
filewidth = asset.width
fileheight = asset.height
asset_type = asset.asset_type
sanitize_form = {
'data_file_name': filename,
'data_content_type': filetype,
'data_file_size': filesize,
'asset_type' : asset_type,
'width': filewidth,
'height': fileheight,
'description_en_US' : form.description_en_US.data,
'description_fr_FR' : form.description_fr_FR.data,
'user' : form.user.data,
'items' : form.items.data,
'is_active' : form.is_active.data
}
assets.update_data(asset.id, sanitize_form)
logger.info("Editing a new record.")
if request_wants_json():
return jsonify(data = { message :"Record updated successfully.", form: form }), 200, {'Content-Type': 'application/json'}
else :
flash("Record updated successfully.", category="success")
return redirect("/assets")
form.action = url_for('assets_page.edit', id = asset.id)
form.data_file_name.data = asset.data_file_name
form.data_content_type.data = asset.data_content_type
form.data_file_size.data = asset.data_file_size
form.asset_type.data = asset.asset_type
form.width.data = asset.width
form.height.data = asset.height
form.description_en_US.data = asset.description_en_US
form.description_fr_FR.data = asset.description_fr_FR
if asset.user :
form.user.data = asset.user.id
if asset.items :
form.items.data = asset.items
form.is_active.data = asset.is_active
# html or Json response
if request_wants_json():
return jsonify(data = form), 200, {'Content-Type': 'application/json'}
else:
return render_template("assets/edit.html", form=form, users = users, items = items, title_en_US='Edit', app = app)
except Exception, ex:
print("------------ ERROR ------------\n" + str(ex.message))
flash(str(ex.message), category="warning")
abort(404)
# Delete asset
@assets_page.route('/<int:id>/destroy')
@login_required
def destroy(id=1):
try:
assets = Asset()
asset = assets.query.get_or_404(id)
target_dir = os.path.abspath(app.config['UPLOAD_FOLDER'])
target = target_dir + '/' + asset.data_file_name
# remove thumbnail first
infilename, ext = os.path.splitext(target)
os.remove(infilename + '.thumbnail' + ext) if os.path.isfile(infilename + '.thumbnail' + ext) else None
# remove file last
os.remove(target) if os.path.isfile(target) else None
assets.destroy_data(asset.id)
# html or Json response
if request_wants_json():
return jsonify(data = {message:"Record deleted successfully.", asset : m_asset})
else:
flash("Record deleted successfully.", category="success")
return redirect(url_for('assets_page.index'))
except Exception, ex:
print("------------ ERROR ------------\n" + str(ex.message))
flash(str(ex.message), category="warning")
abort(404)
| 2.09375
| 2
|
complexity_normalized.py
|
DReichLab/adna-workflow
| 9
|
12776838
|
import argparse
import subprocess
from pathlib import Path
from multiprocessing import Pool
picard_jar = None
def normalized_unique_reads(number_of_reads, bam):
try:
downsampled_library = downsample(number_of_reads, bam)
unique_bam = remove_duplicates(downsampled_library)
unique_reads = count_bam_reads(unique_bam)
return (bam, number_of_reads, unique_reads)
except:
return (bam, number_of_reads, -1)
def count_bam_reads(filename):
result = subprocess.run(['samtools', 'view', '-c', filename], check=True, stdout=subprocess.PIPE)
count = int(result.stdout.strip().decode('utf-8'))
return count
def downsample(number_of_reads, bam):
# compute the fraction of reads to retain
read_count = count_bam_reads(bam)
fraction = number_of_reads / read_count
if fraction > 1.0:
raise ValueError('Cannot upsample {} from {:d} to {:d}'.format(bam, read_count, number_of_reads))
bam_path = Path(bam)
if bam_path.suffix != '.bam':
raise ValueError('Not a BAM {}'.format(bam))
output = '{}_{:d}.bam'.format(bam_path.stem, number_of_reads)
# run Picard to downsample
subprocess.run(['java', '-Xmx4500m', '-jar', picard_jar, 'DownsampleSam', 'PROBABILITY={:f}'.format(fraction), 'I={}'.format(bam), 'O={}'.format(output)], check=True)
return output
def remove_duplicates(bam):
bam_path = Path(bam)
output = '{}.noduplicates.bam'.format(bam_path.stem)
# run Picard MarkDuplicates to remove duplicates to get unique read count
subprocess.run(['java', '-Xmx4500m', '-jar', picard_jar, 'MarkDuplicates', 'I={}'.format(bam), 'O={}'.format(output), 'M={}.{}'.format(output, 'dedup_stats'), 'REMOVE_DUPLICATES=true', 'BARCODE_TAG=XD', 'ADD_PG_TAG_TO_READS=false', 'MAX_FILE_HANDLES=1000'], check=True)
return output
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Normalize complexity using number of unique reads given a number of reads (hitting targets)", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-n', "--num_threads", help="size of thread pool", type=int, default=1)
parser.add_argument("--picard", help="Broad picard jar", default='/n/groups/reich/matt/pipeline/static/picard-v2.17.10.jar')
parser.add_argument("bams", help="bam files, filtered to 1240k targets and sorted", nargs='+')
args = parser.parse_args()
picard_jar = args.picard
pool = Pool(processes=args.num_threads)
results = []
for number_of_reads in [5e5, 1e6, 2e6, 4e6]:
for library in args.bams:
results.append(pool.apply_async(normalized_unique_reads, args=(int(number_of_reads), library) ) )
pool.close()
pool.join()
for result in results:
values = result.get()
library_path = Path(values[0])
print('{}\t{:d}\t{:d}'.format(library_path.stem, int(values[1]), values[2]))
| 2.578125
| 3
|
src/main.py
|
wene37/WeConnect-SolarManager
| 2
|
12776839
|
#!/usr/bin/python
import logging
import logging.handlers
import configparser
from time import sleep
from SolarManager import SolarManager
def log_setup():
formatter = logging.Formatter("%(asctime)s :: %(name)s :: %(levelname)s :: %(message)s")
logLevel = logging.INFO
log_handler = logging.handlers.TimedRotatingFileHandler("logs/SolarManager.log", when="midnight", interval=1, backupCount=30)
log_handler.setFormatter(formatter)
log_handler.setLevel(logLevel)
logger = logging.getLogger()
logger.addHandler(log_handler)
logger.setLevel(logLevel)
log_setup()
LOG = logging.getLogger("SolarManager.Service")
LOG.info("Starting service.")
try:
configFileName = "config.txt"
configParser = configparser.ConfigParser()
configParser.read(configFileName)
sleepTimeSeconds = configParser.getint("SolarManager", "SolarCheckInterval")
solarManager = SolarManager.SolarManager(configParser.get("WeConnect", "Username"), configParser.get("WeConnect", "Password"), configFileName)
while True:
solarManager.run()
LOG.info(f"Sleeping for {sleepTimeSeconds} seconds")
sleep(sleepTimeSeconds)
except Exception as e:
LOG.error(f"An error occured while running the service: {e}", exc_info=True)
raise e
| 2.5625
| 3
|
mundo_1/desafios/desafio_027.py
|
lvfds/Curso_Python3
| 0
|
12776840
|
<gh_stars>0
"""
Write a program that reads a person's full name and then shows the first and the last name separately.
E.g.: <NAME>
first = Ana
last = Sousa
"""
valor_digitado = input('Digite seu nome completo: ')
transformar_valor_digitado_em_lista = valor_digitado.split()
primeiro_nome = transformar_valor_digitado_em_lista[0]
ultimo_nome = transformar_valor_digitado_em_lista[-1]
print(f'Seu primeiro nome é: {primeiro_nome}')
print(f'Seu último nome é: {ultimo_nome}')
| 3.640625
| 4
|
persister/observations/management/commands/initialize_data.py
|
City-of-Helsinki/hel-data-pipe
| 1
|
12776841
|
from django.core.management.base import BaseCommand
from observations.models import Datasourcetype
class Command(BaseCommand):
def handle(self, *args, **options):
Datasourcetype.objects.get_or_create(
name="Digital Matter Sensornode LoRaWAN",
defaults={
"description": "Digital Matter Sensornode LoRaWAN",
"parser": "sensornode",
},
)
| 1.90625
| 2
|
ezancestry/process.py
|
arvkevi/ezancestry
| 26
|
12776842
|
<gh_stars>10-100
import warnings
from pathlib import Path
import joblib
import pandas as pd
from cyvcf2 import VCF
from loguru import logger
from sklearn.preprocessing import OneHotEncoder
from snps import SNPs
from ezancestry.config import aisnps_directory as _aisnps_directory
from ezancestry.config import aisnps_set as _aisnps_set
from ezancestry.config import models_directory as _models_directory
from ezancestry.config import samples_directory as _samples_directory
warnings.simplefilter(action="ignore", category=pd.errors.DtypeWarning)
def get_1kg_labels(samples_directory=None):
"""
Get the ancestry labels for the 1000 Genomes Project samples.
:param samples_directory: path to the directory containing the 1000 Genomes sample panel file
:type samples_directory: str
:return: DataFrame of sample-level population information
:rtype: pandas DataFrame
"""
if samples_directory is None:
samples_directory = _samples_directory
dfsamples = pd.read_csv(
Path(samples_directory).joinpath(
"integrated_call_samples_v3.20130502.ALL.panel"
),
sep="\t",
)
dfsamples.set_index("sample", inplace=True)
dfsamples.drop(columns=["Unnamed: 4", "Unnamed: 5"], inplace=True)
dfsamples.columns = ["population", "superpopulation", "gender"]
return dfsamples
def vcf2df(vcf_fname, dfsamples):
"""Convert a vcf file (from the 1kg aisnps) to a pandas DataFrame
:param vcf_fname: path to the vcf file with aisnps for every 1kg sample
:type vcf_fname: str
:param dfsamples: DataFrame with sample-level info on each 1kg sample.
:type dfsamples: pandas DataFrame
:return: DataFrame with genotypes for aisnps as columns and samples as rows.
:rtype: pandas DataFrame
"""
vcf_file = VCF(vcf_fname)
df = pd.DataFrame(index=vcf_file.samples)
for variant in vcf_file():
# TODO: ensure un-phasing variants is the desired behavior
# sorted() normalizes the order of the genotypes
df[variant.ID] = [
"".join(sorted(gt.replace("|", ""))) for gt in variant.gt_bases
]
df = df.join(dfsamples, how="inner")
return df
def encode_genotypes(
df,
aisnps_set="kidd",
overwrite_encoder=False,
models_directory=None,
aisnps_directory=None,
):
"""One-hot encode the genotypes
:param df: A DataFrame of samples with genotypes as columns
:type df: pandas DataFrame
:param aisnps_set: One of either {kidd, seldin}
:type aisnps_set: str
:param overwrite_encoder: Flag whether or not to overwrite the saved encoder for the given aisnps_set. Default: False, will load the saved encoder model.
:type overwrite_encoder: bool
:param models_directory: Path to the directory where the saved encoder model is saved. Default: None, will use the default location.
:type models_directory: str
:param aisnps_directory: Path to the directory where the aisnps are saved. Default: None, will use the default location.
:type aisnps_directory: str
:return: pandas DataFrame of one-hot encoded columns for genotypes and OHE instance
:rtype: pandas DataFrame, OneHotEncoder instance
"""
if models_directory is None:
models_directory = _models_directory
if aisnps_directory is None:
aisnps_directory = _aisnps_directory
models_directory = Path(models_directory)
aisnps_directory = Path(aisnps_directory)
aisnps_set = aisnps_set.lower()
try:
aisnps = pd.read_csv(
aisnps_directory.joinpath(
f"thousand_genomes.{aisnps_set}.dataframe.csv"
),
nrows=0,
index_col=0,
).drop(columns=["population", "superpopulation", "gender"])
except FileNotFoundError:
logger.critical("""aisnps_set must be either "kidd" or "seldin".""")
return
# concat will add snps (columns) to the df that aren't in the user-submitted
# df. Then drop the snps (columns) that are in the user-submitted df, but not
# in the aisnps set.
df = pd.concat([aisnps, df])[aisnps.columns]
# TODO: Impute missing values
# imputer = knnImputer(n_neighbors=9)
# imputed_aisnps = imputer.fit_transform(df)
if overwrite_encoder:
# 1. Use a different encoder technique (OHE works for now)
# 2. Pass a list of valid genotypes (overkill, dimensionality explodes)
ohe = OneHotEncoder(
sparse=False,
handle_unknown="ignore",
)
X = ohe.fit_transform(df.values)
# overwrite the old encoder with a new one
joblib.dump(
ohe, models_directory.joinpath(f"one_hot_encoder.{aisnps_set}.bin")
)
logger.info(
f"Wrote a new encoder to {models_directory}/one_hot_encoder.{aisnps_set}.bin"
)
else:
ohe = joblib.load(
models_directory.joinpath(f"one_hot_encoder.{aisnps_set}.bin")
)
logger.info(
f"Successfully loaded an encoder from {models_directory}/one_hot_encoder.{aisnps_set}.bin"
)
X = ohe.transform(df.values)
return pd.DataFrame(
X, index=df.index, columns=ohe.get_feature_names(df.columns.tolist())
)
def process_user_input(input_data, aisnps_directory=None, aisnps_set=None):
"""Process the user-submitted input data.
:param input_data: path to a genotype file or a directory of genotype files, or a SNPs DataFrame
:type input_data: str or pandas DataFrame
:param aisnps_directory: directory containing the aisnp files, defaults to None
:type aisnps_directory: str, optional
:param aisnps_set: one of either {kidd, seldin}, defaults to None
:type aisnps_set: str, optional
:return: DataFrame where samples are row and genotypes are columns
:rtype: pandas DataFrame
"""
if aisnps_directory is None:
aisnps_directory = _aisnps_directory
if aisnps_set is None:
aisnps_set = _aisnps_set
aisnps_directory = Path(aisnps_directory)
aisnpsdf = pd.read_csv(
aisnps_directory.joinpath(f"{aisnps_set}.aisnp.txt"),
dtype={"rsid": str, "chromosome": str, "position_hg19": int},
sep="\t",
)
try:
input_data_is_pathlike = bool(Path(input_data))
except TypeError:
input_data_is_pathlike = False
# If the user-submitted input data is a directory, loop over all the files
# to create a DataFrame of all the input data.
if input_data_is_pathlike:
if Path(input_data).is_dir():
snpsdf = pd.DataFrame(
columns=[
col
for col in aisnpsdf.columns
if col not in ["rsid", "chromosome", "position_hg19"]
]
)
for filepath in Path(input_data).iterdir():
try:
snpsdf = pd.concat(
[snpsdf, _input_to_dataframe(filepath, aisnpsdf)]
)
except Exception as e:
logger.debug(e)
logger.warning(
f"Skipping {filepath} because it was not valid"
)
return snpsdf
# The user-submitted input data is a single file.
else:
# _input_to_dataframe needs a Path object
input_data = Path(input_data)
try:
snpsdf = _input_to_dataframe(input_data, aisnpsdf)
# SNPs will try to read the DataFrame file
if snpsdf is not None:
return snpsdf
logger.debug(
"input_data is not a valid SNPs format, that's ok, trying to read as a pre-formatted DataFrame"
)
except Exception as e:
logger.debug(e)
# read the user-submitted preformatted data as a DataFrame
try:
snpsdf = pd.read_csv(
input_data,
index_col=0,
sep=None,
engine="python",
dtype=str,
)
# Need to clean up the dataframe if there is extra stuff in it
# keep the first column, it's the index
cols_to_keep = [snpsdf.columns[0]]
for col in snpsdf.columns[1:]:
if col.startswith("rs"):
cols_to_keep.append(col)
return snpsdf[cols_to_keep]
except:
raise ValueError(
f"{input_data} is not a valid file or directory. Please provide a valid file or directory."
)
else:
snpsdf = _input_to_dataframe(input_data, aisnpsdf)
return snpsdf
def _input_to_dataframe(input_data, aisnpsdf):
"""Reads one file and returns a pandas DataFrame.
:param aisnpsdf: A DataFrame of aisnps
:type aisnpsdf: pandas DataFrame
:param input_data: Path object to the file to be read or a SNPs DataFrame
:type input_data: Path
:return: A DataFrame of one record and many columns for each aisnp.
:rtype: pandas DataFrame
"""
# try to read a single file
try:
is_pathlike = bool(Path(input_data))
except TypeError:
is_pathlike = False
if is_pathlike:
try:
snpsobj = SNPs(str(input_data))
if snpsobj.count == 0:
logger.debug(f"No snps found in the input_data")
return None
snpsdf = snpsobj.snps
except FileNotFoundError:
logger.critical(f"Could not find file {input_data}")
sample_id = Path(input_data).name
else:
snpsdf = input_data
sample_id = "sample"
snpsdf = snpsdf.reset_index()
snpsdf.rename(
columns={"chrom": "chromosome", "pos": "position_hg19"},
inplace=True,
)
# subset to aisnps
snpsdf = aisnpsdf.merge(
snpsdf, on=["rsid", "chromosome", "position_hg19"], how="left"
)
# inform user how many missing snps
n_aisnps = snpsdf["genotype"].notnull().sum()
n_aisnps_total = snpsdf.shape[0]
logger.info(
f"{sample_id} has a valid genotype for {n_aisnps} out of a possible {n_aisnps_total} ({(n_aisnps / n_aisnps_total) * 100}%)"
)
snpsdfT = pd.DataFrame(columns=snpsdf["rsid"].tolist())
snpsdfT.loc[sample_id] = snpsdf["genotype"].tolist()
return snpsdfT
| 2.265625
| 2
|
chapter03/battle_scene.py
|
gothedistance/python-book
| 17
|
12776843
|
<reponame>gothedistance/python-book<filename>chapter03/battle_scene.py
import random
# The player's hit points
my_hit_point = 15
# The slime's hit points
slime_hit_point = 8
# Attack order
# Here the player is assumed to attack first
index = 0
# Fight until one side runs out of hit points
# The loop ends when a hit point total drops to 0 or below
while slime_hit_point > 0 and my_hit_point > 0:
# Randomly decide how much damage to deal
attack = random.randint(1, 7)
# The player and the slime take turns attacking
if index % 2 == 0:
print('スライムに ' + str(attack) + ' のダメージ')
slime_hit_point -= attack
else:
print('ゆうしゃに ' + str(attack) + ' のダメージ')
my_hit_point -= attack
index += 1
# Once the while loop ends, one of the two is dead
# If the player still has hit points left, the slime has been defeated!
if my_hit_point > 0:
print('スライムをやっつけた')
else:
print('ゆうしゃは死んでしまった')
| 3.265625
| 3
|
cctbx/sgtbx/direct_space_asu/plane_group_reference_table.py
|
dperl-sol/cctbx_project
| 155
|
12776844
|
<filename>cctbx/sgtbx/direct_space_asu/plane_group_reference_table.py
from __future__ import absolute_import, division, print_function
from cctbx.sgtbx.direct_space_asu import direct_space_asu
from cctbx.sgtbx.direct_space_asu.short_cuts import *
from six.moves import range
def asu_01(): # p_1 (s.g. 1)
return (direct_space_asu('P 1')
& x0
& +x1
& y0
& +y1
& z0
& +z1
)
def asu_02(): # p_2 (s.g. 3)
return (direct_space_asu('P 2')
& x0(y2)
& x2(y2)
& y0
& +y1
& z0
& +z1
)
def asu_03(): # p_m (s.g. 6)
return (direct_space_asu('P -2x')
& x0
& x2
& y0
& +y1
& z0
& +z1
)
def asu_04(): # p_g (s.g. 7)
return (direct_space_asu('P -2xb')
& x0(+y2)
& x2(+y2)
& y0
& +y1
& z0
& +z1
)
def asu_05(): # c_m (s.g. 8)
return (direct_space_asu('C -2x')
& x0
& x2
& y0
& +y2
& z0
& +z1
)
def asu_06(): # p_2_m_m (s.g. 25)
return (direct_space_asu('P 2 -2')
& x0
& x2
& y0
& y2
& z0
& +z1
)
def asu_07(): # p_2_m_g (s.g. 28)
return (direct_space_asu('P 2 -2a')
& x0(y2)
& x4
& y0
& +y1
& z0
& +z1
)
def asu_08(): # p_2_g_g (s.g. 32)
return (direct_space_asu('P 2 -2ab')
& x0
& x2(-y0)
& y0
& +y2
& z0
& +z1
)
def asu_09(): # c_2_m_m (s.g. 35)
return (direct_space_asu('C 2 -2')
& x0
& x4(y4)
& y0
& y2
& z0
& +z1
)
def asu_10(): # p_4 (s.g. 75)
return (direct_space_asu('P 4')
& x0(-y0)
& x2
& y0
& y2(-x2)
& z0
& +z1
)
def asu_11(): # p_4_m_m (s.g. 99)
return (direct_space_asu('P 4 -2')
& x0
& y2
& -p0
& z0
& +z1
)
def asu_12(): # p_4_g_m (s.g. 100)
return (direct_space_asu('P 4 -2ab')
& x0(-y0)
& y0
& m2
& z0
& +z1
)
def asu_13(): # p_3 (s.g. 143)
return (direct_space_asu('P 3')
& x0(-y0)
& y0
& k1
& m1(-h1 | -k1)
& h1
& z0
& +z1
)
def asu_14(): # p_3_m_1 (s.g. 156)
return (direct_space_asu('P 3 -2"')
& h0
& m1
& k0
& z0
& +z1
)
def asu_15(): # p_3_1_m (s.g. 157)
return (direct_space_asu('P 3 -2')
& y0
& k1
& m1(y3)
& p0
& z0
& +z1
)
def asu_16(): # p_6 (s.g. 168)
return (direct_space_asu('P 6')
& y0
& k1
& m1(y3)
& p0(-y0)
& z0
& +z1
)
def asu_17(): # p_6_m_m (s.g. 183)
return (direct_space_asu('P 6 -2')
& y0
& k1
& -h0
& z0
& +z1
)
def get_asu(point_group_number):
return eval("asu_%02d" % point_group_number)()
if (__name__ == "__main__"):
for i in range(1,17+1):
get_asu(i).show_summary()
| 1.945313
| 2
|
python/fixrgraph/db/scripts/querydb.py
|
LesleyLai/biggroum
| 7
|
12776845
|
<reponame>LesleyLai/biggroum
""" Test script used to query the db programmatically
"""
import sys
import os
import optparse
import logging
import string
import collections
from fixrgraph.db.isodb import IsoDb
import sqlalchemy
if (len(sys.argv) != 2):
print "Not enough param"
db_path=sys.argv[1]
if (not os.path.isfile(db_path)):
print "db does not exist"
sys.exit(0)
db = IsoDb(db_path)
def get_query_res(db, query):
res = db.engine.execute(query)
return res.fetchone()
print(os.path.basename(db_path))
db.engine.execute("update logs set user_time = 0.01 where user_time = 0 and status = \"ok\"")
res = get_query_res(db, "SELECT count(isos.id) from isos")
isos = res[0]
print("Total iso: %d" % isos)
res = get_query_res(db, "SELECT count(logs.id) from logs")
logs = res[0]
print("Num of logs: %d" % logs)
if (isos != logs):
    print("--- WARNING: logs and isos are different ---")
res = get_query_res(db, "SELECT count(logs.id),sum(logs.user_time),avg(logs.user_time) from logs inner join isos on isos.id = logs.idiso where logs.status = \"ok\"")
solved = res[0]
print("Solved iso: %d" % solved)
print("Tot time: %f" % res[1])
print("Avg time per iso: %f" % res[2])
res = get_query_res(db, "SELECT count(logs.id) from logs inner join isos on isos.id = logs.idiso where logs.status = \"to\"")
to = res[0]
print("Tot to: %d" % to)
if (solved + to != isos):
    print("--- WARNING: solved + to (%d) is different from isos (%d) ---" % (solved + to, isos))
#db.engine.execute("DELETE from logs")
#db.engine.execute("DELETE from isos")
# db.engine.execute("DELETE from logs where logs.id in "\
# "(SELECT logs.id from logs " \
# "inner join isos on (isos.id = logs.idiso) " \
# "where isos.isoname >= \"be.\" and isoname <= \"c\")")
# db.engine.execute("DELETE from isos where isos.isoname >= \"be.\" and isoname <= \"c\"")
| 2.59375
| 3
|
login_register/apps/userAuth/urls/v1/urls.py
|
Mr-IT007/django-vue
| 1
|
12776846
|
from django.urls import path
from rest_framework.routers import SimpleRouter
from apps.userAuth.views.v1.views import SendCodeView, UserViewSet
router = SimpleRouter(trailing_slash=False)
router.register('user', UserViewSet, base_name='user')
urlpatterns = [
path('sendcode', SendCodeView.as_view(), name='sendcode'),
# path('register', RegisterView.as_view(), name='register'),
# path('active', ActiveView.as_view(), name='active'),
]
urlpatterns += router.urls
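# Route sketch (illustrative): SimpleRouter(trailing_slash=False) appends the
# standard viewset routes for 'user' (list/detail URLs such as 'user' and
# 'user/<pk>', depending on which actions UserViewSet defines), alongside the
# explicit 'sendcode' path above. Note that newer DRF releases spell the
# keyword `basename=` instead of `base_name=`.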
| 1.828125
| 2
|
src/replys/serializers.py
|
banfstory/REST-API-DJANGO
| 0
|
12776847
|
from rest_framework import serializers
from .models import Reply
from users.models import Profile
from comments.models import Comment
class ReplySerializer(serializers.HyperlinkedModelSerializer):
user = serializers.PrimaryKeyRelatedField(queryset=Profile.objects.all())
comment = serializers.PrimaryKeyRelatedField(queryset=Comment.objects.all())
class Meta:
model = Reply
fields = ('url', 'id', 'content', 'date_replied', 'user', 'comment')
read_only_fields = ['id', 'date_replied']
def get_fields(self):
fields = super().get_fields()
        request = self.context.get('request', None)  # get the request so we can check the HTTP method
        if request and request.method == 'GET':
fields['user'] = serializers.HyperlinkedRelatedField(many=False, view_name='profile-detail', read_only=True)
fields['comment'] = serializers.HyperlinkedRelatedField(many=False, view_name='comment-detail', read_only=True)
        # if the request method is PUT, make the user and comment fields read-only
if request and request.method == 'PUT':
fields['user'].read_only = True
fields['comment'].read_only = True
return fields
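    # Behaviour sketch (illustrative, not part of the original file):
    #   GET  -> 'user' and 'comment' are rendered as read-only hyperlinks
    #   PUT  -> 'user' and 'comment' keep their primary-key form but become read-only
    #   POST -> the writable PrimaryKeyRelatedField declarations above apply unchanged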
| 2.1875
| 2
|
src/ipyannotations/__init__.py
|
tabaspki/ipyannotations
| 0
|
12776848
|
"""Annotate data in jupyter notebooks."""
__version__ = "0.2.0"
from .images import PolygonAnnotator, PointAnnotator, BoxAnnotator
__all__ = ["PolygonAnnotator", "PointAnnotator", "BoxAnnotator"]
| 1.632813
| 2
|
hpc/collect.py
|
samvonderdunk/artistoo
| 0
|
12776849
|
#!/usr/bin/env python
#
# collect.py renames files from subdirectories
#
# Copyright <NAME>, 2007--2018
"""
Synopsis:
Rename files or folders following a pattern containing an integer index,
as in 'image0001.png'. The file will be moved in the current directory
The number in the file name is incremented automatically for each file, and
also if files with this name already exist. Thus pre-existing files are not
overwritten, such that 'collect.py' can be used to pool together many similar
files in a common directory.
Syntax:
collect.py PATTERN [INTEGER] [--copy] PATH1 [PATH2] [PATH3] ...
Arguments:
PATTERN specifies the name of the output files, and should contain a variable
part that will be replaced by an integer. It can be a 'scanf' compatible
pattern such as '%i' or '%0Xi', for example 'image%04i.png'.
A character '%' repeated multiple times, such as `%%%%` or `%%%%%%`, can
also be used to specify the size of the integer portion of the name.
The pattern can include a '/' that would indicate a directory, and if this
directory does not exist, collect.py will create it before moving the file.
if specified, `--copy` will copy the files/directory instead of moving them
if specified, INTEGER is the first index to be used (default=0)
PATH1, PATH2, etc. is a list of files or directories
Examples:
collect.py image%%%%.png *.png
will rename image files to: image0000.png, image0001.png, etc.
collect.py --copy image%%%%.png 1 run*/image.png
will copy the image files, starting at index 1
collect.py run%%%%/config.cym config*.cym
will create directories `run????` and move the `config*.cym` files into them
<NAME>, 2012--2018. Last modified 2.10.2017
"""
import sys, shutil, os, curses.ascii
#------------------------------------------------------------------------
def copy_recursive(src, dst):
"""Copy directory recursively"""
if os.path.isfile(src):
shutil.copy2(src, dst)
elif os.path.isdir(src):
try:
os.mkdir(dst)
except OSError:
pass
files = os.listdir(src)
for f in files:
s = os.path.join(src, f)
d = os.path.join(dst, f)
copy_recursive(s, d)
def main(args):
"""rename files"""
do_copy = False
    arg = args.pop(0)
# check if 'copy' specified before pattern
if arg=='-c' or arg=='--copy' or arg=='copy=1':
do_copy = True
        pattern = args.pop(0)
else:
pattern = arg
# check validity of the pattern
if os.path.isfile(pattern):
sys.stderr.write("Error: first argument should be the pattern used to build output file name")
return 1
try:
res = ( pattern % 0 )
except:
# check for repeated '%' character:
for n in range(10,0,-1):
s = pattern.find('%'*n)
if s > 0:
                pattern = pattern.replace('%'*n, '%0'+str(n)+'i', 1)
break
try:
res = ( pattern % 0 )
except:
sys.stderr.write("Error: the pattern should accept an integer: eg. '%04i'\n")
return 1
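    # Worked example (illustrative): 'image%%%%.png' fails the first `pattern % 0`
    # attempt, the four '%' characters are then rewritten to '%04i' giving
    # 'image%04i.png', and 'image%04i.png' % 7 yields 'image0007.png'.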
for c in res:
if curses.ascii.isspace(c):
sys.stderr.write("Error: the pattern includes or generates white space character\n")
return 1
# go
paths = []
idx = 0
# parse arguments:
for arg in args:
if arg=='-c' or arg=='--copy' or arg=='copy=1':
do_copy = True
        elif arg.isdigit():
            idx = int(arg)
elif os.path.isfile(arg) or os.path.isdir(arg):
paths.append(arg)
else:
sys.stderr.write("Error: '%s' is not a file or directory" % arg)
return 1
# process all files
res = []
for src in paths:
while idx < 1000000:
dst = pattern % idx
idx += 1
if dst == src:
res.append(dst)
break
if not os.path.exists(dst):
                # make the directory if the destination name includes a directory that does not exist:
dir = os.path.dirname(dst)
if dir and not os.path.isdir(dir):
os.mkdir(dir)
# process file:
if do_copy:
copy_recursive(src, dst)
else:
os.rename(src, dst)
res.append(dst)
print("%s -> %s" % (src, dst))
break
return res
#------------------------------------------------------------------------
if __name__ == "__main__":
if len(sys.argv) < 2 or sys.argv[1].endswith("help"):
print(__doc__)
else:
main(sys.argv[1:])
| 3.875
| 4
|
models/codegnngru.py
|
AntonPrazdnichnykh/CodeGNN-pytorch
| 0
|
12776850
|
<reponame>AntonPrazdnichnykh/CodeGNN-pytorch<gh_stars>0
from typing import Tuple, List, Dict, Union
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from pytorch_lightning import LightningModule
from omegaconf import DictConfig
from models.parts import GCNLayer, GRUDecoder
from utils.common import PAD, SOS, EOS, TOKEN, NODE
from utils.training import configure_optimizers_alon
from utils.vocabulary import Vocabulary
from utils.metrics import PredictionStatistic
class CodeGNNGRU(LightningModule):
def __init__(self, config, vocabulary):
super().__init__()
self.save_hyperparameters()
self._config = config
self._vocabulary = vocabulary
if SOS not in vocabulary.label_to_id:
raise ValueError(f"Can't find SOS token in label to id vocabulary")
self._label_pad_id = vocabulary.label_to_id[PAD]
self._metric_skip_tokens = [
vocabulary.label_to_id[i] for i in [PAD, EOS, SOS] if i in vocabulary.label_to_id
]
#source code embedding and node token embeddings
self.token_embedding = nn.Embedding(
len(vocabulary.token_to_id), config.embedding_size, padding_idx=vocabulary.token_to_id[PAD]
)
#node embeddings
self.node_embedding = nn.Embedding(
len(vocabulary.node_to_id), config.embedding_size, padding_idx=vocabulary.node_to_id[PAD]
)
#Encoder
self.source_code_enc = nn.GRU(
config.embedding_size,
config.hidden_size,
config.encoder_num_layers,
dropout=config.rnn_dropout if config.encoder_num_layers > 1 else 0,
batch_first=True,
)
gcn_layers = [GCNLayer(config.embedding_size, config.gcn_hidden_size)]
gcn_layers.extend(
[GCNLayer(config.gcn_hidden_size, config.gcn_hidden_size) for _ in range(config.num_hops - 1)]
)
self.gcn_layers = nn.ModuleList(gcn_layers)
self.ast_rnn_enc = nn.GRU(
config.gcn_hidden_size,
config.hidden_size,
config.encoder_num_layers,
dropout=config.rnn_dropout if config.encoder_num_layers > 1 else 0,
batch_first=True,
)
#Decoder
self.decoder = GRUDecoder(config, vocabulary)
# saving predictions
if not os.path.exists(config.output_dir):
os.mkdir(config.output_dir)
self._test_outputs = []
self._val_outputs = []
self.val = False
#SWA
self.swa = (config.hyper_parameters.optimizer == "SWA")
@property
def config(self) -> DictConfig:
return self._config
@property
def vocabulary(self) -> Vocabulary:
return self._vocabulary
def configure_optimizers(self) -> Tuple[List[Optimizer], List[_LRScheduler]]:
return configure_optimizers_alon(self._config.hyper_parameters, self.parameters())
def forward(
self,
source_code,
ast_nodes,
ast_node_tokens,
ast_edges,
target = None,
):
sc_emb = self.token_embedding(source_code)
ast_node_emb = self.node_embedding(ast_nodes) + self.token_embedding(ast_node_tokens).sum(2) # no second term in original implementation, but why not
sc_enc, sc_h = self.source_code_enc(sc_emb)
ast_enc = ast_node_emb
for i in range(self._config.num_hops):
ast_enc = self.gcn_layers[i](ast_enc, ast_edges)
ast_enc, _ = self.ast_rnn_enc(ast_enc, sc_h)
output_logits = self.decoder(
sc_enc,
ast_enc,
sc_h,
self._config.max_label_parts + 1,
target
)
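        # Data-flow sketch (illustrative): source-code tokens go through the GRU
        # encoder (sc_enc, sc_h); AST node + token embeddings pass through
        # `num_hops` GCN layers and a second GRU initialised with sc_h (ast_enc);
        # both encodings and sc_h are handed to the GRU decoder, which emits up to
        # max_label_parts + 1 steps of logits (presumably teacher-forced when
        # `target` is supplied).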
# target_emb = self.target_embedding(target)
# dec_out, _ = self.decoder(target_emb, h_0=sc_h)
#
# sc_attn = F.softmax(torch.bmm(sc_enc, dec_out.transpose(1, 2)), dim=-1)
# sc_context = torch.bmm(sc_attn, sc_enc)
#
# ast_attn = F.softmax(torch.bmm(ast_enc, dec_out.transpose(1, 2)), dim=-1)
# ast_context = torch.bmm(ast_attn, ast_enc)
#
# context = torch.cat((sc_context, dec_out, ast_context), dim=1)
#
# out = F.relu(self.tdd(context))
#
# return self.linear(out.view(batch_size, -1))
return output_logits
def _calculate_loss(self, logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
"""
Calculate cross entropy ignoring PAD index
:param logits: [seq_len; batch_size; vocab_size]
:param labels: [seq_len; batch_size]
:return: [1]
"""
batch_size = labels.shape[-1]
_logits = logits.permute(1, 2, 0)
_labels = labels.transpose(0, 1)
loss = F.cross_entropy(_logits, _labels, reduction="none")
mask = _labels != self._vocabulary.label_to_id[PAD]
loss = loss * mask
loss = loss.sum() / batch_size
return loss
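    # Shape walk-through (illustrative): logits [seq_len; batch; vocab] is permuted
    # to [batch; vocab; seq_len] and labels [seq_len; batch] transposed to
    # [batch; seq_len]; cross_entropy(reduction="none") then yields a
    # [batch; seq_len] loss matrix, PAD positions are zeroed by the mask, and the
    # sum is averaged over the batch size.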
def training_step(
self, batch: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],
batch_idx: int,
) -> Dict:
source_code, ast_nodes, ast_node_tokens, ast_edges, labels = batch
logits = self(source_code, ast_nodes, ast_node_tokens, ast_edges, labels)
labels = batch[-1]
loss = self._calculate_loss(logits, labels)
prediction = logits.argmax(-1)
statistic = PredictionStatistic(True, self._label_pad_id, self._metric_skip_tokens)
batch_metric = statistic.update_statistic(labels, prediction)
log: Dict[str, Union[float, torch.Tensor]] = {'train/loss': loss}
for key, value in batch_metric.items():
log[f"train/{key}"] = value
self.log_dict(log)
self.log("f1", batch_metric["f1"], prog_bar=True, logger=False)
return {"loss": loss, "statistic": statistic}
def validation_step(
self, batch: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],
batch_idx: int,
test: bool = False,
) -> Dict:
if self.swa and not self.val:
print("Validation starts")
self.trainer.optimizers[0].swap_swa_sgd()
self.val = True
source_code, ast_nodes, ast_node_tokens, ast_edges, labels = batch
logits = self(source_code, ast_nodes, ast_node_tokens, ast_edges)
labels = batch[-1]
loss = self._calculate_loss(logits, labels)
prediction = logits.argmax(-1)
if test:
self._test_outputs.append(prediction.detach().cpu())
else:
self._val_outputs.append(prediction.detach().cpu())
statistic = PredictionStatistic(True, self._label_pad_id, self._metric_skip_tokens)
statistic.update_statistic(labels, prediction)
return {"loss": loss, "statistic": statistic}
def test_step(
self, batch: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],
batch_idx: int,
) -> Dict:
return self.validation_step(batch, batch_idx, test=True)
# ========== On epoch end ==========
def _shared_epoch_end(self, outputs: List[Dict], group: str):
with torch.no_grad():
mean_loss = torch.stack([out["loss"] for out in outputs]).mean().item()
statistic = PredictionStatistic.create_from_list([out["statistic"] for out in outputs])
epoch_metrics = statistic.get_metric()
log: Dict[str, Union[float, torch.Tensor]] = {f"{group}/loss": mean_loss}
for key, value in epoch_metrics.items():
log[f"{group}/{key}"] = value
self.log_dict(log)
self.log(f"{group}_loss", mean_loss)
def training_epoch_end(self, outputs: List[Dict]):
self._shared_epoch_end(outputs, "train")
def validation_epoch_end(self, outputs: List[Dict]):
self._shared_epoch_end(outputs, "val")
torch.save(self._val_outputs,
f"{self._config.output_dir}/{self._config.hyper_parameters.optimizer}_epoch{self.current_epoch}_val_outputs.pkl")
self._val_outputs = []
print("Validation finished")
if self.swa:
self.trainer.optimizers[0].swap_swa_sgd()
self.val = False
def test_epoch_end(self, outputs: List[Dict]):
self._shared_epoch_end(outputs, "test")
torch.save(self._test_outputs,
f"{self._config.output_dir}/{self._config.hyper_parameters.optimizer}_test_outputs.pkl")
| 2.015625
| 2
|