text stringlengths 8 6.05M |
|---|
#! /usr/bin/python
import sys
from xml import sax
from xml.sax import saxutils
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
class code_getter_handler(sax.handler.ContentHandler):
    """SAX handler that echoes an XML document to *outstream*, replacing the
    character data inside <pre> elements with pygments-highlighted Python,
    wrapped in <pre class="emlist highlight">."""
    def __init__(self, outstream):
        # Element names whose text content is routed through pygments.
        self.targetstags = ["pre"]
        # Name of the most recently opened element.
        self.name = ""
        # Buffered character data for a target element.
        self.contents = ""
        self.out = outstream
        # Generator used to echo all non-target markup unchanged.
        self.xmlGenerator = saxutils.XMLGenerator(self.out)
        return
    def startElement(self, name, attrs):
        self.name = name
        if self.isToBeEvaluated():
            # Open the target element by hand so the highlight class is added;
            # the matching close tag is still emitted via xmlGenerator below.
            self.out.write('<pre class="emlist highlight">')
        else:
            self.xmlGenerator.startElement(name, attrs)
        return
    def endElement(self, name):
        # Flush buffered code through pygments; nowrap=True leaves the
        # surrounding <pre> as the only wrapper element.
        if len(self.contents) > 0:
            self.out.write(highlight(self.contents, PythonLexer(), HtmlFormatter(nowrap=True)))
        self.xmlGenerator.endElement(name)
        self.name = ""
        self.contents = ""
        return
    def characters(self, contents):
        # Buffer text of target elements; pass everything else through.
        if self.isToBeEvaluated():
            self.contents += contents
        else:
            self.xmlGenerator.characters(contents)
        return
    def isToBeEvaluated(self):
        # True while the most recently opened element is a highlight target.
        return ( self.name in self.targetstags )
def main():
    """Parse a small embedded XML snippet and emit it with its <pre> body
    syntax-highlighted (Python 2 script)."""
    #parser = sax.make_parser()
    inputStr = "<pre class=\"emlist\">import time\n" + \
               "version = release = time.strftime('%Y.%m.%d')" + \
               "</pre>"
    print inputStr
    sax.parseString(inputStr, code_getter_handler(sys.stdout))
    return
# NOTE(review): the __main__ guard is commented out, so main() runs as a
# module-level side effect on import -- confirm this is intentional.
#if __name__ == "__main__":
main()
print "done..."
|
# N digits are written on one line with no separating spaces; print the
# sum of the first N digits.
N = int(input())
digit_values = [int(ch) for ch in input()]
print(sum(digit_values[i] for i in range(N)))
|
# First input line: parameters (the second value is the rotation count).
# Second input line: the sequence to rotate right one step at a time.
params = [int(tok) for tok in input().split()]
values = [int(tok) for tok in input().split()]
for _ in range(params[1]):
    values.insert(0, values.pop())
print(*values, end=' ')
|
from .app import increment |
import torch
import torchvision.datasets as dsets
from torchvision import transforms
class Data_Loader():
    """Builds a torch DataLoader over either the LSUN or CelebA dataset,
    with a configurable preprocessing pipeline."""
    def __init__(self, train, dataset, image_path, image_size, batch_size, shuf=True):
        self.dataset = dataset
        self.path = image_path
        self.imsize = image_size
        self.batch = batch_size
        self.shuf = shuf
        self.train = train
    def transform(self, resize, totensor, normalize, centercrop):
        """Compose the requested preprocessing steps in a fixed order:
        crop -> resize -> to-tensor -> normalize."""
        steps = []
        if centercrop:
            steps.append(transforms.CenterCrop(160))
        if resize:
            steps.append(transforms.Resize((self.imsize, self.imsize)))
        if totensor:
            steps.append(transforms.ToTensor())
        if normalize:
            steps.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
        return transforms.Compose(steps)
    def load_lsun(self, classes='church_outdoor_train'):
        """LSUN dataset: resize + tensor + normalize, no center crop."""
        pipeline = self.transform(True, True, True, False)
        return dsets.LSUN(self.path, classes=[classes], transform=pipeline)
    def load_celeb(self):
        """CelebA (ImageFolder under <path>/CelebA): adds a center crop."""
        pipeline = self.transform(True, True, True, True)
        return dsets.ImageFolder(self.path + '/CelebA', transform=pipeline)
    def loader(self):
        """Wrap the selected dataset in a shuffling, last-batch-dropping loader."""
        if self.dataset == 'lsun':
            data = self.load_lsun()
        elif self.dataset == 'celeb':
            data = self.load_celeb()
        return torch.utils.data.DataLoader(dataset=data,
                                           batch_size=self.batch,
                                           shuffle=self.shuf,
                                           num_workers=2,
                                           drop_last=True)
|
# -*- coding: utf-8 -*-
from .orrmscedule import app
|
from dataclasses import dataclass
import arrow
@dataclass
class Plan:
    """An entry in the database: a named task with a due date."""
    name: str
    due_date: arrow.Arrow
    def __repr__(self) -> str:
        # Render as "name: D/M/YYYY H:M:S" with no zero padding.
        when_date = self.due_date.date()
        when_time = self.due_date.time()
        return '{}: {}/{}/{} {}:{}:{}'.format(
            self.name,
            when_date.day, when_date.month, when_date.year,
            when_time.hour, when_time.minute, when_time.second)
from visvis.vvmovie.images2gif import writeGif
#!/usr/bin/env python
from PIL import Image, ImageSequence
import sys, os
FRAME_DURATION = 1.0/60.0
#frames.reverse()
# NOTE(review): this second import rebinds writeGif from a different module;
# only this images2gif version is the one used below.
from images2gif import writeGif
################################################################################
# TEST CODE
################################################################################
if __name__ == "__main__":
    import time
    import pygame
    import OpenGL.GL as gl
    import OpenGL.GLU as glu
    import numpy as np
    import itertools
    import fractions
    import copy
    import itertools
    from checkerboard import CheckerBoard
    from common import DEBUG, COLORS, VSYNC_PATCH_HEIGHT_DEFAULT, VSYNC_PATCH_WIDTH_DEFAULT, SCREEN_LB, SCREEN_LT, SCREEN_RB, SCREEN_RT
    out_filename = "checkerboard_flasher.gif"
    color1 = COLORS['white']
    color2 = COLORS['black']
    N_FRAMES = 600
    # NOTE(review): rebinds the module-level FRAME_DURATION above.
    FRAME_DURATION = 0.1 #second
    # NOTE(review): `nrows` and `width` are not defined anywhere in this
    # chunk -- this raises NameError as written; confirm where they come from.
    CB1 = CheckerBoard(nrows,width, color1 = color1, color2 = color2)
    CB2 = CheckerBoard(nrows,width, color1 = color2, color2 = color1)
    #white/black alternating boards for intermediate signals
    CB_cycle = itertools.cycle((CB1,CB2))
    # NOTE(review): `.next()` is Python 2 only; Python 3 needs next(CB_cycle).
    CB = CB_cycle.next()
    #setup background color
    RGB_args = COLORS['neutral-gray']
    gl.glClearColor(*RGB_args, 1.0)
    frames = []
    for n in range(N_FRAMES):
        #prepare rendering model
        gl.glClear(gl.GL_COLOR_BUFFER_BIT)
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()
        #move so that board is centered, then render
        # NOTE(review): BOARD_WIDTH is not defined in this chunk -- confirm.
        gl.glTranslatef(-0.5*BOARD_WIDTH,-0.5*BOARD_WIDTH,0.0)
        CB.render()
    # NOTE(review): `frames` is never appended to, and `original_duration`
    # is undefined here -- this call cannot work as written; confirm intent.
    writeGif(out_filename, frames, duration=original_duration/1000.0, dither=0)
|
from flask import Flask

# Single-endpoint demo application.
WelcomePython = Flask(__name__)

# BUG FIX: the route decorator referenced an undefined name `helloworld`,
# which raised NameError at import time; the Flask instance is WelcomePython.
@WelcomePython.route("/")
def run():
    """Return a static JSON welcome message."""
    return "{\"message\":\"Welcome to Python Application V1\"}"

if __name__ == "__main__":
    WelcomePython.run(host="0.0.0.0", port=int("5000"), debug=True)
|
from django.contrib import admin
from .models import *
# Register your models here.
# Admin bindings for the Convergence app models.
class UserAdmin(admin.ModelAdmin):
    # Column(s) shown in the user change list.
    # NOTE(review): assumes the User model defines `Convergence_Name`;
    # the model definitions are not visible in this chunk -- confirm.
    list_display = ['Convergence_Name']
admin.site.register(User, UserAdmin)
admin.site.register(Convergence_Board)
admin.site.register(Convergence_Comment)
admin.site.register(Convergence_Comment_reply)
admin.site.register(Convergence_Files)
# register() accepts an iterable of models, so both are registered at once.
admin.site.register([Lecture, Lecture_Status])
|
# format for making IBM Watson API call
from watson_machine_learning_client import WatsonMachineLearningAPIClient
wml_credentials = {
    "url": "https://us-south.ml.cloud.ibm.com",
    "username": "*****",
    "password": "*****",
    "instance_id": "*****"
}
client = WatsonMachineLearningAPIClient(wml_credentials)
# actual details
# SECURITY(review): live-looking credentials are hardcoded below and
# committed to source. Rotate these secrets and load them from environment
# variables or a secrets manager instead of embedding them in code.
from watson_machine_learning_client import WatsonMachineLearningAPIClient
wml_credentials = {
    "url": "https://jp-tok.ml.cloud.ibm.com",
    "username": "ecf736ef-6dc5-423e-b811-7b6f4b7c81fe",
    "password": "b690cf53-0af7-4afa-aa02-2dff24638a55",
    "instance_id": "326eab92-7d79-4c04-ae11-17a1caf0610b"
}
# NOTE(review): this second client rebinds `client`, discarding the first.
client = WatsonMachineLearningAPIClient(wml_credentials)
from .cls import Classify
from .match import Match
from .ner import SequenceLabel
from .mrc import MRC
from .mlm import MaskLM
|
from itertools import product, combinations
import numpy as np
import pandas as pd
import pytest
from vivarium.testing_utilities import metadata
from vivarium_public_health.metrics.utilities import (QueryString, OutputTemplate, to_years, get_output_template,
get_susceptible_person_time, get_disease_event_counts,
get_age_sex_filter_and_iterables,
get_time_iterable, get_lived_in_span, get_person_time_in_span,
get_deaths, get_years_of_life_lost,
get_years_lived_with_disability, get_age_bins,
_MIN_YEAR, _MAX_YEAR, _MIN_AGE, _MAX_AGE)
@pytest.fixture(params=((0, 100, 5, 1000), (20, 100, 5, 1000)))
def ages_and_bins(request):
    """Return (ages, age_bins): an even grid of ages over the configured
    range plus a DataFrame of equal-width age bins covering that range."""
    age_min = request.param[0]
    age_max = request.param[1]
    age_groups = request.param[2]
    num_ages = request.param[3]
    ages = np.linspace(age_min, age_max - age_groups/num_ages, num_ages)
    bin_ages, step = np.linspace(age_min, age_max, age_groups, endpoint=False, retstep=True)
    age_bins = pd.DataFrame({'age_start': bin_ages,
                             'age_end': bin_ages + step,
                             'age_group_name': [str(name) for name in range(len(bin_ages))]})
    return ages, age_bins
@pytest.fixture
def sexes():
    """Both sex categories used by the metrics utilities."""
    return ['Male', 'Female']
@pytest.fixture(params=list(product((True, False), repeat=3)))
def observer_config(request):
    """Every combination of the by_age / by_sex / by_year observer flags."""
    c = {'by_age': request.param[0],
         'by_sex': request.param[1],
         'by_year': request.param[2]}
    return c
@pytest.fixture()
def builder(mocker):
    """Mock simulation builder whose data.load always returns a small
    three-bin age table."""
    builder = mocker.MagicMock()
    df = pd.DataFrame({'age_start': [0, 1, 4],
                       'age_group_name': ['youngest', 'younger', 'young'],
                       'age_end': [1, 4, 6]})
    builder.data.load.return_value = df
    return builder
@pytest.mark.parametrize('reference, test', product([QueryString(''), QueryString('abc')], [QueryString(''), '']))
def test_query_string_empty(reference, test):
    """Concatenation with an empty string is an identity that preserves
    the QueryString type, in both orders and in-place."""
    result = str(reference)
    assert reference + test == result
    assert reference + test == QueryString(result)
    assert isinstance(reference + test, QueryString)
    assert test + reference == result
    assert test + reference == QueryString(result)
    assert isinstance(test + reference, QueryString)
    reference += test
    assert reference == result
    assert reference == QueryString(result)
    assert isinstance(reference, QueryString)
    test += reference
    assert test == result
    assert test == QueryString(result)
    assert isinstance(test, QueryString)
@pytest.mark.parametrize('a, b', product([QueryString('a')], [QueryString('b'), 'b']))
def test_query_string(a, b):
    """Non-empty operands concatenate with ' and ' and stay QueryStrings."""
    assert a + b == 'a and b'
    assert a + b == QueryString('a and b')
    assert isinstance(a + b, QueryString)
    assert b + a == 'b and a'
    assert b + a == QueryString('b and a')
    assert isinstance(b + a, QueryString)
    a += b
    assert a == 'a and b'
    assert a == QueryString('a and b')
    assert isinstance(a, QueryString)
    b += a
    assert b == 'b and a and b'
    assert b == QueryString('b and a and b')
    assert isinstance(b, QueryString)
def test_get_output_template(observer_config):
    """The template contains exactly the placeholders the flags request."""
    template = get_output_template(**observer_config)
    assert isinstance(template, OutputTemplate)
    assert '${measure}' in template.template
    if observer_config['by_year']:
        assert '_in_${year}' in template.template
    if observer_config['by_sex']:
        assert '_among_${sex}' in template.template
    if observer_config['by_age']:
        assert '_in_age_group_${age_group}' in template.template
@pytest.mark.parametrize('measure, sex, age, year',
                         product(['test', 'Test'], ['female', 'Female'],
                                 [1.0, 1, 'Early Neonatal'], [2011, '2011']))
def test_output_template(observer_config, measure, sex, age, year):
    """Substituting all fields at once equals substituting them one at a time."""
    template = get_output_template(**observer_config)
    out1 = template.substitute(measure=measure, sex=sex, age_group=age, year=year)
    out2 = template.substitute(measure=measure).substitute(sex=sex).substitute(age_group=age).substitute(year=year)
    assert out1 == out2
def test_output_template_exact():
    """Substitution lowercases and snake-cases the supplied values."""
    template = get_output_template(by_age=True, by_sex=True, by_year=True)
    out = template.substitute(measure='Test', sex='Female', age_group=1.0, year=2011)
    expected = 'test_in_2011_among_female_in_age_group_1.0'
    assert out == expected
    out = template.substitute(measure='Test', sex='Female', age_group='Early Neonatal', year=2011)
    expected = 'test_in_2011_among_female_in_age_group_early_neonatal'
    assert out == expected
def test_get_age_sex_filter_and_iterables(ages_and_bins, observer_config):
    """Filter string and (age, sex) iterables reflect the by_age/by_sex flags."""
    _, age_bins = ages_and_bins
    age_sex_filter, (ages, sexes) = get_age_sex_filter_and_iterables(observer_config, age_bins)
    assert isinstance(age_sex_filter, QueryString)
    if observer_config['by_age'] and observer_config['by_sex']:
        assert age_sex_filter == '{age_start} <= age and age < {age_end} and sex == "{sex}"'
        for (g1, s1), (g2, s2) in zip(ages, age_bins.set_index('age_group_name').iterrows()):
            assert g1 == g2
            assert s1.equals(s2)
        assert sexes == ['Male', 'Female']
    elif observer_config['by_age']:
        assert age_sex_filter == '{age_start} <= age and age < {age_end}'
        for (g1, s1), (g2, s2) in zip(ages, age_bins.set_index('age_group_name').iterrows()):
            assert g1 == g2
            assert s1.equals(s2)
        assert sexes == ['Both']
    elif observer_config['by_sex']:
        assert age_sex_filter == 'sex == "{sex}"'
        assert len(ages) == 1
        group, data = ages[0]
        assert group == 'all_ages'
        assert data['age_start'] == _MIN_AGE
        assert data['age_end'] == _MAX_AGE
        assert sexes == ['Male', 'Female']
    else:
        # Neither flag: single catch-all age group and the 'Both' sex bucket.
        assert age_sex_filter == ''
        assert len(ages) == 1
        group, data = ages[0]
        assert group == 'all_ages'
        assert data['age_start'] == _MIN_AGE
        assert data['age_end'] == _MAX_AGE
        assert sexes == ['Both']
def test_get_age_sex_filter_and_iterables_with_span(ages_and_bins, observer_config):
    """Same as above, but in_span=True swaps in span-overlap age conditions."""
    _, age_bins = ages_and_bins
    age_sex_filter, (ages, sexes) = get_age_sex_filter_and_iterables(observer_config, age_bins, in_span=True)
    assert isinstance(age_sex_filter, QueryString)
    if observer_config['by_age'] and observer_config['by_sex']:
        expected = '{age_start} < age_at_span_end and age_at_span_start < {age_end} and sex == "{sex}"'
        assert age_sex_filter == expected
        for (g1, s1), (g2, s2) in zip(ages, age_bins.set_index('age_group_name').iterrows()):
            assert g1 == g2
            assert s1.equals(s2)
        assert sexes == ['Male', 'Female']
    elif observer_config['by_age']:
        assert age_sex_filter == '{age_start} < age_at_span_end and age_at_span_start < {age_end}'
        for (g1, s1), (g2, s2) in zip(ages, age_bins.set_index('age_group_name').iterrows()):
            assert g1 == g2
            assert s1.equals(s2)
        assert sexes == ['Both']
    elif observer_config['by_sex']:
        assert age_sex_filter == 'sex == "{sex}"'
        assert len(ages) == 1
        group, data = ages[0]
        assert group == 'all_ages'
        assert data['age_start'] == _MIN_AGE
        assert data['age_end'] == _MAX_AGE
        assert sexes == ['Male', 'Female']
    else:
        assert age_sex_filter == ''
        assert len(ages) == 1
        group, data = ages[0]
        assert group == 'all_ages'
        assert data['age_start'] == _MIN_AGE
        assert data['age_end'] == _MAX_AGE
        assert sexes == ['Both']
@pytest.mark.parametrize('year_start, year_end', [(2011, 2017), (2011, 2011)])
def test_get_time_iterable_no_year(year_start, year_end):
    """Without by_year, a single 'all_years' span covers the global range."""
    config = {'by_year': False}
    sim_start = pd.Timestamp(f'7-2-{year_start}')
    sim_end = pd.Timestamp(f'3-15-{year_end}')
    time_spans = get_time_iterable(config, sim_start, sim_end)
    assert len(time_spans) == 1
    name, (start, end) = time_spans[0]
    assert name == 'all_years'
    assert start == pd.Timestamp(f'1-1-{_MIN_YEAR}')
    assert end == pd.Timestamp(f'1-1-{_MAX_YEAR}')
@pytest.mark.parametrize('year_start, year_end', [(2011, 2017), (2011, 2011)])
def test_get_time_iterable_with_year(year_start, year_end):
    """With by_year, one calendar-year span per simulated year."""
    config = {'by_year': True}
    sim_start = pd.Timestamp(f'7-2-{year_start}')
    sim_end = pd.Timestamp(f'3-15-{year_end}')
    time_spans = get_time_iterable(config, sim_start, sim_end)
    years = list(range(year_start, year_end + 1))
    assert len(time_spans) == len(years)
    for year, time_span in zip(years, time_spans):
        name, (start, end) = time_span
        assert name == year
        assert start == pd.Timestamp(f'1-1-{year}')
        assert end == pd.Timestamp(f'1-1-{year+1}')
def test_get_susceptible_person_time(ages_and_bins, sexes, observer_config):
    """Person time is uniform across output bins and scales with population."""
    ages, age_bins = ages_and_bins
    disease = 'test_disease'
    states = [f'susceptible_to_{disease}', disease]
    pop = pd.DataFrame(list(product(ages, sexes, states)), columns=['age', 'sex', disease])
    pop['alive'] = 'alive'
    # Shuffle the rows
    pop = pop.sample(frac=1).reset_index(drop=True)
    year = 2017
    step_size = pd.Timedelta(days=7)
    person_time = get_susceptible_person_time(pop, observer_config, disease, year, step_size, age_bins)
    values = set(person_time.values())
    assert len(values) == 1
    # Half the population is susceptible; finer stratification splits further.
    expected_value = to_years(step_size)*len(pop)/2
    if observer_config['by_sex']:
        expected_value /= 2
    if observer_config['by_age']:
        expected_value /= len(age_bins)
    assert np.isclose(values.pop(), expected_value)
    # Doubling pop should double person time
    pop = pd.concat([pop, pop], axis=0, ignore_index=True)
    person_time = get_susceptible_person_time(pop, observer_config, disease, year, step_size, age_bins)
    values = set(person_time.values())
    assert len(values) == 1
    assert np.isclose(values.pop(), 2*expected_value)
def test_get_disease_event_counts(ages_and_bins, sexes, observer_config):
    """Event counts are uniform across output bins and scale with population."""
    ages, age_bins = ages_and_bins
    disease = 'test_disease'
    event_time = pd.Timestamp('1-1-2017')
    states = [event_time, pd.NaT]
    pop = pd.DataFrame(list(product(ages, sexes, states)), columns=['age', 'sex', f'{disease}_event_time'])
    # Shuffle the rows
    pop = pop.sample(frac=1).reset_index(drop=True)
    counts = get_disease_event_counts(pop, observer_config, disease, event_time, age_bins)
    values = set(counts.values())
    assert len(values) == 1
    expected_value = len(pop) / len(states)
    if observer_config['by_sex']:
        expected_value /= 2
    if observer_config['by_age']:
        expected_value /= len(age_bins)
    assert np.isclose(values.pop(), expected_value)
    # Doubling pop should double counts
    pop = pd.concat([pop, pop], axis=0, ignore_index=True)
    counts = get_disease_event_counts(pop, observer_config, disease, event_time, age_bins)
    values = set(counts.values())
    assert len(values) == 1
    assert np.isclose(values.pop(), 2 * expected_value)
def test_get_lived_in_span():
    """Simulants overlapping [t_start, t_end] are kept, with ages clipped
    to the span boundaries; fully-outside simulants are dropped."""
    dt = pd.Timedelta(days=5)
    reference_t = pd.Timestamp('1-10-2010')
    early_1 = reference_t - 2*dt
    early_2 = reference_t - dt
    t_start = reference_t
    mid_1 = reference_t + dt
    mid_2 = reference_t + 2*dt
    t_end = reference_t + 3*dt
    late_1 = reference_t + 4*dt
    late_2 = reference_t + 5*dt
    # 28 combinations, six of which are entirely out of the time span
    times = [early_1, early_2, t_start, mid_1, mid_2, t_end, late_1, late_2]
    starts, ends = zip(*combinations(times, 2))
    pop = pd.DataFrame({'age': to_years(10*dt), 'entrance_time': starts, 'exit_time': ends})
    lived_in_span = get_lived_in_span(pop, t_start, t_end)
    # Indices here are from the combinatorics math.  They represent
    # 0: (early_1, early_2)
    # 1: (early_1, t_start)
    # 7: (early_2, t_start)
    # 25: (t_end, late_1)
    # 26: (t_end, late_2)
    # 27: (late_1, late_2)
    assert {0, 1, 7, 25, 26, 27}.intersection(lived_in_span.index) == set()
    # Exits inside the span keep the recorded age at exit.
    exit_before_span_end = lived_in_span.exit_time <= t_end
    assert np.all(lived_in_span.loc[exit_before_span_end, 'age_at_span_end']
                  == lived_in_span.loc[exit_before_span_end, 'age'])
    # Exits after the span have age rolled back to the span end.
    exit_after_span_end = ~exit_before_span_end
    age_at_end = lived_in_span.age - to_years(lived_in_span.exit_time - t_end)
    assert np.all(lived_in_span.loc[exit_after_span_end, 'age_at_span_end']
                  == age_at_end.loc[exit_after_span_end])
    # Entrances inside the span have age rolled back to entrance time.
    enter_after_span_start = lived_in_span.entrance_time >= t_start
    age_at_start = lived_in_span.age - to_years(lived_in_span.exit_time - lived_in_span.entrance_time)
    assert np.all(lived_in_span.loc[enter_after_span_start, 'age_at_span_start']
                  == age_at_start.loc[enter_after_span_start])
    # Entrances before the span have age rolled back to the span start.
    enter_before_span_start = ~enter_after_span_start
    age_at_start = lived_in_span.age - to_years(lived_in_span.exit_time - t_start)
    assert np.all(lived_in_span.loc[enter_before_span_start, 'age_at_span_start']
                  == age_at_start.loc[enter_before_span_start])
def test_get_lived_in_span_no_one_in_span():
    """Populations entirely before or after the span yield an empty frame."""
    dt = pd.Timedelta(days=365.25)
    t_start = pd.Timestamp('1-1-2010')
    t_end = t_start + dt
    pop = pd.DataFrame({'entrance_time': t_start - 2*dt, 'exit_time': t_start - dt, 'age': range(100)})
    lived_in_span = get_lived_in_span(pop, t_start, t_end)
    assert lived_in_span.empty
    pop = pd.DataFrame({'entrance_time': t_end + dt, 'exit_time': t_end + 2*dt, 'age': range(100)})
    lived_in_span = get_lived_in_span(pop, t_start, t_end)
    assert lived_in_span.empty
def test_get_person_time_in_span(ages_and_bins, observer_config):
    """Person time per age bin matches the combinatorially expected totals,
    doubling when sexes are pooled."""
    _, age_bins = ages_and_bins
    start = int(age_bins.age_start.min())
    end = int(age_bins.age_end.max())
    n_ages = len(list(range(start, end)))
    n_bins = len(age_bins)
    # Number of (start, end) intervals that cover each single year of age.
    segments_per_age = [(i + 1)*(n_ages - i) for i in range(n_ages)]
    ages_per_bin = n_ages // n_bins
    age_bins['expected_time'] = [sum(segments_per_age[ages_per_bin*i:ages_per_bin*(i+1)]) for i in range(n_bins)]
    age_starts, age_ends = zip(*combinations(range(start, end + 1), 2))
    women = pd.DataFrame({'age_at_span_start': age_starts, 'age_at_span_end': age_ends, 'sex': 'Female'})
    men = women.copy()
    men.loc[:, 'sex'] = 'Male'
    lived_in_span = pd.concat([women, men], ignore_index=True).sample(frac=1).reset_index(drop=True)
    base_filter = QueryString("")
    span_key = get_output_template(**observer_config).substitute(measure='person_time', year=2019)
    pt = get_person_time_in_span(lived_in_span, base_filter, span_key, observer_config, age_bins)
    if observer_config['by_age']:
        for group, age_bin in age_bins.iterrows():
            group_pt = sum(set([v for k, v in pt.items() if f'in_age_group_{group}' in k]))
            if observer_config['by_sex']:
                assert group_pt == age_bin.expected_time
            else:
                assert group_pt == 2 * age_bin.expected_time
    else:
        group_pt = sum(set(pt.values()))
        if observer_config['by_sex']:
            assert group_pt == age_bins.expected_time.sum()
        else:
            assert group_pt == 2 * age_bins.expected_time.sum()
def test_get_deaths(ages_and_bins, sexes, observer_config):
    """Death counts are uniform across populated bins and scale with population."""
    alive = ['dead', 'alive']
    ages, age_bins = ages_and_bins
    exit_times = [pd.Timestamp('1-1-2012'), pd.Timestamp('1-1-2013')]
    causes = ['cause_a', 'cause_b']
    pop = pd.DataFrame(list(product(alive, ages, sexes, exit_times, causes)),
                       columns=['alive', 'age', 'sex', 'exit_time', 'cause_of_death'])
    # Shuffle the rows
    pop = pop.sample(frac=1).reset_index(drop=True)
    deaths = get_deaths(pop, observer_config, pd.Timestamp('1-1-2010'), pd.Timestamp('1-1-2015'), age_bins, causes)
    values = set(deaths.values())
    expected_value = len(pop) / (len(causes) * len(alive))
    if observer_config['by_year']:
        assert len(values) == 2  # Uniform across bins with deaths, 0 in year bins without deaths
        expected_value /= 2
    else:
        assert len(values) == 1
    value = max(values)
    if observer_config['by_sex']:
        expected_value /= 2
    if observer_config['by_age']:
        expected_value /= len(age_bins)
    assert np.isclose(value, expected_value)
    # Doubling pop should double counts
    pop = pd.concat([pop, pop], axis=0, ignore_index=True)
    deaths = get_deaths(pop, observer_config, pd.Timestamp('1-1-2010'), pd.Timestamp('1-1-2015'), age_bins, causes)
    values = set(deaths.values())
    if observer_config['by_year']:
        assert len(values) == 2  # Uniform across bins with deaths, 0 in year bins without deaths
    else:
        assert len(values) == 1
    value = max(values)
    assert np.isclose(value, 2 * expected_value)
def test_get_years_of_life_lost(ages_and_bins, sexes, observer_config):
    """With a unit life expectancy, YLLs equal death counts bin-for-bin."""
    alive = ['dead', 'alive']
    ages, age_bins = ages_and_bins
    exit_times = [pd.Timestamp('1-1-2012'), pd.Timestamp('1-1-2013')]
    causes = ['cause_a', 'cause_b']
    pop = pd.DataFrame(list(product(alive, ages, sexes, exit_times, causes)),
                       columns=['alive', 'age', 'sex', 'exit_time', 'cause_of_death'])
    # Shuffle the rows
    pop = pop.sample(frac=1).reset_index(drop=True)
    def life_expectancy(index):
        # Constant life expectancy of 1 year for every simulant.
        return pd.Series(1, index=index)
    ylls = get_years_of_life_lost(pop, observer_config, pd.Timestamp('1-1-2010'), pd.Timestamp('1-1-2015'),
                                  age_bins, life_expectancy, causes)
    values = set(ylls.values())
    expected_value = len(pop) / (len(causes) * len(alive))
    if observer_config['by_year']:
        assert len(values) == 2  # Uniform across bins with deaths, 0 in year bins without deaths
        expected_value /= 2
    else:
        assert len(values) == 1
    value = max(values)
    if observer_config['by_sex']:
        expected_value /= 2
    if observer_config['by_age']:
        expected_value /= len(age_bins)
    assert np.isclose(value, expected_value)
    # Doubling pop should double counts
    pop = pd.concat([pop, pop], axis=0, ignore_index=True)
    ylls = get_years_of_life_lost(pop, observer_config, pd.Timestamp('1-1-2010'), pd.Timestamp('1-1-2015'),
                                  age_bins, life_expectancy, causes)
    values = set(ylls.values())
    if observer_config['by_year']:
        assert len(values) == 2  # Uniform across bins with deaths, 0 in year bins without deaths
    else:
        assert len(values) == 1
    value = max(values)
    assert np.isclose(value, 2 * expected_value)
def test_get_years_lived_with_disability(ages_and_bins, sexes, observer_config):
    """With unit disability weights, YLDs are uniform across output bins and
    scale with population size."""
    alive = ['dead', 'alive']
    ages, age_bins = ages_and_bins
    causes = ['cause_a', 'cause_b']
    cause_a = ['susceptible_to_cause_a', 'cause_a']
    cause_b = ['susceptible_to_cause_b', 'cause_b']
    year = 2010
    step_size = pd.Timedelta(days=7)
    pop = pd.DataFrame(list(product(alive, ages, sexes, cause_a, cause_b)),
                       columns=['alive', 'age', 'sex'] + causes)
    # Shuffle the rows
    pop = pop.sample(frac=1).reset_index(drop=True)
    def disability_weight(cause):
        # Weight 1 for simulants currently in the with-condition state, else 0.
        def inner(index):
            sub_pop = pop.loc[index]
            return pd.Series(1, index=index) * (sub_pop[cause] == cause)
        return inner
    disability_weights = {cause: disability_weight(cause) for cause in causes}
    ylds = get_years_lived_with_disability(pop, observer_config, year, step_size, age_bins, disability_weights, causes)
    values = set(ylds.values())
    assert len(values) == 1
    states_per_cause = len(cause_a)
    expected_value = len(pop) / (len(alive) * states_per_cause) * to_years(step_size)
    if observer_config['by_sex']:
        expected_value /= 2
    if observer_config['by_age']:
        expected_value /= len(age_bins)
    assert np.isclose(values.pop(), expected_value)
    # Doubling pop should double person time
    pop = pd.concat([pop, pop], axis=0, ignore_index=True)
    ylds = get_years_lived_with_disability(pop, observer_config, year, step_size, age_bins, disability_weights, causes)
    values = set(ylds.values())
    assert len(values) == 1
    assert np.isclose(values.pop(), 2 * expected_value)
@pytest.mark.parametrize('age_start, exit_age, result_age_end_values, result_age_start_values',
                         [(2, 5, {4, 5}, {2, 4}),
                          (0, None, {1, 4, 6}, {0, 1, 4}),
                          (1, 4, {4}, {1}),
                          (1, 3, {3}, {1}),
                          (0.8, 6, {1, 4, 6}, {0.8, 1, 4})])
def test_get_age_bins(builder, base_config, age_start, exit_age, result_age_end_values, result_age_start_values):
    """Age bins are trimmed/clipped to the configured age_start and exit_age."""
    base_config.update({
        'population': {
            'age_start': age_start,
            'exit_age': exit_age
        }
    }, **metadata(__file__))
    builder.configuration = base_config
    df = get_age_bins(builder)
    assert set(df.age_end) == result_age_end_values
    assert set(df.age_start) == result_age_start_values
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
divt.py
Created by Carlos J. Dรญaz on 2012-11.
Combina hselect y separa para todas las listas de objetos por tiempo
'''
# From a list of image names ($I) produce another with $I,UT,NIMAGES,EXPTIME.
def hselect(x):
    """Run IRAF hselect over every image listed in file *x* and write the
    selected header fields, one line per image, to a new file 'h' + x.
    (Python 2 / pyraf script.)"""
    from pyraf import iraf
    import sys, os, string
    f=open(x,"r")
    w=open('h'+x,'w')
    for imagen in f:
        imagen=string.strip(imagen)
        linea=iraf.hselect(imagen, "$I,UT,NIMAGES,EXPTIME", 'yes', Stdout=1)
        # hselect returns its stdout as a list of lines; keep the first.
        w.write(linea[0]+'\n')
    f.close()
    w.close()
# Splits the images by exposure time and saves them into list files.
def separat(x):
    """Read the header-field list file *x* (as produced by hselect), drop
    malformed entries (logged to Imgerror.log), then split the remaining
    images into time-contiguous groups written to files '<x>grupo<n>'.
    (Python 2 script; indentation reconstructed -- verify nesting.)"""
    import sys, os, string
    print '\n'
    print ' <<------ Busqueda de imagenes erroneas ----->> '
    print '\n'
    # Remove the bad images.
    h=open('Lista4','w')
    f=open(x,'r')
    e=open('Imgerror.log','a')
    for foto in f:
        info=foto.split()
        imagen=info[0]
        if len(info)>=4:
            if len(info)==4:
                # A bare '"' in the UT column means a single-image entry.
                if info[1]=='"':
                    nimagen=1
                else:
                    nimagen=float(info[2])
            elif len(info)==5:
                nimagen=float(info[3])
            else:
                print imagen,"imagen erronea"
                print '-------------------------'
                e.write('%s\n' % (imagen))
            # Keep only multi-image (NIMAGES > 1) entries.
            if nimagen > 1:
                h.write(foto)
            else:
                print imagen,"imagen erronea"
                print '-------------------------'
                e.write('%s\n' % (imagen))
        else:
            print imagen,"imagen erronea"
            print '-------------------------'
            e.write('%s\n' % (imagen))
    f.close()
    h.close()
    print '\n'
    print ' <<------ Division de las imagenes en grupos ----->> '
    print '\n'
    # Build the list of observation times (UT -> seconds since midnight).
    tiempo=[]
    m=open('Lista4','r')
    for foto in m:
        info=foto.split()
        if len(info)== 5:
            UT=info[2][:-1]
            hh=float(UT[:-6])
            mm=float(UT[-5:-3])
            ss=float(UT[-2:])
        else:
            UT=info[1]
            hh=float(UT[:-6])
            mm=float(UT[-5:-3])
            ss=float(UT[-2:])
        ts=hh*3600+mm*60+ss
        tiempo.append(ts)
        #print info[0] ,ts,info[3]
    # Split the images into groups by time.
    m=open('Lista4','r')
    a=0
    g=1
    w=0
    t=[]
    for foto in m:
        info=foto.split()
        imagen=info[0]
        if a==0:
            # First image starts group 1.
            print imagen,'grupo',g
            v=open(x+'grupo'+str(g),'w')
            v.write(imagen+'\n')
            t.insert(0,float(tiempo[0]))
        else:
            # Gap between consecutive observations, compared with the
            # maximum expected duration of the previous exposure set.
            deltat=abs(tiempo[a-1]-tiempo[a])
            Textra=(3.6) #-->><<
            if len(info)== 4:
                n=float(info[2])
                texp=float(info[3])
                TCond=n*(texp+Textra)
            else:
                n=float(info[3])
                texp=float(info[4])
                TCond=n*(texp+Textra)
            if (TCond-deltat) >= 0 :
                # Gap small enough: same group.
                w=w+1
                print imagen,'grupo',g,deltat,TCond
                v.write(imagen+'\n')
                t.insert(w,float(tiempo[a]))
            else:
                # Gap too large: warn if the closed group spans > 600 s,
                # then start a new group file.
                if (t[w]-t[0]) >= 600:
                    print 'CUIDADO:el tiempo trasncurrido en la lista del grupo ',g,' es demasiado grande'
                else:
                    pass
                g=g+1
                t=[]
                w=0
                print '-------------------------'
                print imagen,'grupo',g,deltat,TCond
                v.close()
                v=open(x+'grupo'+str(g),'w')
                v.write(imagen+'\n')
                t.insert(w,float(tiempo[a]))
        a=a+1
    m.close()
    os.system('rm Lista4')
    print '\n'
    print ' <<------ Division de las imagenes en grupos terminada ----->> '
    print '\n'
# Combine hselect and separat for all the object lists named in 'lista_tiposo'.
import sys, os, string
o=open("lista_tiposo","r")
for cadalista in o:
    cadalista=string.strip(cadalista)
    hselect(cadalista)
    # separat consumes the 'h'-prefixed file hselect just wrote.
    separat('h'+cadalista)
o.close()
|
from django.shortcuts import render
# Create your views here.
from django.shortcuts import render
from django.http import HttpResponse
from .models import Agents_Details
from geopy.geocoders import Nominatim
from geopy import distance
def home(request):
    """Render the landing page for the agents list."""
    # return HttpResponse("<h1> Subu </h1>")
    return render(request,"AgentsList/index.html")
def nearest_city_list(city_name):
    """Return up to 100 agents nearest to *city_name*, as a dict mapping
    (name, address, city, state, zipcode) -> distance in km, ascending.

    NOTE(review): this geocodes the city and scans every Agents_Details row
    on every call -- consider caching the geocode result for hot views.
    """
    geolocator = Nominatim(user_agent="AgentsList")
    loc1 = geolocator.geocode(city_name)
    coordinate_1 = (loc1.latitude, loc1.longitude)
    x = Agents_Details.objects.all()
    agent_dict = {}
    for i in x:
        # loc = geolocator.geocode(i.zipcode)
        coordinate_2 = (i.latitude, i.longitude)
        adist = distance.geodesic(coordinate_1, coordinate_2).km
        agent_dict[i.name, i.address, i.city, i.state, i.zipcode] = adist
    # Sort agents by distance and keep the 100 closest.
    sorted_dict = {k: v for k, v in sorted(agent_dict.items(), key=lambda item: item[1])}
    nearestAgentstList = {k: sorted_dict[k] for k in list(sorted_dict)[:100]}
    return nearestAgentstList
# One thin view per supported city: each ranks agents by distance from the
# city (via nearest_city_list) and renders the city template with the result
# under the 'distance_result' context key.
def nyList(request):
    New_York_City_List = nearest_city_list(' New york city, New york')
    return render(request, "AgentsList/newyork.html", {'distance_result': New_York_City_List})
def btList(request):
    Boston_List = nearest_city_list('Boston, Massachusetts')
    return render(request, "AgentsList/boston.html", {'distance_result': Boston_List})
def laList(request):
    LA_List = nearest_city_list('Los Angeles, California')
    return render(request, "AgentsList/la.html", {'distance_result': LA_List})
def ccList(request):
    Chicago_List = nearest_city_list('Chicago, Illinois')
    return render(request, "AgentsList/chicago.html", {'distance_result':Chicago_List})
def htList(request):
    Houston_List = nearest_city_list('Houston, Texas')
    return render(request, "AgentsList/houston.html", {'distance_result': Houston_List})
def pxList(request):
    Phoenix_List = nearest_city_list('Phoenix, Arizona')
    return render(request, "AgentsList/phoenix.html", {'distance_result': Phoenix_List})
def sjList(request):
    San_Jose_List = nearest_city_list('San Jose, California')
    return render(request, "AgentsList/sanjose.html", {'distance_result': San_Jose_List})
def csList(request):
    Columbus_List = nearest_city_list('Columbus, Ohio')
    return render(request, "AgentsList/columbus.html", {'distance_result': Columbus_List})
def dsList(request):
    Dallas_List = nearest_city_list('Dallas, Texas')
    return render(request, "AgentsList/dallas.html", {'distance_result': Dallas_List})
def sdList(request):
    San_Diego_List = nearest_city_list('San Diego, California')
    return render(request, "AgentsList/sandiego.html", {'distance_result': San_Diego_List})
def atList(request):
    Austin_List = nearest_city_list('Austin, Texas')
    return render(request, "AgentsList/austin.html", {'distance_result': Austin_List})
from __future__ import unicode_literals
from django.db import models
from django.conf import settings
from django.db.models import Q
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from friendship.exceptions import AlreadyExistsError, AlreadyFriendsError
from friendship.signals import (
friendship_request_created, friendship_request_rejected,
friendship_request_canceled,
friendship_request_viewed, friendship_request_accepted,
friendship_removed)
@python_2_unicode_compatible
class FriendshipRequest(models.Model):
    """ Model to represent friendship requests """
    from_user = models.ForeignKey(User, related_name='friendship_requests_sent', on_delete=models.CASCADE, unique=False)
    to_user = models.ForeignKey(User, related_name='friendship_requests_received', on_delete=models.CASCADE, unique=False)
    message = models.TextField(_('Message'), blank=True)
    created = models.DateTimeField(default=timezone.now)
    rejected = models.DateTimeField(blank=True, null=True)
    viewed = models.DateTimeField(blank=True, null=True)
    class Meta:
        verbose_name = _('Friendship Request')
        verbose_name_plural = _('Friendship Requests')
        # Only one pending request per (sender, recipient) pair.
        unique_together = ('from_user', 'to_user')
    def __str__(self):
        return "%s" % self.from_user.username
    def accept(self):
        """ Accept this friendship request """
        # Friendship is stored symmetrically: one Friend row per direction.
        relation1 = Friend.objects.create(
            from_user=self.from_user,
            to_user=self.to_user
        )
        relation2 = Friend.objects.create(
            from_user=self.to_user,
            to_user=self.from_user
        )
        friendship_request_accepted.send(
            sender=self,
            from_user=self.from_user,
            to_user=self.to_user
        )
        # The request is consumed once accepted.
        self.delete()
        # Delete any reverse requests
        FriendshipRequest.objects.filter(
            from_user=self.to_user,
            to_user=self.from_user
        ).delete()
        return True
    def reject(self):
        """ reject this friendship request """
        self.rejected = timezone.now()
        self.save()
        friendship_request_rejected.send(sender=self)
    def cancel(self):
        """ cancel this friendship request """
        self.delete()
        friendship_request_canceled.send(sender=self)
        return True
    def mark_viewed(self):
        # Record the first-viewed timestamp and notify listeners.
        self.viewed = timezone.now()
        friendship_request_viewed.send(sender=self)
        self.save()
        return True
@python_2_unicode_compatible
class Friend(models.Model):
    """ Model to represent Friendships """
    to_user = models.ForeignKey(User, related_name='friends', on_delete=models.CASCADE)
    from_user = models.ForeignKey(User, related_name='_unused_friend_relation', on_delete=models.CASCADE)
    created = models.DateTimeField(default=timezone.now)
    class Meta:
        verbose_name = _('Friend')
        verbose_name_plural = _('Friends')
        unique_together = ('from_user', 'to_user')
    def __str__(self):
        return "User #%s is friends with #%s" % (self.to_user.username, self.from_user.username)
    def mutual(self, request):
        """Count user ids appearing both in request.user's friend list and in
        this relation's friend list.

        BUG FIX: the original built the combined lists with
        ``list(a).append(list(b))`` — ``list.append`` returns ``None`` (so the
        totals were ``None`` and the loops below crashed), and even as an
        in-place call it would have *nested* the second list rather than
        merging it.  List concatenation is the correct merge.
        """
        count = 0
        touserfriend1 = Friend.objects.filter(from_user=request.user).values_list('to_user', flat=True)
        touserfriend2 = Friend.objects.filter(to_user=request.user).values_list('from_user', flat=True)
        total_friend1 = list(touserfriend1) + list(touserfriend2)
        # NOTE(review): filtering with ``from_user=self`` passes a Friend
        # instance where a User is expected — confirm whether
        # ``self.from_user`` was intended here.
        fromuserfriend1 = Friend.objects.filter(from_user=self).values_list('to_user', flat=True)
        fromuserfriend2 = Friend.objects.filter(to_user=self).values_list('from_user', flat=True)
        total_friend2 = list(fromuserfriend1) + list(fromuserfriend2)
        # Count common ids (duplicates counted multiply, as before).
        for x in total_friend1:
            for y in total_friend2:
                if x == y:
                    count = count + 1
        return count
    def save(self, *args, **kwargs):
        # Ensure users can't be friends with themselves
        if self.to_user == self.from_user:
            raise ValidationError("Users cannot be friends with themselves.")
        super(Friend, self).save(*args, **kwargs)
|
import time
from datetime import datetime
import traceback
import zipfile
import urllib.request
import os, sys, shutil, subprocess
import shlex
import subprocess
from subprocess import Popen, PIPE
# Resolve a per-user downloads directory, used below as a cache location for
# the embeddable Python zip and get_pip.py between builds.
if os.name == 'nt':
    DOWNLOADS_PATH = os.path.join(os.getenv('USERPROFILE'), 'Downloads')
else:
    try:
        DOWNLOADS_PATH = os.path.join(os.path.expanduser('~'), 'downloads')
    # NOTE(review): os.path.expanduser does not normally raise, so this
    # fallback is effectively dead code — kept defensively.
    except:
        DOWNLOADS_PATH = os.getcwd()
def run_cmd(command):
    """Run *command* through the shell, echoing its stdout line by line.

    Returns the leftover stdout bytes from communicate() (normally b'' since
    the streaming loop drains the pipe).  Raises Exception(command, exit_code,
    output) when the command exits non-zero.
    """
    print("Running cmd: ", command)
    process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    # Stream output as it is produced so long-running commands stay visible.
    while True:
        nextline = process.stdout.readline().decode('UTF-8')
        if nextline == '' and process.poll() is not None:
            break
        sys.stdout.write(nextline)
        sys.stdout.flush()
    output = process.communicate()[0]
    exitCode = process.returncode
    if exitCode == 0:
        print(output)
        return output
    # BUG FIX: the original had a dead `print("cmd executed")` after the
    # return/raise paths (unreachable) and an unused `cmd` alias — removed.
    raise Exception(command, exitCode, output)
def show_traceback(err):
    """Append *err* and its traceback to ERRORS.txt and return the message."""
    timestamp = str(datetime.now())  # e.g. '2011-05-03 17:45:35.177000'
    details = traceback.format_exc()
    message = "###########\n{}\nERROR:\n{}\n\nDetails:\n{}\n###########\n\n\n".format(timestamp, err, details)
    print(message)
    with open("ERRORS.txt", "a") as log_file:
        log_file.write(message)
    return message
# def ENV_PATH(unset=True):
# """
# Delete all values from PATH environment variables
# or update PATH environment variables
# """
# _environ = os.environ.copy()
# if unset:
# os.environ.clear()
# return _environ # Make sure to KEEP THIS
# else:
# os.environ.update(_environ)
def get_static():
    """Recreate './dist' as a fresh copy of the current working directory.

    Any previous 'dist' folder is removed first so stale build artifacts do
    not leak into the new bundle.
    """
    try:
        shutil.rmtree('./dist')
    # BUG FIX: narrowed from a bare `except:` (which would also swallow
    # KeyboardInterrupt/SystemExit); OSError covers both "dist missing" and
    # permission problems, which is what the original meant to ignore.
    except OSError:
        pass
    print("Copying static files..")
    src = os.getcwd()
    dst = os.path.join(src, "dist")
    # copytree snapshots the source listing before creating dst, so copying
    # into a subdirectory of src is safe here.
    shutil.copytree(src, dst)
    print("Static files colected!")
def prepare_dist(options):
    """
    Create the 'dist' folder where the app will be bundled
    Gather needed imports into a requirements.txt file
    """
    if options["use_pipreqs"]:
        print("Searching modules needed using 'pipreqs'...")
        # pipreqs scans the sources for imports; --ignore dist keeps the
        # freshly-copied output folder out of the scan.
        cmd = "pipreqs . --force --ignore dist"
        run_cmd(cmd)
        shutil.move('requirements.txt', 'dist/requirements.txt')
        print("Done!")
    else:
        print("Searching modules needed using 'pip freeze'...")
        # NOTE(review): 'pip3.exe' plus run_cmd's shell redirection makes this
        # branch Windows-only — confirm that is intended.
        cmd = "pip3.exe freeze > requirements.txt"
        run_cmd(cmd)
        shutil.move('requirements.txt', 'dist/requirements.txt')
        print("Done!")
    print("Checking which modules to exclude or to keep")
    with open('dist/requirements.txt', 'r') as r:
        modules_to_install = r.read().splitlines()
    if options["exclude_modules"]:
        # Set difference drops excluded modules (ordering is not preserved).
        modules_to_install = list(set.difference(set(modules_to_install),
                                                 set(options["exclude_modules"]
                                                 )))
    if options["include_modules"]:
        modules_to_install = modules_to_install + options["include_modules"]
    print("Updating 'dist/requirements.txt' file")
    with open('dist/requirements.txt', 'w') as r:
        for module in modules_to_install:
            # Skip packaging metadata lines and pyvan itself.
            if module.endswith("info") or module.startswith("pyvan"):
                continue
            # Trailing newline after every entry except the last one.
            if not module == modules_to_install[-1]:
                r.write(str(module) + "\n")
            else:
                r.write(str(module))
    print("Requirements check done!")
    return modules_to_install
def get_files_from_url(url, dst):
    """
    Copy the file from the url specified to the dst specified
    """
    # Re-use a previously downloaded copy instead of fetching again.
    if os.path.isfile(dst):
        print("Using already copied file", dst)
        return
    print("Copying data from ", url, " ..")
    headers = {}
    url_request = urllib.request.Request(url, headers=headers)
    # BUG FIX: the original leaked the connection if any read/write failed
    # part-way; context managers guarantee both the response and the output
    # file are closed.  shutil.copyfileobj also streams in larger chunks than
    # the original hand-rolled 1 KiB loop.
    with urllib.request.urlopen(url_request) as url_connect:
        with open(dst, 'wb') as f:
            shutil.copyfileobj(url_connect, f)
    print("Succesfully copied to Downloads!")
def prepare_zip():
    """
    Extracting python embeded zip
    """
    print("Extracting .zip file..")
    zip_ref = zipfile.ZipFile(os.path.join(DOWNLOADS_PATH, 'embeded_python.zip'), 'r')
    zip_ref.extractall('./dist')
    zip_ref.close()
    print("Zip file extracted!")
    time.sleep(1)
    # Rewrite the ._pth so the embeddable distribution runs `import site`,
    # which get_pip.py needs.  NOTE: `'' 'import site'` is implicit literal
    # concatenation and equals 'import site' — three lines are written.
    # NOTE(review): the filename hard-codes Python 3.7 ('python37._pth').
    with open("./dist/python37._pth", 'w') as f:
        for line in ['python37.zip', '.', '' 'import site']:
            f.write(line)
            f.write("\n")
    print("Uncommented 'import site' line from 'python37._pth' file")
    shutil.copy2(os.path.join(DOWNLOADS_PATH, 'get_pip.py'), './dist/get_pip.py')
    print("Copied get_pip.py to './dist'")
def get_modules(modules_to_install):
    """
    Install all needed modules
    """
    # NOTE: this function deliberately changes the process CWD into ./dist and
    # only returns as far back as ./dist — build() relies on that so that
    # prepare_main() writes inside the bundle.
    os.chdir("./dist")
    print("CD to dist")
    print("Running get_pip.py from ", os.getcwd())
    # Bootstrap pip into the embeddable Python distribution.
    cmd = "python.exe get_pip.py"
    run_cmd(cmd)
    if not os.path.isdir("Scripts"):
        raise Exception("ERROR: pip not installed!")
    print("PIP installed!")
    os.chdir("./Scripts")
    print("CD to Scripts", os.getcwd())
    # Install the app's dependencies into the bundle; --no-cache-dir keeps the
    # bundle build clean, --no-warn-script-location silences PATH warnings.
    cmd = "pip3.exe install -r ../requirements.txt --no-cache-dir --no-warn-script-location"
    run_cmd(cmd)
    print("Done!")
    os.chdir("..")
    print("CD back to 'dist'")
    print("\nFinished installing dependencies!")
def prepare_main(options):
    """Write the .bat launcher for the bundled app.

    Console apps get a `START python <main>` launcher.  GUI apps
    (show_console == False) additionally have the entry script patched with a
    stdout/stderr redirection hack so pythonw.exe does not die on prints.
    """
    main_file = options["main_file_name"]
    print("\nPreparing .bat/ executable file in ", os.getcwd())
    if options["show_console"]:
        launcher = "START python " + main_file
    else:
        print("--noconsole ", os.getcwd())
        with open(main_file, 'r') as src:
            original_lines = src.read().splitlines()
        # Prepended hack: under pythonw.exe, stdout goes to devnull and
        # stderr to a temp file so writes never hit a missing console.
        no_console_hack = ['import sys, os',
                           "if sys.executable.endswith('pythonw.exe'):",
                           " sys.stdout = open(os.devnull, 'w')",
                           ' sys.stderr = open(os.path.join(os.getenv(\'TEMP\'), \'stderr-{}\'.format(os.path.basename(sys.argv[0]))), "w")',
                           '']
        patched_lines = no_console_hack + original_lines
        with open(main_file, "w") as dst:
            dst.write("\n".join(patched_lines) + "\n")
        launcher = "START pythonw " + main_file
    bat_path = os.path.join(os.getcwd(), main_file.replace(".py", ".bat"))
    with open(bat_path, "w") as bat_file:
        bat_file.write(launcher)
def build(build_options):
    """Bundle the app described by *build_options* into a runnable './dist'.

    Steps: copy sources, resolve requirements, obtain get_pip.py and the
    embeddable Python zip (URL download cached in DOWNLOADS_PATH, or local
    copy), unzip, install dependencies, then write the .bat launcher.
    """
    if not os.path.isfile(build_options["main_file_name"]):
        raise Exception("Entry point file(main_file_name) not found!")
    get_static()
    modules_to_install = prepare_dist(build_options)
    # get_pip.py: remote URL (cached in Downloads) or a local file path.
    if "https://" in build_options["get_pip_location"]:
        get_files_from_url(build_options["get_pip_location"], os.path.join(DOWNLOADS_PATH, "get_pip.py"))
    else:
        print("Copying get_pip.py to dist..")
        shutil.copy2(build_options["get_pip_location"], "dist/get_pip.py")
        print("Done!")
    # Same choice for the embeddable Python zip.
    if "https://" in build_options["embeded_python_location"]:
        get_files_from_url(build_options["embeded_python_location"], os.path.join(DOWNLOADS_PATH, "embeded_python.zip"))
    else:
        print("Copying {} to dist..".format(build_options["embeded_python_location"]))
        shutil.copy2(build_options["embeded_python_location"], "dist/embeded_python.zip")
        print("Done!")
    # dist_name = "dist/{}".format(build_options["main_file_name"].replace(".py", ""))
    prepare_zip()
    # NOTE: get_modules() chdirs into ./dist, so prepare_main() writes there.
    get_modules(modules_to_install)
    prepare_main(build_options)
    print("\n\nFinished! Folder 'dist' contains your runnable application!\n\n")
import click
# CLI wrapper around build(); option names mirror the build-options dict keys.
@click.command()
@click.option('--main_file_name', "-f", default="main.py", help='Entry point of the program')
@click.option('--show_console', "-c", default=True, help='Show(console app) or not(gui app) the console window')
@click.option('--use_pipreqs', "-r", default=True, help='Try to minimize the size by installing only the required modules with the help of pipreq module')
# @click.option('--exclude_modules', "-e", multiple=True, default=None, help='List of modules to exclude')
# @click.option('--include_modules', "-i", multiple=True, default=None, help='List of modules to include')
@click.option('--get_pip_location', "-g", default="https://bootstrap.pypa.io/get-pip.py", help='Link to get_pip.py file to download')
@click.option('--embeded_python_location', "-p", default="https://www.python.org/ftp/python/3.7.3/python-3.7.3-embed-amd64.zip", help='Link to embeded python zip from python.com')
@click.option('--make_van', "-v", default=False, help='Make the preparation van.py to configure build.')
def cli(main_file_name, show_console, use_pipreqs, get_pip_location, embeded_python_location, make_van):
    """\npyvan - version 0.0.3\nMake runnable desktop apps from your python scripts more easily with pyvan!\n\n"""
    if make_van:
        # Emit a template van.py build script instead of building right away.
        van_data = 'import pyvan \n\ntry:\n    pyvan.build({"main_file_name": "main.py", \n                 "show_console": False,\n                 "use_pipreqs": True,\n                 "exclude_modules":[],\n                 "include_modules":[],\n                 "get_pip_location": "https://bootstrap.pypa.io/get-pip.py",\n                 "embeded_python_location": "https://www.python.org/ftp/python/3.7.3/python-3.7.3-embed-amd64.zip", \n                 })\nexcept Exception as err:\n    pyvan.show_traceback(err)\n    input("\\nPress enter to exit..")\n\n'
        with open("van.py", "w") as van:
            van.write(van_data)
        click.echo("Made the van.py file. \nModify it if needed and run python van.py to build the distributable.")
    else:
        # exclude/include options are currently disabled (see the
        # commented-out @click.option lines above) and passed as empty lists.
        build({"main_file_name": main_file_name,
               "show_console": show_console,
               "use_pipreqs": use_pipreqs,
               "exclude_modules":[],
               "include_modules":[],
               "get_pip_location": get_pip_location,
               "embeded_python_location": embeded_python_location,
               })
if __name__ == '__main__':
    cli()
# SECURITY NOTE(review): a live-looking Telegram bot token is hard-coded and
# committed to source.  Rotate this credential and load it from an
# environment variable (e.g. os.environ["BOT_TOKEN"]) instead.
TOKEN = '1887185955:AAECQtgZJz-_sQHqH083e-uF9QfKTy0uU6g' # bot token
|
import definitions
import pickle
from wsdm.ts.helpers.regression import regression_utils
import numpy as np
# Module-level singletons, populated once by load_modules() and read by
# find_similarity().
nationality_model = None  # unpickled regression model for nationality scores
profession_model = None  # unpickled regression model for profession scores
word2VecFeature = None  # word2vec feature provider shared by both models
def load_modules(w2vFeature):
    """Load the pickled nationality/profession regression models into the
    module globals and store the word2vec feature provider.

    Args:
        w2vFeature: feature provider later passed to
            regression_utils.get_features_values by find_similarity().
    """
    global nationality_model
    global profession_model
    global word2VecFeature
    # BUG FIX: the original `pickle.load(open(...))` never closed the files;
    # context managers release the handles deterministically.
    with open(definitions.REGRESSION_MODEL_NATIONALITY_PATH, 'rb') as model_file:
        nationality_model = pickle.load(model_file)
    with open(definitions.REGRESSION_MODEL_PROFESSION_PATH, 'rb') as model_file:
        profession_model = pickle.load(model_file)
    word2VecFeature = w2vFeature
def find_similarity(person_name, term, inputType):
    """Predict a relevance score in [0, 7] for (person_name, term).

    inputType selects which loaded regression model to use; raises TypeError
    for an unknown type.  load_modules() must have been called first.
    """
    global nationality_model
    global profession_model
    global word2VecFeature
    features = regression_utils.get_features_values(person_name, term, inputType, word2VecFeature)
    features = features.reshape(1, -1)
    if inputType == definitions.TYPE_NATIONALITY:
        model = nationality_model
    elif inputType == definitions.TYPE_PROFESSION:
        model = profession_model
    else:
        raise TypeError
    score = model.predict(features)
    assert len(score) == 1
    # Clamp the regression output into the valid score range [0, 7].
    return min(7, max(0, score[0]))
|
from django.db import models
class Song(models.Model):
    """An uploaded music track."""
    class Meta:
        verbose_name = "Song"
        verbose_name_plural = "Songs"
    name = models.CharField(verbose_name="Name",max_length = 100)
    # Track length; unit (seconds?) is not stated here — confirm with callers.
    duration = models.IntegerField(verbose_name="Duration")
    # NOTE(review): auto_now=True updates this on *every* save; if "time of
    # upload" is meant, auto_now_add=True is the usual choice — confirm.
    uploaded_time = models.DateTimeField(auto_now=True, verbose_name="Uploaded time")
class Podcast(models.Model):
    """An uploaded podcast episode."""
    class Meta:
        verbose_name = "Podcast"
        verbose_name_plural = "Podcasts"
    name = models.CharField(verbose_name="Name",max_length = 100)
    host = models.CharField(verbose_name="Host",max_length = 100)
    # Free-form participant list (optional).
    participants = models.TextField(verbose_name="Participants",null=True, blank=True)
    # Episode length; unit not stated here — confirm with callers.
    duration = models.IntegerField(verbose_name="Duration")
    # NOTE(review): auto_now=True refreshes on every save; auto_now_add=True
    # would record the original upload time — confirm intent.
    uploaded_time = models.DateTimeField(auto_now=True, verbose_name="Uploaded time")
class AudioBook(models.Model):
    """An uploaded audio book."""
    class Meta:
        verbose_name = "Audio Book"
        verbose_name_plural = "Audio Books"
    title = models.CharField(verbose_name="Title",max_length = 100)
    author = models.CharField(verbose_name="Author",max_length = 100)
    narrator = models.CharField(verbose_name="Narrator",max_length = 100)
    # Book length; unit not stated here — confirm with callers.
    duration = models.IntegerField(verbose_name="Duration")
    # NOTE(review): auto_now=True refreshes on every save; auto_now_add=True
    # would record the original upload time — confirm intent.
    uploaded_time = models.DateTimeField(auto_now=True, verbose_name="Uploaded time")
|
import re
from django import forms
from django.contrib.auth import get_user_model, authenticate
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.template.defaultfilters import filesizeformat, slugify
from django.utils.translation import ugettext, ugettext_lazy as _
from whiskydatabase.models import *
class CustomAuthenticationForm(forms.ModelForm):
    """Email/password login form that looks the user up by User.email.

    NOTE(review): several CJK string literals in this class appear corrupted
    (hard line breaks inside the literals) — very likely an encoding or
    copy/paste artifact; restore the originals from version control.
    """
    email = forms.EmailField()
    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
    # Display labels/placeholders keyed by field name.
    form_info = {
        'email': '้ปๅญไฟก็ฎฑ',
        'password': 'ๅฏ็ขผ'
    }
    # Both error cases intentionally show the same message so the form does
    # not reveal which accounts exist.
    error_messages = {
        'invalid_login': _("้ปๅญไฟก็ฎฑๆๅฏ็ขผ้ฏ่ชค"),
        'inactive': _("้ปๅญไฟก็ฎฑๆๅฏ็ขผ้ฏ่ชค"),
    }
    class Meta:
        model = User
        fields = ['email', 'password']
    def __init__(self, request=None, *args, **kwargs):
        super(CustomAuthenticationForm, self).__init__(*args, **kwargs)
        for field in iter(self.fields):
            # NOTE(review): this guard compares an int (max(...)[0]) with a
            # field-name string, so it is always True — confirm the intent.
            if max(enumerate(iter(self.fields)))[0] != field:
                self.fields[field].required = True
                self.fields[field].label = self.form_info[field]
                self.fields[field].widget.attrs.update({
                    'class': 'form-control',
                    "placeholder": "่ซ่ผธๅ
ฅ" + self.form_info[field]
                })
                self.fields[field].error_messages.update({
                    "required": "ๅฟ
ๅกซๆฌไฝ"
                })
        self.fields['email'].error_messages.update({"invalid": "่ซ่ผธๅ
ฅๅๆณ็้ปๅญไฟก็ฎฑ"})
    def confirm_login_allowed(self, user):
        # Reject users flagged inactive even with correct credentials.
        if not user.is_active:
            raise forms.ValidationError(
                self.error_messages['inactive'],
                code='inactive',
            )
    def get_user_id(self):
        if self.user_cache:
            return self.user_cache.id
        return None
    def get_user(self):
        # Populated by clean(); None until a successful validation.
        return self.user_cache
    def clean(self):
        """Authenticate by email lookup + password check; caches the user."""
        email = self.cleaned_data.get('email')
        password = self.cleaned_data.get('password')
        if email and password:
            UserModel = get_user_model()
            try:
                user = UserModel.objects.get(email=email)
            except UserModel.DoesNotExist:
                self.user_cache = None
            else:
                if user.check_password(password):
                    self.user_cache = user
                else:
                    self.user_cache = None
            # Unknown email and wrong password produce the same error.
            if self.user_cache is None:
                raise forms.ValidationError(
                    self.error_messages['invalid_login'],
                    code='invalid_login',
                )
            else:
                self.confirm_login_allowed(self.user_cache)
        return self.cleaned_data
class CustomUserCreationForm(UserCreationForm):
    """Registration form using email as the username plus a display nickname.

    NOTE(review): several CJK string literals below appear corrupted (hard
    line breaks inside the literals) — restore from version control.
    """
    # Display labels/placeholders keyed by field name.
    form_info = {
        'nickname': 'ไฝฟ็จ่
ๅ็จฑ',
        'email': '้ปๅญไฟก็ฎฑ',
        'password1': 'ๅฏ็ขผ',
        'password2': '็ขบ่ชๅฏ็ขผ',
    }
    error_messages = {'password_mismatch': _("ๅฏ็ขผไธไธ่ด")}
    nickname = forms.CharField(label=_("Nickname"), required=True)
    password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password confirmation"), widget=forms.PasswordInput)
    class Meta:
        model = User
        fields = ['nickname', 'email', 'password1', 'password2']
    def clean_email(self):
        # Enforce unique email since it doubles as the username (see save()).
        email = self.cleaned_data.get('email')
        if email and User.objects.filter(email=email).exists():
            raise forms.ValidationError(u'ๆญคไฟก็ฎฑๅทฒ่ขซ่จปๅไฝฟ็จ')
        return email
    def __init__(self, *args, **kwargs):
        super(CustomUserCreationForm, self).__init__(*args, **kwargs)
        for field in iter(self.fields):
            # NOTE(review): comparing an int (max(...)[0]) with a field-name
            # string is always True — confirm the intent of this guard.
            if max(enumerate(iter(self.fields)))[0] != field:
                self.fields[field].required = True
                self.fields[field].label = self.form_info[field]
                self.fields[field].widget.attrs.update({
                    'class': 'form-control',
                    "placeholder": "่ซ่ผธๅ
ฅ" + self.form_info[field]
                })
                self.fields[field].error_messages.update({
                    "required": "ๅฟ
ๅกซๆฌไฝ"
                })
        self.fields['password2'].widget.attrs.update({"placeholder": "ๅๆฌก็ขบ่ชๅฏ็ขผ"})
    def save(self, commit=True):
        """Create the User (username := email) and its UserProfile."""
        user = super(CustomUserCreationForm, self).save(commit=False)
        user.username = self.cleaned_data["email"]
        if commit:
            user.save()
            userprofile = UserProfile.objects.create(
                user = user,
                nickname = self.cleaned_data["nickname"]
            )
            userprofile.save()
        return user
import re

# Read one line and strip every character of the set {C,A,M,B,R,I,D,G,E}.
raw_text = input()
filtered = re.sub('[CAMBRIDGE]', '', raw_text)
print(filtered)
# Done
|
import os

# Sweep the --weight hyper-parameter over three orders of magnitude,
# launching one training run per value.
for weight_value in ("0.1", "0.01", "0.001"):
    os.system("python train_miniimagenet.py --weight " + weight_value)
|
import sys
import os
import platform
from cx_Freeze import setup, Executable
# Use the Win32GUI base so the frozen app runs without a console window.
base = None
if sys.platform == 'win32':
    base = 'Win32GUI'
if platform.system() == "Windows":
    # NOTE(review): despite its name, this is the directory of *this* script,
    # not the Python install dir; the DLL include_files below only resolve if
    # the script sits next to a Python 'DLLs' folder — confirm.
    PYTHON_DIR = os.path.dirname(os.path.abspath(__file__))
    # Hard-coded per-machine Tcl/Tk paths — adjust when building elsewhere.
    os.environ['TCL_LIBRARY'] = "C:\\Users\\Karthikeyan\\AppData\\Local\\Programs\\Python\\Python36-32\\tcl\\tcl8.6"
    os.environ['TK_LIBRARY'] = "C:\\Users\\Karthikeyan\\AppData\\Local\\Programs\\Python\\Python36-32\\tcl\\tk8.6"
executables = [
    Executable('testing_app.py', targetName='sample.exe', base=base)
]
options = {
    'build_exe': {
        # Sometimes a little fine-tuning is needed
        # exclude all backends except wx
        'include_files': ['chromedriver.exe', (os.path.join(PYTHON_DIR, 'DLLs', 'tcl86t.dll'), ''),
                          (os.path.join(PYTHON_DIR, 'DLLs', 'tk86t.dll'), ''),
                          (os.path.join(PYTHON_DIR, 'DLLs', 'sqlite3.dll'), '')]
    }
}
setup(name='simple_Tkinter',
      version='0.1',
      description='Sample cx_Freeze Tkinter script',
      executables=executables,
      options=options
      )
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from collections import defaultdict
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.build_graph.address import Address
from twitter.common.dirutil import safe_mkdir
from pants.contrib.spindle.targets.spindle_thrift_library import SpindleThriftLibrary
class SpindleGen(NailgunTask):
  """Generates Scala (and paired Java) sources from spindle thrift libraries."""

  # Main class of the codegen binary, used for both invocation and error
  # reporting.  BUG FIX: the failure path below previously referenced
  # `self.main_class`, which is never defined on this task, so a codegen
  # failure raised AttributeError instead of the intended TaskError.
  _CODEGEN_MAIN_CLASS = 'com.foursquare.spindle.codegen.binary.ThriftCodegen'

  @classmethod
  def product_types(cls):
    return [
      'scala',
    ]

  @classmethod
  def register_options(cls, register):
    super(SpindleGen, cls).register_options(register)
    register(
      '--runtime-dependency',
      default=['3rdparty:spindle-runtime'],
      advanced=True,
      type=list,
      help='A list of targets that all spindle codegen depends on at runtime.',
    )
    cls.register_jvm_tool(register,
                          'spindle-codegen',
                          classpath=[
                            JarDependency(org='com.foursquare',
                                          name='spindle-codegen-binary_2.10',
                                          rev='3.0.0-M7'),
                          ])

  @property
  def spindle_classpath(self):
    return self.tool_classpath('spindle-codegen')

  @property
  def synthetic_target_extra_dependencies(self):
    # Resolve the configured runtime-dependency specs into targets; every
    # synthetic codegen target depends on these at runtime.
    return set(
      dep_target
      for dep_spec in self.get_options().runtime_dependency
      for dep_target in self.context.resolve(dep_spec)
    )

  @property
  def namespace_out(self):
    # Root directory the codegen tool writes generated sources under.
    return os.path.join(self.workdir, 'src', 'jvm')

  def codegen_targets(self):
    return self.context.targets(lambda t: isinstance(t, SpindleThriftLibrary))

  def sources_generated_by_target(self, target):
    """Absolute basenames (no extension) generated from `target`'s sources."""
    return [
      os.path.join(self.namespace_out, relative_genned_source)
      for thrift_source in target.sources_relative_to_buildroot()
      for relative_genned_source in calculate_genfiles(thrift_source)
    ]

  def execute_codegen(self, targets):
    """Invoke the spindle codegen binary over the thrift sources of `targets`.

    Raises TaskError when the JVM invocation exits non-zero.
    """
    sources = self._calculate_sources(targets, lambda t: isinstance(t, SpindleThriftLibrary))
    bases = set(
      target.target_base
      for target in self.context.targets(lambda t: isinstance(t, SpindleThriftLibrary))
    )
    scalate_workdir = os.path.join(self.workdir, 'scalate_workdir')
    safe_mkdir(self.namespace_out)
    safe_mkdir(scalate_workdir)
    args = [
      '--template', 'scala/record.ssp',
      '--java_template', 'javagen/record.ssp',
      '--thrift_include', ':'.join(bases),
      '--namespace_out', self.namespace_out,
      '--working_dir', scalate_workdir,
    ]
    args.extend(sources)
    result = self.runjava(classpath=self.spindle_classpath,
                          main=self._CODEGEN_MAIN_CLASS,
                          jvm_options=self.get_options().jvm_options,
                          args=args,
                          workunit_name='generate')
    if result != 0:
      raise TaskError('{} returned {}'.format(self._CODEGEN_MAIN_CLASS, result))

  def execute(self):
    """Run codegen for invalid targets and inject synthetic scala/java targets."""
    targets = self.codegen_targets()
    build_graph = self.context.build_graph
    with self.invalidated(targets, invalidate_dependents=True) as invalidation_check:
      for vts in invalidation_check.invalid_vts:
        invalid_targets = vts.targets
        self.execute_codegen(invalid_targets)
      invalid_vts_by_target = dict([(vt.target, vt) for vt in invalidation_check.invalid_vts])
      vts_artifactfiles_pairs = defaultdict(list)
      for target in targets:
        java_synthetic_name = '{0}-{1}'.format(target.id, 'java')
        java_sources_rel_path = os.path.relpath(self.namespace_out, get_buildroot())
        java_synthetic_address = Address(java_sources_rel_path, java_synthetic_name)
        java_generated_sources = [
          os.path.join(os.path.dirname(source), 'java_{0}.java'.format(os.path.basename(source)))
          for source in self.sources_generated_by_target(target)
        ]
        java_relative_generated_sources = [os.path.relpath(src, self.namespace_out)
                                           for src in java_generated_sources]
        # We can't use context.add_new_target because it now does fancy management
        # of synthetic target / target root interaction that breaks us here.
        java_target_base = os.path.join(get_buildroot(), java_synthetic_address.spec_path)
        if not os.path.exists(java_target_base):
          os.makedirs(java_target_base)
        build_graph.inject_synthetic_target(
          address=java_synthetic_address,
          target_type=JavaLibrary,
          dependencies=[dep.address for dep in self.synthetic_target_extra_dependencies],
          derived_from=target,
          sources=java_relative_generated_sources,
        )
        java_synthetic_target = build_graph.get_target(java_synthetic_address)
        # NOTE(pl): This bypasses the convenience function (Target.inject_dependency) in order
        # to improve performance.  Note that we can walk the transitive dependee subgraph once
        # for transitive invalidation rather than walking a smaller subgraph for every single
        # dependency injected.  This walk is done below, after the scala synthetic target is
        # injected.
        for concrete_dependency_address in build_graph.dependencies_of(target.address):
          build_graph.inject_dependency(
            dependent=java_synthetic_target.address,
            dependency=concrete_dependency_address,
          )
        if target in invalid_vts_by_target:
          vts_artifactfiles_pairs[invalid_vts_by_target[target]].extend(java_generated_sources)
        synthetic_name = '{0}-{1}'.format(target.id, 'scala')
        sources_rel_path = os.path.relpath(self.namespace_out, get_buildroot())
        synthetic_address = Address(sources_rel_path, synthetic_name)
        generated_sources = [
          '{0}.{1}'.format(source, 'scala')
          for source in self.sources_generated_by_target(target)
        ]
        relative_generated_sources = [os.path.relpath(src, self.namespace_out)
                                      for src in generated_sources]
        synthetic_target = self.context.add_new_target(
          address=synthetic_address,
          target_type=ScalaLibrary,
          dependencies=self.synthetic_target_extra_dependencies,
          sources=relative_generated_sources,
          derived_from=target,
          java_sources=[java_synthetic_target.address.spec],
        )
        # NOTE(pl): This bypasses the convenience function (Target.inject_dependency) in order
        # to improve performance.  Note that we can walk the transitive dependee subgraph once
        # for transitive invalidation rather than walking a smaller subgraph for every single
        # dependency injected.  This walk also covers the invalidation for the java synthetic
        # target above.
        for dependent_address in build_graph.dependents_of(target.address):
          build_graph.inject_dependency(dependent=dependent_address,
                                        dependency=synthetic_target.address)
        # NOTE(pl): See the above comment.  The same note applies.
        for concrete_dependency_address in build_graph.dependencies_of(target.address):
          build_graph.inject_dependency(
            dependent=synthetic_target.address,
            dependency=concrete_dependency_address,
          )
        build_graph.walk_transitive_dependee_graph(
          [target.address],
          work=lambda t: t.mark_transitive_invalidation_hash_dirty(),
        )
        if target in self.context.target_roots:
          self.context.target_roots.append(synthetic_target)
        if target in invalid_vts_by_target:
          vts_artifactfiles_pairs[invalid_vts_by_target[target]].extend(generated_sources)
      if self.artifact_cache_writes_enabled():
        self.update_artifact_cache(vts_artifactfiles_pairs.items())

  def _calculate_sources(self, thrift_targets, target_filter):
    """Collect buildroot-relative sources of `thrift_targets` (transitively)
    for targets matching `target_filter`."""
    sources = set()
    def collect_sources(target):
      if target_filter(target):
        sources.update(target.sources_relative_to_buildroot())
    for target in thrift_targets:
      target.walk(collect_sources)
    return sources
# Slightly hacky way to figure out which files get generated from a particular thrift source.
# TODO(benjy): This could be emitted by the codegen tool.
# That would also allow us to easily support 1:many codegen.
# Matches thrift namespace declarations, e.g. "namespace java com.foo.bar"
# -> group(1) = 'java', group(2) = 'com.foo.bar'.
NAMESPACE_PARSER = re.compile(r'^\s*namespace\s+([^\s]+)\s+([^\s]+)\s*$')
def calculate_genfiles(source):
  """Return generated-file basenames (no extension) for one thrift source.

  Raises TaskError when the file declares no java namespace.
  """
  abs_source = os.path.join(get_buildroot(), source)
  namespaces = {}
  with open(abs_source, 'r') as thrift:
    for line in thrift:
      match = NAMESPACE_PARSER.match(line)
      if match:
        # Later declarations for the same language win, as before.
        namespaces[match.group(1)] = match.group(2)
  java_namespace = namespaces.get('java')
  if not java_namespace:
    raise TaskError('No namespace provided in source: {}'.format(abs_source))
  return calculate_scala_record_genfiles(java_namespace, abs_source)
def calculate_scala_record_genfiles(namespace, source):
  """Returns the generated file basenames, add .java or .scala to get the full path."""
  package_dir = namespace.replace('.', '/')
  stem = os.path.splitext(os.path.basename(source))[0]
  return [os.path.join(package_dir, stem)]
|
# BUG FIX: the module was imported as `import cv as cv2`; the OpenCV Python
# binding is the module `cv2` (`cv` does not exist in OpenCV 3+), so every
# call below failed at import time.
import cv2

# try_use_gpu=True: let OpenCV use GPU acceleration for stitching if present.
stitcher = cv2.createStitcher(True)
foo = cv2.imread("4.jpg")
doo = cv2.imread("5.jpg")
eoo = cv2.imread("6.jpg")
roo = cv2.imread("7.jpg")
# stitch() returns (status, pano); index 1 is the stitched panorama.
# NOTE(review): status (result[0]) is not checked — imshow will fail on a
# None panorama if stitching did not succeed.
result = stitcher.stitch((foo,doo,eoo,roo))
cv2.imshow("camera",result[1])
cv2.waitKey(0)
from django.shortcuts import render
from enemigos.models import Enemigo
from django.views import generic
from enemigos.forms import EnemigoForm
from django.urls import reverse_lazy
# Create your views here.
class ListarEnemigos(generic.ListView):
    """List all Enemigo records; the template sees the queryset as ``obj``."""
    model=Enemigo
    template_name="enemigos/listar_enemigos.html"
    context_object_name="obj"
class InsertarEnemigo(generic.CreateView):
    """Create a new Enemigo via EnemigoForm; redirects to the list on success."""
    model=Enemigo
    template_name="enemigos/insertar_enemigo.html"
    context_object_name="obj"
    form_class=EnemigoForm
    success_url=reverse_lazy("enemigos:enemigos_list")
class EditarEnemigo(generic.UpdateView):
    """Edit an existing Enemigo via EnemigoForm; redirects to the list on success."""
    model=Enemigo
    template_name="enemigos/editar_enemigo.html"
    context_object_name="obj"
    form_class=EnemigoForm
    success_url=reverse_lazy("enemigos:enemigos_list")
class BorrarEnemigo(generic.DeleteView):
    """Confirm-and-delete view for an Enemigo; redirects to the list on success."""
    model=Enemigo
    template_name="enemigos/borrar_enemigo.html"
    context_object_name="obj"
    # NOTE(review): classic DeleteView does not use form_class — confirm this
    # attribute is actually needed here.
    form_class=EnemigoForm
    success_url=reverse_lazy("enemigos:enemigos_list")
|
import torch
import numpy
if __name__ == '__main__':
    # Demo: round-trip conversion between torch.Tensor and numpy.ndarray.
    tensor = torch.tensor([
        [1., 2., 3., 4.],
        [5., 6., 7., 8.],
        [9., 10., 11., 12.]
    ])
    array = numpy.array([
        [1., 1., 1., 1.],
        [2., 2., 2., 2.],
        [3., 3., 3., 3.]
    ])
    # torch.tensor --> numpy.array
    print('----------- torch.tensor --> numpy.array ------------')
    # NOTE: per the PyTorch docs, Tensor.numpy() returns an array sharing the
    # tensor's memory — mutating one mutates the other.
    tensor2array = tensor.numpy()
    print(type(tensor), type(tensor2array))
    print(tensor2array)
    print('-----------------------------------------------------')
    # numpy.array --> torch.tensor
    print('----------- numpy.array --> torch.tensor ------------')
    # torch.from_numpy likewise shares memory with the source array.
    array2tensor = torch.from_numpy(array)
    print(type(array), type(array2tensor))
    print(array2tensor)
    print('-----------------------------------------------------')
    pass
|
import os
import sys
# CLI: split a multi-entry alignment listing and its gene-trees file into one
# .ali / .newick file per entry under output_dir.
if (len(sys.argv) != 5):
    print("Syntax: python alignment_splitter.py input_alignment input_gene_trees output_dir prefix_path")
    sys.exit(1)
input_alignment = sys.argv[1]
input_gene_trees = sys.argv[2]
output_dir = sys.argv[3]
prefix_path = sys.argv[4]
# Refuse to clobber an existing output directory.
if (os.path.exists(output_dir)):
    print("output dir already exists")
    sys.exit(1)
os.makedirs(output_dir)
ali_dir = os.path.join(output_dir, "split_alignments")
trees_dir = os.path.join(output_dir, "split_gene_trees")
os.makedirs(ali_dir)
os.makedirs(trees_dir)
# One gene tree per line, assumed to be in the same order as the alignment
# entries below.  NOTE(review): a shorter trees file raises IndexError and a
# reordered one silently mispairs — confirm the input invariant.
trees = open(input_gene_trees).readlines()
with open(input_alignment) as f:
    # First line of the alignment file is the model string, replicated into
    # every split alignment file.
    model = f.readline()
    i = 0
    for line in f:
        # Strip the trailing newline (assumes every line ends with one).
        line = line[:-1]
        # Output basename = alignment file name up to its first dot.
        base = os.path.basename(line).split(".")[0]
        with open(os.path.join(ali_dir, base + ".ali"), "w") as writer:
            writer.write(model)
            writer.write(os.path.join(prefix_path, line))
        with open(os.path.join(trees_dir, base + ".newick"), "w") as writer:
            writer.write(trees[i])
        i += 1
|
import math
import numpy as np
import matplotlib.pyplot as plt
from skimage.io import imread, imsave
#from skimage import data_dir
from skimage.transform import radon, iradon, iradon_sart
from scipy.ndimage import zoom
from sklearn import preprocessing
from ObjectiveFunction import *
import ImageMetrics as IM;
import os.path # For file extension
NoneType = type(None);
def normalise(image):
    """Standardise *image*: subtract its mean, then divide by its std deviation."""
    centred = image - image.mean()
    return centred / image.std()
class TomographyGlobalFitness(ObjectiveFunction):
def __init__(self, anInputImage, anObjective, aSearchSpaceDimension = 2, aNumberOfAngles=180, aPeakValue = 100, k = -1):
self.loadImageData(anInputImage, aNumberOfAngles, aPeakValue);
# Store the image simulated by the flies
self.population_image_data = np.zeros(self.noisy.shape, self.noisy.dtype)
self.population_sinogram_data = np.zeros(self.projections.shape, self.projections.dtype)
self.fig = None;
ax = None;
self.global_fitness_set = [];
self.global_error_term_set = [];
self.global_regularisation_term_set = [];
self.zncc_set = [];
self.k = k;
self.current_population = None;
self.number_of_calls = 0;
self.save_best_solution = False;
if anObjective == "SAE":
type_of_optimisation = ObjectiveFunction.MINIMISATION
self.image_metrics_function = IM.getSAE;
elif anObjective == "SSE":
type_of_optimisation = ObjectiveFunction.MINIMISATION
self.image_metrics_function = IM.getSSE;
elif anObjective == "MAE":
type_of_optimisation = ObjectiveFunction.MINIMISATION
self.image_metrics_function = IM.getMAE;
elif anObjective == "MSE":
type_of_optimisation = ObjectiveFunction.MINIMISATION
self.image_metrics_function = IM.getMSE;
elif anObjective == "RMSE":
type_of_optimisation = ObjectiveFunction.MINIMISATION
self.image_metrics_function = IM.getRMSE;
elif anObjective == "NRMSE_euclidean":
type_of_optimisation = ObjectiveFunction.MINIMISATION
self.image_metrics_function = IM.getNRMSE_euclidean;
elif anObjective == "NRMSE_mean":
type_of_optimisation = ObjectiveFunction.MINIMISATION
self.image_metrics_function = IM.getNRMSE_mean;
elif anObjective == "NRMSE_minMax":
type_of_optimisation = ObjectiveFunction.MINIMISATION
self.image_metrics_function = IM.getNRMSE_minMax;
elif anObjective == "mean_relative_error":
type_of_optimisation = ObjectiveFunction.MINIMISATION
self.image_metrics_function = IM.getMeanRelativeError;
elif anObjective == "max_relative_error":
type_of_optimisation = ObjectiveFunction.MINIMISATION
self.image_metrics_function = IM.getMaxRelativeError;
elif anObjective == "cosine_similarity":
type_of_optimisation = ObjectiveFunction.MAXIMISATION
self.image_metrics_function = IM.getCosineSimilarity;
elif anObjective == "SSIM":
type_of_optimisation = ObjectiveFunction.MAXIMISATION
self.image_metrics_function = IM.getSSIM;
elif anObjective == "PSNR":
type_of_optimisation = ObjectiveFunction.MAXIMISATION
self.image_metrics_function = IM.getPSNR;
elif anObjective == "NCC" or anObjective == "ZNCC":
type_of_optimisation = ObjectiveFunction.MAXIMISATION
self.image_metrics_function = IM.getNCC;
else:
raise ValueError('Invalid objective function "%s".' % (anObjective));
self.boundaries = [];
for _ in range(aSearchSpaceDimension):
self.boundaries.append([0, max(self.noisy.shape) - 1]);
self.boundaries.append([0, max(self.noisy.shape) - 1]);
super().__init__(2 * aSearchSpaceDimension,
self.boundaries,
self.objectiveFunction,
type_of_optimisation);
self.name = "anObjective";
def objectiveFunction(self, aParameterSet, aSavePopulationFlag = True):
    """Fitness of a candidate reconstruction.

    aParameterSet is a flat sequence [x0, y0, x1, y1, ...] of 2-D point
    coordinates; each point deposits an equal share of the total image
    weight into a blank image, whose simulated sinogram is compared to
    the reference projections with self.image_metrics_function.
    Returns the (possibly TV-regularised) fitness value.
    """
    self.number_of_calls += 1;
    # Rasterise the point set into an image of the same size/type as the noisy reference.
    image_data = np.zeros(self.noisy.shape, self.noisy.dtype)
    # Each point carries an equal fraction of the total image intensity.
    individual_weight = self.total_weight / (len(aParameterSet) / 2);
    for i,j in zip(aParameterSet[0::2], aParameterSet[1::2]):
        x = math.floor(i);
        y = math.floor(j);
        # Ignore points falling outside the image bounds.
        if x >= 0 and y >= 0 and x < image_data.shape[1] and y < image_data.shape[0]:
            image_data[y,x] += individual_weight;
    # Forward-project the candidate image and score it against the reference sinogram.
    sinogram_data = radon(image_data, theta=self.theta, circle=False)
    error_term = self.image_metrics_function(self.projections, sinogram_data);
    fitness = error_term;
    # Total-variation regularisation (applied only when self.k > 0).
    tv_norm = 0.5 * IM.getTV(image_data);
    if self.k > 0.0:
        regularisation_term = self.k * tv_norm;
        fitness += regularisation_term;
    if aSavePopulationFlag:
        save_data = True;
        # When save_best_solution is set, only keep the candidate if it improves
        # on the last recorded global fitness.
        if len(self.global_fitness_set) > 0 and self.save_best_solution:
            if self.flag == ObjectiveFunction.MINIMISATION and self.global_fitness_set[-1] < fitness:
                save_data = False;
            elif self.flag == ObjectiveFunction.MAXIMISATION and self.global_fitness_set[-1] > fitness:
                save_data = False;
        if save_data:
            self.current_population = copy.deepcopy(aParameterSet);
            self.population_image_data = image_data;
            self.population_sinogram_data = sinogram_data;
        # Bookkeeping traces used by plot(); appended for every saved evaluation.
        self.global_fitness_set.append(fitness);
        self.global_error_term_set.append(error_term);
        self.global_regularisation_term_set.append(tv_norm);
        self.zncc_set.append(IM.getNCC(self.image, self.population_image_data));
    return fitness;
def loadImageData(self, anInputImage, aNumberOfAngles, aPeakValue):
    """Load the ground-truth phantom, simulate noisy projections, and
    compute the reference FBP and SART reconstructions.

    Args:
        anInputImage: path to the phantom; a ".txt" file is read as an
            ASCII matrix, anything else as a greyscale image.
        aNumberOfAngles: number of evenly-spaced projection angles in [0, 180).
        aPeakValue: Poisson noise peak; <= 0 disables noise.
    """
    # Load the phantom (considered as unknown).
    if os.path.splitext(anInputImage)[1] == ".txt":
        image = np.loadtxt(anInputImage)
    else:
        image = imread(anInputImage, as_gray=True)
    # Zoom out to halve the resolution.
    image = zoom(image, 0.5)
    # Convert to float.
    # BUG FIX: np.float is a deprecated alias removed in NumPy 1.24;
    # the builtin float is the documented replacement.
    self.image = image.astype(float)
    # Add some noise using the Poisson distribution (peak value controls SNR).
    if aPeakValue > 0.0:
        self.noisy = np.random.poisson(image / 255.0 * aPeakValue) / aPeakValue * 255  # noisy image
    # Do not add noise.
    else:
        self.noisy = self.image
    # Compute the Radon transform of the noisy image (reference sinogram).
    self.theta = np.linspace(0., 180., aNumberOfAngles, endpoint=False)
    self.projections = radon(self.noisy, theta=self.theta, circle=False)
    self.total_weight = np.sum(self.noisy)
    # Perform the FBP reconstruction.
    # NOTE(review): newer scikit-image renamed "filter" to "filter_name";
    # kept as-is for compatibility with the version this project pins.
    self.fbp_reconstruction = iradon(self.projections,
                                     theta=self.theta,
                                     filter="hann",
                                     interpolation="cubic",
                                     circle=False)
    self.FBP_zncc = IM.getNCC(self.image, self.fbp_reconstruction)
    # Perform the SART reconstruction, cropped back to the phantom size.
    self.sart_reconstruction = IM.cropCenter(iradon_sart(self.projections,
        theta=self.theta, relaxation=0.05), self.image.shape[1], self.image.shape[0])
    self.SART_zncc = IM.getNCC(self.image, self.sart_reconstruction)
def saveInputImages(self, aFilePrefix = ""):
    """Write the ground truth, the noisy image and the sinogram to disk.

    Each array is saved twice: as a PNG preview and as an ASCII matrix,
    using aFilePrefix followed by "-groundtruth", "-noisy" or "-sinogram".
    """
    for suffix, pixels in (("-groundtruth", self.image),
                           ("-noisy", self.noisy),
                           ("-sinogram", self.projections)):
        # Save a PNG file
        imsave(aFilePrefix + suffix + ".png", pixels)
        # Save an ASCII file
        np.savetxt(aFilePrefix + suffix + ".txt", pixels)
def plot(self, fig, ax, aGenerationID, aTotalNumberOfGenerations):
    """Refresh the multi-panel Matplotlib dashboard for the current generation.

    Static panels (ground truth, noisy image, FBP/SART reconstructions)
    are drawn once; the evolutionary-reconstruction panels and the
    fitness/ZNCC traces are redrawn on every call.

    NOTE(review): indentation of the one-time-setup section was
    reconstructed from context (guarded by the self.fig sentinel) — confirm
    against the original file.
    """
    window_title = "Generation " + str(aGenerationID) + "/" + str(aTotalNumberOfGenerations) + " - Global fitness: " + str(self.global_fitness_set[-1]);
    # NOTE(review): set_window_title moved to fig.canvas.manager in newer Matplotlib.
    fig.canvas.set_window_title(window_title)
    # Pick up to four distinct angles (0%, 25%, 50%, 75% of the range) for the
    # projection profile plots.
    theta = [];
    theta.append(self.theta[0])
    if theta[-1] != self.theta[math.floor(len(self.theta) * 0.25)]:
        theta.append(self.theta[math.floor(len(self.theta) * 0.25)])
    if theta[-1] != self.theta[math.floor(len(self.theta) * 0.5)]:
        theta.append(self.theta[math.floor(len(self.theta) * 0.5)])
    if theta[-1] != self.theta[math.floor(len(self.theta) * 0.75)]:
        theta.append(self.theta[math.floor(len(self.theta) * 0.75)])
    #plt.axis([0, 10, 0, 1])
    # Create a figure using Matplotlib
    # It constains 5 sub-figures
    # self.fig doubles as a "figure already initialised" sentinel.
    if isinstance(self.fig, NoneType):
        self.fig = 1;
        # Plot the original image
        ax[0, 0].set_title("Original");
        ax[0, 0].imshow(self.image, cmap=plt.cm.Greys_r)
        # Plot the noisy image
        ax[0, 1].set_title("Noisy");
        ax[0, 1].imshow(self.noisy, cmap=plt.cm.Greys_r)
        # Plot some projections
        projections = radon(self.noisy, theta=theta, circle=False)
        title = "Projections at\n";
        for i in range(len(theta) - 1):
            title += str(theta[i]) + ", ";
        title += 'and ' + \
            str(theta[len(theta) - 1]) + \
            " degrees";
        ax[1, 0].plot(projections);
        ax[1, 0].set_title(title)
        ax[1, 0].set_xlabel("Projection axis");
        ax[1, 0].set_ylabel("Intensity");
        # Plot the sinogram
        ax[1, 1].set_title("Radon transform\n(Sinogram)");
        ax[1, 1].set_xlabel("Projection axis");
        ax[1, 1].set_ylabel("Intensity");
        ax[1, 1].imshow(self.projections)
        # Plot the FBP reconstruction
        ax[2, 0].set_title("FBP reconstruction")
        ax[2, 0].imshow(self.fbp_reconstruction, cmap=plt.cm.Greys_r)
        # Plot the FBP reconstruction error map
        ax[2, 1].set_title("FBP reconstruction error")
        ax[2, 1].imshow(self.fbp_reconstruction - self.image, cmap=plt.cm.Greys_r)
        # Plot the SART reconstruction
        ax[3, 0].set_title("SART reconstruction")
        ax[3, 0].imshow(self.sart_reconstruction, cmap=plt.cm.Greys_r)
        # Plot the SART reconstruction error map
        ax[3, 1].set_title("SART reconstruction error")
        ax[3, 1].imshow(self.sart_reconstruction - self.image, cmap=plt.cm.Greys_r)
        # Plot some projections
        ax[4, 0].set_title(title)
        ax[4, 0].set_xlabel("Projection axis");
        ax[4, 0].set_ylabel("Intensity");
        # Plot the sinogram
        ax[4, 1].set_title("Radon transform\n(Sinogram)");
        ax[4, 1].set_xlabel("Projection axis");
        ax[4, 1].set_ylabel("Intensity");
        # Plot the Evolutionary reconstruction
        ax[5, 0].set_title("Evolutionary reconstruction")
        # Plot the Evolutionary reconstruction error map
        ax[5, 1].set_title("Evolutionary reconstruction error")
        # Plot the global fitness
        ax[6, 0].set_title("Global fitness")
        ax[6, 1].set_title("Reconstruction ZNCC")
        ax[6, 1].legend(loc='lower right')
        plt.subplots_adjust(hspace=0.4, wspace=0.5)
    # ---- redrawn every generation from here on ----
    projections = radon(self.population_image_data, theta=theta, circle=False)
    ax[4, 0].clear();
    ax[4, 0].plot(projections);
    # Plot the sinogram
    ax[4, 1].imshow(self.population_sinogram_data)
    # Plot the Evolutionary reconstruction
    ax[5, 0].imshow(self.population_image_data, cmap=plt.cm.Greys_r)
    # Plot the Evolutionary reconstruction error map
    ax[5, 1].imshow(self.population_image_data - self.image, cmap=plt.cm.Greys_r)
    ax[6, 0].clear();
    ax[6, 0].plot(self.global_fitness_set);
    ax[6, 1].clear();
    # Constant FBP/SART ZNCC baselines against the evolving FA trace.
    ax[6, 1].plot(np.full(len(self.zncc_set), self.FBP_zncc), label="FBP");
    ax[6, 1].plot(np.full(len(self.zncc_set), self.SART_zncc), label="SART");
    ax[6, 1].plot(self.zncc_set, label="FA");
|
from django.conf import settings
def site_info(request):
    """Django template context processor exposing settings.SITE_INFO.

    NOTE(review): context processors must return a dict — presumably
    SITE_INFO is one; verify in settings.
    """
    return settings.SITE_INFO
import sqlite3
import os
import sys
import traceback
import bcolor
import page
def main(argv):
    '''
    The main loop of the program: opens the database named on the command
    line, then repeatedly shows the first screen and dispatches sign-in
    ('si'), sign-up ('su') or quit ('q').

    Error codes:
        0: Success
        1: invalid command line argument
    '''
    db = getDBFrom(argv)
    conn, curr = initConnAndCurrFrom(db)
    try:
        os.system('clear')  # clear the terminal before the first screen
        run = True
        while run:
            page.printFirstScreen()
            opt = page.getValidInput('Enter a command: ', ['si', 'su', 'q'])
            if opt == 'si':
                uid = page.signIn(curr)
                os.system('clear')
                # Only enter the main menu on a successful sign-in.
                if uid != None:
                    page.mainMenu(conn, curr, uid)
            elif opt == 'su':
                page.signUp(conn, curr)
            else:
                run = False
                sys.exit(0)
    except SystemExit as e:
        # sys.exit(0) from the quit path lands here; only non-zero codes
        # are treated as real errors worth a traceback.
        if int(str(e)) > 0:
            print(traceback.format_exc())
    except Exception as e:
        print(traceback.format_exc())
    finally:
        # Always commit and close, even after an error.
        print("\nClosing connection...")
        conn.commit()
        conn.close()
def initConnAndCurrFrom(db):
    '''
    Open the SQLite database file located next to this script and
    return a (connection, cursor) pair.  Rows come back as sqlite3.Row
    so columns can be accessed by name.
    '''
    script_dir = os.path.abspath(os.path.dirname(__file__)) + os.sep
    connection = sqlite3.connect(script_dir + db)
    connection.row_factory = sqlite3.Row
    return connection, connection.cursor()
def getDBFrom(argv):
    '''
    Return the db file name from sys.argv.
    Assumes the db file exists in the same file.
    Exits with status 1 when the argument count is wrong.
    '''
    if len(argv) == 2:
        return argv[1]
    print(bcolor.errmsg("Usage: python3 main.py [file]"))
    sys.exit(1)
# Run the CLI only when executed as a script, not when imported.
if __name__ == "__main__":
    main(sys.argv)
|
import numpy as np
from numba import njit, u8
from numba.experimental import jitclass
import game.bitboard as bitop
@njit
def bits_to_array(bits):
    """Expand three 64-bit bitboards into a dense (3, 8, 8) uint8 array."""
    planes = np.empty((3, 8, 8), dtype=u8)
    for plane in range(3):
        planes[plane] = bitop.unpack(bits[plane])
    return planes
@njit
def array_to_bits(arr):
    """Pack a dense (3, 8, 8) array back into three 64-bit bitboards."""
    packed = np.empty((3,), dtype=u8)
    for plane in range(3):
        packed[plane] = bitop.pack(arr[plane])
    return packed
@jitclass([('bits', u8[:])])
class Othello:
    """Othello position stored as three 64-bit bitboards:
    bits[0] = current player's discs, bits[1] = opponent's discs,
    bits[2] = obstacles (unplayable squares).
    Moves return a new position with the players swapped.
    """
    def __init__(self, my, opp, obs):
        self.bits = np.array([my, opp, obs], dtype=u8)
    @property
    def array(self):
        # Dense (3, 8, 8) view of the three bitboards.
        return bits_to_array(self.bits)
    @staticmethod
    def from_array(arr):
        # Alternate constructor from the dense representation.
        my, opp, obs = array_to_bits(arr)
        return Othello(my, opp, obs)
    def terminated(self):
        # The game ends when neither side has a legal move.
        my, opp, obs = self.bits
        i_cant_move = (bitop.generate_moves(my, opp, obs) == 0)
        opp_cant_move = (bitop.generate_moves(opp, my, obs) == 0)
        return i_cant_move and opp_cant_move
    def my_moves(self):
        # 8x8 array marking the current player's legal moves.
        my, opp, obs = self.bits
        my_moves = bitop.generate_moves(my, opp, obs)
        return bitop.unpack(my_moves)
    def make_move(self, row, col):
        # Play at (row, col); the returned position is from the opponent's view.
        my, opp, obs = self.bits
        index = row * 8 + col
        my, opp = bitop.resolve_move(my, opp, index)
        return Othello(opp, my, obs)
    def make_move_pass(self):
        # Pass: swap sides without changing the board.
        my, opp, obs = self.bits
        return Othello(opp, my, obs)
    def to_string(self):
        # ASCII board: '.' = mine, 'x' = opponent, '#' = obstacle.
        arr = self.array
        res = ''
        for x in range(8):
            for y in range(8):
                if arr[0][x][y]:
                    res += '.'
                elif arr[1][x][y]:
                    res += 'x'
                elif arr[2][x][y]:
                    res += '#'
                else:
                    res += ' '
            res += '\n'
        return res
    # BUG FIX: the original read "__repr__ = _str__ = to_string" — the
    # misspelled "_str__" meant str(obj) never used to_string.
    __repr__ = __str__ = to_string
|
#!/usr/bin/python
from lxml import etree
import sys, os
confs = []
journals = []
def prepare (cfile, jfile):
    """Populate the module-level `confs` and `journals` lists with the
    first tab-separated column of each line of the two input files."""
    with open (cfile, 'r') as conf_file:
        for line in conf_file.readlines():
            confs.append(line.split ('\t')[0])
    with open (jfile, 'r') as journal_file:
        for line in journal_file.readlines():
            journals.append(line.split ('\t')[0])
def parse (inputfile, outputfile, years = ['2006', '2007', '2008', '2009', '2010', '2011', '2012']):
    """Stream-parse a DBLP XML dump and copy to `outputfile` the papers
    published in `years` by venues listed in the module-level `confs`
    and `journals` lists.

    NOTE(review): `total` is initialised but never incremented, so the
    final `print total` always shows 0 — confirm intent.  Python 2 code.
    """
    f = open (inputfile, 'r')
    o = open (outputfile, 'w')
    # Incremental parsing keeps memory bounded on the multi-GB DBLP dump.
    context = etree.iterparse (f, dtd_validation=True, events = ("end", ))
    count = 0
    total = 0
    # write a header for output xml
    o.write ("""<?xml version="1.0" encoding="ISO-8859-1"?>
<!DOCTYPE dblp SYSTEM "dblp.dtd">""")
    for event, elem in context:
        tag = elem.tag
        # if this node represent academic paper
        if tag in ['article', 'inproceedings', 'proceedings']:
            key = elem.get ('key')
            year = elem.xpath ('year/text()')[0]
            # if this paper appears in our desired top conference or journals
            # (venue key is the second path component, e.g. "conf/sigmod/...")
            if (key.startswith ('journals') and (key.split ('/')[1] in journals)) or \
               (key.startswith ('conf') and (key.split ('/')[1] in confs)):
                # if this paper is relatively new
                if year in years:
                    o.write (etree.tostring (elem).replace (' xmlns:="dblp"', ''))
                    count = count + 1
                    # prints the running match count after every hit
                    print count
            # Free the element and its already-processed siblings.
            elem.clear ()
            while elem.getprevious () is not None:
                del elem.getparent ()[0]
    o.write ("</dblp>")
    del context
    o.close ()
    f.close ()
    print total
print total
# Usage: script.py <conf list> <journal list> <dblp.xml> <output.xml>
if __name__ == '__main__':
    prepare (sys.argv[1], sys.argv[2])
    parse (sys.argv[3], sys.argv[4])
|
#!/usr/bin/env python
# coding: utf-8
# # import packages and get authenticated
# In[1]:
# from google.colab import driveA
# drive.mount('drive')
# In[2]:
import numpy as np
import pandas as pd
import scipy
from scipy.fftpack import fft, ifft
pd.set_option('display.max_columns', 500)
# Plotting
# checklist 1: comment inline, uncomment Agg
# get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.rc( 'savefig', facecolor = 'white' )
# from tqdm import tqdm_notebook as tqdm
from tqdm import tqdm
import argparse
import os
import sys
sys.path.append('/content/drive/My Drive/ไธญ็ ้ข/repo/')
sys.path.append('~/project_FDDAT/repo/')
sys.path.append('../') # add this line so Data and data are visible in this file
from falldetect.utilities import *
import time
import datetime
from datetime import datetime
from sklearn.decomposition import PCA
from os.path import expanduser
home = expanduser("~")
# home_dir = home+'/project_FDDAT/'
# split_mode = 'LOO'
# split_mode = '5fold'
# # Get user inputs
# In ipython notebook, these are hardcoded. In production python code, use parsers to provide these inputs
# In[3]:
# Command-line interface: dataset/sensor selection, I/O paths, CV split
# configuration, seeding and standardization options.
parser = argparse.ArgumentParser(description='FD_DAT')
parser.add_argument('--dataset_name', metavar='dataset_name', help='dataset_name',
                    default='UMAFall')
parser.add_argument('--sensor_loc', metavar='sensor_loc', help='sensor_loc',
                    default='ankle')
parser.add_argument('--input_dir', metavar='input_dir', help='path to input_dir',
                    default='../')
parser.add_argument('--output_dir', metavar='output_dir', help='path to output_dir',
                    default='../')
parser.add_argument('--split_mode', metavar='split_mode', help='split_mode',
                    default='5fold')
parser.add_argument('--i_seed', metavar='i_seed', help='seed number',
                    default='0')
parser.add_argument('--rep_n', metavar='rep_n', help='number of repetition',
                    default='1')
parser.add_argument('--standardization', metavar='standardization', help='method of standardization',
                    default='None')
# space-separated subject indices to exclude, or 'none' to auto-detect
parser.add_argument('--excluded_idx', metavar='excluded_idx',
                    default='none')
# split_mode = 'LOO'
# split_mode = '5fold'
# checklist 2: comment first line, uncomment second line seizures_FN
# args = parser.parse_args(['--input_dir', '../../Data/{}/ImpactWindow_Resample_NormalforAllAxes/18hz/{}/',
# '--output_dir', '../../data_mic/stage1_preprocessed_NormalforAllAxes_18hz_{}/{}/{}/',
# '--dataset_name', 'UMAFall',
# '--sensor_loc', 'wrist',
# '--split_mode', '5fold',
# '--i_seed', '1',
# '--excluded_idx', '1 3 9 10 12 19',])
# # UPFall
# args = parser.parse_args(['--input_dir', '../../Data/{}/ImpactWindow_Resample_NormalforAllAxes/18hz/{}/',
# '--output_dir', '../../data_mic/stage1_preprocessed_NormalforAllAxes_18hz_{}/{}/{}/',
# '--dataset_name', 'UPFall',
# '--sensor_loc', 'ankle',
# '--split_mode', '5fold',
# '--i_seed', '1'])
# # # SFDLA
# args = parser.parse_args(['--input_dir', '../../Data/{}/ImpactWindow_Resample_NormalforAllAxes/18hz/{}/',
# '--output_dir', '../../data_mic/stage1_preprocessed_NormalforAllAxes_18hz_{}/{}/{}/',
# args = parser.parse_args(['--input_dir', '../../Data/{}/ImpactWindow_Resample_WithoutNormal/18hz/{}/{}/',
# '--output_dir', '../../data_mic/stage1/preprocessed_WithoutNormal_18hz_{}_aug/{}/{}/',
# '--dataset_name', 'UPFall',
# '--sensor_loc', 'belt',
# '--split_mode', '5fold',
# # '--i_seed', '1 2 3 4 5 6 7 8 9 10',
# '--i_seed', '1',
# '--rep_n', '10',
# '--standardization', 'None'])
args = parser.parse_args()
# In[4]:
# Unpack CLI arguments into module-level configuration.
input_dir = args.input_dir
output_dir = args.output_dir
dataset_name = args.dataset_name
sensor_loc = args.sensor_loc
home_dir = home+'/project_FDDAT/'
split_mode = args.split_mode
i_seed = int(args.i_seed)
rep_n = int(args.rep_n)
# i_seed_list = [int(i_seed) for i_seed in args.i_seed.split(' ')]
standardization = args.standardization
if args.excluded_idx == 'none':
    excluded_idx = []
else:
    excluded_idx = list(map(int, args.excluded_idx.split(' ')))
# Hz of the resampled accelerometer windows (matches the 18hz input path).
sampling_freq = 18.4
print(args)
# Create the per-split/per-dataset/per-sensor output directory.
outputdir = output_dir.format(split_mode, dataset_name, sensor_loc)
if not os.path.exists(outputdir):
    os.makedirs(outputdir)
print('will export data to', outputdir)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# # load data_management (all) first
# In[ ]:
# In[5]:
# act_names = df['Activity_ID'].unique()
# act_embeddings = { act_names[i] : i for i in range(0, act_names.shape[0] ) }
# print(act_embeddings)
# In[6]:
def pull_data(dataset_name, impact_inputdir, DataNameList_inputdir):
    """Load one augmentation's impact windows listed in a DataNameList CSV.

    Returns (data_all (window, 3, n), actlabels_all, sub_all,
    DataNameList_idx_all, aug_idx_all).

    NOTE(review): this function reads the module-level globals `i_aug`
    and `aug_dict` (defined at the call site below) rather than taking
    them as parameters — confirm this coupling is intended.
    """
    # Column naming differs between datasets.
    if dataset_name=='UMAFall' or dataset_name=='UPFall' or dataset_name=='FARSEEING':
        column_x_DataName = 'x_DataName'
    elif dataset_name=='SFDLA':
        column_x_DataName = 'x_x_DataName'
    # DataNameList_inputdir = input_dir+'IP_{}_DataNameList_{}.csv'
    # DataNameList_inputdir = DataNameList_inputdir.format(dataset_name, sensor_loc, dataset_name, sensor_loc)
    DataNameList_inputdir = DataNameList_inputdir.format(dataset_name, aug_dict[i_aug], sensor_loc, dataset_name, sensor_loc)
    df = pd.read_csv(DataNameList_inputdir)
    # Peek at the first window to learn the (fixed) window length.
    temp = pd.read_csv(impact_inputdir+df[column_x_DataName][0], header=None)
    window_length = temp.shape[0]
    samples_n = df.shape[0]
    data_all = np.zeros((window_length,3,samples_n))
    actlabels_all = np.zeros((samples_n,))
    sub_all = np.zeros((samples_n,))
    DataNameList_idx_all = np.zeros((samples_n,))
    aug_idx_all = np.zeros((samples_n,))
    i = 0
    # for filename in tqdm(df[column_x_DataName]):
    for filename in df[column_x_DataName]:
        # sub_id = int(filename.split('_')[0])
        row = df[df[column_x_DataName]==filename]
        sub_id = row.Subject.item()
        activity_id = row.FALL_1__ADL_0_.item()   # 1 = fall, 0 = ADL
        idx = row.index[0]
        position = filename.split('_')[3][:-4]
        df_imp = pd.read_csv(impact_inputdir+filename, header=None)
        data_all[:,:,i] = df_imp.to_numpy()
        actlabels_all[i] = activity_id
        sub_all[i] = sub_id
        DataNameList_idx_all[i] = idx
        aug_idx_all[i] = i_aug
        i += 1
    return data_all, actlabels_all, sub_all, DataNameList_idx_all, aug_idx_all
# In[7]:
# Window augmentation variants: each key maps to a pre/post impact split
# (seconds before_after) used in the input directory layout.
aug_dict = {
    0: '1_2.5',
    1: '1.5_2',
    2: '2_1.5',
    3: '2.5_1'
}
# Load every augmentation variant and concatenate along the sample axis.
data_all_list = []
actlabels_all_list = []
sub_all_list = []
DataNameList_idx_all_list = []
aug_idx_all_list = []
for i_aug in aug_dict.keys():
    DataNameList_inputdir = input_dir+'IP_{}_DataNameList_{}.csv'
    # DataNameList_inputdir = DataNameList_inputdir.format(dataset_name, sensor_loc, dataset_name, sensor_loc)
    DataNameList_inputdir = DataNameList_inputdir.format(dataset_name, aug_dict[i_aug], sensor_loc, dataset_name, sensor_loc)
    # df = pd.read_csv(DataNameList_inputdir)
    impact_inputdir = input_dir.format(dataset_name, aug_dict[i_aug], sensor_loc)
    data_all,actlabels_all,sub_all,DataNameList_idx_all,aug_idx_all = pull_data(dataset_name, impact_inputdir, DataNameList_inputdir)
    data_all_list.append(data_all)
    actlabels_all_list.append(actlabels_all)
    sub_all_list.append(sub_all)
    DataNameList_idx_all_list.append(DataNameList_idx_all)
    aug_idx_all_list.append(aug_idx_all)
# Samples are stacked on axis 2 (window, axis, sample).
data_all = np.concatenate(data_all_list, axis=2)
actlabels_all = np.concatenate(actlabels_all_list)
sub_all = np.concatenate(sub_all_list)
DataNameList_idx_all = np.concatenate(DataNameList_idx_all_list)
aug_idx_all = np.concatenate(aug_idx_all_list)
# In[ ]:
# In[8]:
# # aug_names = ['1_2.5','1.5_2','2_1.5','2.5_1']
# aug_dict = {
# 0: '1_2.5',
# 1: '1.5_2',
# 2: '2_1.5',
# 3: '2.5_1'
# }
# # resampled, 18.4hz
# DataNameList_inputdir = input_dir+'IP_{}_DataNameList_{}.csv'
# # DataNameList_inputdir = DataNameList_inputdir.format(dataset_name, sensor_loc, dataset_name, sensor_loc)
# DataNameList_inputdir = DataNameList_inputdir.format(dataset_name, aug_dict[0], sensor_loc, dataset_name, sensor_loc)
# impact_inputdir = input_dir.format(dataset_name, aug_dict[0], sensor_loc)
# outputdir = output_dir.format(split_mode, dataset_name, sensor_loc)
# if not os.path.exists(outputdir):
# os.makedirs(outputdir)
# print('will export data to', outputdir)
# df = pd.read_csv(DataNameList_inputdir)
# df.head(5)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[9]:
# Class balance: label 1 = fall, 0 = ADL.
fall_n = (actlabels_all==1).sum()
adl_n = (actlabels_all==0).sum()
print('fall_n, adl_n:', fall_n, adl_n)
# In[ ]:
# In[ ]:
# In[10]:
# Optional global standardization (single mean/std over the whole tensor).
if standardization == '0 mean unit var':
    data_all = (data_all - data_all.mean()) / data_all.std()
elif standardization == 'None':
    pass
# In[11]:
samples_n = data_all.shape[2]
labels_n = np.shape(np.unique(actlabels_all))[0]
subjects_n = np.shape(np.unique(sub_all))[0]
print('finished reading data in data_management {} at {}'.format(dataset_name, sensor_loc))
print('Dimension of data', data_all.shape)
print('number of activities', labels_n)
print('number of subject', subjects_n)
# In[12]:
# Per-axis summary statistics (aggregated over time and samples).
print('3 axes mean', np.mean(data_all,axis=(0,2)))
print('3 axes std', np.std(data_all,axis=(0,2)))
print('3 axes max', np.max(data_all,axis=(0,2)))
print('3 axes min', np.min(data_all,axis=(0,2)))
# In[13]:
# Histogram of raw accelerometer values, one overlay per axis.
figure=plt.figure(figsize=(5, 5), dpi= 80, facecolor='w', edgecolor='k')
ax = figure.add_subplot(1, 1, 1)
for axis in range(data_all.shape[1]):
    ax.hist(data_all[:,axis,:].reshape(-1), 200, alpha=0.5, label='axis{}'.format(axis))
ax.legend( fontsize = 15 )
ax.set_ylabel('count', fontsize = 15)
ax.set_xlabel('raw value (a.u.)', fontsize = 15)
ax.set_title('raw value distribution', fontsize = 20)
ax.set_xlim([np.min(data_all),np.max(data_all)])
# ax.set_xlim([0.3,0.4])
figure.savefig(outputdir + 'raw_distribution.png')
# In[ ]:
# # plot FT distribution
# In[14]:
# initialize spectral feature matrix
# One-sided FFT magnitude per axis per window: (N//2, 3, samples).
N = data_all.shape[0]
data_all_FT = np.zeros((N//2, 3, data_all.shape[2]))
for i_win in range(data_all_FT.shape[2]):
    for i_axis in range(data_all_FT.shape[1]): # don't do it for HR
        yf = np.abs(scipy.fftpack.fft(data_all[:,i_axis,i_win]))
        # Scale to the conventional single-sided amplitude spectrum.
        yf_scaled = 2.0/N * np.abs(yf[:N//2])
        data_all_FT[:,i_axis,i_win] = yf_scaled
# In[ ]:
# In[15]:
# get indices for each class
indices_ADL = np.where(actlabels_all==0)[0]
data_FT_ADL = data_all_FT[:,:,indices_ADL]
indices_Fall = np.where(actlabels_all==1)[0]
data_FT_Fall = data_all_FT[:,:,indices_Fall]
# In[ ]:
# In[16]:
# Sampling period and window length used to build the frequency axis.
T = 1/sampling_freq
N = data_all.shape[0]
def plot_FT_distribution(data_FT_ADL, data_FT_Fall, visual_resultsdir):
    """Plot mean +/- variance of the spectral magnitude for ADL vs Fall
    windows and save it as FT_distribution.png in visual_resultsdir.

    Relies on module-level T and N (sampling period / window length).
    """
    link_adl_fall = True   # NOTE(review): unused — confirm before removing
    # Skip the DC bin.
    i_start = 1
    # grab data
    xf = np.linspace(0.0, 1.0/(2.0*T), int(N/2))[i_start:]
    # Mean/variance over axes and samples, leaving the frequency axis.
    mag_mean_Fall = np.mean(data_FT_Fall[i_start:], axis=(1,2))
    mag_var_Fall = np.var(data_FT_Fall[i_start:], axis=(1,2))
    mag_mean_ADL = np.mean(data_FT_ADL[i_start:], axis=(1,2))
    mag_var_ADL = np.var(data_FT_ADL[i_start:], axis=(1,2))
    # plt.bar(y_pos, performance, align='center', alpha=0.5)
    figure=plt.figure(figsize=(5, 5), dpi= 80, facecolor='w', edgecolor='k')
    ax = figure.add_subplot(1, 1, 1)
    ax.plot(xf, mag_mean_Fall, color = 'red', label='Fall')
    ax.fill_between(xf, mag_mean_Fall+mag_var_Fall, mag_mean_Fall-mag_var_Fall, alpha=0.3, color = 'red')
    ax.plot(xf, mag_mean_ADL, label='ADL')
    ax.fill_between(xf, mag_mean_ADL+mag_var_ADL, mag_mean_ADL-mag_var_ADL, alpha=0.3)
    ax.legend( fontsize = 15 )
    ax.set_ylabel('mag (a.u.)', fontsize = 15)
    ax.set_xlabel('freq (Hz)', fontsize = 15)
    ax.set_title('spectal engergy distribution', fontsize = 20)
    figure.savefig(visual_resultsdir + 'FT_distribution.png')
# In[17]:
# BUG FIX: plot_FT_distribution's signature is (data_FT_ADL, data_FT_Fall, dir)
# but the original call passed Fall first, so the "Fall" and "ADL" curves in
# the saved figure were swapped.  Pass the arrays in signature order.
plot_FT_distribution(data_FT_ADL, data_FT_Fall, outputdir)
# In[18]:
plt.cla()
# In[19]:
# i_seed
# In[20]:
# Plot 20 randomly chosen sample windows (reproducible via i_seed).
rand_idx = np.arange(data_all.shape[2])
np.random.seed(i_seed)
np.random.shuffle(rand_idx)
t_data = np.asarray(range(data_all.shape[0]))/sampling_freq
for idx in range(20):
    i = rand_idx[idx]
    plt.plot(t_data, data_all[:,0,i], label='x', alpha=0.8)
    plt.plot(t_data, data_all[:,1,i], label='y', alpha=0.8)
    plt.plot(t_data, data_all[:,2,i], label='z', alpha=0.8)
    plt.ylabel('acc value (a.u.)')
    plt.xlabel('time (sec)')
    plt.legend(loc='upper right')
    if actlabels_all[i] == 1:
        plt.title('sample {} subject {}, act {} -Fall-'.format(int(i), int(sub_all[i]), int(actlabels_all[i])))
    else:
        plt.title('sample {} subject {}, act {} -ADL-'.format(int(i), int(sub_all[i]), int(actlabels_all[i])))
    plt.savefig(outputdir+'i{}_sample'.format(idx))
    plt.show()
    plt.cla()
# In[ ]:
# In[21]:
# Histogram of activity labels.
unique_label_id, labels_counts = np.unique(actlabels_all, return_counts=True)
unique_label_id = unique_label_id.astype(int)
y_pos = np.arange(unique_label_id.shape[0])
plt.bar(y_pos, labels_counts, align='center', alpha=0.5)
plt.xticks(y_pos, unique_label_id)
# plt.hist(actlabels_all, bins=np.arange(labels_n+1)-0.5, alpha=0.5, histtype='bar', ec='black')
# plt.xticks(range(labels_n))
plt.xlabel('activity label')
plt.ylabel('sample N')
plt.title('activity histogram for {} at {}'.format(dataset_name, sensor_loc))
plt.savefig(outputdir+'act_hist')
plt.show()
plt.cla()
# In[22]:
# Histogram of samples per subject.
unique_sub_id, id_counts = np.unique(sub_all, return_counts=True)
unique_sub_id = unique_sub_id.astype(int)
y_pos = np.arange(unique_sub_id.shape[0])
plt.bar(y_pos, id_counts, align='center', alpha=0.5)
plt.xticks(y_pos, unique_sub_id)
plt.xlabel('subject i')
plt.ylabel('sample N')
plt.title('subject histogram for {} at {}'.format(dataset_name, sensor_loc))
plt.savefig(outputdir+'sub_hist')
plt.show()
plt.cla()
# In[ ]:
# In[ ]:
# # split data into train and val (1:1)
# split by sample_id
# In[23]:
# Determine which subjects to exclude from cross-validation: either the
# explicit --excluded_idx list, or (by default) subjects with no fall data.
i_sub_unique_all = np.unique(sub_all)
i_sub_excluded = []
if len(excluded_idx) == 0:
    for i_sub in i_sub_unique_all:
        idx_sub = np.where(sub_all==i_sub)[0]
        idx_sub_fall = np.where(actlabels_all[idx_sub]==1)[0]
        if len(idx_sub_fall)==0:
            i_sub_excluded.append(int(i_sub))
else:
    i_sub_excluded = excluded_idx
# NOTE(review): this message also prints for the explicit-exclusion branch,
# where "has no fall data" may not be the actual reason.
print('i_sub {} has no fall data, will exclude'.format(i_sub_excluded))
i_sub_unique = np.array(list(set(i_sub_unique_all) - set(i_sub_excluded)))
print(i_sub_unique_all)
print(i_sub_excluded)
print(i_sub_unique)
# Fold count: one fold per subject for LOO, otherwise parsed from "5fold".
if split_mode == 'LOO':
    CV_n = np.shape(i_sub_unique)[0]
elif split_mode == '5fold':
    CV_n = int(split_mode.split('fold')[0])
print('will split data into {} folds'.format(CV_n))
# In[ ]:
# In[24]:
# perform train_val_split
# perform train_val_split
def train_val_splitter_v2(features_all, labels_all, sub_all, DataNameList_idx_all, aug_idx_all,
                          i_sub_unique_train, i_sub_unique_val, outputdir):
    """Split samples by subject into train/val sets and save both to disk.

    Training keeps every augmentation of each training subject; validation
    keeps only augmentation index 2 ('2_1.5') of each validation subject.
    NOTE(review): the aug_idx_all == 2 filter is hard-coded — confirm this
    is the intended canonical (non-augmented) variant.
    Returns all ten train/val arrays after writing them with data_saver.
    """
    data_val = np.zeros((features_all.shape[0],features_all.shape[1],0))
    data_train = np.zeros((features_all.shape[0],features_all.shape[1],0))
    labels_val = np.zeros((0,))
    labels_train = np.zeros((0,))
    i_sub_val = np.zeros((0,))
    i_sub_train = np.zeros((0,))
    DataNameList_idx_val = np.zeros((0,))
    DataNameList_idx_train = np.zeros((0,))
    aug_idx_val = np.zeros((0,))
    aug_idx_train = np.zeros((0,))
    # Accumulate all samples of each training subject (all augmentations).
    for i_sub in i_sub_unique_train:
        indices_train = np.where(sub_all == i_sub)[0]
        data_train = np.concatenate((data_train, features_all[:,:,indices_train]), axis=2)
        labels_train = np.concatenate((labels_train, labels_all[indices_train,]), axis=0)
        i_sub_train = np.concatenate((i_sub_train, sub_all[indices_train]), axis=0)
        DataNameList_idx_train = np.concatenate((DataNameList_idx_train, DataNameList_idx_all[indices_train]), axis=0)
        aug_idx_train = np.concatenate((aug_idx_train, aug_idx_all[indices_train]), axis=0)
    # Validation subjects contribute only augmentation 2 (see note above).
    for i_sub in i_sub_unique_val:
        # indices_val = np.where(sub_all == i_sub)[0]
        indices_val = np.where((sub_all == i_sub) & (aug_idx_all == 2))[0]
        data_val = np.concatenate((data_val, features_all[:,:,indices_val]), axis=2)
        labels_val = np.concatenate((labels_val, labels_all[indices_val,]), axis=0)
        i_sub_val = np.concatenate((i_sub_val, sub_all[indices_val]), axis=0)
        DataNameList_idx_val = np.concatenate((DataNameList_idx_val, DataNameList_idx_all[indices_val]), axis=0)
        aug_idx_val = np.concatenate((aug_idx_val, aug_idx_all[indices_val]), axis=0)
    print('train dimensions:', data_train.shape, labels_train.shape, i_sub_train.shape, DataNameList_idx_train.shape, aug_idx_train.shape)
    print('val dimensions:', data_val.shape, labels_val.shape, i_sub_val.shape, DataNameList_idx_val.shape, aug_idx_val.shape)
    # Persist both splits to <outputdir>/train and <outputdir>/val.
    outputdir_train = os.path.join(outputdir, 'train')
    if not os.path.exists(outputdir_train):
        os.makedirs(outputdir_train)
    print('outputdir for train:', outputdir_train)
    outputdir_val = os.path.join(outputdir, 'val')
    if not os.path.exists(outputdir_val):
        os.makedirs(outputdir_val)
    print('outputdir for val:', outputdir_val)
    data_saver(data_train, 'data', outputdir_train)
    data_saver(labels_train, 'labels', outputdir_train)
    data_saver(i_sub_train, 'i_sub', outputdir_train)
    data_saver(DataNameList_idx_train, 'DataNameList_idx', outputdir_train)
    data_saver(aug_idx_train, 'aug_idx', outputdir_train)
    data_saver(data_val, 'data', outputdir_val)
    data_saver(labels_val, 'labels', outputdir_val)
    data_saver(i_sub_val, 'i_sub', outputdir_val)
    data_saver(DataNameList_idx_val, 'DataNameList_idx', outputdir_val)
    data_saver(aug_idx_val, 'aug_idx', outputdir_val)
    # Warn if either split is missing a class entirely.
    act_all_set = set(labels_train).union(set(labels_val))
    print('All activity ID:', act_all_set)
    if len(set(act_all_set.difference(set(labels_train))))!=0 or len(set(act_all_set.difference(set(labels_val))))!=0:
        print('********* Warning *********')
        print("Missing activity in labels_train:", (act_all_set.difference(set(labels_train))))
        print("Missing activity in labels_val:", (act_all_set.difference(set(labels_val))))
        print('***************************')
    return data_train, data_val, labels_train, labels_val, i_sub_train, i_sub_val, DataNameList_idx_train, DataNameList_idx_val, aug_idx_train, aug_idx_val
# In[25]:
from sklearn.model_selection import KFold
# Repeat the subject-level K-fold split rep_n times with different shuffles;
# each (rep, fold) pair is written to outputdir/rep{r}/CV{c}.
kfold = CV_n
kf = KFold(n_splits=kfold, shuffle=False)
for i_rep in range(rep_n):
    i_sub_unique = np.array(list(set(i_sub_unique_all) - set(i_sub_excluded)))
    print('all i_sub_unique', i_sub_unique)
    # Shuffle subjects reproducibly; shuffle=False on KFold keeps folds
    # deterministic given this ordering.
    np.random.seed(i_seed+i_rep)
    np.random.shuffle(i_sub_unique)
    kf.get_n_splits(i_sub_unique)
    print(kf)
    for i_CV, (train_idx, val_idx) in enumerate(kf.split(i_sub_unique)):
        print('----------------Splitting for rep {}, CV {}----------------'.format(i_rep, i_CV))
        print("Sub ID | TRAIN:", i_sub_unique[train_idx], "VAL:", i_sub_unique[val_idx])
        print('index CV', CV_n*i_rep+i_CV)
        train_val_splitter_v2(data_all, actlabels_all, sub_all, DataNameList_idx_all, aug_idx_all,
                              i_sub_unique[train_idx], i_sub_unique[val_idx], outputdir+'rep{}/CV{}'.format(i_rep,i_CV))
# for i_rep, i_seed in enumerate(i_seed_list):
# i_sub_unique = np.array(list(set(i_sub_unique_all) - set(i_sub_excluded)))
# print('all i_sub_unique', i_sub_unique)
# np.random.seed(i_seed)
# np.random.shuffle(i_sub_unique)
# kf.get_n_splits(i_sub_unique)
# print(kf)
# i_CV = 0
# for train_idx, val_idx in kf.split(i_sub_unique):
# print('----------------Splitting for rep {}, CV {}----------------'.format(i_rep, i_CV))
# # print("Sub ID | TRAIN:", i_sub_unique[train_index], "VAL:", i_sub_unique[val_index])
# print("Sub ID | TRAIN:", i_sub_unique[train_idx], "VAL:", i_sub_unique[val_idx])
# if len(i_seed_list) > 1:
# train_val_splitter(data_all, actlabels_all, sub_all, DataNameList_idx_all,
# i_sub_unique[train_idx], i_sub_unique[val_idx], outputdir+'CV{}_{}'.format(i_CV, i_rep))
# else:
# train_val_splitter(data_all, actlabels_all, sub_all, DataNameList_idx_all,
# i_sub_unique[train_idx], i_sub_unique[val_idx], outputdir+'CV{}'.format(i_CV))
# i_CV = i_CV + 1
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# # Split based on CV results then
# In[ ]:
# In[26]:
# i_CV = 0
# for train_idx, val_idx in kf.split(i_sub_unique):
# print("Sub ID | TRAIN:", i_sub_unique[train_idx], "VAL:", i_sub_unique[val_idx])
# train_val_splitter(data_all, actlabels_all, sub_all, DataNameList_idx_all,
# i_sub_unique[train_idx], i_sub_unique[val_idx], outputdir+'CV'+str(i_CV))
# i_CV = i_CV + 1
# In[ ]:
# In[ ]:
|
# Average Numbers
# Python program for calculation of average numbers in file
# Anatoli Penev
# 27.12.2017
def main():
    """Read numbers.txt and print the average of its integer lines."""
    numbers_file = 'numbers.txt'
    total, count = calc_total_and_count_numbers(numbers_file)
    # NOTE(review): raises ZeroDivisionError when the file is empty or
    # unreadable (count == 0) — confirm whether that is acceptable.
    print("Average of numbers in {} is {}.".format(numbers_file, total / count))
def calc_total_and_count_numbers(filename):
    """Sum the integers stored one-per-line in *filename*.

    Returns a (total, count) tuple; on an I/O or parse error an error
    message is printed and the totals accumulated so far are returned.
    """
    # BUG FIX: the original opened the file inside the try block and closed it
    # in a finally clause — when open() itself failed, "numbers_file" was
    # never bound and the finally raised NameError.  A with-statement
    # guarantees the file is closed and cannot reference an unbound name.
    total = 0
    numbers_count = 0
    try:
        with open(filename, 'r') as numbers_file:
            for line in numbers_file:
                total += int(line)
                numbers_count += 1
    except IOError as e:
        print("Input/Output error while handling {}: {}".format(filename, e))
    except ValueError as e:
        print("Error converting line to integer: {}".format(e))
    return total, numbers_count
main()
|
import datetime
def emeklilikHesapla(dogumTarihi):
    """Return the calendar year in which a person born in *dogumTarihi*
    reaches the retirement age of 65 (i.e. birth year + 65)."""
    RETIREMENT_AGE = 65
    current_year = datetime.datetime.now().year
    current_age = current_year - dogumTarihi
    years_until_retirement = RETIREMENT_AGE - current_age
    return current_year + years_until_retirement
print(emeklilikHesapla(1984)) |
"""Functions interacting with github"""
from collections import defaultdict, namedtuple
import json
import re
import logging
from dateutil.parser import parse
from requests.exceptions import HTTPError
from client_wrapper import ClientWrapper
from constants import NO_PR_BUILD
from markdown import parse_linked_issues
log = logging.getLogger(__name__)
# Immutable records holding the subset of Github fields this module uses.
PullRequest = namedtuple("PullRequest", ["number", "title", "body", "updatedAt", "org", "repo", "url"])
Issue = namedtuple("Issue", ["number", "title", "status", "org", "repo", "updatedAt", "url"])
# GraphQL: merged PRs (with assignees) for the 20 most recently pushed
# mitodl repos; consumed by calculate_karma().
KARMA_QUERY = """
query {
organization(login:"mitodl") {
repositories(first: 20, orderBy: {
field: PUSHED_AT,
direction: DESC
}) {
nodes {
name
pullRequests(first: 100, states: [MERGED], orderBy: {
field: UPDATED_AT
direction: DESC,
}) {
nodes {
updatedAt
mergedAt
assignees(first: 3) {
nodes {
login
name
}
}
}
}
}
}
}
}
"""
# GraphQL: open PRs with labels and assignees; consumed by needs_review().
NEEDS_REVIEW_QUERY = """
query {
organization(login:"mitodl") {
repositories(first: 20, orderBy: {
field: PUSHED_AT,
direction: DESC
}) {
nodes {
name
pullRequests(first: 100, states: [OPEN], orderBy: {
field: UPDATED_AT
direction: DESC,
}) {
nodes {
title
url
labels(first: 3) {
nodes {
name
}
}
assignees(first: 1) {
nodes {
login
}
}
}
}
}
}
}
}
"""
def make_pull_requests_query(*, org, repo, cursor):
    """
    Construct a GraphQL query getting the text of the last 100 most recently updated pull requests

    Args:
        org (str): The github org
        repo (str): The github repo
        cursor (str or None): If set, the cursor to start from for pagination

    Returns:
        str: The GraphQL query text
    """
    # Resume after the given cursor when paginating; empty on the first page.
    if cursor is None:
        pagination = ""
    else:
        pagination = f', after: "{cursor}"'
    return f"""
query {{
organization(login: "{org}") {{
repository(name: "{repo}") {{
pullRequests(first: 100{pagination}, states: [MERGED], orderBy: {{
field: UPDATED_AT
direction: DESC,
}}) {{
edges {{
cursor
node {{
number
body
updatedAt
url
title
}}
}}
}}
}}
}}
}}
"""
async def run_query(*, github_access_token, query):
    """
    Run a query using Github graphql API

    Args:
        github_access_token (str): A github access token
        query (str): A graphql query to run

    Returns:
        dict: The results of the query
    """
    payload = json.dumps({"query": query})
    headers = {"Authorization": "Bearer {}".format(github_access_token)}
    client = ClientWrapper()
    resp = await client.post(
        "https://api.github.com/graphql",
        data=payload,
        headers=headers,
    )
    resp.raise_for_status()
    return resp.json()
def github_auth_headers(github_access_token):
    """
    Create headers for authenticating requests against github

    Args:
        github_access_token (str): A github access token

    Returns:
        dict: Headers for authenticating a request
    """
    return {
        "Authorization": "Bearer " + github_access_token,
        "Accept": "application/vnd.github.v3+json",
    }
async def create_pr(*, github_access_token, repo_url, title, body, head, base):  # pylint: disable=too-many-arguments
    """
    Create a pull request

    Args:
        github_access_token (str): A github access token
        repo_url (str): The URL of the repository to create the PR in
        title (str): The title of the PR
        body (str): The body of the PR
        head (str): The head branch for the PR
        base (str): The base branch for the PR
    """
    org, repo = get_org_and_repo(repo_url)
    payload = json.dumps({
        'title': title,
        'body': body,
        'head': head,
        'base': base,
    })
    client = ClientWrapper()
    resp = await client.post(
        "https://api.github.com/repos/{org}/{repo}/pulls".format(org=org, repo=repo),
        headers=github_auth_headers(github_access_token),
        data=payload,
    )
    resp.raise_for_status()
async def fetch_pull_requests_since_date(*, github_access_token, org, repo, since):
    """
    Look up PRs between now and a given datetime

    Args:
        github_access_token (str): The github access token
        org (str): A github organization
        repo (str): A github repo
        since (date): The earliest date to request PRs

    Yields:
        PullRequest: Information about each pull request fetched
    """
    cursor = None
    while True:
        # This should hopefully not be an infinite loop because the cursor will be updated and the loop should
        # terminate once a pull request is out of the date range given.
        result = await run_query(
            github_access_token=github_access_token,
            query=make_pull_requests_query(
                org=org,
                repo=repo,
                cursor=cursor,
            )
        )
        edges = result['data']['organization']['repository']['pullRequests']['edges']
        if not edges:
            # No more pages of results.
            return
        # Resume the next page after the last edge of this one.
        cursor = edges[-1]['cursor']
        for edge in edges:
            node = edge['node']
            pr_number = node['number']
            url = node['url']
            pr_date = parse(node['updatedAt']).date()
            if pr_date < since:
                # Results are ordered by UPDATED_AT descending, so everything
                # after this point is older than the requested window.
                return
            title = node['title']
            if title.startswith("Release "):
                # Skip release PRs; they are not interesting changes.
                continue
            yield PullRequest(
                number=pr_number,
                title=title,
                updatedAt=pr_date,
                body=node['body'],
                org=org,
                repo=repo,
                url=url,
            )
async def fetch_issues_for_pull_requests(*, github_access_token, pull_requests):
    """
    Look up issues linked with the given pull requests

    Args:
        github_access_token (str): A github access token
        pull_requests (async_iterable of PullRequest):
            A iterable of PullRequest which contains issue numbers to be parsed in the body

    Yields:
        (PullRequest, list of (Issue, ParsedIssue))
    """
    # Cache issues so each is fetched from the API at most once.
    # NOTE(review): the cache is keyed by issue number only; issues from
    # different repos sharing a number would collide -- confirm intended.
    issue_lookup = {}
    async for pull_request in pull_requests:
        parsed_issues = parse_linked_issues(pull_request)
        for parsed_issue in parsed_issues:
            if parsed_issue.issue_number not in issue_lookup:
                try:
                    issue = await get_issue(
                        github_access_token=github_access_token,
                        org=parsed_issue.org,
                        repo=parsed_issue.repo,
                        issue_number=parsed_issue.issue_number,
                    )
                    if issue is None:
                        # get_issue() returns None when the number refers to a PR.
                        continue
                    issue_lookup[parsed_issue.issue_number] = issue
                except HTTPError:
                    # Missing/inaccessible issue: log and keep going.
                    log.warning(
                        "Unable to find issue %d for %s/%s",
                        parsed_issue.issue_number,
                        parsed_issue.org,
                        parsed_issue.repo,
                    )
        yield pull_request, [
            (issue_lookup.get(parsed_issue.issue_number), parsed_issue) for parsed_issue in parsed_issues
        ]
def make_issue_release_notes(prs_and_issues):
    """
    Create release notes for PRs and linked issues

    Args:
        prs_and_issues (iterable of PullRequest, list of (Issue, ParsedIssue)):
            The PRs and issues to use to make release notes

    Returns:
        str:
            Release notes for the issues closed during the time
    """
    # Key by (org, repo, number): issue numbers are only unique within a
    # single repository, so the original number-only key merged unrelated
    # issues that happened to share a number across repos.
    issue_to_prs = {}
    for pr, issue_list in prs_and_issues:
        for issue, parsed_issue in issue_list:
            if not issue or issue.status != "closed":
                # Only closed issues make it into the notes.
                continue
            key = (issue.org, issue.repo, issue.number)
            if key not in issue_to_prs:
                issue_to_prs[key] = (issue, [])
            issue_to_prs[key][1].append((pr, parsed_issue))
    if not issue_to_prs:
        return "No new issues closed by PR"
    # Sorted by (org, repo, number) so output is deterministic.
    return "\n".join(
        f"- {issue.title} (<{issue.url}|#{issue.number}>)"
        for _, (issue, _) in sorted(issue_to_prs.items(), key=lambda tup: tup[0])
    )
async def get_issue(*, github_access_token, org, repo, issue_number):
    """
    Look up information about an issue

    Args:
        github_access_token (str): The github access token
        org (str): An organization
        repo (str): A repository
        issue_number (int): The github issue number

    Returns:
        Issue: Information about the issue, or None when the number is a PR
    """
    client = ClientWrapper()
    response = await client.get(
        f"https://api.github.com/repos/{org}/{repo}/issues/{issue_number}",
        headers=github_auth_headers(github_access_token),
    )
    response.raise_for_status()
    data = response.json()
    # Github's issues endpoint also resolves PR numbers; skip those.
    if 'pull_request' in data:
        return None
    return Issue(
        title=data['title'],
        number=data['number'],
        org=org,
        repo=repo,
        status=data['state'],
        updatedAt=parse(data['updated_at']),
        url=data['html_url'],
    )
async def get_pull_request(*, github_access_token, org, repo, branch):
    """
    Look up the pull request for a branch

    Args:
        github_access_token (str): The github access token
        org (str): The github organization (eg mitodl)
        repo (str): The github repository (eg micromasters)
        branch (str): The name of the associated branch

    Returns:
        dict: The information about the pull request, or None when no PR matches
    """
    client = ClientWrapper()
    response = await client.get(
        "https://api.github.com/repos/{org}/{repo}/pulls".format(org=org, repo=repo),
        headers=github_auth_headers(github_access_token),
    )
    response.raise_for_status()
    matching = [pull for pull in response.json() if pull['head']['ref'] == branch]
    if len(matching) > 1:
        # Shouldn't happen since we look up by branch
        raise Exception("More than one pull request for the branch {}".format(branch))
    return matching[0] if matching else None
async def calculate_karma(*, github_access_token, begin_date, end_date):
    """
    Calculate number of merged pull requests by assigned reviewer

    Args:
        github_access_token (str): A Github access token
        begin_date (datetime.date): Start date for the range to look in
        end_date (datetime.date): The end date for the range to look in

    Returns:
        list of tuple: (assignee, karma count) sorted from most karma to least
    """
    data = await run_query(github_access_token=github_access_token, query=KARMA_QUERY)
    karma = defaultdict(lambda: 0)
    for repository in data['data']['organization']['repositories']['nodes']:
        # Keep track if any dates fall outside the range. If none do and we're at the max limit for number of PRs,
        # we need to paginate (but instead we'll just raise an exception for now).
        some_dates_out_of_range = False
        for pull_request in repository['pullRequests']['nodes']:
            updated_at = parse(pull_request['updatedAt']).date()
            merged_at = parse(pull_request['mergedAt']).date()
            if begin_date <= updated_at <= end_date:
                if begin_date <= merged_at <= end_date:
                    # A pull request could get updated after it was merged. We don't have a good way
                    # to filter this out via API so just ignore them here
                    for assignee in pull_request['assignees']['nodes']:
                        karma[assignee['name']] += 1
            elif updated_at < begin_date:
                # Older than the window: proof we reached past the range,
                # so the 100-PR page covered everything in it.
                some_dates_out_of_range = True
        if len(repository['pullRequests']['nodes']) == 100 and not some_dates_out_of_range:
            # This means there are at least 100 pull requests within that time range for that value.
            # We will probably not get more than 100 merged pull requests in a single sprint, but raise
            # an exception if we do.
            raise Exception(
                "Response contains more PRs than can be handled at once"
                " for {repo}, {begin_date} to {end_date}.".format(
                    repo=repository['name'],
                    begin_date=begin_date,
                    end_date=end_date,
                )
            )
    # Most karma first.
    karma_list = sorted(karma.items(), key=lambda tup: tup[1], reverse=True)
    return karma_list
async def needs_review(github_access_token):
    """
    Calculate which PRs need review

    Args:
        github_access_token (str): A Github access token

    Returns:
        list of tuple: A list of (repo name, pr title, pr url) for PRs that need review and are unassigned
    """
    data = await run_query(
        github_access_token=github_access_token,
        query=NEEDS_REVIEW_QUERY,
    )
    prs_needing_review = []
    # The query returns every open PR; filter on label and assignee here.
    for repository in data['data']['organization']['repositories']['nodes']:
        repo_name = repository['name']
        for pull_request in repository['pullRequests']['nodes']:
            label_names = [label['name'].lower() for label in pull_request['labels']['nodes']]
            if 'needs review' not in label_names:
                continue
            if pull_request['assignees']['nodes']:
                # Already assigned to someone; skip it.
                continue
            prs_needing_review.append(
                (repo_name, pull_request['title'], pull_request['url'])
            )
    return prs_needing_review
def get_org_and_repo(repo_url):
    """
    Get the org and repo from a git repository cloned from github.

    Args:
        repo_url (str): The repository URL (SSH or HTTPS form, ending in .git)

    Returns:
        tuple: (org, repo)

    Raises:
        ValueError: If the URL does not look like a github clone URL.
    """
    # [:/] matches both the SSH form ("github.com:org/repo.git") and the
    # HTTPS form ("github.com/org/repo.git").  The original class [:|/]
    # also accepted a literal '|' as separator, because '|' is not
    # alternation inside a character class.
    match = re.match(r'^.*github\.com[:/](.+)/(.+)\.git', repo_url)
    if match is None:
        # Raise a descriptive error instead of the original AttributeError
        # from calling .groups() on None.
        raise ValueError("Could not parse github org/repo from {}".format(repo_url))
    org, repo = match.groups()
    return org, repo
async def get_status_of_pr(*, github_access_token, org, repo, branch):
    """
    Get the status of the PR for a given branch

    Args:
        github_access_token (str): The github access token
        org (str): The github organization (eg mitodl)
        repo (str): The github repository (eg micromasters)
        branch (str): The name of the associated branch

    Returns:
        str: The status of the PR. If any status is failed this is failed,
        if any is pending this is pending. Else it's good.
    """
    endpoint = "https://api.github.com/repos/{org}/{repo}/commits/{ref}/statuses".format(
        org=org,
        repo=repo,
        ref=branch,
    )
    client = ClientWrapper()
    resp = await client.get(
        endpoint,
        headers=github_auth_headers(github_access_token),
    )
    if resp.status_code == 404:
        # No statuses exist for this ref yet; treat as an empty list.
        statuses = []
    else:
        resp.raise_for_status()
        statuses = resp.json()
    # Only look at PR builds
    statuses = [status for status in statuses if status['context'] == 'continuous-integration/travis-ci/pr']
    if len(statuses) == 0:
        # This may be due to the PR not being available yet
        return NO_PR_BUILD
    # Statuses are returned newest first; report the latest PR build state.
    return statuses[0]['state']
|
import pymysql
import config
class MysqlHepler:
    """Thin helper around a pymysql connection to the dev database.

    NOTE(review): the class name looks like a typo for "MysqlHelper", but it
    is kept to avoid breaking existing callers.
    """

    def __init__(self):
        # Prompt for the dev-database password at startup instead of keeping
        # it in config.  (Prompt text reconstructed from a mojibake source.)
        config.devPassward = input("่ฏท่พ“ๅ…ฅๅผ€ๅ‘็Žฏๅขƒๆ•ฐๆฎๅบ“ๅฏ†็ :")
        self.db = pymysql.connect(config.devUrl, config.devUserName, config.devPassward, config.database_name,
                                  charset="utf8")
        self.cursor = self.db.cursor()

    def get_table_data(self, table_name):
        """Return every row of *table_name*.

        WARNING: *table_name* is interpolated into the SQL text; only pass
        trusted identifiers.
        """
        sql = "select * from " + table_name
        # BUG FIX: the original never executed the query before fetchall(),
        # which returned no (or stale) results.
        self.cursor.execute(sql)
        return self.cursor.fetchall()

    def get_one_by_sql(self, sql):
        """Execute *sql* and return the first row (or None)."""
        self.cursor.execute(sql)
        return self.cursor.fetchone()

    def get_all_by_sql(self, sql):
        """Execute *sql* and return all rows."""
        self.cursor.execute(sql)
        return self.cursor.fetchall()

    def update_db(self, sql):
        """Execute a mutating statement, committing on success and rolling
        back on failure.  Example:
        update_db("update bs_car_series set en_name = '%s' where id = %d" % (en_name, id))
        """
        print(sql)
        try:
            self.cursor.execute(sql)
            self.db.commit()
            print("ๆˆๅŠŸ")
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt etc. still propagate.
            print("ๅคฑ่ดฅ")
            self.db.rollback()

    def getTables(self):
        """Return the names of all tables in the current database."""
        self.cursor.execute("show tables")
        # Use the public fetchall() API instead of the private _rows
        # attribute the original poked at.  Each row is a 1-tuple (name,).
        return [row[0] for row in self.cursor.fetchall()]

    def getAllTableColumnName(self):
        """Return {table name: [column names]} for every table."""
        table_columns = {}
        for table in self.getTables():
            self.cursor.execute("select * from " + table)
            # cursor.description is a sequence of 7-tuples; element 0 is the
            # column name.
            table_columns[table] = [col[0] for col in self.cursor.description]
        return table_columns
|
import JSONHelper
import os
if __name__ == '__main__':
    # Filter a Scan2CAD sample list, keeping only samples whose voxel files
    # exist on disk.  Swap the two paths below to filter the training or
    # validation sets instead, e.g.:
    #   .../trainset_pn.json      -> .../trainset_pn_filtered.json
    #   .../validationset_pn.json -> .../validationset_pn_filtered.json
    train_set_json = "/cluster/project/infk/courses/3d_vision_21/group_14/1_data/Scan2CAD-training-data/visualizationset.json"
    train_set_json_filtered = "/cluster/project/infk/courses/3d_vision_21/group_14/1_data/Scan2CAD-training-data/visualizationset_filtered.json"
    valid_sample_count = 0
    invalid_sample_count = 0
    count = 0
    json_data = []
    for r in JSONHelper.read(train_set_json):
        count += 1
        if count % 100 == 0:
            # Progress heartbeat: valid vs invalid counts so far.
            print(valid_sample_count, ":", invalid_sample_count)
        # A sample is kept only when both of its voxel files exist.
        filename_center = r["filename_vox_center"]
        filename_heatmap = r["filename_vox_heatmap"]
        if os.path.isfile(filename_heatmap) and os.path.isfile(filename_center):
            json_data.append(r)
            valid_sample_count += 1
        else:
            invalid_sample_count += 1
    if count:
        print("Valid sample percentage:", 100.0 * valid_sample_count / count, " %")
    else:
        # Guard: an empty input set would otherwise raise ZeroDivisionError.
        print("No samples found in", train_set_json)
    JSONHelper.write(train_set_json_filtered, json_data)
    print("Training json-file (needed from conv) saved in:", train_set_json_filtered)
import requests
from bs4 import BeautifulSoup
import re
# Search keywords to scrape Google results for.
keywords = {
    "php",
    "mysql",
    "database",
    "restapi",
    "restfullapi",
    "encryption",
    "pentest",
    "hacking",
}

# Matches the real target URL inside a Google "/url?q=..." redirect link;
# compiled once instead of per result page.
GOOGLE_REDIRECT = re.compile(r"(?<=/url\?q=)(htt.*://.*)")

for keyword in keywords:
    # Pages 2..10 of results (start=10..90).
    for page in range(1, 10):
        pages = requests.get("https://www.google.com/search?q=" + keyword + "&start=" + str(page * 10))
        # An explicit parser avoids bs4's "no parser was explicitly specified"
        # warning and makes parsing deterministic across environments.
        soup = BeautifulSoup(pages.content, "html.parser")
        # (Removed an unused `links = soup.findAll("a")` from the original.)
        for link in soup.find_all("a", href=GOOGLE_REDIRECT):
            target = link["href"].replace("/url?q=", "")
            print(re.split(":(?=http)", target))
            with open("result.txt", "a") as f:
                f.write(target + "\n")
import sys
from node import *
# Input params: the list to build and the value of the single node to delete.
input_list = [-1, 1, 3, 7, 11, 9, 2, 3, 5];
delete_value = 3;
# Make a linked list from input_list.
if (len(input_list) <= 2):
    # NOTE(review): this only prints a message -- execution continues anyway;
    # confirm whether it was meant to exit.
    print("ERROR: list has nothing but the head and the tail.");
head = node(input_list[0]);
current_ref = head;
for index in range(1, len(input_list)):
    current_ref.add_next(node(input_list[index]));
    current_ref = current_ref.next;
head.print_list();
# NOTE: whichever node we delete CANNOT be the first of the last node
prev_ref = None;
current_ref = head;
for index in range(len(input_list)):
    # Only interior nodes (not head, not tail) are candidates for deletion.
    if (0 < index < len(input_list)-1):
        if (current_ref.value == delete_value):
            # Unlink by pointing the previous node past the current one.
            prev_ref.next = current_ref.next;
            current_ref = prev_ref; #this leaves no reference to the deleted node
            break; #we break after having removed a single node
    #increment node
    prev_ref = current_ref;
    current_ref = current_ref.next;
head.print_list();
def operation(input1, input2):
    """Print the sum of two numbers."""
    print(str(input1 + input2))

operation(2, 3)

def printer(name, last):
    """Print first and last name separated by a space."""
    print(name + " " + last)

# Keyword arguments may be given in any order.
printer(name="Shayan", last="Khanani")
printer(last="Khanani", name="Shayan")

def salary_calc(salary, tax=0.013):
    """Print salary * tax; *tax* defaults to 1.3%."""
    print(str(salary * tax))

salary_calc(30000, 0.01)
salary_calc(40000)  # uses the default tax rate of 0.013

def greet(greeting, name):
    """Print a greeting of the form '<greeting>, <name>'."""
    print(greeting + ", " + name)

# Positional arguments must precede keyword arguments, so
# greet(greeting="Good Morning", "Shayan") would be a SyntaxError.
greet("Hello", name="Shayan")
|
import sys, getopt
from subprocess import Popen, PIPE
import sys
import socket
import threading
def showHelp():
    """Print usage information for the command-line options."""
    for line in (
        "This will display the command options",
        "-h, --help . . . . print this message",
        "-p, --port <port> . . . . . . . port",
    ):
        print(line)
def program(data):
    """Bind a TCP listener on 0.0.0.0:<data> and hand each accepted
    connection to a client-handling thread.

    (Python 2 code: uses print statements.)
    """
    bind_ip = "0.0.0.0"
    bind_port = data
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((bind_ip,bind_port))
    # Backlog of 5 pending connections.
    server.listen(5)
    print "[*] Listening on %s:%d" % (bind_ip,bind_port)
    while True:
        try:
            client,addr = server.accept()
            print "[*] Accepted connection from: %s:%d" % (addr[0],addr[1])
            # spin up our client thread to handle incoming data
            client_handler = threading.Thread(target=handle_client,args=(client,))
            client_handler.start()
        except KeyboardInterrupt:
            # Ctrl-C shuts the server down cleanly.
            print "Session aborted."
            sys.exit(0)
# this is our client-handling thread
def handle_client(client_socket):
    """Read one request from the client, execute it, then close the
    connection."""
    # Read a single command (up to 1 KB) from the peer.
    data = str(client_socket.recv(1024))
    processData(data, client_socket)
    # Signal the end of the session and drop the connection.
    client_socket.send("\n\nClose.")
    client_socket.close()
def processData(d, client_socket):
    """Run the received text as a command (whitespace-split) and stream its
    stdout back over *client_socket*.

    WARNING(review): this executes arbitrary client-supplied commands --
    apparently intentional for this tool, but a severe risk anywhere else.
    (Python 2 code: uses print statements.)
    """
    print "[*] Input receieved: %s" % (d)
    d = d.split()
    msg = [""]
    # Line-buffered pipe from the child process's stdout.
    p = Popen(d, stdout=PIPE, bufsize=1)
    with p.stdout:
        for line in iter(p.stdout.readline, b''):
            msg.append(line)
    p.wait() # wait for the subprocess to exit
    # Forward every captured line to the client.
    for i in msg:
        client_socket.send(i)
def main(argv):
    """Parse -h/--help and -p/--port options, then start the server."""
    port = 0
    try:
        opts, args = getopt.getopt(argv, "hp:", ["help", "port="])
    except getopt.GetoptError:
        showHelp()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-p", "--port"):
            port = int(arg)
        else:
            # -h/--help (and anything unrecognised) prints usage and exits.
            showHelp()
            sys.exit()
    program(port)
# Run the server with the command-line arguments (excluding the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
|
import tkinter as tk
# True while the display should be cleared before the next keypress is shown.
opera = True
# The two operands of the pending operation.
num1 = 0
num2 = 0
# Supported operator symbols; `ops` is a 1-based index into this list
# (0 means no operator is pending).
op = ["+", "-", "*", "/"]
ops = 0
class Button():
    """One calculator key: a coloured tk.Button in its own frame, sharing
    calculator state through the module-level globals (num1, num2, op, ops,
    opera)."""
    def __init__(self, kind, txt, display = None):
        # kind is the parent widget; txt is the key label (an int digit, an
        # operator string, ".", "AC", "DEL" or "="); display is the Entry
        # widget the key reads from / writes to.
        self.kind = kind
        self.txt = txt
        self.display = display
        self.btnFrame = tk.Frame(self.kind, bd = 2, padx = 2, pady = 2, bg = "#57A773")
        self.btn = tk.Button(self.btnFrame, text = self.txt, font = "Helvetica 20", bg = "#08B2E3", height = 1, width = 3)
        # "AC" and "=" are taller keys; "AC" and "DEL" get the red colour.
        if self.txt=="AC" or self.txt=="=":
            self.btn["height"] = 3
        if self.txt=="AC" or self.txt=="DEL":
            self.btn["bg"] = "#EE6352"
        global op
        # Operator keys get their own colour.
        if self.txt in op:
            self.btn["bg"] = "#767991"
        self.btn.pack(fill = tk.BOTH, expand = True)
    def oper(self):
        """Handler for +,-,*,/: store the displayed value as the first
        operand and remember which operator was pressed."""
        global opera, num1, op, ops
        # Chained operations: evaluate the pending one first.
        if ops!=0:
            self.eq()
        try:
            num1 = float(self.display.get())
        except ValueError:
            if self.display.get()=="":
                num1 = 0
                self.display.insert(tk.END, num1)
            else:
                self.display.delete(0, tk.END)
                self.display.insert(tk.END, "SYNTAX ERROR")
        opera = True
        # ops is the 1-based index of the pressed operator in op.
        for i in range(len(op)):
            if self.txt==op[i]:
                ops = i + 1
                break
    def eq(self):
        """Handler for '=': apply the pending operator to num1 and the
        displayed value, showing MATH/SYNTAX ERROR when appropriate."""
        global ops, num1, num2, opera
        try:
            num2 = float(self.display.get())
            self.display.delete(0, tk.END)
            if ops==0:
                self.display.insert(tk.END, self.wholer(num2))
            elif ops==1:
                self.display.insert(tk.END, self.wholer(num1+num2))
            elif ops==2:
                self.display.insert(tk.END, self.wholer(num1-num2))
            elif ops==3:
                self.display.insert(tk.END, self.wholer(num1*num2))
            elif ops==4:
                try:
                    self.display.insert(tk.END, num1/num2)
                except ZeroDivisionError:
                    self.display.insert(tk.END, "MATH ERROR")
        except ValueError:
            if self.display.get()=="":
                num2 = 0
                self.display.insert(tk.END, num2)
            else:
                self.display.delete(0, tk.END)
                self.display.insert(tk.END, "SYNTAX ERROR")
        opera = True
        # Remember the last entered operand; a following oper() re-reads
        # the display for chained calculations.
        num1 = num2
        ops = 0
    def clearer(self):
        """Handler for 'AC': reset both operands and show 0."""
        global num1, num2, opera
        self.display.delete(0, tk.END)
        num1 = 0
        num2 = 0
        self.display.insert(tk.END, 0)
        opera = True
    def deleter(self):
        """Handler for 'DEL': drop the last character, falling back to 0
        when the display becomes empty."""
        global num2, opera
        self.display.delete(len(self.display.get())-1, tk.END)
        if self.display.get()=="":
            num2 = 0
            self.display.insert(tk.END, num2)
        opera = True
    def click(self):
        """Handler for digit/'.' keys: start a fresh entry after an
        operator, otherwise append to the current one."""
        global opera
        if opera:
            self.display.delete(0, tk.END)
        self.display.insert(tk.END, self.txt)
        opera = False
    def wholer(self, num):
        """Return *num* as an int when it has no fractional part."""
        if num%1==0:
            return int(num)
        else:
            return num
class Calculator():
    """Main window: a display Entry on top and a grid of Button keys."""
    def __init__(self):
        self.window = tk.Tk()
        self.window.title("Calculator")
        self.add_display()
        self.keysFrame = tk.Frame(self.window, bg = "#484D6D")
        self.keysFrame.pack(fill = tk.BOTH, side = tk.LEFT, expand = True)
        # Make the grid cells stretch with the window.
        # NOTE(review): indentation was ambiguous in the mangled source; both
        # configure calls are idempotent so this nesting is harmless.
        for i in range(5):
            for j in range(4):
                tk.Grid.rowconfigure(self.keysFrame, j, weight = 1)
                tk.Grid.columnconfigure(self.keysFrame, i, weight = 1)
        self.add_digitsButtons()
        self.add_operationButtons()
    def add_display(self):
        """Create the right-justified result display, initialised to 0."""
        self.dispFrame = tk.Frame(self.window, bd = 10, bg = "#484D6D")
        self.dispFrame.pack(fill = tk.BOTH, expand = True)
        self.disp = tk.Entry(self.dispFrame, font = "Helvetica 30", width = 10, justify = "right")
        self.disp.pack(fill = tk.BOTH, expand = True)
        self.disp.insert(tk.END, 0)
    def add_digitsButtons(self):
        """Lay out 0 and '.' on the bottom row and 1-9 in a 3x3 block."""
        self.digitButtons = [Button(self.keysFrame, 0, self.disp)]
        self.digitButtons[0].btnFrame.grid(row = 3, column = 0, sticky = tk.N+tk.S+tk.E+tk.W)
        self.digitButtons[0].btn["command"] = self.digitButtons[0].click
        for i in range(3):
            for j in range(3):
                # Digit 1+j+3*i goes to row 2-i so 7-8-9 end up on top.
                self.digitButtons.append(Button(self.keysFrame, 1+j+3*i, self.disp))
                self.digitButtons[1+j+3*i].btnFrame.grid(row = 2-i, column = j, sticky = tk.N+tk.S+tk.E+tk.W)
                self.digitButtons[1+j+3*i].btn["command"] = self.digitButtons[1+j+3*i].click
        self.digitButtons.append(Button(self.keysFrame, ".", self.disp))
        self.digitButtons[-1].btnFrame.grid(row = 3, column = 1, sticky = tk.N+tk.S+tk.E+tk.W)
        self.digitButtons[-1].btn["command"] = self.digitButtons[-1].click
    def add_operationButtons(self):
        """Create the operator column plus the DEL, AC and '=' keys."""
        global op
        self.oper = []
        for i in range(len(op)):
            self.oper.append(Button(self.keysFrame, op[i], self.disp))
            self.oper[i].btnFrame.grid(row = i, column = 3, sticky = tk.N+tk.S+tk.E+tk.W)
            self.oper[i].btn["command"] = self.oper[i].oper
        self.delete = Button(self.keysFrame, "DEL", self.disp)
        self.delete.btnFrame.grid(row = 3, column = 2, sticky = tk.N+tk.S+tk.E+tk.W)
        self.delete.btn["command"] = self.delete.deleter
        self.clear = Button(self.keysFrame, "AC", self.disp)
        self.clear.btnFrame.grid(row = 0, column = 4, rowspan = 2, sticky = tk.N+tk.S+tk.E+tk.W)
        self.clear.btn["command"] = self.clear.clearer
        self.equ = Button(self.keysFrame, "=", self.disp)
        self.equ.btnFrame.grid(row = 2, column = 4, rowspan = 2, sticky = tk.N+tk.S+tk.E+tk.W)
        self.equ.btn["command"] = self.equ.eq
# Build the UI and enter the tkinter event loop.
calc = Calculator()
calc.window.mainloop()
import os
import json
import prepare_circuit as pc
import layout as la
import visualise_layout as vla
import layer_map as lll
import patches_state as ps
import cube_to_physical as qre
import operationcollection as opc
import cirqinterface as ci
def write_json(object_to_store):
    """Serialise *object_to_store* as JSON into layout.json (overwriting
    any existing file)."""
    with open('layout.json', 'w') as outfile:
        outfile.write(json.dumps(object_to_store))
def process_multi_body_format(commands):
    """
    Entry point is a multibody measurement format
    :param commands: list of commands. Accepted are INIT, NEED A, MZZ, MXX, MZ, MX, S, H
    :return:
    """
    # NOTE(review): stub -- the body is only this docstring, so calling it
    # returns None without processing *commands*; the equivalent logic
    # currently lives inline in main() below.  Confirm before relying on it.
def main():
    '''
    Required for SKC compiler
    Not always necessary if the generated circuits are Clifford+T, for example
    '''
    # if not os.path.exists("stars"):
    #     os.makedirs("stars")
    print("OpenSurgery (version Santa Barbara)\n")
    interface = ci.CirqInterface()
    cirq_circuit = interface.random_circuit(nr_qubits=10, nr_gates=10)
    # cirq_circuit = interface.openfermion_circuit()
    prep = pc.PrepareCircuit()
    gate_list = prep.parse_to_my_string_format(cirq_circuit)
    # A compaction of the SK decomposition would be good. Too many gates are output.
    # This will start an instance of the SKC decomposer
    gate_list = prep.decompose_arbitrary_rotations(gate_list)
    # print(gate_list)
    # take the gates to M?? commands
    commands = prep.replace_gates_with_multibody(gate_list)
    print(len(commands))
    print(commands)
    # NOTE(review): this return makes everything below unreachable --
    # presumably left in while debugging command generation; confirm before
    # removing either the return or the dead code that follows.
    return
    # load from file
    # commands = prep.load_multibody_format()
    # tests begin
    # commands = ['INIT 10', 'NEED A', 'S 2', 'MXX 2 3', 'H 2', 'H 3', 'MXX 2 3', 'MZZ A 3']
    # commands = ['INIT 4', 'NEED A', 'MZZ A 0', 'MX A' , 'S ANCILLA', 'MXX ANCILLA 0', 'H 3', 'S 3', 'NEED A', 'MZZ A 3', 'MX A', 'S ANCILLA', 'MXX ANCILLA 3', 'S 3', 'H 3', 'H 3', 'S 3', 'NEED A', 'MZZ A 0 3 1 2', 'MX A', 'S ANCILLA', 'MXX ANCILLA 0 3 1 2', 'S 3', 'H 3', 'H 2', 'S 2', 'H 1', 'NEED A', 'MZZ A 2 1', 'MX A', 'S ANCILLA', 'MXX ANCILLA 2 1', 'S 2', 'H 2', 'H 1', 'H 0', 'S 0', 'H 3', 'S 3', 'MZZ 0 1 2 3', 'H 0', 'H 1', 'MZZ 0 1', 'H 0', 'H 3', 'MZZ 0 3']
    # tests end
    #
    # The STORAGE of QUBIT STATES
    #
    patches_state = ps.PatchesState()
    #
    # The LAYER MAP
    #
    layer_map = lll.LayerMap()
    # this is the layout, which needs to be first initialised
    lay = None
    # determine the hardcoded time depth of a distillation and add some delay
    height_of_distillation = int(layer_map.distillation_t_length * 1.5)
    # worst case: each command is a distillation
    nr_commands = len(commands) * height_of_distillation
    # there is a MAX the current version can handle
    if nr_commands >= 10000:
        nr_commands = 10000
    if not commands[0].startswith("INIT"):
        # first line should always be INIT
        print("ERROR: No INIT command for the layer map")
        return
    # limit the maximum commands to nr_commands, because otherwise memory explodes
    for command in commands[0:nr_commands]:
        # print(command)
        # each command should add a new time step?
        command_splits = command.split(" ")
        if ("ANCILLA" in command_splits) and (not patches_state.is_patch_active("ANCILLA")):
            patches_state.add_active_patch("ANCILLA")
        if command_splits[0] == "INIT":
            # pass patches_state to be filled by the method
            # with the names of the qubits that will be tracked
            layer_map.setup_arrangement_one(int(command_splits[1]), patches_state)
            # initialise the cubic layout
            lay = la.CubeLayout(layer_map, nr_commands)
            # for debugging purposes place some cubes to see if the layout is correct
            lay.debug_layer_map()
        elif command_splits[0] == "NEED":
            # add on time axis
            # lay.extend_data_qubits_to_current_time()
            sets = lay.create_distillation()
            lay.configure_operation(*sets)
            # simples solution for the moment
            # without doing any optimisation is:
            # - each time a distillation is needed, a box is placed
            # - all the following gates are delayed until the distillation has finished
            # Get the 2D coordinates of the active patches
            filtered_active_patches = filter_active_patches(lay, patches_state, filter_out=[])
            lay.move_current_time_coordinate_to_max_from_coordinates(sets, patches_state, filtered_active_patches)
            # the distilled A state is available
            patches_state.add_active_patch("A")
        elif command_splits[0] == "MZZ":
            # and this is the route
            touch_sides = (["Z"] * (len(command_splits) - 1))
            qubit_list = command_splits[1:]
            sets = lay.create_route_between_qubits(qubit_list, patches_state, touch_sides)
            lay.configure_operation(*sets)
            filtered_active_patches = filter_active_patches(lay, patches_state, filter_out=qubit_list)
            lay.move_current_time_coordinate_to_max_from_coordinates(sets, patches_state, filtered_active_patches)
        elif command_splits[0] == "MXX":
            # for the moment no difference between MXX and MZZ
            touch_sides = (["X"] * (len(command_splits) - 1))
            qubit_list = command_splits[1:3]
            sets = lay.create_route_between_qubits(qubit_list, patches_state, touch_sides)
            lay.configure_operation(*sets)
            filtered_active_patches = filter_active_patches(lay, patches_state, filter_out=qubit_list)
            lay.move_current_time_coordinate_to_max_from_coordinates(sets, patches_state, filtered_active_patches)
        elif (command_splits[0] == "S") or (command_splits[0] == "V"):
            # I will tread S and V the same
            # for the moment not mark them with different colours
            # we need four patches in this method
            # two are ancilla, two are data
            # one of the data qubits (Q2) is moved on to an ancilla A3
            # A1 A2 A3
            # Q1 Q2
            # --------
            # A1 A2 Q2
            # Q1    A3
            # --------
            # QS QS Q2
            # QS QS
            # --------
            # A1 A2 Q2
            # QS    A3
            # --------
            # A1 A2 A3
            # QS Q2
            # add on time axis
            # lay.increase_current_time_coordinate()
            coordinates_all_active_patches = filter_active_patches(lay, patches_state, filter_out=[])
            sets = lay.create_s_gate(command_splits[1], patches_state, coordinates_all_active_patches)
        #
        #
        #
        # the following are time-depth zero operations
        # which, for the moment, are not explicitly drawn
        # NOTE(review): 'continue' here skips the A/ANCILLA release checks at
        # the bottom of the loop -- confirm that is intended for MX/MZ.
        elif command_splits[0] == "MX":
            continue
        elif command_splits[0] == "MZ":
            continue
        elif command_splits[0] == "H":
            # this adds a decorator to the patch
            # if the cell does not exist, the decorator cannot be added
            # coordinates of the data qubit
            qubit_string = lay.layer_map.get_circuit_qubit_name(command_splits[1])
            qub1_coord = lay.layer_map.get_qubit_coordinate_2d(qubit_string)
            span_set = [(*qub1_coord, lay.current_time_coordinate)]
            sets = (opc.OperationTypes.HADAMARD_QUBIT, span_set, [], [])
            lay.configure_operation(*sets)
            coordinates_all_active_patches = filter_active_patches(lay, patches_state, filter_out=command_splits[1:])
            lay.move_current_time_coordinate_to_max_from_coordinates(sets, patches_state, coordinates_all_active_patches)
        #
        # If this is a measurement that consumed the A state
        # then the state will not be available any more
        #
        if ("A" in command_splits) and command_splits[0].startswith("M"):
            patches_state.remove_active_patch("A")
        if ("ANCILLA" in command_splits) and command_splits[0].startswith("M"):
            patches_state.remove_active_patch("ANCILLA")
    # Visual Debug the layer map layout
    # lay.debug_layer_map()
    # Visual Debug the paths computed between ancilla patches
    # lay.debug_all_paths()
    """
    Estimate the resources
    """
    max_log_qubits = len(layer_map.get_potential_data_patches_coordinates_2d())
    max_log_qubits += len(layer_map.get_potential_ancilla_patches_coordinates_2d())
    # TODO: This is not really correct, because I need to send as parameter the depth of the geometry
    # TODO: Correct
    total_t_gates = commands.count("NEED A")
    res_values = qre.compute_physical_resources(total_t_gates, max_log_qubits)
    print("Resource estimation (qubits, time): ", res_values)
    # """
    # Write the layout to disk - for visualisation purposes
    # """
    # v_layout = vla.VisualiseLayout()
    # json_result = v_layout.visualise_cube(lay, remove_noop=True)
    # write_json(json_result)
def filter_active_patches(lay, patches_state, filter_out=None):
    """Return names of active patches, excluding circuit qubits in *filter_out*.

    Args:
        lay: Layout whose layer_map translates circuit qubit ids to patch names.
        patches_state: Tracker exposing get_all_active_patches().
        filter_out: Optional iterable of circuit qubit ids to exclude.

    Returns:
        list: Names of active patches whose name is not mapped from *filter_out*,
        in the order patches_state yields them.
    """
    # A None default replaces the original mutable default `filter_out=[]`
    # (the classic shared-mutable-default pitfall).
    if filter_out is None:
        filter_out = []
    excluded_names = {lay.layer_map.get_circuit_qubit_name(x) for x in filter_out}
    return [
        key
        for key in patches_state.get_all_active_patches()
        if key not in excluded_names
    ]
if __name__ == "__main__":
    # try:
    # herr_interface.herr_write_file_1()
    # except:
    # pass
    main()
|
from graphgallery import functional as gf
# Per-backend registries of attacker implementations.
TensorFlow = gf.Registry("TensorFlow-Attacker")
PyTorch = gf.Registry("PyTorch-Attacker")
# Attackers shared by every backend.
Common = gf.Registry("Common-Attacker")
# Backend name -> backend-specific registry.
MAPPING = {"tensorflow": TensorFlow,
           "pytorch": PyTorch}
|
import sys
import shutil
import socket
from PySide import QtCore, QtGui
from zeroconf import ServiceBrowser, Zeroconf
from gierzwaluw.server import FileServer
from gierzwaluw.client import Client
class GUIListener(QtCore.QObject):
files = QtCore.Signal(object)
def __init__(self):
QtCore.QObject.__init__(self)
self.services = {}
@QtCore.Slot()
def check(self):
files = []
for name, addr in self.services.items():
c = Client(addr)
filename = c.poll()
if filename:
files.append((addr, filename))
self.files.emit(files)
def remove_service(self, zeroconf, type, name):
self.services.pop(name)
def add_service(self, zeroconf, type, name):
info = zeroconf.get_service_info(type, name)
if info:
addr = "http://%s:%d" % (socket.inet_ntoa(info.address), info.port)
self.services[name] = addr
class ListenerThread(QtCore.QThread):
    """Hosts a GUIListener plus a periodic polling timer on its own thread."""
    def __init__(self):
        QtCore.QThread.__init__(self)
        self.listener = GUIListener()
        # Make the listener's slots execute on this thread's event loop.
        self.listener.moveToThread(self)
    def run(self):
        timer = QtCore.QTimer()
        timer.timeout.connect(self.listener.check)
        timer.start(1000*60)  # poll peers once a minute
        self.exec_()  # run the thread-local Qt event loop until quit
class ServerThread(QtCore.QThread):
    """Runs the blocking FileServer on a dedicated Qt thread."""
    def __init__(self):
        QtCore.QThread.__init__(self)
        # The GUI wires st.server.set_download / st.server.uploaded later.
        self.server = FileServer()
    def run(self):
        # QThread entry point: serves files until the process exits.
        self.server.start()
class Peer(QtGui.QListWidgetItem):
    """List item showing a shared filename; remembers the peer's address."""
    def __init__(self, filename, addr):
        QtGui.QListWidgetItem.__init__(self, filename)
        self.addr = addr  # "http://host:port" of the offering peer
class PeerWindow(QtGui.QApplication):
    """Tray-icon application that shows peers' shared files in a popup.

    The popup window holds the peer list, a download progress bar, a
    "Share..." button and a "Quit" button; most signal wiring is done by
    the caller after construction.
    """

    # Emitted with the path chosen in the "Share..." open dialog.
    opened = QtCore.Signal(object)

    def __init__(self):
        QtGui.QApplication.__init__(self, sys.argv)
        self.icon = QtGui.QSystemTrayIcon(QtGui.QIcon('static/swallow.png'), self)
        # Popup flag: the window closes itself when it loses focus.
        self.window = QtGui.QMainWindow(flags=QtCore.Qt.Popup)
        frame = QtGui.QFrame(self.window)
        self.window.setCentralWidget(frame)
        layout = QtGui.QVBoxLayout(frame)
        self.peers = QtGui.QListWidget(self.window)
        layout.addWidget(self.peers)
        self.progress = QtGui.QProgressBar(self.window)
        layout.addWidget(self.progress)
        self.share = QtGui.QPushButton("Share...", self.window)
        layout.addWidget(self.share)
        quit = QtGui.QPushButton("Quit", self.window)
        quit.clicked.connect(self.quit)
        layout.addWidget(quit)

    @QtCore.Slot()
    def open_file(self):
        # Fix: the slot takes no arguments, so the decorator is Slot(),
        # not Slot(str) as before.
        # NOTE(review): cancelling the dialog emits an empty filename;
        # confirm the server treats "" as "share nothing".
        (filename, _) = QtGui.QFileDialog.getOpenFileName()
        self.opened.emit(filename)

    @QtCore.Slot(object)
    def save_file(self, upload):
        """Ask where to store an incoming upload; skip if cancelled."""
        (filename, _) = QtGui.QFileDialog.getSaveFileName()
        if filename:
            upload.save(filename)

    @QtCore.Slot(object)
    def toggle(self, files):
        """Show/hide the popup near the tray icon.

        `files` is the activation reason forwarded by
        QSystemTrayIcon.activated and is not used here.
        """
        if self.window.isVisible():
            self.window.hide()
        else:
            self.window.show()
            height = self.window.geometry().height()
            width = self.window.geometry().width()
            icon = self.icon.geometry().center()
            desktop = self.desktop().availableGeometry()
            center = desktop.center()
            margin = 50
            # Place the popup in the screen corner closest to the tray icon.
            if icon.x() > center.x() and icon.y() > center.y():  # bottom right icon
                self.window.move(desktop.right() - margin - width, desktop.bottom() - margin - height)
            elif icon.x() > center.x() and icon.y() < center.y():  # top right icon
                self.window.move(desktop.right() - margin - width, margin)
            elif icon.x() < center.x() and icon.y() < center.y():  # top left icon
                self.window.move(margin, margin)
            elif icon.x() < center.x() and icon.y() > center.y():  # bottom left icon
                self.window.move(margin, desktop.bottom() - margin - height)

    @QtCore.Slot(object)
    def set_peers(self, files):
        """Rebuild the peer list from (addr, filename) pairs."""
        self.peers.clear()
        for addr, filename in files:
            self.peers.addItem(Peer(filename, addr))

    @QtCore.Slot(object)
    def callback(self, item):
        """Download the clicked peer's file to a user-chosen location."""
        (filename, _) = QtGui.QFileDialog.getSaveFileName()
        if not filename:
            # Fix: the user cancelled the dialog; previously the download
            # was attempted with an empty target path.
            return
        c = Client(item.addr)
        c.progress.connect(self.progress.setValue)
        c.save(filename)
if __name__ == '__main__':
    app = PeerWindow()
    # Tray applications keep running even with no window open.
    app.setQuitOnLastWindowClosed(False)
    # Background threads: file server and zeroconf polling listener.
    st = ServerThread()
    st.start()
    lt = ListenerThread()
    lt.start()
    # Signal wiring: a tray click refreshes the peer list and toggles the
    # popup; clicking a list item downloads that peer's file.
    app.icon.activated.connect(lt.listener.check)
    lt.listener.files.connect(app.set_peers)
    app.icon.activated.connect(app.toggle)
    app.opened.connect(st.server.set_download)
    app.peers.itemClicked.connect(app.callback)
    app.share.clicked.connect(app.open_file)
    st.server.uploaded.connect(app.save_file)
    # Browse for peers announced over mDNS.
    zeroconf = Zeroconf()
    ServiceBrowser(zeroconf, "_http._tcp.local.", lt.listener)
    app.icon.show()
    app.exec_()
|
# source: https://github.com/BlistBotList/blist-wrapper/blob/bc0c0fe9afbea39993ccfa8b6d633c2b5be634c8/blist/errors.py
class AlexFlipnoteException(Exception):
    """Base class for every error raised by this API wrapper."""
    pass


class BadRequest(AlexFlipnoteException):
    """The API rejected the request (named after HTTP 400)."""
    pass


class NotFound(AlexFlipnoteException):
    """The requested resource was not found (named after HTTP 404)."""
    pass


class InternalServerError(AlexFlipnoteException):
    """The API failed on its side (named after HTTP 5xx)."""
    pass


class HTTPException(AlexFlipnoteException):
    """Generic HTTP failure carrying the raw response object.

    :param response: the HTTP response; only ``response.status`` is read.
    :param message: human-readable error description.
    """
    def __init__(self, response, message):
        # Fix: forward the message to Exception so that str(exc) and
        # exc.args carry it (previously both were empty).
        super().__init__(message)
        self.response = response
        self.status = response.status
        self.message = message
|
import warnings,os,sys
warnings.simplefilter('ignore')
import random,time,copy
from deap import base
from deap import creator
from deap import tools
from itertools import repeat
from Network import CNN_Network
from Network import CNN_Descriptor
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
import torchvision
from torchvision import transforms, datasets
import copy
import cv2
import numpy as np
from tqdm import tqdm
import json as json
from sklearn.preprocessing import MinMaxScaler
# === BEGIN DELETE AFTERWARDS ===
class Dataset_CNN():
    """Load a train/valid/test image folder tree into torch DataLoaders.

    Expected layout: ``<dir_name>/{train,valid,test}/<label>/*.jpg``.
    Images are read grayscale, resized to ``img_size`` squares, min-max
    scaled, and wrapped in shuffling DataLoaders.
    """
    # NOTE: these are class attributes, shared by every instance.
    TRAINING = ""
    VALIDATION = ""
    TESTING = ""
    LABELS = {}     # labels exactly as named in the folders
    NN_LABELS = {}  # labels remapped to start at 0 (sequence must have step 1)
    img_size = None
    batch_size = 1
    training_data = []
    validation_data = []
    testing_data = []

    def __init__(self, dir_name, img_size, batch_size):
        """Remember split directories, target image size and batch size."""
        self.TRAINING = dir_name + "/train/"
        self.VALIDATION = dir_name + "/valid/"
        self.TESTING = dir_name + "/test/"
        self.img_size = img_size
        self.batch_size = batch_size

    def get_NN_labels(self, json_file_str):
        """Build zero-based NN_LABELS from the labels JSON file.

        Only runs when LABELS is still empty; otherwise returns -1.
        :param json_file_str: path to a JSON mapping label -> class name.
        :return: the minimum original label (the offset subtracted to make
            labels zero-based), or -1 when LABELS was already populated.
        """
        # THIS FUNCTION HAS TO BE USED WHEN DEALING WITH THE CLASSIFICATION,
        # NOT WHEN OBTAINING THE DATA.
        if len(self.LABELS) == 0:
            with open(json_file_str) as json_file:
                self.LABELS = json.load(json_file)
            has_class_cero = False
            for label in self.LABELS:
                if int(label) == 0:
                    # Already zero-based: reuse the labels as-is.
                    self.NN_LABELS = copy.copy(self.LABELS)
                    has_class_cero = True
                    break
            _min_ = float('Inf')
            for label in self.LABELS:
                if int(label) < _min_:
                    _min_ = int(label)
            if not has_class_cero:
                # Shift every label down by the minimum so classes start at 0.
                for label in self.LABELS:
                    self.NN_LABELS[str((int(label) - _min_))] = self.LABELS[label]
            return _min_
        else:
            return -1

    def make_data(self, json_file_str, dtloader_str):
        """Read all splits from disk and build train/test DataLoaders.

        :param json_file_str: path to the labels JSON file.
        :param dtloader_str: base filename for (currently disabled) caching.
        """
        with open(json_file_str) as json_file:
            self.LABELS = json.load(json_file)
        # NOTE(review): LABELS is already populated here, so this call always
        # returns -1 and NN_LABELS is never rebuilt (passing the file object
        # instead of the path is therefore harmless) -- confirm intended.
        _min_ = self.get_NN_labels(json_file)
        # Optional per-split item caps (None = load everything).
        lim_1 = None
        lim_2 = None
        lim_3 = None
        count = 0
        count1 = 0  # unused counters kept for compatibility
        count2 = 0
        count3 = 0
        for label in self.LABELS:
            for f in os.listdir(self.TRAINING + label):
                if count == lim_1 and lim_1 != None:
                    break
                if "jpg" in f:
                    try:
                        img_path = "{}/{}/{}".format(self.TRAINING, label, f)
                        img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
                        img = cv2.resize(img, (self.img_size, self.img_size))
                        if _min_ == -1:
                            _label_ = copy.copy(int(label))
                            self.training_data.append([img, np.array(_label_)])
                        else:
                            _label_ = copy.copy(int(label) - _min_)
                            self.training_data.append([img, np.array(_label_)])
                    except Exception as e:
                        print("The following error occurred:\n"+str(e))
                        pass
                count += 1
            count = 0
            for f in os.listdir(self.VALIDATION + label):
                if count == lim_2 and lim_2 != None:
                    break
                if "jpg" in f:
                    try:
                        img_path = "{}/{}/{}".format(self.VALIDATION, label, f)
                        img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
                        img = cv2.resize(img, (self.img_size, self.img_size))
                        if _min_ == -1:
                            _label_ = copy.copy(int(label))
                            self.validation_data.append([img, np.array(_label_)])
                        else:
                            _label_ = copy.copy(int(label) - _min_)
                            self.validation_data.append([img, np.array(_label_)])
                    except Exception as e:
                        print("The following error occurred:\n"+str(e))
                        pass
                count += 1
            count = 0
            for f in os.listdir(self.TESTING + label):
                if count == lim_3 and lim_3 != None:
                    break
                if "jpg" in f:
                    try:
                        img_path = "{}/{}/{}".format(self.TESTING, label, f)
                        img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
                        img = cv2.resize(img, (self.img_size, self.img_size))
                        if _min_ == -1:
                            _label_ = copy.copy(int(label))
                            self.testing_data.append([img, np.array(_label_)])
                        else:
                            _label_ = copy.copy(int(label) - _min_)
                            self.testing_data.append([img, np.array(_label_)])
                    except Exception as e:
                        print("The following error occurred:\n"+str(e))
                        pass
                count += 1
        # Min-max scale the raw training images, then pair with labels.
        values = [i[0] for i in self.training_data]
        scaler = MinMaxScaler()
        scaler.fit(values)
        values = scaler.transform(values)
        train = [[torch.from_numpy(values[i]).double(),
                  torch.from_numpy(np.array(self.training_data[i][1])).long()]
                 for i in range(len(self.training_data))]
        values = [i[0] for i in self.testing_data]
        scaler = MinMaxScaler()
        scaler.fit(values)
        values = scaler.transform(values)
        # FIX: the original wrote np.array(i[1]) where i is an int index,
        # which raises TypeError; the intended value is the stored label
        # of testing sample i (mirrors the train list above).
        test = [[torch.from_numpy(values[i]).double(),
                 torch.from_numpy(np.array(self.testing_data[i][1])).long()]
                for i in range(len(self.testing_data))]
        self.training_data = torch.utils.data.DataLoader(train, batch_size=int(self.batch_size),
                                                         shuffle=True, drop_last=True)
        # Validation loader creation was disabled in the original and stays so.
        self.testing_data = torch.utils.data.DataLoader(test, batch_size=int(self.batch_size),
                                                        shuffle=True, drop_last=True)
        # torch.save(self.training_data, dtloader_str + "_tr")
        # torch.save(self.validation_data, dtloader_str + "_va")
        # torch.save(self.testing_data, dtloader_str + "_te")

    def load_data(self, dtloader_str):
        """Load DataLoaders previously cached by make_data."""
        self.training_data = torch.load(dtloader_str + "_tr")
        self.validation_data = torch.load(dtloader_str + "_va")
        self.testing_data = torch.load(dtloader_str + "_te")
# === for data reading ===
# a = [random.randint(1,10) for i in range(8)]
# b = [random.randint(1,5) for i in range(7)]
# c = [random.randint(1,10) for i in range(11)]
# d = [random.randint(1,5) for i in range(10)]
# index_a = random.randint(0,7)
# while index_a % 2 !=0:
# index_a = random.randint(0,7)
# index_b = random.randint(0,6)
# index_c = random.randint(0,10)
# # while index_c % 2 !=0:
# # index_c = random.randint(0,10)
# index_d = random.randint(0,9)
# # print("Lista de a de tamaรฑo 8 ", a)
# # print("Lista de b de tamaรฑo 7 ", b)
# # print(index_a, index_b)
# # print(a[0:index_a])
# # print(b[0:(index_a-1)])
# print("Lista de c de tamaรฑo 11 ", c)
# print("Lista de d de tamaรฑo 10 ", d)
# print(c[0:index_c])
# print(d[0:(index_c-1)])
# # print(index_a, index_b, index_c, index_d)
# exit()
# Rebuild the network from a saved architecture description.
n_network = CNN_Network(CNN_Descriptor())
NN_info_file = "Arquitecturas/NN_MNIST_095.txt"
n_network.load_NN_info(NN_info_file)
# === BEGIN MNIST ===
transform = transforms.Compose([  # torchvision.transforms.Resize(50),
    transforms.ToTensor(), transforms.Normalize((0.5, ), (0.5,))])
# === BEGIN MNIST ===
train = datasets.MNIST('', train=True, download=True,
                       transform=transform)
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# NOTE(review): valid and test reuse the same MNIST test split.
valid = datasets.MNIST('', train=False, download=True,
                       transform=transform)
test = datasets.MNIST('', train=False, download=True,
                      transform=transform)
# === END MNIST ===
# === BEGIN CIFAR10 === (alternative dataset, disabled)
# train = datasets.CIFAR10('', train=True, download=True, transform=transform)
# valid = datasets.CIFAR10('', train=False, download=True, transform=transform)
# test = datasets.CIFAR10('', train=False, download=True, transform=transform)
# === END CIFAR10 ===
# === BEGIN FashionMNIST === (alternative dataset, disabled)
# train = datasets.FashionMNIST('', train=True, download=True, transform=transform)
# valid = datasets.FashionMNIST('', train=True, download=True, transform=transform)
# test = datasets.FashionMNIST('', train=True, download=True, transform=transform)
# === END FashionMNIST ===
# === BEGIN Cut ===
# Randomly subsample the datasets so the run stays fast.
train = torch.utils.data.Subset(train, np.random.choice(len(train), 10000, replace=False))
valid = torch.utils.data.Subset(valid, np.random.choice(len(valid), 200, replace=False))
test = torch.utils.data.Subset(test, np.random.choice(len(test), 200, replace=False))
trainset = torch.utils.data.DataLoader(train, batch_size=n_network.descriptor.batch_size, shuffle=True)
validset = torch.utils.data.DataLoader(valid, batch_size=n_network.descriptor.batch_size, shuffle=True)
testset = torch.utils.data.DataLoader(test, batch_size=n_network.descriptor.batch_size, shuffle=True)
# === END Cut ===
shuffle = True
# === for the genetic algorithm ===
# NOTE(review): the script requires two CLI arguments (dataset folder and
# labels JSON) even though the custom Dataset_CNN path below is disabled.
file = sys.argv[1]
json_file = sys.argv[2]
makenew = True
dtloader_str = "Db_cats_dogs"
# dataset = Dataset_CNN(file, n_network.descriptor.input_dim, n_network.descriptor.input_dim)
# if makenew: dataset.make_data(json_file, dtloader_str)
# else: dataset.load_data(dtloader_str)
start_time = time.time()
n_network.training_CNN(trainset)
with torch.no_grad():
    print("The testing gives an accuracy of ", str( round(100.0 * n_network.testing_CNN(testset), 3 ) ) + " %")
print("Execution time: ", time.time()-start_time, " seconds | ", (time.time()-start_time)/60, " minutes" )
from django.db import models
class Foo(models.Model):
    """Simple model with a title and a free-form description."""
    # Generous 1000-character limit for the title.
    title = models.CharField(max_length=1000)
    description = models.TextField()
class Bar(models.Model):
    """Model holding a single integer age field."""
    age = models.IntegerField()
|
'''
Time Complexity:
O(n^2) (when n = length of the given array)
Space Complexity:
O(1)
Did this code successfully run on LeetCode?:
Yes
Problems faced while coding this:
None
Approach:
Two Pointer Approach.
'''
class Solution:
    """LeetCode 15 (3Sum): all unique triplets that sum to zero.

    Sorts the input and anchors each candidate smallest element, then runs
    a two-pointer sweep over the remaining sorted suffix. O(n^2) time,
    O(1) extra space.
    """

    def __init__(self):
        self.target = 0   # triplets must sum to this value
        self.nums = []
        self.result = []

    def threeSum(self, nums: list) -> list:
        # Fix: the original annotated with typing.List without importing it,
        # raising NameError when the method is defined; builtin `list` works.
        self.nums = nums
        self.nums.sort()
        # Fix: reset so repeated calls on one instance don't accumulate.
        self.result = []
        self.generate_triplets()
        return self.result

    def generate_triplets(self):
        """Anchor each smallest element and search its pairs."""
        for i in range(len(self.nums) - 2):
            # Sorted input: once the anchor exceeds the target, no triplet
            # of the (larger) remaining values can reach it.
            if self.nums[i] > self.target:
                return
            # Skip duplicate anchors to keep triplets unique.
            if i > 0 and self.nums[i] == self.nums[i - 1]:
                continue
            self.find_pairs(self.nums[i], i + 1, len(self.nums) - 1)

    def find_pairs(self, first_num, low, high):
        """Two-pointer sweep for pairs completing first_num to the target."""
        while low < high:
            total = first_num + self.nums[low] + self.nums[high]
            if total == self.target:
                self.result.append([first_num, self.nums[low], self.nums[high]])
                low += 1
                high -= 1
                # Skip duplicates on both ends.
                while (low < high and self.nums[low] == self.nums[low - 1]):
                    low += 1
                while (low < high and self.nums[high] == self.nums[high + 1]):
                    high -= 1
            elif total > self.target:
                high -= 1
            else:
                low += 1
|
from pydub import AudioSegment
import os
def audio_folder_split(segment_len, audio_folder, msg=True):
    """Split every .wav file in `audio_folder` into fixed-length segments.

    Creates `<audio_folder>/split_audios/<name>/` per input file and writes
    `<name>_splitK.wav` chunks of `segment_len` milliseconds.

    :param segment_len: segment length in milliseconds.
    :param audio_folder: folder containing the .wav files.
    :param msg: when True, ask before creating more than 100 segments
        for a single file.
    """
    files = os.listdir(audio_folder)
    splitdir = "split_audios"
    # If the folder already exists, append the first free numeric suffix.
    # Fix: the original rebuilt the name with splitdir[:-1] + str(i), which
    # corrupts the name once i reaches 10 ("split_audios111", ...).
    if splitdir in files:
        i = 1
        while splitdir + str(i) in files:
            i += 1
        splitdir = splitdir + str(i)
    os.mkdir(audio_folder + "/" + splitdir)
    # Loop through each .wav audio file.
    for file in files:
        if file[-4:].lower() != '.wav':
            continue
        file_path = audio_folder + "/" + file
        sound = AudioSegment.from_file(file_path)
        size = len(sound)
        # Ceiling division: a trailing partial segment still counts.
        nsegs = int(size / segment_len) + (size % segment_len > 0)
        # Just to prevent accidentally creating 100+ files.
        if nsegs > 100 and msg:
            print('File "{}" will make {} files, skip this one? (y/n)'.format(file, nsegs))
            if input().lower() in ('y', 'yes'):
                continue
        newdir = audio_folder + "/" + splitdir + "/" + file[:-4] + "/"
        os.mkdir(newdir)
        # Split the file into equal-length segments.
        for x in range(nsegs):
            seg_sound = sound[x * segment_len:(x + 1) * segment_len]
            path = newdir + file[:-4] + "_split" + str(x) + ".wav"
            seg_sound.export(path, format="wav")
    print("Files successfully split into {} folder!".format(splitdir))
# loc = '/Users/jeremy.meyer/Desktop/wyze_camera/audio'
# Interactive driver: ask for a segment length and a folder, then split.
print("How long do you want each audio segment? (milliseconds)")
while True:
    try:
        seg_len = int(input())
        if seg_len > 0:
            break
        else:
            print("Please provide a positive integer!")
    except ValueError:
        # Fix: int() raises ValueError on non-numeric text; the original
        # caught TypeError, so bad input crashed the script.
        print("Please provide an integer!")
print("Location of the folder with all audio files? (Will split all .wav files in folder)")
loc = input()
audio_folder_split(seg_len, loc)
|
# =================================For Loop=================================
# (Despite the header, these examples demonstrate `while` loops.)
count = 0
while count < 10:
    print(count)
    count+=1
# else in while loop
# while <condition>:
#     <statement>
# else: This part will only be called if the loop is not break in between.
#     <statement>
count = 0
while count < 5:
    if count == 7:
        # NOTE(review): count stops at 5, so this break never fires and the
        # else branch below always runs in this demo.
        print("Need to break now")
        break  # if the IF statement is satisfied then it will lead to call of break statement and will miss else part
    print(count)
    count +=1
else:
    print("All ended well")
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from os import listdir
from os.path import join, isdir
import re
from flexp.browser.utils import message
from flexp.utils import get_logger, natural_sort, id_generator
log = get_logger(__name__)
class ToHtml(object):
    """Abstract class to print something to html."""
    def __init__(self, file_name_pattern=None, title=None):
        """
        :param file_name_pattern: optional regex; only matching files from
            the experiment folder are processed.
        :param title: section title; defaults to the subclass name.
        """
        self.experiment_path = None
        self.experiment_folder = None
        self.file_name_pattern = file_name_pattern
        self.title = title or self.__class__.__name__
        self.id = id_generator()  # unique id used by the toggle CSS classes
    def process(self, data):
        """Append new HTML content to data["html"]."""
        # Save data
        self.experiment_path = data["experiment_path"]
        self.experiment_folder = data["experiment_folder"]
        try:
            content = self.get_html()
        except Exception as e:
            # Render the error in place of the section instead of crashing.
            content = message(str(e))
        # Initialization
        # NOTE(review): class_name is passed but unused by the template below.
        data.setdefault("html", []).append("""
        <div class='toggle-{id}-all'>
        <h2 id='{title}'>{title}</h2>
        <div class='toggle-{id}-content'>
        {content}
        </div>
        <hr />
        </div>""".format(title=self.title, class_name=self.__class__.__name__,
                         content=content, id=self.id))
        # Process headers and scripts - allows modules to manipulate HEAD and
        # add various scripts at the end of BODY.
        self.process_header(data)
        self.process_scripts(data)
    def get_files(self, subfolder=""):
        """Recursively retrieves all files that satisfies function show_file().
        :param subfolder: string Relative path from experiment_folder
        :return: list of file paths (from experiment folder)
        """
        files_to_return = []
        for file_name in natural_sort(listdir(join(self.experiment_path, subfolder))):
            file_path_exp = join(subfolder, file_name)
            if isdir(join(self.experiment_path, file_path_exp)):
                files_to_return += self.get_files(file_path_exp)
            else:
                if (self.file_name_pattern is None or re.search(self.file_name_pattern, file_path_exp)) \
                        and (self.show_file(file_path_exp)):
                    files_to_return.append(file_path_exp)
        return files_to_return
    def show_file(self, file_path_exp):
        """Return true if the `file_path_exp` should be processed by this module.
        :param file_path_exp: {string} file path from experiment folder
        :return: bool
        """
        return True
    def get_html(self):
        """Method free to overwrite - has access to all files at once.
        By default it iterates over allowed files and pass them to `self.to_html` method.
        :return: {str} HTML content of the module
        """
        content = []
        for file_name in self.get_files():
            try:
                file_content = self.to_html(file_name)
            except Exception as e:
                # Per-file failures become inline messages; other files
                # still render.
                file_content = message(str(e))
            content.append(file_content)
        return u"\n".join(content)
    def process_scripts(self, data):
        """
        Allows modules to access SCRIPTS section in the generated HTML and
        modify it. The scripts are supposed to be stored in "scripts" key of
        data as data["scripts"]["namespace"] = list().
        Example:
        data["scripts"]["bokeh"]
        :param data: standard data dict
        :return:
        """
        pass
    def process_header(self, data):
        """
        Allows modules to access HEADER section in the generated HTML and
        modify it. The headers are supposed to be stored in "header" key of
        data as data["header"]["namespace"] = list().
        :param data: standard data dict
        :return:
        """
        pass
    def to_html(self, file_name):
        """Transform content of file under `file_name` into HTML string and return it."""
        raise NotImplementedError("Implement either to_html or overwrite get_html")
|
from collections import defaultdict, deque, Counter
from heapq import heapify, heappop, heappush
import math
from copy import deepcopy
from itertools import combinations, permutations, product, combinations_with_replacement
from bisect import bisect_left, bisect_right
import sys
def input():
    # Fast stdin line read; deliberately shadows the builtin (CP idiom).
    return sys.stdin.readline().rstrip()
def getN():
    # Read a single integer line.
    return int(input())
def getNM():
    # Read one line of space-separated integers as a map object.
    return map(int, input().split())
def getList():
    # Read one line of space-separated integers as a list.
    return list(map(int, input().split()))
def getArray(intn):
    # Read `intn` lines, one integer each.
    return [int(input()) for i in range(intn)]
mod = 10 ** 9 + 7        # common prime modulus
MOD = 998244353          # NTT-friendly prime modulus
sys.setrecursionlimit(1000000)
INF = float('inf')
eps = 10 ** (-10)
dx = [1, 0, -1, 0]       # 4-neighbour grid steps (paired with dy)
dy = [0, 1, 0, -1]
#############
# Main Code #
#############
# Phase DP (comments below reconstructed in English from mojibake-damaged
# Japanese originals).
"""
Array A = [a1, a2, ...] is processed left to right while the walk moves
through a fixed sequence of phases. dp[i][j]: minimum cost when position j
has been examined and the walk is currently in phase i. Each step either
stays in the same phase or transitions from an earlier phase:
dp[i][j] = min_or_max(dp[i][j - 1] + val1, dp[i - 1][j - 1])
"""
# "Minna no Procon" 2019 D - Ears
L = getN()
A = getArray(L)
dp = [[float('inf')] * 5 for i in range(L + 1)]
# Phase 0: zero segment (before the walk)
# Phase 1: even-count segment
# Phase 2: odd-count segment
# Phase 3: even-count segment
# Phase 4: zero segment (after the walk)
dp[0][0] = 0
# Cost to make a cell valid for an even segment: a 0 cell costs 2,
# otherwise 1 if the count is odd, 0 if already even.
def zero_e(a):
    if a == 0:
        return 2
    else:
        return (a % 2 != 0)
# Cost to make a cell valid for an odd segment: a 0 cell costs 1,
# otherwise 1 if the count is even, 0 if already odd.
def zero_o(a):
    if a == 0:
        return 1
    else:
        return (a % 2 == 0)
# Update dp[i + 1]; a transition from any phase k (k <= j) is allowed.
for i in range(L):
    # Phase 0: no transition into it
    dp[i + 1][0] = dp[i][0] + A[i]
    # Phase 1
    dp[i + 1][1] = min(dp[i][1], dp[i][0]) + zero_e(A[i])
    # Phase 2
    dp[i + 1][2] = min(dp[i][2], dp[i][1], dp[i][0]) + zero_o(A[i])
    # Phase 3
    dp[i + 1][3] = min(dp[i][3], dp[i][2], dp[i][1], dp[i][0]) + zero_e(A[i])
    # Phase 4
    dp[i + 1][4] = min(dp[i][4], dp[i][3], dp[i][2], dp[i][1], dp[i][0]) + A[i]
print(min(dp[L]))
"""
ๅ
จใฆใฎ้จๅ้ๅใฎไธญใฎๆกไปถใๆบใใ่ฆ็ด ใฎๆฐใๆฐใใ
้จๅ้ๅใฎๆฐใซใคใใฆใฏ
Aใฎj็ช็ฎใๅซใใ/ๅซใใชใใใใใใฏAใฎj็ช็ฎใไฝใฎ่ฆ็ด ใซๅคๆใใใ
ใงๅขใใฆใใ
ใใฎไธญใงi็ช็ฎใฎ่ฆ็ด ใพใงๆบใใใ้จๅ้ๅใใใใคใใใใๆฐใใ
ใคใพใ็ถๆ
ใฎ้ท็งป
"""
# ABC169 F - Knapsack for All Subsets
N, S = getNM()
A = getList()
MOD = 998244353
dp = [[0] * (S + 1) for i in range(N + 1)]
dp[0][0] = 1
for i in range(N):
# ใใใใใฎ้จๅๆๅญๅใฎๆฐใcompoundใใ
for j in range(S + 1):
dp[i + 1][j] += dp[i][j] * 2
dp[i + 1][j] %= MOD
# i็ช็ฎใ้จๅ้ๅใซๅซใใๅ ดๅใซใคใใฆใใซใฆใณใใ้ฒใใใฎใๅ ใใ
for j in range(S + 1):
if j - A[i] >= 0:
dp[i + 1][j] += dp[i][j - A[i]]
dp[i + 1][j] %= MOD
print(dp[N][S] % MOD)
|
# Description: This program simulates a two-player tic tac toe game.
import random
# Given a tic, tac, toe board determine if there is a winner
# Function inputs:
# board_list: an array of 9 strings representing the tic tac toe board
# move_counter: an integer representing the number of moves that have been made
# Returns a string:
# 'x' if x won
# 'o' if o won
# 'n' if no one wins
# 's' if there is a stalemate
def checkForWinner(board_list, move_counter):
    """Return 'x'/'o' if that letter has three in a line, 's' for a
    stalemate on a full board, or 'n' when the game is still open."""
    col = 0
    for row in range(0, 9, 3):
        # Three in a row
        if board_list[row] == board_list[row + 1] == board_list[row + 2]:
            return board_list[row]
        # Three in a column
        if board_list[col] == board_list[col + 3] == board_list[col + 6]:
            return board_list[col]
        # Main diagonal (top-left to bottom-right)
        if board_list[0] == board_list[4] == board_list[8]:
            return board_list[0]
        # Anti-diagonal (top-right to bottom-left)
        if board_list[2] == board_list[4] == board_list[6]:
            return board_list[2]
        col += 1
    # No winner: a filled board (9 moves) is a stalemate.
    if move_counter > 8:
        return "s"
    return "n"
# Print out the tic tac toe board
# Input: list representing the tic tac toe board
# Return value: none
def printBoard(board_list):
print()
counter = 0
for i in range(5):
if i % 2 == 0:
for j in range(5):
if j % 2 == 0:
print(board_list[counter], end=" ")
counter += 1
else:
print("|", end=" ")
else:
print("\n---------")
return
def isValidMove(board_list, spot):
    """A move is valid when the spot is on the board and not yet taken."""
    on_board = 0 <= spot <= 8
    return on_board and board_list[spot] not in ('o', 'x')
def updateBoard(board_list, spot, player_letter):
    """Replace the chosen spot's digit with the player's letter, in place."""
    position = board_list.index(str(spot))
    board_list[position] = player_letter
    return board_list
def playGame():
    """Run one full game of tic tac toe: PvP or vs. a random-move computer."""
    board_list = ["0", "1", "2", "3", "4", "5", "6", "7", "8"]
    print("Welcome to Tic Tac Toe!")
    mode = str(input("Please select a game mode:\n1) Player vs. Player\n2) Player vs. Computer\n>"))
    if (mode != "1") and (mode != "2"):
        print("Invalid input, please try again.")
        mode = input("Please select a game mode:\n1) Player vs. Player\n2) Player vs. Computer\n>")
    count = 0
    playerLet = ["x", "o"]
    chooseLet = input("Please select your letter:\n'x' plays first by default\n'o' plays second\n>").lower()
    if chooseLet not in playerLet:
        print("Invalid input, please try again.")
        chooseLet = input("Please select your letter:\n'x' plays first by default\n'o' plays second\n>").lower()
    while checkForWinner(board_list, count) == "n":
        let = count % 2
        player = playerLet[let]
        printBoard(board_list)
        if (mode == "2") and (chooseLet != player):
            # Computer turn: random spot until a free one is found.
            spot = random.choice(range(9))
            while not isValidMove(board_list, spot):
                spot = random.choice(range(9))
            print("\nComputer " + player + ", pick a spot: " + str(spot))
        else:
            spot = int(input("\nPlayer " + player + ", pick a spot: "))
            while not isValidMove(board_list, spot):
                print("Invalid move, please try again.")
                # Fix: the re-prompt must convert to int, otherwise
                # isValidMove compares a str against ints and raises
                # TypeError (also fixes the "PLayer" typo).
                spot = int(input("Player " + player + ", pick a spot: "))
        updateBoard(board_list, spot, player)
        count += 1
    printBoard(board_list)
    print("Game over!")
    winner = checkForWinner(board_list, count)
    if winner == "s":
        print("Stalemate reached.")
    else:
        if mode == "2" and winner != chooseLet:
            print("Computer is the winner!")
        else:
            # Fix: announce the letter that actually won; in PvP mode the
            # original always printed chooseLet even when the other
            # player won.
            print("Player " + winner + " is the winner!")
def main():
    """Keep starting games until the player declines another round."""
    playGame()
    while True:
        again = input("Would you like to play another round? (y/n): ").lower()
        if again != "y":
            print("Goodbye!")
            break
        playGame()

main()
# @Time :2019/7/7 21:39
# @Author :jinbiao
"""
Mutual conversion between data types.
"""
# Python data types: int, str, list, tuple, dict, set; the immutable ones
# are int, str and tuple.
# [int -> str]
number = 10  # bind 10 to number; number is an int
num_to_str = str(number)
print("number็ๆฐๆฎ็ฑปๅๆฏ{},num_to_str็ๆฐๆฎ็ฑปๅๆฏ{}".format(type(number), type(num_to_str)))
# [int -> list]
num_to_list = []
num_to_list.append(number)
print(num_to_list)
# # [str -> list]
# one_str = "abcdefg"
# str_to_list = list(one_str)
# print(str_to_list)
#
# # Split on a separator with split(); returns the list of pieces.
# str_to_list1 = one_str.split("d")
# print(str_to_list1)
#
# # Build a list by indexing the characters you want.
# str_to_list2 = [one_str[0], one_str[1]]
# print(str_to_list2)
#
# # [list -> str]
# # Join the list items with str.join; every item must already be a str.
# one_list = ["hello", "python", "3"]
# list_to_str = "".join(one_list)
# print(list_to_str)
#
# # Concatenate item by item via indexing/iteration.
# list_to_str1 = ""
# for i in one_list:
#     list_to_str1 += i
# print(list_to_str1)
#
# # [list -> dict]
# # dict.fromkeys uses the list items as keys; values default to None
# # (a shared default value can also be given).
# one_list = ["number1", "number2", "number3"]
# one_dict = dict.fromkeys(one_list)
# print(one_dict)
#
# # With two lists, zip pairs the elements one-to-one into key/value tuples.
# one_list = ["number1", "number2", "number3"]
# two_list = [1, 2, 3]
# one_zip = zip(one_list, two_list)  # zip yields nested tuples
# one_dict = dict(list(one_zip))
# print(one_dict)
#
# one_list = [(1, 4), (2, 5), (3, 6)]
# print(dict(one_list))
#
# # A list of [key, value] pairs also converts with dict().
# one_list = ["number1", 1]
# two_list = ["number2", 2]
# three_list = [one_list, two_list]
# print(three_list)
# one_dict = dict(three_list)
# print(one_dict)
#
# # [dict -> list]
# # list() over the dict keys.
# one_dict = {"number1": 1, "number2": 2, "number3": 3}
# one_list = list(one_dict.keys())
# print(one_list)
#
# # list() over the dict values.
# two_list = list(one_dict.values())
# print(two_list)
#
# # items() returns iterable (key, value) tuples.
# three_list = one_dict.items()
# print(list(three_list))
#
# # Build an iterable list of [key, value] lists.
# four_list = []
# for k, v in one_dict.items():
#     four_list.append([k, v])
# print(four_list)
#!python3
from flask import Flask, request, jsonify, redirect, url_for, session
from flask_cors import cross_origin, CORS
from datetime import timedelta
from pyrebase import pyrebase
# SECURITY NOTE(review): Firebase credentials and the Flask secret key are
# hard-coded in source; move them to environment variables / secret storage.
firebaseConfig = {
    'apiKey': "AIzaSyDe6g46cEfOWkyJpXXPtpnihper_Z60n0Q",
    'authDomain': "traindatabase-c33ac.firebaseapp.com",
    'projectId': "traindatabase-c33ac",
    'storageBucket': "traindatabase-c33ac.appspot.com",
    'messagingSenderId': "978921770047",
    'appId': "1:978921770047:web:bdd7ce0d3b1e987231fc8a",
    'measurementId': "G-RBYQ8DX7TF",
    'databaseURL': "https://traindatabase-c33ac-default-rtdb.firebaseio.com/"
}
firebase = pyrebase.initialize_app(firebaseConfig)
auth = firebase.auth()        # authentication handle
db = firebase.database()      # realtime database handle
storage = firebase.storage()  # cloud storage handle
# temporarily replace quote function
# there is an error in the pyrebase library that encodes quotes and special
# characters incorrectly to the url, which has not been patched yet;
# the function and assignment below disable that encoding until the
# library is patched.
def noquote(s):
    return s
pyrebase.quote = noquote
app = Flask(__name__)
app.secret_key = 'ejIk28Ik3hhUUEik620ssnYYe78bbneYQ092'
app.permanent_session_lifetime = timedelta(days=5)  # sessions live 5 days
cors = CORS(app, resources={r"/*": {"origins": "*"}})  # allow all origins
# health check required for deployment to services like GAE, which occasionally query '/' endpoint to check if server is alive
# health check required for deployment to services like GAE, which
# occasionally query the '/' endpoint to check if the server is alive
@app.route('/', methods=['GET', 'POST'])
def health_check():
    """Liveness probe: always answers 200 with a tiny JSON payload."""
    return jsonify({
        'headers': {'Access-Control-Allow-Origin': '*'},
        'alive': 'alive'
    }), 200
@app.route('/index', methods=['GET'])
def index():
    """Return session info for the current user, refreshing the id token."""
    # check if user session is still alive
    if 'uuid' in session:
        # refresh id token in case it has expired
        # NOTE(review): /api/login stores the token under 'refresh_token',
        # not 'refreshToken' -- one of the two keys looks wrong; confirm.
        access_token = auth.refresh(session['refreshToken'])['idToken']
        # update session id token
        session['access_token'] = access_token
        payload = {
            'headers': {'Access-Control-Allow-Origin': '*'},
            'access_token': access_token,
            'uuid': session['uuid'],
            'name': session['name'],
            'email': session['email'],
            # NOTE(review): url_for expects an endpoint *name*, not a URL
            # path -- url_for('/api/user') will raise BuildError at runtime.
            'redirect_url': url_for('/api/user'),
            'isLoggedIn': True
        }
        return jsonify(payload), 200
    else:
        # No live session: return an empty, logged-out payload.
        payload = {
            'headers': {'Access-Control-Allow-Origin': '*'},
            'access_token': '',
            'uuid': '',
            'name': '',
            'email': '',
            'redirect_url': '',
            'isLoggedIn': False
        }
        return jsonify(payload), 200
@app.route('/api/login', methods=['POST'])
@cross_origin()
def login():
    """Authenticate a user against Firebase and populate the session.

    Expects JSON {"email": ..., "password": ...}; on success returns the
    id token, user identity and designation (manager/trainee/unassigned).
    """
    # header required to send information between servers cross origin
    header = {'Access-Control-Allow-Origin': '*'}
    try:
        req = request.get_json(force=True)
    except Exception:
        return jsonify({'headers': header, 'msg': 'Missing JSON'}), 400
    email = req.get('email')
    password = req.get('password')
    if not email:
        return jsonify({'headers': header, 'msg': 'email is missing'}), 400
    if not password:
        return jsonify({'headers': header, 'msg': 'Password is missing'}), 400
    try:
        user = auth.sign_in_with_email_and_password(email, password)
    except Exception:
        # Fix: url_for takes the endpoint (function) name, not a path;
        # url_for('api/signup') raised BuildError.
        return jsonify({'headers': header, 'route': url_for('signup')})
    # Session variables keep user info available across backend requests.
    # Fix: 'permamnent' was a typo, silently setting an unused attribute
    # instead of making the session permanent.
    session.permanent = True
    session['email'] = email
    user_data = db.child('Users').order_by_child("email").equal_to(email).get().val()
    user_uuid = list(user_data.keys())[0]
    name = user_data[user_uuid]['name']
    # Probe the Managers/Trainees tables to derive the designation.
    try:
        db.child('Managers').order_by_key().equal_to(user_uuid).get().val()[user_uuid]
        designation = "manager"
    except Exception:
        try:
            db.child('Trainees').order_by_key().equal_to(user_uuid).get().val()[user_uuid]
            designation = "trainee"
        except Exception:
            designation = "unassigned"
    session['uuid'] = user_uuid
    session['name'] = name
    # Fix: /index reads session['refreshToken']; store the token under both
    # spellings so either reader keeps working.
    session['refresh_token'] = user['refreshToken']
    session['refreshToken'] = user['refreshToken']
    payload = {
        'headers': header,
        'access_token': user['idToken'],
        'uuid': user_uuid,
        'name': name,
        'email': email,
        'designation' : designation,
        'isLoggedIn': True
    }
    return jsonify(payload), 200
@app.route('/api/signup', methods=['POST'])
@cross_origin() #needed to allow the backend server to receive data from frontend server cross origin
def signup():
    """Create a new Firebase user and a matching record in the Users table.

    Expects JSON body with first_name, last_name, email, age, address,
    password, confirm_pass and optionally about_me. Sends a verification
    email, stores identity in the session, and returns the new user's uuid,
    id token and designation ("unassigned" until a manager assigns a role).
    """
    header = {'Access-Control-Allow-Origin': '*'}
    #receive data from front end
    try:
        req = request.get_json(force=True)
    except Exception:
        return jsonify({'headers': header, 'msg': 'Missing JSON'}), 400
    first_name = req.get('first_name')
    last_name = req.get('last_name')
    email = req.get('email')
    age = req.get('age')
    address = req.get('address')
    about_me = req.get('about_me')
    password = req.get('password')
    confirm_pass = req.get('confirm_pass')
    # Validate required fields; about_me is the only optional one.
    if not first_name:
        return jsonify({'headers': header, 'msg': 'First Name is missing'}), 400
    if not last_name:
        return jsonify({'headers': header, 'msg': 'Last Name is missing'}), 400
    if not email:
        return jsonify({'headers': header, 'msg': 'Email is missing'}), 400
    if not age:
        return jsonify({'headers': header, 'msg': 'Age is missing'}), 400
    if not address:
        return jsonify({'headers': header, 'msg': 'Address is missing'}), 400
    if not about_me:
        about_me = "To be completed"
    if not password:
        return jsonify({'headers': header, 'msg': 'Password is missing'}), 400
    if not confirm_pass:
        return jsonify({'headers': header, 'msg': 'Confirm Password is missing'}), 400
    if not password == confirm_pass:
        return jsonify({'headers': header, 'msg': 'Password and confirm password do not match.'}), 400
    try:
        user = auth.create_user_with_email_and_password(email, password)
    except Exception as e:
        attrs = vars(e)
        print(attrs)
        # Bug fix: url_for takes an endpoint (function) name, not a URL path --
        # url_for('/api/login') raised BuildError at runtime.
        return jsonify({'headers': header, 'msg': 'User already exists.', 'url': url_for('login')}), 400
    #deal with session variables
    session['user'] = f'{first_name} {last_name}'
    session['email'] = f'{email}'
    session['idToken'] = user['idToken']
    session['refreshToken'] = user['refreshToken']
    #send verification email
    auth.send_email_verification(user['idToken'])
    #put data into database
    designation = 'unassigned'
    data = {
        'name': f'{first_name} {last_name}',
        'email': email,
        'age': age,
        'address': address,
        'designation' : designation,
        'about_me' : about_me
    }
    db.child('Users').push(data)
    #retrieve user uuid by looking the freshly-pushed record up by email
    user_uuid = list(db.child('Users').order_by_child('email').equal_to(email).get().val().keys())[0]
    session['uuid'] = user_uuid
    session.permanent = True
    payload = {
        'headers': header,
        'access_token': user['idToken'],
        'uuid': user_uuid,
        'name': f'{first_name} {last_name}',
        'email': email,
        'designation' : designation,
        'isLoggedIn': True
    }
    return jsonify(payload), 200
@app.route('/api/logout', methods=['GET'])
def logout():
    """Log the current user out by discarding all session state."""
    # Bug fix: the original popped keys while iterating session.keys(),
    # which mutates the dict during iteration and raises RuntimeError in
    # Python 3. session.clear() removes everything atomically.
    session.clear()
    #return route for index and make logged in false
    payload = {
        'header': {'Access-Control-Allow-Origin': '*'},
        'url': url_for('.index'),
        'isLoggedIn': False
    }
    return jsonify(payload), 200
###
"""
Manager Methods
query methods - get manager's employees (done), get specific employee plan_id (done), get specific employee plan contents (done),
get template_ids (done), get specific template contents (done), get specific task in plan or template (done),
add methods - add task to specific employee plan (done), add task to specific template (done), add template to plan (done), add documentation/links to specific task (done),
update specific task's name/note/due date/duration (done)
remove methods - remove task from specific employee plan or specific template (written, but implementation depends on schema we choose), remove documentation/link/notes/due date from specific task
"""
###
# expect request to have the following fields: manager_uuid
# the manager_uuid field should contain the manager's unique id which was provided to the client upon the manager logging in
# this method uses the manager_uuid and queries the database to get a dictionary of trainee_name : trainee_uuid pairs of the manager's trainees
# and returns a json of the dictionary of {trainee_name : trainee_uuid} pairs
@app.route('/manager/get_trainees', methods=['GET'])
def manager_get_trainees():
    """Return the {trainee_name : trainee_uuid} map for the given manager's trainees."""
    cors_header = {'Access-Control-Allow-Origin': '*'}
    try:
        body = request.get_json(force=True)
    except:
        return jsonify({'headers': cors_header, 'msg': 'Missing JSON'}), 400
    manager_uuid = body.get('manager_uuid')
    if not manager_uuid:
        return jsonify({'headers': cors_header, 'msg': 'Missing manager uuid'}), 400
    # A manager with no trainees has no "Trainees" field; fall back to {}.
    try:
        trainees = db.child('Managers').order_by_key().equal_to(manager_uuid).get().val()[manager_uuid]["Trainees"]
    except:
        trainees = {}
    return jsonify({'headers': cors_header, 'trainees': trainees}), 200
# expect request to have the following fields: manager_uuid, trainee_uuid
# the manager_uuid field should contain the manager's unique id which was provided to the client upon the manager logging in
# the trainee_uuid field should contain the uuid of the trainee whose plan should be retrieved. This value can be gotten from the /manager/get_trainees endpoint
# this method uses the trainee_uuid and queries the Trainees database to get a string of the trainee's plan_id and return it
@app.route('/manager/get_trainee_training_plan_id', methods=['GET'])
def manager_get_trainee_training_plan_id():
    """Look up a trainee's plan_id in the Trainees table (empty string if none)."""
    cors_header = {'Access-Control-Allow-Origin': '*'}
    try:
        body = request.get_json(force=True)
    except:
        return jsonify({'headers': cors_header, 'msg': 'Missing JSON'}), 400
    manager_uuid = body.get('manager_uuid')
    trainee_uuid = body.get('trainee_uuid')
    if not manager_uuid:
        return jsonify({'headers': cors_header, 'msg': 'Missing manager uuid'}), 400
    if not trainee_uuid:
        return jsonify({'headers': cors_header, 'msg': 'Missing trainee uuid'}), 400
    # Trainees without an assigned plan have no 'plan' field; report "".
    try:
        plan_id = db.child('Trainees').order_by_key().equal_to(trainee_uuid).get().val()[trainee_uuid]['plan']
    except:
        plan_id = ""
    return jsonify({'headers': cors_header, 'plan_id': plan_id})
# expect request to have the following fields: manager_uuid, plan_id
# the manager_uuid field should contain the manager's unique id which was provided to the client upon the manager logging in
# the plan_id field should contain the plan_id of the plan which should be retrieved. This value can be gotten from the /manager/get_trainee_training_plan_id endpoint
# this method uses the plan_id and queries the Plans database to get the dictionary of training_id : training_name from the plan (both from the templates associated with
# the plan and trainings added directly to the plan). It returns a json of the dictionary of training_id : training_name pairs.
@app.route('/manager/get_trainee_training_plan_contents', methods=['GET'])
def manager_get_trainee_training_plan_contents():
    """Collect all {training_id : training_name} pairs in a plan.

    Merges trainings attached directly to the plan with trainings contributed
    by each template referenced from the plan.
    """
    cors_header = {'Access-Control-Allow-Origin': '*'}
    try:
        body = request.get_json(force=True)
    except:
        return jsonify({'headers': cors_header, 'msg': 'Missing JSON'}), 400
    manager_uuid = body.get('manager_uuid')
    plan_id = body.get('plan_id')
    if not manager_uuid:
        return jsonify({'headers': cors_header, 'msg': 'Missing manager uuid'}), 400
    if not plan_id:
        return jsonify({'headers': cors_header, 'msg': 'Missing plan id'}), 400
    # Start with trainings added directly to the plan; {} if the plan has none.
    try:
        trainings = db.child('Plans').order_by_key().equal_to(plan_id).get().val()[plan_id]['Trainings']
    except:
        trainings = {}
    # Fold in the trainings of every template attached to the plan.
    try:
        template_ids = db.child('Plans').order_by_key().equal_to(plan_id).get().val()[plan_id]['Templates']
        for template_id in template_ids.keys():
            trainings.update(db.child('Templates').order_by_key().equal_to(template_id).get().val()[template_id]['Trainings'])
    except:
        pass
    return jsonify({'headers': cors_header, 'trainings': trainings})
# expect request to have the following fields: manager_uuid
# the manager_uuid field should contain the manager's unique id which was provided to the client upon the manager logging in
# this method uses the manager_uuid to query the Managers database to get
# a dictionary of the manager's training template_id : template_name pairs. It returns a json of the dictionary of template_id : template_name pairs
@app.route('/manager/get_training_templates', methods=['GET'])
def get_training_templates():
    """Return the manager's {template_id : template_name} map (empty if none)."""
    cors_header = {'Access-Control-Allow-Origin': '*'}
    try:
        body = request.get_json(force=True)
    except:
        return jsonify({'headers': cors_header, 'msg': 'Missing JSON'}), 400
    manager_uuid = body.get('manager_uuid')
    if not manager_uuid:
        return jsonify({'headers': cors_header, 'msg': 'Missing manager uuid'}), 400
    try:
        templates = db.child('Managers').order_by_key().equal_to(manager_uuid).get().val()[manager_uuid]['Templates']
    except:
        templates = {}
    return jsonify({'headers': cors_header, 'templates': templates})
# expect the request to have the following fields: manager_uuid, template_id
# the manager_uuid field should contain the manager's unique id which was provided to the client upon the manager logging in
# the template_id should contain the template_id for the template the manager wants to retrieve. This value can be gotten from the /manager/get_training_templates endpoint
# this method uses the template_id to query the Templates database to get a dictionary of training_id : training_name. The method
# returns a json of the dictionary of template's manager, the template name, and the template's training_id : training_name pairs.
@app.route('/manager/get_training_template', methods=['GET'])
def get_training_template():
    """Fetch one template record (manager, name, trainings) by template_id."""
    cors_header = {'Access-Control-Allow-Origin': '*'}
    try:
        body = request.get_json(force=True)
    except:
        return jsonify({'headers': cors_header, 'msg': 'Missing JSON'}), 400
    manager_uuid = body.get('manager_uuid')
    template_id = body.get('template_id')
    if not manager_uuid:
        return jsonify({'headers': cors_header, 'msg': 'Missing manager uuid'}), 400
    if not template_id:
        return jsonify({'headers': cors_header, 'msg': 'Missing template id'}), 400
    try:
        template = db.child('Templates').order_by_key().equal_to(template_id).get().val()[template_id]
    except:
        template = {}
    return jsonify({'headers': cors_header, 'template': template})
# expect the request to have the following fields: manager_uuid, training_id
# the manager_uuid field should contain the manager's unique id which was provided to the client upon the manager logging in
# the training_id field should contain the training_id for which you want to get the training's contents.
# This value can be gotten from the /manager/get_training_template or /manager/get_trainee_training_plan_contents endpoints.
# this method uses the training_id to query the Trainings database to get the training contents. It returns the training
# contents as a json.
@app.route('/manager/get_training', methods=['GET'])
def manager_get_training():
    """Fetch one training record from the Trainings table by training_id."""
    cors_header = {'Access-Control-Allow-Origin': '*'}
    try:
        body = request.get_json(force=True)
    except:
        return jsonify({'headers': cors_header, 'msg': 'Missing JSON'}), 400
    manager_uuid = body.get('manager_uuid')
    training_id = body.get('training_id')
    if not manager_uuid:
        return jsonify({'headers': cors_header, 'msg': 'Missing manager uuid'}), 400
    if not training_id:
        return jsonify({'headers': cors_header, 'msg': 'Missing training id'}), 400
    try:
        training = db.child('Trainings').order_by_key().equal_to(training_id).get().val()[training_id]
    except:
        training = {}
    return jsonify({'headers': cors_header, 'training': training})
# expect the request to have the following fields: manager_uuid, template_name
# the manager_uuid field should contain the manager's unique id which was provided to the client upon the manager logging in
# the template_name is supplied by the client
# this method adds a new empty template to the manager and returns a dictionary of the newly created template_id : template_name
# NOTE: when a new template is made, it has no trainings associated with it and no Trainings field
@app.route('/manager/new_empty_template', methods=['POST'])
def new_empty_template():
    """Create a new empty training template and register it with the manager.

    Expects JSON body: manager_uuid, template_name, description.
    A freshly-created template has no trainings (no Trainings field).
    Returns {template_id : template_name} for the new template.
    """
    header = {'Access-Control-Allow-Origin': '*'}
    #receive data from front end
    try:
        req = request.get_json(force=True)
    except Exception:
        return jsonify({'headers': header, 'msg': 'Missing JSON'}), 400
    manager_uuid = req.get('manager_uuid')
    template_name = req.get('template_name')
    description = req.get('description')
    if not manager_uuid:
        return jsonify({'headers': header, 'msg': 'Missing manager uuid'}), 400
    if not template_name:
        return jsonify({'headers': header, 'msg': 'Missing template name'}), 400
    if not description:
        return jsonify({'headers': header, 'msg': 'Missing description'}), 400
    template_id = db.generate_key()
    data = {
        "manager" : manager_uuid,
        "template_name" : template_name,
        'description' : description
    }
    # make new empty template in Templates database
    db.child('Templates').child(template_id).set(data)
    # add template reference to manager:
    # since we can't append to database, fetch the manager's existing
    # {template_id : template_name} map and merge the new entry into it.
    try:
        # Bug fix: the query result is keyed by uuid, so it must be indexed
        # with [manager_uuid] before ['Templates'] (as every other endpoint
        # does). The original skipped that index, always raised KeyError, and
        # rebuilt the map from scratch with only the new template.
        templates = db.child('Managers').order_by_key().equal_to(manager_uuid).get().val()[manager_uuid]['Templates']
    except Exception:
        templates = {}
    templates.update({template_id : template_name})
    # write the merged map back; fall back to set() when the manager has no
    # Templates node yet
    try:
        db.child('Managers').child(manager_uuid).child('Templates').update(templates)
    except Exception:
        db.child('Managers').child(manager_uuid).child('Templates').set(templates)
    payload = {
        'headers' : header,
        'template' : {template_id : template_name}
    }
    return jsonify(payload)
# expect the request to have the following fields: manager_uuid, template_id, training_name, documentation_links, other_links, note, due_date, duration
# the manager_uuid field should contain the manager's unique id which was provided to the client upon the manager logging in
# the template_id can be gotten from the endpoint /manager/new_empty_template or /manager/get_training_templates
# the training_name is a string supplied by the user
# the documentation links are a dictionary of link : name supplied by the user
# the other links are a dictionary of link : name supplied by the user
# the note is a string supplied by the user
# the due_date is a string supplied by the user
# the duration is a string supplied by the user
# this method makes a new training and adds that training to the given template. It returns a dictionary of the newly created training_id : training_name
# or it returns a dictionary of {"failure" : "failure"}
@app.route('/manager/add_training_to_training_template', methods=['POST'])
def add_training_to_training_template():
    """Create a new training and attach it to an existing template.

    Expects JSON body: manager_uuid, template_id, training_name, note,
    due_date, duration (all required); documentation_links and other_links
    (optional dicts of {link : name}).
    Returns {training_id : training_name} on success, or
    {"failure" : "failure"} if any database step raised.
    """
    header = {'Access-Control-Allow-Origin': '*'}
    #receive data from front end
    try:
        req = request.get_json(force=True)
    except:
        return jsonify({'headers': header, 'msg': 'Missing JSON'}), 400
    manager_uuid = req.get('manager_uuid')
    template_id = req.get('template_id')
    training_name = req.get('training_name')
    documentation_links = req.get('documentation_links')
    other_links = req.get('other_links')
    note = req.get('note')
    due_date = req.get('due_date')
    duration = req.get('duration')
    # Validate required fields (links are optional and not checked).
    if not manager_uuid:
        return jsonify({'headers': header, 'msg': 'Missing manager uuid'}), 400
    if not template_id:
        return jsonify({'headers': header, 'msg': 'Missing template id'}), 400
    if not training_name:
        return jsonify({'headers': header, 'msg': 'Missing training name'}), 400
    if not note:
        return jsonify({'headers': header, 'msg': 'Missing note'}), 400
    if not due_date:
        return jsonify({'headers': header, 'msg': 'Missing due date'}), 400
    if not duration:
        return jsonify({'headers': header, 'msg': 'Missing duration'}), 400
    # make a new training
    try:
        training_key = db.generate_key()
        # New trainings always start incomplete ('complete' stored as the
        # string 'false', matching this codebase's convention).
        data = {
            'name' : training_name,
            'note' : note,
            'due_date' : due_date,
            'duration' : duration,
            'complete' : 'false'
        }
        db.child('Trainings').child(training_key).set(data)
        # Link dicts are written as child nodes only when supplied.
        if documentation_links:
            db.child('Trainings').child(training_key).child('Documentation_Links').set(documentation_links)
        if other_links:
            db.child('Trainings').child(training_key).child('Other_Links').set(other_links)
        # since we can't append to database, get the trainings currently in the template (will be a dict of training_id : training_name)
        # try except to handle case where template has no trainings in it to start
        try:
            trainings = db.child('Templates').order_by_key().equal_to(template_id).get().val()[template_id]["Trainings"]
        except:
            trainings = {}
        # set template data by combining the templates dict with a dict of the new training_id : training name to be added
        trainings.update({training_key : training_name})
        try:
            db.child('Templates').child(template_id).child("Trainings").update(trainings)
        except:
            db.child('Templates').child(template_id).child("Trainings").set(trainings)
        training = {training_key : training_name}
    except:
        # Any failure above (key generation, writes, merge) reports failure;
        # NOTE(review): a partially-created Trainings record may be left
        # behind if a later step fails -- confirm whether cleanup is needed.
        training = {"failure" : "failure"}
    payload = {
        'headers' : header,
        'training' : training
    }
    return jsonify(payload)
# expect the request to have the following fields: manager_uuid, template_id, template_name, plan_id
# the manager_uuid field should contain the manager's unique id which was provided to the client upon the manager logging in
# the template_id can be gotten from the endpoint /manager/new_empty_template or /manager/get_training_templates
# the template name is a string supplied by the user
# the plan id can be gotten from the endpoint /manager/get_trainee_training_plan_id
# this method adds the template_id to the templates field of the given plan and returns "success" or "failure"
@app.route('/manager/add_template_to_training_plan', methods=['POST'])
def add_template_to_training_plan():
    """Attach an existing template to a trainee's plan.

    Expects JSON body: manager_uuid, template_id, template_name, plan_id.
    Merges {template_id : template_name} into the plan's Templates map and
    returns "success" or "failure".
    """
    header = {'Access-Control-Allow-Origin': '*'}
    #receive data from front end
    try:
        req = request.get_json(force=True)
    except Exception:
        return jsonify({'headers': header, 'msg': 'Missing JSON'}), 400
    manager_uuid = req.get('manager_uuid')
    template_id = req.get('template_id')
    template_name = req.get('template_name')
    plan_id = req.get('plan_id')
    if not manager_uuid:
        return jsonify({'headers': header, 'msg': 'Missing manager uuid'}), 400
    if not template_id:
        return jsonify({'headers': header, 'msg': 'Missing template id'}), 400
    if not template_name:
        return jsonify({'headers': header, 'msg': 'Missing template name'}), 400
    if not plan_id:
        return jsonify({'headers': header, 'msg': 'Missing plan id'}), 400
    try:
        # since we can't append to database, get the template info currently
        # in the plan (a dict of template_id : template_name); {} when the
        # plan has no Templates field yet
        try:
            # Bug fix: the query result is keyed by plan_id, so it must be
            # indexed with [plan_id] before ['Templates'] (as the other plan
            # queries do). The original skipped that index, always raised
            # KeyError, and overwrote any previously attached templates.
            templates = db.child('Plans').order_by_key().equal_to(plan_id).get().val()[plan_id]['Templates']
        except Exception:
            templates = {}
        # merge the new template reference into the plan's Templates map
        templates.update({template_id : template_name})
        try:
            db.child('Plans').child(plan_id).child('Templates').update(templates)
        except Exception:
            db.child('Plans').child(plan_id).child('Templates').set(templates)
        response = "success"
    except Exception:
        response = "failure"
    payload = {
        'headers' : header,
        'response' : response
    }
    return jsonify(payload)
# expect the request to have the following fields: manager_uuid, plan_id, training_name, documentation_links, other_links, note, due_date, duration
# the manager_uuid field should contain the manager's unique id which was provided to the client upon the manager logging in
# the plan id can be gotten from the endpoint /manager/get_trainee_training_plan_id
# the training_name is a string supplied by the user
# the documentation links are a dictionary of link : name supplied by the user
# the other links are a dictionary of link : name supplied by the user
# the note is a string supplied by the user
# the due_date is a string supplied by the user
# the duration is a string supplied by the user
# this method makes a new training and adds that training to the given plan. It returns a dictionary of the {training_id : training_name}
# or it returns a dictionary of {"failure" : "failure"}
@app.route('/manager/add_training_to_training_plan', methods=['POST'])
def add_training_to_training_plan():
    """Create a new training and attach it directly to a trainee's plan.

    Expects JSON body: manager_uuid, plan_id, training_name, note, due_date,
    duration (all required); documentation_links and other_links (optional
    dicts of {link : name}).
    Returns {training_id : training_name} on success, or
    {"failure" : "failure"} if any database step raised.
    """
    header = {'Access-Control-Allow-Origin': '*'}
    #receive data from front end
    try:
        req = request.get_json(force=True)
    except:
        return jsonify({'headers': header, 'msg': 'Missing JSON'}), 400
    manager_uuid = req.get('manager_uuid')
    plan_id = req.get('plan_id')
    training_name = req.get('training_name')
    documentation_links = req.get('documentation_links')
    other_links = req.get('other_links')
    note = req.get('note')
    due_date = req.get('due_date')
    duration = req.get('duration')
    # Validate required fields (links are optional and not checked).
    if not manager_uuid:
        return jsonify({'headers': header, 'msg': 'Missing manager uuid'}), 400
    if not plan_id:
        return jsonify({'headers': header, 'msg': 'Missing plan id'}), 400
    if not training_name:
        return jsonify({'headers': header, 'msg': 'Missing training name'}), 400
    if not note:
        return jsonify({'headers': header, 'msg': 'Missing note'}), 400
    if not due_date:
        return jsonify({'headers': header, 'msg': 'Missing due date'}), 400
    if not duration:
        return jsonify({'headers': header, 'msg': 'Missing duration'}), 400
    # make a new training
    try:
        training_key = db.generate_key()
        # New trainings always start incomplete ('complete' stored as the
        # string 'false', matching this codebase's convention).
        data = {
            'name' : training_name,
            'note' : note,
            'due_date' : due_date,
            'duration' : duration,
            'complete' : 'false'
        }
        db.child('Trainings').child(training_key).set(data)
        # Link dicts are written as child nodes only when supplied.
        if documentation_links:
            db.child('Trainings').child(training_key).child('Documentation_Links').set(documentation_links)
        if other_links:
            db.child('Trainings').child(training_key).child('Other_Links').set(other_links)
        # since we can't append to database, get the trainings currently in the plan (will be a dict of training_id : training_name)
        # try except in case plan has no trainings in it yet
        try:
            trainings = db.child('Plans').order_by_key().equal_to(plan_id).get().val()[plan_id]['Trainings']
        except:
            trainings = {}
        # set plan's training data by combining the trainings dict with a dict of the new training_id : training_name to be added
        trainings.update({training_key : training_name})
        try:
            db.child('Plans').child(plan_id).child('Trainings').update(trainings)
        except:
            db.child('Plans').child(plan_id).child('Trainings').set(trainings)
        training = {training_key : training_name}
    except:
        # Any failure above (key generation, writes, merge) reports failure;
        # NOTE(review): a partially-created Trainings record may be left
        # behind if a later step fails -- confirm whether cleanup is needed.
        training = {"failure" : "failure"}
    payload = {
        'headers' : header,
        'training' : training
    }
    return jsonify(payload)
# expect the request to have the following fields: manager_uuid, training_id, documentation_links, other_links
# the manager_uuid field should contain the manager's unique id which was provided to the client upon the manager logging in
# the training_id can be gotten from the endpoint /manager/add_training_to_training_plan, /manager/add_training_to_training_template, or /manager/get_trainee_training_plan_contents
# the documentation links are a dictionary of link : name supplied by the user
# the other links are a dictionary of link : name supplied by the user
# manager_uuid and training_id are required. For the other fields, if there is nothing to be added, do not include them in the request.
# NOTE: this method APPENDS info to the relevant fields and returns either "success" or "failure"
@app.route('/manager/add_info_to_training', methods=['POST'])
def add_info_to_training():
    """APPEND documentation/other links to an existing training.

    Expects JSON body: manager_uuid, training_id (required);
    documentation_links and other_links (optional dicts of {link : name}).
    Omitted link fields are left untouched. Because the database layer has
    no append, each dict is read, merged in memory, then written back.
    Returns "success" or "failure".
    """
    header = {'Access-Control-Allow-Origin': '*'}
    #receive data from front end
    try:
        req = request.get_json(force=True)
    except:
        return jsonify({'headers': header, 'msg': 'Missing JSON'}), 400
    manager_uuid = req.get('manager_uuid')
    training_id = req.get('training_id')
    documentation_links = req.get('documentation_links')
    other_links = req.get('other_links')
    if not manager_uuid:
        return jsonify({'headers': header, 'msg': 'Missing manager uuid'}), 400
    if not training_id:
        return jsonify({'headers': header, 'msg': 'Missing training id'}), 400
    try:
        # since we can't append to database, need to get current list of things, then add to it
        training = db.child('Trainings').order_by_key().equal_to(training_id).get().val()[training_id]
        if documentation_links:
            try:
                original_documentation_links = training['Documentation_Links'] # will be a dict of {link : name}
            except:
                # training has no Documentation_Links node yet
                original_documentation_links = {}
            original_documentation_links.update(documentation_links) # combine the original dict with the new dict
            db.child('Trainings').child(training_id).child('Documentation_Links').update(original_documentation_links)
        if other_links:
            try:
                original_other_links = training['Other_Links'] # will be a dict of {link : name}
            except:
                # training has no Other_Links node yet
                original_other_links = {}
            original_other_links.update(other_links) # combine the original dict with the new dict
            db.child('Trainings').child(training_id).child('Other_Links').update(original_other_links)
        response = "success"
    except:
        # unknown training_id or a failed write ends up here
        response = "failure"
    payload = {
        'headers' : header,
        'response' : response
    }
    return jsonify(payload)
# expect the request to have the following fields: manager_uuid, training_id, training_name, note, due_date, duration
# the manager_uuid field should contain the manager's unique id which was provided to the client upon the manager logging in
# the training_id can be gotten from the endpoint /manager/add_training_to_training_plan, /manager/add_training_to_training_template, or /manager/get_trainee_training_plan_contents
# training_name is a string supplied by the user
# note is a string supplied by the user
# due_date is a string supplied by the user
# duration is a string supplied by the user
# manager_uuid and training_id are required. For the other fields, put an empty string in the fields that you don't want to overwrite
# NOTE: this method OVERWRITES info in the relevant fields and returns "success" or "failure"
# TODO: existing bug - if you change the training name, it'll be changed in the Trainings table but not the Plans table. Fixing it would require the plan_id so the name can also be updated in the Plans table.
@app.route('/manager/update_training_info', methods=['POST'])
def update_training_info():
    """OVERWRITE a training's name/note/due_date/duration; empty fields are skipped.

    Returns "success" or "failure".
    """
    cors_header = {'Access-Control-Allow-Origin': '*'}
    try:
        body = request.get_json(force=True)
    except:
        return jsonify({'headers': cors_header, 'msg': 'Missing JSON'}), 400
    manager_uuid = body.get('manager_uuid')
    training_id = body.get('training_id')
    if not manager_uuid:
        return jsonify({'headers': cors_header, 'msg': 'Missing manager uuid'}), 400
    if not training_id:
        return jsonify({'headers': cors_header, 'msg': 'Missing training id'}), 400
    try:
        # Map database field -> incoming value; falsy values mean "leave as is".
        field_updates = {
            'name' : body.get('training_name'),
            'note' : body.get('note'),
            'due_date' : body.get('due_date'),
            'duration' : body.get('duration')
        }
        for db_field, new_value in field_updates.items():
            if new_value:
                db.child('Trainings').child(training_id).update({db_field : new_value})
        response = "success"
    except:
        response = "failure"
    return jsonify({'headers' : cors_header, 'response' : response})
# expect the request to have the following fields: manager_uuid, template_id, training_id
# the manager_uuid field should contain the manager's unique id which was provided to the client upon the manager logging in
# the template_id can be gotten from the endpoint /manager/new_empty_template or /manager/get_training_templates
# the training_id can be gotten from the endpoint /manager/add_training_to_training_plan, /manager/add_training_to_training_template, or /manager/get_trainee_training_plan_contents
# this method uses the template_id and training_id to remove the training
# from the template and delete it from the database (safe, since it can only be pointed to from the template)
# Returns "success" or "failure"
@app.route('/manager/remove_training_from_template', methods=['POST'])
def remove_training_from_template():
    """Detach a training from a template and delete its Trainings record.

    Deleting the record is safe because template trainings are referenced
    only from their template. Returns "success" or "failure".
    """
    cors_header = {'Access-Control-Allow-Origin': '*'}
    try:
        body = request.get_json(force=True)
    except:
        return jsonify({'headers': cors_header, 'msg': 'Missing JSON'}), 400
    manager_uuid = body.get('manager_uuid')
    template_id = body.get('template_id')
    training_id = body.get('training_id')
    if not manager_uuid:
        return jsonify({'headers': cors_header, 'msg': 'Missing manager uuid'}), 400
    if not template_id:
        return jsonify({'headers': cors_header, 'msg': 'Missing template id'}), 400
    if not training_id:
        return jsonify({'headers': cors_header, 'msg': 'Missing training id'}), 400
    try:
        # Drop the reference from the template, then delete the training itself.
        db.child('Templates').child(template_id).child('Trainings').child(training_id).remove()
        db.child('Trainings').child(training_id).remove()
        response = "success"
    except:
        response = "failure"
    return jsonify({'headers' : cors_header, 'response' : response})
# expect the request to have the following fields: manager_uuid, plan_id, training_id
# the manager_uuid field should contain the manager's unique id which was provided to the client upon the manager logging in
# the plan id can be gotten from the endpoint /manager/get_trainee_training_plan_id
# the training_id can be gotten from the endpoint /manager/add_training_to_training_plan, /manager/add_training_to_training_template, or /manager/get_trainee_training_plan_contents
# this method uses the plan_id and training_id to remove the training
# from the plan and delete it from the database (safe, since it can only be pointed to from the plan)
# returns "success" or "failure"
@app.route('/manager/remove_training_from_plan', methods=['POST'])
def remove_training_from_plan():
    """Detach a training from a plan and delete its Trainings record.

    Deleting the record is safe because plan-level trainings are referenced
    only from their plan. Returns "success" or "failure".
    """
    cors_header = {'Access-Control-Allow-Origin': '*'}
    try:
        body = request.get_json(force=True)
    except:
        return jsonify({'headers': cors_header, 'msg': 'Missing JSON'}), 400
    manager_uuid = body.get('manager_uuid')
    plan_id = body.get('plan_id')
    training_id = body.get('training_id')
    if not manager_uuid:
        return jsonify({'headers': cors_header, 'msg': 'Missing manager uuid'}), 400
    if not plan_id:
        return jsonify({'headers': cors_header, 'msg': 'Missing plan id'}), 400
    if not training_id:
        return jsonify({'headers': cors_header, 'msg': 'Missing training id'}), 400
    try:
        # Drop the reference from the plan, then delete the training itself.
        db.child('Plans').child(plan_id).child('Trainings').child(training_id).remove()
        db.child('Trainings').child(training_id).remove()
        response = "success"
    except:
        response = "failure"
    return jsonify({'headers' : cors_header, 'response' : response})
# expect the request to have the following fields: manager_uuid
# the manager_uuid field should contain the manager's unique id which was provided to the client upon the manager logging in
# this method uses manager uuid to query the managers database to get the list of events then returns the list of events
@app.route('/manager/get_manager_events', methods=['GET'])
def get_manager_events():
    """Return the manager's Events dict ({} when none or lookup fails).

    Expects JSON body: manager_uuid.
    NOTE(review): a GET endpoint reading a JSON body is unusual — confirm
    clients actually send one.
    """
    header = {'Access-Control-Allow-Origin': '*'}
    # receive data from front end
    try:
        req = request.get_json(force=True)
    except Exception:  # narrowed from bare except
        return jsonify({'headers': header, 'msg': 'Missing JSON'}), 400
    manager_uuid = req.get('manager_uuid')
    if not manager_uuid:
        return jsonify({'headers': header, 'msg': 'Missing manager uuid'}), 400
    try:
        events = db.child('Managers').order_by_key().equal_to(manager_uuid).get().val()[manager_uuid]['Events']
    except Exception:  # missing node / unknown manager -> empty dict
        events = {}
    payload = {
        'headers' : header,
        'events' : events
    }
    return jsonify(payload)
###
"""
Trainee Methods
query methods - get plan_id (done), get task ids in plan (done), view specific task in plan or template (done), view events (done)
update methods - mark task complete (done)
"""
###
# expect request to have the following fields: trainee_uuid
# the trainee_uuid field should contain the trainee's unique id which was provided to the client upon the trainee logging in
# this method uses the trainee_uuid and queries the database to get a dictionary of trainee_name : trainee_uuid pairs of the trainee's peers
# and returns a json of the dictionary of {trainee_name : trainee_uuid} pairs
@app.route('/trainee/get_peers', methods=['GET'])
def trainee_get_trainees():
    """Return {trainee_name: trainee_uuid} for the trainee's team ({} on failure).

    Expects JSON body: trainee_uuid.
    """
    header = {'Access-Control-Allow-Origin': '*'}
    # receive data from front end
    try:
        req = request.get_json(force=True)
    except Exception:  # narrowed from bare except
        return jsonify({'headers': header, 'msg': 'Missing JSON'}), 400
    trainee_uuid = req.get('trainee_uuid')
    if not trainee_uuid:
        return jsonify({'headers': header, 'msg': 'Missing trainee uuid'}), 400
    try:
        peers = db.child('Trainees').order_by_key().equal_to(trainee_uuid).get().val()[trainee_uuid]["Team"]
    except Exception:  # no Team node -> empty dict
        peers = {}
    payload = {
        'headers': header,
        'peers' : peers
    }
    return jsonify(payload), 200
# expect request to have the following fields: trainee_uuid
# the trainee_uuid field should contain the trainee's unique id which was provided to the client upon the trainee logging in
# this method uses the trainee_uuid and queries the database to get a dictionary of trainee_uuid/name pairs of the trainee's trainees
# and returns a json of the dictionary of {manager_name : manager_uuid} pairs
@app.route('/trainee/get_managers', methods=['GET'])
def trainee_get_managers():
    """Return {manager_name: manager_uuid} for the trainee ({} on failure).

    Expects JSON body: trainee_uuid.
    """
    header = {'Access-Control-Allow-Origin': '*'}
    # receive data from front end
    try:
        req = request.get_json(force=True)
    except Exception:  # narrowed from bare except
        return jsonify({'headers': header, 'msg': 'Missing JSON'}), 400
    trainee_uuid = req.get('trainee_uuid')
    if not trainee_uuid:
        return jsonify({'headers': header, 'msg': 'Missing trainee uuid'}), 400
    try:
        managers = db.child('Trainees').order_by_key().equal_to(trainee_uuid).get().val()[trainee_uuid]["Managers"]
    except Exception:  # no Managers node -> empty dict
        managers = {}
    payload = {
        'headers': header,
        'managers' : managers
    }
    return jsonify(payload), 200
# expect the request to have the following fields: trainee_uuid
# the authorization header should contain the user's verification token received from logging in
# this method uses the trainee uuid to get and return the string of the trainee's plan_id
@app.route('/trainee/get_trainee_plan_id', methods=['GET'])
def trainee_get_plan_id():
    """Return the trainee's plan_id string ("" when none or lookup fails).

    Expects JSON body: trainee_uuid.
    """
    header = {'Access-Control-Allow-Origin': '*'}
    # receive data from front end
    try:
        req = request.get_json(force=True)
    except Exception:  # narrowed from bare except
        return jsonify({'headers': header, 'msg': 'Missing JSON'}), 400
    trainee_uuid = req.get('trainee_uuid')
    if not trainee_uuid:
        return jsonify({'headers': header, 'msg': 'Missing trainee uuid'}), 400
    try:
        plan_id = db.child('Trainees').order_by_key().equal_to(trainee_uuid).get().val()[trainee_uuid]['plan']
    except Exception:  # no plan assigned -> empty string
        plan_id = ""
    payload = {
        'headers': header,
        'plan_id' : plan_id
    }
    return jsonify(payload)
# expect request to have the following fields: trainee_uuid, plan_id
# the trainee_uuid field should contain the trainee's unique id which was provided to the client upon the trainee logging in
# the plan_id field should contain the plan_id of the plan which should be retrieved. This value can be gotten from the /trainee/get_trainee_training_plan_id endpoint
# this method uses the plan_id and queries the Plans database to get the dictionary of training_id : training_name from the plan (both from the templates associated with
# the plan and trainings added directly to the plan). It returns a json of the dictionary of training_id : training_name.
@app.route('/trainee/get_trainee_training_plan_contents', methods=['GET'])
def trainee_get_trainee_training_plan_contents():
    """Return {training_id: training_name} for a plan, merging direct trainings
    with trainings coming from the plan's templates.

    Expects JSON body: trainee_uuid, plan_id (from /trainee/get_trainee_plan_id).
    """
    header = {'Access-Control-Allow-Origin': '*'}
    # receive data from front end
    try:
        req = request.get_json(force=True)
    except Exception:  # narrowed from bare except
        return jsonify({'headers': header, 'msg': 'Missing JSON'}), 400
    trainee_uuid = req.get('trainee_uuid')
    plan_id = req.get('plan_id')
    if not trainee_uuid:
        return jsonify({'headers': header, 'msg': 'Missing trainee uuid'}), 400
    if not plan_id:
        return jsonify({'headers': header, 'msg': 'Missing plan id'}), 400
    # get trainings added directly to the plan first (dict of training_id : training_name)
    try:
        trainings = db.child('Plans').order_by_key().equal_to(plan_id).get().val()[plan_id]['Trainings']
    except Exception:  # plan has no direct trainings
        trainings = {}
    # now merge in trainings from each template attached to the plan
    try:
        template_ids = db.child('Plans').order_by_key().equal_to(plan_id).get().val()[plan_id]['Templates']
        for template_id in template_ids.keys():
            # index into the Templates table and pull its training_id : training_name dict
            template_trainings = db.child('Templates').order_by_key().equal_to(template_id).get().val()[template_id]['Trainings']
            trainings.update(template_trainings)
    except Exception:
        # best-effort: a plan without templates (or a missing template) still
        # returns the directly-attached trainings collected above
        pass
    payload = {
        'headers' : header,
        'trainings' : trainings
    }
    return jsonify(payload)
# expect the request to have the following fields: trainee_uuid, training_id
# the trainee_uuid field should contain the trainee's unique id which was provided to the client upon the trainee logging in
# the training_id field should contain the training_id for which you want to get the training's contents.
# This value can be gotten from the /trainee/get_trainee_training_plan_contents endpoint.
# this method uses the training_id to query the Trainings database to get the training contents. It returns the training
# contents as a json.
@app.route('/trainee/get_training', methods=['GET'])
def trainee_get_training():
    """Return the contents of one training ({} when not found).

    Expects JSON body: trainee_uuid, training_id.
    NOTE(review): the payload key is 'trainings' (plural) although it holds a
    single training — kept as-is because clients depend on it.
    """
    header = {'Access-Control-Allow-Origin': '*'}
    # receive data from front end
    try:
        req = request.get_json(force=True)
    except Exception:  # narrowed from bare except
        return jsonify({'headers': header, 'msg': 'Missing JSON'}), 400
    trainee_uuid = req.get('trainee_uuid')
    training_id = req.get('training_id')
    if not trainee_uuid:
        return jsonify({'headers': header, 'msg': 'Missing trainee uuid'}), 400
    if not training_id:
        return jsonify({'headers': header, 'msg': 'Missing training id'}), 400
    try:
        training = db.child('Trainings').order_by_key().equal_to(training_id).get().val()[training_id]
    except Exception:  # unknown training -> empty dict
        training = {}
    payload = {
        'headers' : header,
        'trainings' : training
    }
    return jsonify(payload)
# expect the request to have the following fields: trainee_uuid
# the trainee_uuid field should contain the trainee's unique id which was provided to the client upon the trainee logging in
# this method uses trainee uuid to query the Trainees database to get the list of events then returns the list of events
@app.route('/trainee/get_trainee_events', methods=['GET'])
def get_trainee_events():
    """Return the trainee's Events dict ({} when none or lookup fails).

    Expects JSON body: trainee_uuid.
    """
    header = {'Access-Control-Allow-Origin': '*'}
    # receive data from front end
    try:
        req = request.get_json(force=True)
    except Exception:  # narrowed from bare except
        return jsonify({'headers': header, 'msg': 'Missing JSON'}), 400
    trainee_uuid = req.get('trainee_uuid')
    if not trainee_uuid:
        return jsonify({'headers': header, 'msg': 'Missing trainee uuid'}), 400
    try:
        events = db.child('Trainees').order_by_key().equal_to(trainee_uuid).get().val()[trainee_uuid]['Events']
    except Exception:  # no events -> empty dict
        events = {}
    payload = {
        'headers' : header,
        'events' : events
    }
    return jsonify(payload)
# expect request to have the following header: authorization
# expect the request to have the following fields: trainee_uuid, training_id
# the trainee_uuid field should contain the trainee's unique id which was provided to the client upon the trainee logging in
# the training_id field should contain the training_id that you want to mark complete. It can be gotten from the /trainee/get_trainee_training_plan_contents endpoint
# this method uses the training_id to mark the given training as complete
# returns "success" or "failure"
@app.route('/trainee/mark_task_complete', methods=['POST'])
def mark_task_complete():
    """Mark a training complete by setting its 'complete' field to 'true'.

    Expects JSON body: trainee_uuid, training_id.
    Returns {'response': 'success'|'failure'}.
    """
    header = {'Access-Control-Allow-Origin': '*'}
    # receive data from front end
    try:
        req = request.get_json(force=True)
    except Exception:  # narrowed from bare except
        return jsonify({'headers': header, 'msg': 'Missing JSON'}), 400
    trainee_uuid = req.get('trainee_uuid')
    training_id = req.get('training_id')
    if not trainee_uuid:
        return jsonify({'headers': header, 'msg': 'Missing trainee uuid'}), 400
    if not training_id:
        return jsonify({'headers': header, 'msg': 'Missing training id'}), 400
    try:
        # stored as the string 'true' (not a boolean) to match existing records
        db.child('Trainings').child(training_id).update({'complete' : 'true'})
        response = "success"
    except Exception:  # best-effort: report failure instead of a 500
        response = "failure"
    payload = {
        'headers' : header,
        'response' : response
    }
    return jsonify(payload)
###
"""
Shared Methods
add methods - add new event (done)
"""
###
# expect the request to have the following fields: manager_uuid, trainee_uuid
# the manager_uuid field should contain the manager's unique id which was provided to the client upon the manager logging in
# the trainee_uuid field should contain the trainee's unique id which was provided to the client upon the trainee logging in
# the start, end, and text fields are strings provided by the user
# The manager can get their trainees' uuids and names through the endpoint /manager/get_trainees
# The trainee can get their managers' uuids and names through the endpoint /trainee/get_managers
# this method uses trainee uuid and manager uuid to add an event to both the trainee and manager's lists of events.
# It returns "success" or "failure"
@app.route('/shared/add_event_between_manager_and_trainee', methods=['POST'])
def add_event_between_manager_and_trainee():
    """Create one event under both the manager's and the trainee's Events.

    Expects JSON body: manager_uuid, manager_name, trainee_uuid, trainee_name,
    start, end, text.  Both copies share the same generated event key; each
    side's 'with' field names the other participant.
    Returns {'result': 'success'|'failure'}.
    """
    header = {'Access-Control-Allow-Origin': '*'}
    # receive data from front end
    try:
        req = request.get_json(force=True)
    except Exception:  # narrowed from bare except
        return jsonify({'headers': header, 'msg': 'Missing JSON'}), 400
    trainee_uuid = req.get('trainee_uuid')
    trainee_name = req.get('trainee_name')
    manager_uuid = req.get('manager_uuid')
    manager_name = req.get('manager_name')
    start = req.get('start')
    end = req.get('end')
    text = req.get('text')
    if not trainee_uuid:
        return jsonify({'headers': header, 'msg': 'Missing trainee uuid'}), 400
    if not trainee_name:
        return jsonify({'headers': header, 'msg': 'Missing trainee name'}), 400
    if not manager_uuid:
        return jsonify({'headers': header, 'msg': 'Missing manager uuid'}), 400
    if not manager_name:
        return jsonify({'headers': header, 'msg': 'Missing manager name'}), 400
    if not start:
        return jsonify({'headers': header, 'msg': 'Missing start timestamp'}), 400
    if not end:
        return jsonify({'headers': header, 'msg': 'Missing end timestamp'}), 400
    if not text:
        return jsonify({'headers': header, 'msg': 'Missing event text'}), 400
    try:
        event_key = db.generate_key()
        # make a new manager event
        manager_event_data = {
            'start' : start,
            'end' : end,
            'text' : text,
            'with' : trainee_name,
        }
        db.child('Managers').child(manager_uuid).child("Events").child(event_key).set(manager_event_data)
        # make a new trainee event
        trainee_event_data = {
            'start' : start,
            'end' : end,
            'text' : text,
            'with' : manager_name,
        }
        db.child('Trainees').child(trainee_uuid).child("Events").child(event_key).set(trainee_event_data)
        result = "success"
    except Exception:  # best-effort: report failure instead of a 500
        result = "failure"
    payload = {
        'headers' : header,
        'result' : result
    }
    return jsonify(payload)
# expect the request to have the following fields: trainee_uuid1, trainee_uuid2
# the trainee_uuid1 field should contain the trainee's unique id which was provided to the client upon the trainee logging in
# The trainee can get their peers' uuids through the endpoint /trainee/get_team
# this method uses trainee_uuid1 and trainee_uuid2 to add an event to both the trainee and their peer's lists of events.
# It returns "success" or "failure"
@app.route('/shared/add_event_between_trainee_and_trainee', methods=['POST'])
def add_event_between_trainee_and_trainee():
    """Create one event under both peer trainees' Events.

    Expects JSON body: trainee_uuid1, trainee_name1, trainee_uuid2,
    trainee_name2, start, end, text.  Both copies share the same generated
    event key; each side's 'with' field names the other trainee.
    Returns {'result': 'success'|'failure'}.
    """
    header = {'Access-Control-Allow-Origin': '*'}
    # receive data from front end
    try:
        req = request.get_json(force=True)
    except Exception:  # narrowed from bare except
        return jsonify({'headers': header, 'msg': 'Missing JSON'}), 400
    trainee_uuid1 = req.get('trainee_uuid1')
    trainee_name1 = req.get('trainee_name1')
    trainee_uuid2 = req.get('trainee_uuid2')
    trainee_name2 = req.get('trainee_name2')
    start = req.get('start')
    end = req.get('end')
    text = req.get('text')
    if not trainee_uuid1:
        return jsonify({'headers': header, 'msg': 'Missing trainee uuid 1'}), 400
    if not trainee_name1:
        return jsonify({'headers': header, 'msg': 'Missing trainee name 1'}), 400
    if not trainee_uuid2:
        return jsonify({'headers': header, 'msg': 'Missing trainee uuid 2'}), 400
    if not trainee_name2:
        return jsonify({'headers': header, 'msg': 'Missing trainee name 2'}), 400
    if not start:
        return jsonify({'headers': header, 'msg': 'Missing start timestamp'}), 400
    if not end:
        return jsonify({'headers': header, 'msg': 'Missing end timestamp'}), 400
    if not text:
        return jsonify({'headers': header, 'msg': 'Missing event text'}), 400
    try:
        event_key = db.generate_key()
        # make a new event for trainee 1
        trainee_event_data1 = {
            'start' : start,
            'end' : end,
            'text' : text,
            'with' : trainee_name2,
        }
        db.child('Trainees').child(trainee_uuid1).child("Events").child(event_key).set(trainee_event_data1)
        # make a new event for trainee 2 (copy-paste comment previously said "trainee 1")
        trainee_event_data2 = {
            'start' : start,
            'end' : end,
            'text' : text,
            'with' : trainee_name1,
        }
        db.child('Trainees').child(trainee_uuid2).child("Events").child(event_key).set(trainee_event_data2)
        result = "success"
    except Exception:  # best-effort: report failure instead of a 500
        result = "failure"
    payload = {
        'headers' : header,
        'result' : result
    }
    return jsonify(payload)
# Run the Flask development server when executed directly.
# NOTE(review): debug=True enables the interactive debugger (arbitrary code
# execution) — confirm production deployments use a real WSGI server instead.
if __name__ == '__main__':
    app.run(port=5000, debug=True)
from .checker import check_login
from .checker import check_argument
from .output import Output
from . import sorting
from . import parsing
@check_login
def like_core(ses, url, next, string_next):
    """Fetch *url* (or the *next* page when paging) and scrape like links.

    Collects every href containing "like.php" plus the pagination href that
    contains *string_next* (converted to an mbasic URL) for the next call.
    NOTE(review): the parameter `next` shadows the builtin of the same name.
    """
    html = ses.session.get(url if not next else next).text
    data = parsing.parsing_href(html, "like.php")
    next = sorting.to_mbasic(parsing.parsing_href(html, string_next, one = True))
    return Output(items = data, next = next, html = html, session_number = ses.session_number)
def like_post_home(ses, next = None):
    """Scrape like links from the mbasic news feed (home.php)."""
    # NOTE(review): "?aftercursorr=" has a doubled 'r' — confirm against the live markup.
    return like_core(ses, "https://mbasic.facebook.com/home.php", next, "?aftercursorr=")
@check_argument(["id"])
def like_post_friend(ses, id = None, next = None):
    """Scrape like links from a friend's timeline (by numeric id)."""
    return like_core(ses, "https://mbasic.facebook.com/{}?v=timeline".format(id), next, "?cursor")
@check_argument(["username"])
def like_post_fanspage(ses, username = None, next = None):
    """Scrape like links from a fan page (by username)."""
    return like_core(ses, "https://mbasic.facebook.com/{}".format(username), next, "?sectionLoadingID=")
@check_argument(["id"])
def like_post_grup(ses, id = None, next = None):
    """Scrape like links from a group feed (by group id)."""
    return like_core(ses, "https://mbasic.facebook.com/groups/{}".format(id), next, "?bacr=")
'''
@Description: In User Settings Edit
@Author: your name
@Date: 2019-09-02 20:12:34
@LastEditTime: 2019-09-22 22:43:12
@LastEditors: Please set LastEditors
'''
from __future__ import division
from __future__ import print_function
import time
import os
import argparse
import numpy as np
import math
import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.nn as nn
from utils import accuracy, writeColorOFFfile, get_log,load_normal_data,load_adj_data
from models import GAE, GCNencoder, GCNdecoder,GCNcolorDecoder
from PairGraphDataset import GraphDataset, CustomDataset
# ------------------------------------------------------------------------------------------
# Output directory and pre-trained checkpoint directory.
results = 'results'
path_ae = 'fixedmodel'
if not os.path.exists(results):
    os.makedirs(results)
# Device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Training settings (this script only runs inference, but the defaults mirror training)
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='Disables CUDA training.')
parser.add_argument('--fastmode', action='store_true', default=False,
                    help='Validate during training pass.')
parser.add_argument('--seed', type=int, default=21, help='Random seed.')
parser.add_argument('--epochs', type=int, default=1000,
                    help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.001,
                    help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=0,
                    help='Weight decay (L2 loss on parameters).')
parser.add_argument('--batch_size', type=int, default=1,
                    help='the size of a batch .')
parser.add_argument('--z', type=int, default=256,
                    help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.2,
                    help='Dropout rate (1 - keep probability).')
parser.add_argument('--nfeatures', type=int, default=6,
                    help='number of features( 3 features for each node).')
parser.add_argument('--nnodeA', type=int, default=1000,
                    help='number of class A nodes.')
parser.add_argument('--nnodeB', type=int, default=1000,
                    help='number of class B nodes.')
parser.add_argument('--datasetA', type=str, default='plane',
                    help='name of object of datasetA.(plane,bunny)')
parser.add_argument('--datasetB', type=str, default='bunny-up',
                    help='name of object of datasetB.plane,bunny')
parser.add_argument('--path_custom', type=str, default='custom',
                    help='the path of your custom OI data.')
parser.add_argument('--path_target_mesh', type=str, default='Data/color/bunny/test/bunny-up_lnum32_09000_Light.ply',
                    help='the path of target mesh for getting normal and adj.')
parser.add_argument('--attriA', type=str, default='Light',
                    help='name of attribute of datasetA.(Light,Color)')
parser.add_argument('--attriB', type=str, default='Color',
                    help='name of attribute of datasetB.(Light,Color)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
# Seed numpy and torch for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
# load model: checkpoint paths for both domain autoencoders, the latent
# A->B translator, and the light->color translation autoencoder.
path_A_encoder = os.path.join(path_ae, '{}-encoder.ckpt'.format(args.datasetA))
path_A_decoder = os.path.join(path_ae, '{}-decoder.ckpt'.format(args.datasetA))
path_B_encoder = os.path.join(path_ae, '{}-encoder.ckpt'.format(args.datasetB))
path_B_decoder = os.path.join(path_ae, '{}-decoder.ckpt'.format(args.datasetB))
path_A2B_generator = os.path.join(path_ae, '{}2{}-Light.ckpt'.format(args.datasetA, args.datasetB))
path_B_light2color_encoder = os.path.join(path_ae, '{}-Light2Color-encoder.ckpt'.format(args.datasetB))
path_B_light2color_decoder = os.path.join(path_ae, '{}-Light2Color-decoder.ckpt'.format(args.datasetB))
# BUG FIX: `path_target_mesh` was an undefined bare name (NameError at runtime);
# the value comes from the --path_target_mesh CLI argument.
test_dataset = CustomDataset(args.path_custom, args.path_target_mesh)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=1,
                                          shuffle=True)
# Model and optimizer
# Encoder/decoder pairs for domain A and domain B meshes, plus a separate
# encoder/decoder pair that translates B's lighting features to color.
encoder_A = GCNencoder(nfeat=args.nfeatures,
                       z=args.z,
                       nver=args.nnodeA,
                       dropout=args.dropout)
decoder_A = GCNdecoder(nfeat=args.nfeatures,
                       z=args.z,
                       nver=args.nnodeA,
                       dropout=args.dropout)
encoder_B = GCNencoder(nfeat=args.nfeatures,
                       z=args.z,
                       nver=args.nnodeB,
                       dropout=args.dropout)
decoder_B = GCNdecoder(nfeat=args.nfeatures,
                       z=args.z,
                       nver=args.nnodeB,
                       dropout=args.dropout)
encoder_light2color = GCNencoder(nfeat=args.nfeatures,
                                 z=args.z,
                                 nver=args.nnodeB,
                                 dropout=args.dropout)
decoder_light2color = GCNcolorDecoder(nfeat=args.nfeatures,
                                      z=args.z,
                                      nver=args.nnodeB,
                                      dropout=args.dropout)
# Restore the pre-trained weights from the checkpoint paths built above.
encoder_A.load_state_dict(torch.load(path_A_encoder))
decoder_A.load_state_dict(torch.load(path_A_decoder))
encoder_B.load_state_dict(torch.load(path_B_encoder))
decoder_B.load_state_dict(torch.load(path_B_decoder))
encoder_light2color.load_state_dict(torch.load(path_B_light2color_encoder))
decoder_light2color.load_state_dict(torch.load(path_B_light2color_decoder))
light2color_model = GAE(encoder_light2color, decoder_light2color)
# Freeze all parameters: this script is inference-only.
for parm in encoder_A.parameters():
    parm.requires_grad = False
for parm in decoder_A.parameters():
    parm.requires_grad = False
for parm in encoder_B.parameters():
    parm.requires_grad = False
for parm in decoder_B.parameters():
    parm.requires_grad = False
for parm in light2color_model.parameters():
    parm.requires_grad = False
# Wrap for (possible) multi-GPU execution and move to the selected device.
encoder_A = nn.DataParallel(encoder_A).to(device)
decoder_A = nn.DataParallel(decoder_A).to(device)
encoder_B = nn.DataParallel(encoder_B).to(device)
decoder_B = nn.DataParallel(decoder_B).to(device)
light2color_model = nn.DataParallel(light2color_model).to(device)
# eval() so dropout is disabled and batchnorm uses running statistics.
encoder_A.eval()
decoder_A.eval()
encoder_B.eval()
decoder_B.eval()
light2color_model.eval()
# MLP that translates latent codes from domain A to domain B (z -> z),
# widening to 2048 in the middle.
Ga2b = nn.Sequential(
    nn.Linear(args.z, 512),
    nn.BatchNorm1d(512),
    nn.LeakyReLU(0.2),
    nn.Linear(512, 1024),
    nn.BatchNorm1d(1024),
    nn.LeakyReLU(0.2),
    nn.Linear(1024, 2048),
    nn.BatchNorm1d(2048),
    nn.LeakyReLU(0.2),
    nn.Linear(2048, 1024),
    nn.BatchNorm1d(1024),
    nn.LeakyReLU(0.2),
    nn.Linear(1024, args.z),
    #nn.Tanh()
)
Ga2b.load_state_dict(torch.load(path_A2B_generator))
# Inference-only: freeze, wrap for DataParallel, and switch to eval mode.
for parm in Ga2b.parameters():
    parm.requires_grad = False
Ga2b = nn.DataParallel(Ga2b).to(device)
Ga2b.eval()
# NOTE(review): these criteria are never used below — confirm they can be removed.
criterion_L1 = torch.nn.L1Loss()
criterion_L2 = torch.nn.MSELoss()
# Test the model
# eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
with torch.no_grad():
    for i, (light, normal, adj, target_normal, target_adj, fname) in enumerate(test_loader):
        light = light.float().to(device)
        normal = normal.float().to(device)
        adj = adj.float().to(device)
        target_normal = target_normal.float().to(device)
        target_adj = target_adj.float().to(device)
        # Reshape to (batch, 1, nodes, features-per-node); nfeatures/2 because
        # the 6 features are split between light and normal (3 each).
        light = light.view(-1, 1, args.nnodeA, int(args.nfeatures/2))
        normal = normal.view(-1, 1, args.nnodeA, int(args.nfeatures/2))
        adj = adj.view(-1, 1, args.nnodeA, args.nnodeA)
        target_normal = target_normal.view(-1, 1, args.nnodeB, int(args.nfeatures/2))
        target_adj = target_adj.view(-1, 1, args.nnodeB, args.nnodeB)
        # Forward pass: encode A, translate latent A->B, decode in B,
        # then translate B's lighting to color.
        predited_bunny_light = decoder_B(Ga2b(encoder_A(light, normal, adj)), target_adj)
        predited_bunny_light = predited_bunny_light.view(-1, 1, args.nnodeB, int(args.nfeatures/2))
        predited_bunny_color = light2color_model(predited_bunny_light, target_normal, target_adj, target_normal, target_adj)
        # Scale the first output from [0, 1] to 8-bit color values.
        predited_bunny_color = predited_bunny_color[0] * 255
        predited_bunny_color = predited_bunny_color.view(args.nnodeB, 1)
        pointlist = predited_bunny_color.cpu().numpy().tolist()
        # Output filename: swap dataset and attribute names (A -> B).
        fname_B = str(fname[0]).replace(args.datasetA, args.datasetB)
        fname_B = fname_B.replace(args.attriA, args.attriB)
        writeColorOFFfile(os.path.join(
            results, fname_B), pointlist, args.path_target_mesh)
from django.urls import path
from django.conf.urls import url
from . import views
# URL routes for the car-parking app.
# Converted the deprecated django.conf.urls.url() entries (removed in
# Django 4) to path(); the anchored regexes ^...$ map 1:1 to plain routes.
urlpatterns = [
    path('', views.index, name='carparking'),
    path('dashboard/', views.dashboard, name='dashboard'),
    path('get-list/', views.get_all_slots, name='all_slots'),
]
|
# Generated by Django 3.2.6 on 2021-08-15 11:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Activity model and make Dictirbution.send_time user-supplied."""

    dependencies = [
        ('content', '0004_dictirbution_is_send'),
    ]
    operations = [
        migrations.CreateModel(
            name='Activity',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('activity_type', models.CharField(max_length=200, verbose_name='ะขะธะฟ ะฐะบัะธะฒะฝะพััะธ')),
                ('element_unique_id', models.CharField(max_length=200, verbose_name='ะะนะดะธ ัะปะตะผะตะฝั ะฐะบัะธะฒะฝะพััะธ')),
                ('time', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': 'ะะบัะธะฒะฝะพััั',
                'verbose_name_plural': 'ะะบัะธะฒะฝะพััะธ',
            },
        ),
        # send_time is no longer auto-populated; callers must provide it.
        migrations.AlterField(
            model_name='dictirbution',
            name='send_time',
            field=models.DateTimeField(verbose_name='ะัะตะผั ะพัะฟัะฐะฒะบะธ'),
        ),
    ]
|
"""
Computes specific humidity using surface pressure, air temperature and dew point
https://github.com/Unidata/MetPy/issues/791#issuecomment-377501593
Modified to fit the task specifics
"""
import numpy as np
def spec_humidity(pressure, temp, dew, latent='water'):
    """Compute specific humidity (kg/kg) from dew point temperature.

    Uses eq. 4.24, p. 96, Practical Meteorology (Roland Stull).

    Parameters
    ----------
    pressure : surface pressure in Pa (must share units with e0 below).
    temp : air temperature — kept for interface compatibility; not used
        by this formulation (q depends only on dew point and pressure).
    dew : dew point temperature in Kelvin.
    latent : 'water' (any case) for the liquid-water latent-heat constant,
        anything else selects the ice constant.
    """
    # Declaring constants
    e0 = 611.3      # saturation vapor pressure in Pa (same units as pressure)
    c_water = 5423  # L/R for water in Kelvin
    c_ice = 6139    # L/R for ice in Kelvin
    t0 = 273.15     # Kelvin
    # Case-insensitive check generalizes the original 'water'/'Water' test.
    c = c_water if latent.lower() == 'water' else c_ice
    # 622 is the ratio Rd/Rv expressed in g/kg.
    q = (622 * e0 * np.exp(c * (dew - t0) / (dew * t0))) / pressure  # g/kg
    return q / 1000  # kg/kg
|
import os, pp
def hello_world(value):
    """Return a greeting naming this host, the current pid, and *value*."""
    hostname = os.uname()[1]
    pid = os.getpid()
    return "Hello world from hostname [%s] with pid [%d] and number [%d]" % (hostname, pid, value)
# Python 2 script: submit ten hello_world jobs to a Parallel Python cluster
# consisting of one remote ppserver node.
node_list = ('192.168.25.20',)
job_server = pp.Server(ppservers=node_list)
result_dict = {}
# submit() returns a callable; invoking it blocks until the job's result is ready.
for i in xrange(10):
    result_dict[i] = job_server.submit(hello_world, args=(i,))
for key, value in result_dict.items():
    print "key [%d] => [%s]" % (key, value())
|
from turtle import *
import turtle
import random
import math
import time
from ball import Ball
# --- Game setup: screen, player ball, and the initial field of NPC balls ---
turtle.colormode(255)   # accept RGB tuples in 0..255
turtle.tracer(0)        # manual screen updates (we call update() each frame)
turtle.hideturtle()
RUNNING = True
SLEEP = 0.0065          # per-frame delay in seconds
# Half-extents of the canvas; ball coordinates range over [-W, W] x [-H, H].
SCREEN_WIDTH = turtle.getcanvas().winfo_width()/2
SCREEN_WIDTH_MINUS = -turtle.getcanvas().winfo_width()/2
SCREEN_HEIGHT = turtle.getcanvas().winfo_height()/2
SCREEN_HEIGHT_MINUS = -turtle.getcanvas().winfo_height()/2
# The player-controlled ball.
MY_BALL = Ball(100, 100, 2, 1, 40, "green")
original_radius = MY_BALL.radius
NUMBER_OF_BALLS = 5
MINIMUM_BALL_RADIUS = 10
MAXIMUM_BALL_RADIUS = 50
MINIMUM_BALL_DX = -5
MAXIMUM_BALL_DX = 5
MINIMUM_BALL_DY = -5
MAXIMUM_BALL_DY = 5
# Horizontal spawn range keeps a full-sized ball inside the screen.
FIRST = int(SCREEN_WIDTH_MINUS + MAXIMUM_BALL_RADIUS)
SECOND = int(SCREEN_WIDTH - MAXIMUM_BALL_RADIUS)
BALLS = []
# Spawn the NPC balls with random position, nonzero velocity, size and color.
for i in range(NUMBER_OF_BALLS):
    x = random.randint(FIRST, SECOND)
    y = random.randint(-SCREEN_HEIGHT + MAXIMUM_BALL_RADIUS, SCREEN_HEIGHT - MAXIMUM_BALL_RADIUS)
    dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)
    while dx == 0:  # re-roll so every ball moves horizontally
        dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)
    dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)
    while dy == 0:  # re-roll so every ball moves vertically
        dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)
    radius = random.randint(MINIMUM_BALL_RADIUS, MAXIMUM_BALL_RADIUS)
    r = random.randint(0, 225)
    g = random.randint(0, 225)
    b = random.randint(0, 225)
    color = (r, g, b)
    balll = Ball(x, y, dx, dy, radius, color)
    BALLS.append(balll)
def move_all_balls():
    """Advance every NPC ball one step, bouncing off the screen edges."""
    for ball in BALLS:
        ball.move(SCREEN_WIDTH, SCREEN_HEIGHT)
def collide(ball_a, ball_b):
    """Return True when two distinct balls overlap (10 px of slack required)."""
    if ball_a == ball_b:
        return False
    gap = math.sqrt((ball_a.xcor() - ball_b.xcor()) ** 2
                    + (ball_a.ycor() - ball_b.ycor()) ** 2)
    return gap + 10 <= ball_a.radius + ball_b.radius
def check_all_balls_collision():
    """Resolve NPC-vs-NPC collisions: the smaller ball is respawned at a
    random position with fresh velocity/size/color, the larger grows by 1.

    NOTE(review): iterates all ordered pairs, so each colliding pair is seen
    twice per frame (a,b) and (b,a); the second visit usually misses because
    the smaller ball has already been teleported away.
    """
    for ball_a in BALLS:
        for ball_b in BALLS:
            if collide(ball_a, ball_b) == True:
                radius_a = ball_a.radius
                radius_b = ball_b.radius
                # Roll replacement attributes for whichever ball loses.
                x_coordinate = random.randint(FIRST, SECOND)
                y_coordinate = random.randint(-SCREEN_HEIGHT + MAXIMUM_BALL_RADIUS, SCREEN_HEIGHT - MAXIMUM_BALL_RADIUS)
                x_axis_speed = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)
                while x_axis_speed == 0:  # re-roll: new ball must move
                    x_axis_speed = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)
                y_axis_speed = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)
                while y_axis_speed == 0:
                    y_axis_speed = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)
                radius = random.randint(MINIMUM_BALL_RADIUS, MAXIMUM_BALL_RADIUS)
                r = random.randint(0, 225)
                g = random.randint(0, 225)
                b = random.randint(0, 225)
                color = (r, g, b)
                if radius_a < radius_b:
                    # a loses: respawn a, grow b
                    ball_a.goto(x_coordinate, y_coordinate)
                    ball_a.dx = x_axis_speed
                    ball_a.dy = y_axis_speed
                    ball_a.radius = radius
                    ball_a.shapesize(radius/10)  # shapesize is in 10-px units
                    ball_a.color(color)
                    ball_b.radius = ball_b.radius + 1
                    ball_b.shapesize(ball_b.radius/10)
                else:
                    # b loses (ties included): respawn b, grow a
                    ball_b.goto(x_coordinate, y_coordinate)
                    ball_b.dx = x_axis_speed
                    ball_b.dy = y_axis_speed
                    ball_b.radius = radius
                    ball_b.shapesize(radius/10)
                    ball_b.color(color)
                    ball_a.radius = ball_a.radius + 1
                    ball_a.shapesize(ball_a.radius/10)
def check_myball_collision():
    """Resolve collisions between the player ball and NPC balls.

    Returns True when the player eats a smaller ball (player grows by 1 and
    the eaten ball respawns), False when the player hits a bigger ball (game
    over; the winning NPC respawns), and None when nothing collides.
    NOTE(review): placement of the final `return False` inside the losing
    branch is inferred — a function-level return would end the game on every
    collision-free frame; confirm against the original indentation.
    """
    for i in BALLS:
        if collide(i, MY_BALL) == True:
            if MY_BALL.radius > i.radius:
                # Player wins: respawn the eaten NPC with fresh random attributes.
                x_coordinate = random.randint(FIRST, SECOND)
                y_coordinate = random.randint(-SCREEN_HEIGHT + MAXIMUM_BALL_RADIUS, SCREEN_HEIGHT - MAXIMUM_BALL_RADIUS)
                x_axis_speed = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)
                while x_axis_speed == 0:  # re-roll: new ball must move
                    x_axis_speed = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)
                y_axis_speed = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)
                while y_axis_speed == 0:
                    y_axis_speed = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)
                radius = random.randint(MINIMUM_BALL_RADIUS, MAXIMUM_BALL_RADIUS)
                r = random.randint(0, 225)
                g = random.randint(0, 225)
                b = random.randint(0, 225)
                color = (r, g, b)
                i.goto(x_coordinate, y_coordinate)
                i.dx = x_axis_speed
                i.dy = y_axis_speed
                i.radius = radius
                i.shapesize(radius/10)
                i.color(color)
                MY_BALL.radius = MY_BALL.radius + 1
                MY_BALL.shapesize(MY_BALL.radius/10)
                print("yummy")
                return True
            if MY_BALL.radius < i.radius:
                # Player loses: respawn the NPC that ate the player.
                x_coordinate = random.randint(FIRST, SECOND)
                y_coordinate = random.randint(-SCREEN_HEIGHT + MAXIMUM_BALL_RADIUS, SCREEN_HEIGHT - MAXIMUM_BALL_RADIUS)
                x_axis_speed = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)
                while x_axis_speed == 0:
                    x_axis_speed = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)
                y_axis_speed = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)
                while y_axis_speed == 0:
                    y_axis_speed = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)
                radius = random.randint(MINIMUM_BALL_RADIUS, MAXIMUM_BALL_RADIUS)
                r = random.randint(0, 225)
                g = random.randint(0, 225)
                b = random.randint(0, 225)
                color = (r, g, b)
                i.goto(x_coordinate, y_coordinate)
                i.dx = x_axis_speed
                i.dy = y_axis_speed
                i.radius = radius
                i.shapesize(radius/10)
                i.color(color)
                return False
def movearound(event):
    """Teleport the player ball to the mouse pointer.

    Converts Tk canvas coordinates (origin top-left, y down) to turtle
    coordinates (origin at the screen center, y up).
    """
    turtle_x = event.x - SCREEN_WIDTH
    turtle_y = SCREEN_HEIGHT - event.y
    MY_BALL.goto(turtle_x, turtle_y)
# Steer the player ball with the mouse.
getcanvas().bind("<Motion>", movearound)
listen()
# Main game loop: runs until the window is closed (RUNNING is never cleared).
while RUNNING == True:
    # Track window resizes so bounce limits stay current.
    if SCREEN_WIDTH != getcanvas().winfo_width()/2 or SCREEN_HEIGHT != getcanvas().winfo_height()/2:
        SCREEN_WIDTH = getcanvas().winfo_width()/2
        SCREEN_HEIGHT = getcanvas().winfo_height()/2
    move_all_balls()
    check_all_balls_collision()
    getscreen().update()  # manual refresh (tracer(0) above)
    time.sleep(SLEEP)
    # False means the player was eaten -> show game over, then continue.
    if check_myball_collision() == False:
        turtle.goto(0, 0)
        turtle.write("you suck, GAME OVER!", align="center", font=("Arial", 50, "normal"))
        time.sleep(SLEEP)
        turtle.clear()
        check_myball_collision()
turtle.mainloop()
# Read two integers a and c from one line and print c*2 - a.
first, second = (int(token) for token in input().split())
print(2 * second - first)
|
"""Simple server application"""
import socket
# server host (local machine)
host = 'localhost'
# machine port
port = 7000
# CONNECTION tuple
conn = (host, port)
# number of requests before need to thread...
backlog = 5
# max length of data buffer (bytes)
size = 1024
# create a socket object. AF_INET = ipv4 addressing SOCK_STREAM = transport protocol (tcp)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# force server to release socket immediately if killed
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind the socket to connection
s.bind(conn)
# listen for events on the socket
s.listen(backlog)
# poll for incoming connections then send response
while 1:
print 'Server ready, willing and able!'
# waits for connection then returns a tuple representing socket connection object
client, address = s.accept()
# reads in the payload sent by a client
fd = client.recv(size)
se = open(fd, 'r')
data = se.read()
if data:
print 'Client at ', address[0], 'asked for ==> ', data
# send a message back to the client
client.send(data)
client.close() # close the connection with client
|
# Print an ownership wish for selected vehicles, demonstrating positive
# and negative list indexing ([-2] and [-1] alias the last two items).
trans = ['bicycle', 'motobike', 'car', 'carb', 'trank']
message = "I would like to own a "
for index in (0, 1, 2, -2, -1):
    print(message + trans[index])
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 11 20:46:28 2017
@author: Administrator

Demo script: generates a test sine wave, then (below) defines min-max
normalization, spectral flatness (SFM) and power spectral density helpers.
"""
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import gmean
# Example 1: generate a sine wave (translated from the original comment)
fs = 20000.0   # sampling rate, Hz
N = 2000       # number of samples
freq = 1000    # tone frequency, Hz
freq2 = 100    # second frequency; unused in the visible code
time = np.linspace(0,float(N)/fs,N)
sine = 0.264*np.sin(2*np.pi*time*freq) # the sine wave
plt.plot(sine)
plt.show()
plt.plot(sine[:100]) # inspect a short segment of the signal
plt.show()
# Normalization
def normalization(sine):
    """Min-max normalize `sine` into [0, 1] and plot the result.

    Parameters: sine -- 1-D array-like signal.
    Returns: np.ndarray with min mapped to 0 and max mapped to 1.
    Note: produces NaN/inf for a constant input (max == min), same as the
    original implementation.
    """
    array = np.array(sine)
    # Idiom fix: the original found the extrema by sorting the array twice
    # (O(n log n) each); min()/max() are the O(n) equivalents.
    min_num = array.min()
    max_num = array.max()
    new_array = (array - min_num) / (max_num - min_num)
    plt.plot(new_array)  # plotting side effect kept from the original
    return new_array
def SFM(psd):
    """Spectral Flatness Measure: geometric mean / arithmetic mean of `psd`.

    A flat (noise-like) spectrum gives values near 1, a tonal spectrum
    values near 0. The constant pre-scaling cancels in the ratio either way.
    """
    # Bug fix: the original wrote `10^6`, which is bitwise XOR (== 12),
    # not the intended power-of-ten scaling 10**6.
    psd = psd * (10 ** 6)
    arithmetic = np.average(psd)   # arithmetic mean
    geometric = gmean(psd)         # geometric mean
    # Renamed local: the original bound the result to `SFM`, shadowing the
    # function's own name.
    return geometric / arithmetic
def PSD(signal):
    """Compute and draw the power spectral density of `signal` via matplotlib.

    plt.psd returns (Pxx, freqs); element 0 is the power array.
    """
    psd = plt.psd(signal)
    # Compatibility fix: `print psd[0]` is Python 2-only syntax;
    # `print(psd[0])` behaves identically on both Python 2 and 3 here.
    print(psd[0])
PSD(sine)
|
import psycopg2
from lab4.fields.EmailField import EmailField
from lab4.fields.IdField import IdField
from lab4.fields.MessageField import MessageField
from lab4.fields.ThemeField import ThemeField
# Module-wide connection shared by every Mail instance.
# NOTE(review): credentials are hard-coded; move them to config/env for real use.
db = psycopg2.connect("dbname=db user=postgres password=root host=127.0.0.1 port=5432")
class Mail:
    """Active-record style wrapper around the "mail" table.

    Rows are loaded into typed field objects (IdField, EmailField, ...);
    `save()` INSERTs a new mail or UPDATEs a previously loaded one.
    """

    # Shared projection; "to"/"from" are quoted because they are SQL keywords.
    _SELECT = 'SELECT "id","to","from","theme","message" FROM mail'

    def _populate(self, row):
        # Map one fetched row onto this instance's typed fields
        # (shared by findById and findByOffset).
        self.id = IdField(row[0])
        self.to = EmailField(row[1])
        self.from_ = EmailField(row[2])
        self.theme = ThemeField(row[3])
        self.message = MessageField(row[4])

    def findById(self, id):
        """Load the mail with primary key `id`; no-op when it does not exist."""
        cur = db.cursor()
        try:
            # Fix: the original's `except: raise` left the cursor open on
            # failure; try/finally always releases it.
            cur.execute(self._SELECT + ' WHERE id=%s LIMIT 1', [str(id)])
            res = cur.fetchone()
        finally:
            cur.close()
        if res:
            self._populate(res)

    def findByOffset(self, offset):
        """Load the row at `offset`; return 1 on success, 0 when out of range."""
        cur = db.cursor()
        try:
            cur.execute(self._SELECT + ' LIMIT 1 OFFSET %s', [str(offset)])
            res = cur.fetchone()
        finally:
            cur.close()
        if res:
            self._populate(res)
            return 1
        return 0

    def save(self):
        """Persist this mail: UPDATE when it has an id, INSERT otherwise."""
        cur = db.cursor()
        try:
            if hasattr(self, 'id'):
                query = 'UPDATE "mail" SET "theme"=%s,"to"=%s,"from"=%s,"message"=%s WHERE "id"=%s'
                cur.execute(query, [str(self.theme), str(self.to), str(self.from_), str(self.message), str(self.id)])
            else:
                query = 'INSERT INTO "mail" ("theme", "to", "from", "message") VALUES (%s, %s, %s, %s)'
                cur.execute(query, [str(self.theme), str(self.to), str(self.from_), str(self.message)])
            db.commit()
        finally:
            cur.close()

    @staticmethod
    def count():
        """Return the mail id sequence's last value (an upper bound on rows)."""
        cur = db.cursor()
        try:
            cur.execute("SELECT last_value FROM mail_id")
            return cur.fetchone()[0]
        finally:
            cur.close()
###########################
if __name__ == "__main__":
    # Manual smoke test: load and print the first mail.
    try:
        # myMail = Mail()
        # myMail.from_ = EmailField("test@qwe.ru")
        # myMail.to = EmailField("asd@qwe.ru")
        # myMail.theme = ThemeField("hello")
        # myMail.message = MessageField("Test message!")
        # myMail.save()
        m = Mail()
        if m.findByOffset(0):
            print(m.id)
            print(m.from_)
            print(m.to)
            print(m.theme)
            print(m.message)
        # m.theme = ThemeField("^__^")
        # m.save()
    except Exception:
        # Fix: narrowed from a bare `except:` so Ctrl-C / SystemExit are no
        # longer swallowed; printed output is unchanged.
        print("error")
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import TypeVar, Dict
class Base(ABC):
    """
    Semantics Base class.
    All semantics should have `code_hash: str` and `contract_semantics: dict` class attribute.
    Inherit this class if you want to implement new semantic.
    """

    @property
    @abstractmethod
    def contract_semantics(self) -> dict:
        # Fix: return annotations were swapped relative to the docstring —
        # contract_semantics is the dict, code_hash the str.
        ...

    @property
    @abstractmethod
    def code_hash(self) -> str:
        ...
# TypeVar for mappings of code_hash -> contract_semantics.
# Fix: the original bound was Dict[Base.code_hash.fget, Base.contract_semantics.fget],
# i.e. Dict parametrized with the property *getter functions* — meaningless as
# a type. The intended bound uses the documented value types (str keys, dict
# values).
BaseType = TypeVar("BaseType", bound=Dict[str, dict])
|
import sys
import numpy as np
import tensorflow as tf
import udc_model
import udc_hparams
from models.dual_encoder_gru import dual_encoder_model
import csv
# Flags: where the trained dual-encoder checkpoint and the fitted vocabulary
# processor live on disk.
tf.flags.DEFINE_string("model_dir", "./runs/GRU", "Directory to load model checkpoints from")
tf.flags.DEFINE_string("vocab_processor_file", "/Users/ektasorathia/Documents/CMPE295B/Final/qa-rest-server/qamodel/runs/GRU/vocab_processor.bin", "Saved vocabulary processor file")
FLAGS = tf.flags.FLAGS
outdir="/Users/ektasorathia/Documents/CMPE295B/udc_train"
if not FLAGS.model_dir:
    print("You must specify a model directory")
    sys.exit(1)
def tokenizer_fn(iterator):
    # Whitespace tokenizer matching the one used when the vocabulary was built.
    return (x.split(" ") for x in iterator)
# Load vocabulary
#vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
#  160,
#  min_frequency=5,
#  tokenizer_fn=tokenizer_fn)
vp = tf.contrib.learn.preprocessing.VocabularyProcessor.restore(
    FLAGS.vocab_processor_file)
# Pre-tokenized query that every candidate answer is scored against below.
QUESTION="""buy 4k laptop would like run ubuntu 1510 believe nt big problem sure adjust text unity ui
right thing use virtualbox quite often running ubuntu server believe run problem yet set text within guest
operating system small barely readable right way scaling guest o view like instance scale factor time 2 possible
run application adapt 4k kind magnifying lens like instance running virtualbox doublesize zoom"""
#ANS=try man telinit however runlevels ubuntu code description 0 halt 1 singleuser mode 2 graphical
# multiuser networking 35 unused configured runlevel 2 6 reboot
def get_features(context, utterance):
    """Build the feature dict the estimator expects for one
    (context, utterance) pair; the second return value is the absent target."""
    # Vocabulary ids for both texts, shape (1, max_document_length).
    context_ids = np.array(list(vp.transform([context])))
    utterance_ids = np.array(list(vp.transform([utterance])))
    features = {
        "context": tf.convert_to_tensor(context_ids, dtype=tf.int64),
        "context_len": tf.constant(len(context.split(" ")), shape=[1, 1], dtype=tf.int64),
        "utterance": tf.convert_to_tensor(utterance_ids, dtype=tf.int64),
        "utterance_len": tf.constant(len(utterance.split(" ")), shape=[1, 1], dtype=tf.int64),
    }
    return features, None
# NOTE(review): ans_dict is never read or written in the visible module.
ans_dict={}
def initialize():
    """Build the tf.contrib Estimator wrapping the dual-encoder GRU model."""
    hparams = udc_hparams.create_hparams()
    model_fn = udc_model.create_model_fn(hparams, model_impl=dual_encoder_model)
    estimator = tf.contrib.learn.Estimator(model_fn=model_fn, model_dir=FLAGS.model_dir)
    # Legacy tf.contrib workaround: pre-register a target signature so
    # predict() runs without labelled input — TODO confirm still required.
    estimator._targets_info = tf.contrib.learn.estimators.tensor_signature.TensorSignature(tf.constant(0, shape=[1, 1]))
    return estimator
def get_probability(context, response, estimator):
    """Score `response` as a reply to `context` with the trained estimator.

    Returns the first element of the first prediction yielded.
    """
    predictions = estimator.predict(input_fn=lambda: get_features(context, response))
    return next(predictions)[0]
def read_answers():
    """Score every candidate answer in the CSV against QUESTION.

    Fixes two defects in the original: the file handle was never closed, and
    get_probability() was called without its required `estimator` argument,
    which raised TypeError on the first row.
    """
    estimator = initialize()  # build the estimator once, not per row
    # 'rb' kept from the original (Python 2-era csv usage) — TODO confirm the
    # interpreter; Python 3's csv module requires text mode.
    with open("/Users/ektasorathia/Documents/CMPE295B/udc_train/chatbot-retrieval/answer_clipped.csv", 'rb') as f:
        reader = csv.reader(f)
        for row in reader:
            get_probability(QUESTION, row[0], estimator)
if __name__ == "__main__":
    read_answers()
from itertools import permutations
def get_words(hash_of_letters):
    """Return all distinct strings formed by permuting the multiset of letters.

    `hash_of_letters` maps each letter to an iterable of repeat counts; every
    letter contributes `letter * count` for each count. Result is sorted.
    Fix: `.iteritems()` is Python 2-only; `.items()` behaves identically on
    both Python 2 and 3.
    """
    letters = ''.join(k * a for k, v in hash_of_letters.items() for a in v)
    return sorted(''.join(b) for b in set(permutations(letters)))
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mayor.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from views.mayor.showStats import Ui_showStatus
from DB import scheduleTable
class Ui_MainMayor(object):
    """Main mayor window: the road-repair schedule as a table with one
    'show stats' button per row.

    Skeleton generated by pyuic5 from mayor.ui; updateTable/showStats are
    hand-written extensions, so regenerating the .ui file will lose them.
    """

    def setupUi(self, MainMayor):
        """Build all widgets, menus and status bar for `MainMayor`."""
        MainMayor.setObjectName("MainMayor")
        MainMayor.resize(800, 600)
        font = QtGui.QFont()
        font.setPointSize(9)
        MainMayor.setFont(font)
        self.centralwidget = QtWidgets.QWidget(MainMayor)
        self.centralwidget.setObjectName("centralwidget")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(350, 10, 101, 41))
        font = QtGui.QFont()
        font.setPointSize(16)
        font.setUnderline(True)
        self.label.setFont(font)
        self.label.setObjectName("label")
        # Schedule table: read-only, 5 columns (id, road, start, end, stats).
        self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
        self.tableWidget.setGeometry(QtCore.QRect(20, 90, 731, 451))
        self.tableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(5)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        item.setBackground(QtGui.QColor(194, 194, 194))
        self.tableWidget.setHorizontalHeaderItem(1, item)
        item = QtWidgets.QTableWidgetItem()
        item.setBackground(QtGui.QColor(194, 194, 194))
        self.tableWidget.setHorizontalHeaderItem(2, item)
        item = QtWidgets.QTableWidgetItem()
        item.setBackground(QtGui.QColor(194, 194, 194))
        self.tableWidget.setHorizontalHeaderItem(3, item)
        item = QtWidgets.QTableWidgetItem()
        item.setBackground(QtGui.QColor(194, 194, 194))
        self.tableWidget.setHorizontalHeaderItem(4, item)
        header = self.tableWidget.horizontalHeader()
        header.setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)
        header.setSectionResizeMode(4,QtWidgets.QHeaderView.ResizeToContents)
        self.tableWidget.horizontalHeader().setStretchLastSection(True)
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(20, 50, 191, 21))
        font = QtGui.QFont()
        font.setPointSize(11)
        font.setUnderline(True)
        self.label_2.setFont(font)
        self.label_2.setObjectName("label_2")
        MainMayor.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainMayor)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
        self.menubar.setObjectName("menubar")
        self.menuMain = QtWidgets.QMenu(self.menubar)
        self.menuMain.setObjectName("menuMain")
        self.menuAbout = QtWidgets.QMenu(self.menubar)
        self.menuAbout.setObjectName("menuAbout")
        MainMayor.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainMayor)
        self.statusbar.setObjectName("statusbar")
        MainMayor.setStatusBar(self.statusbar)
        self.actionDevelopers = QtWidgets.QAction(MainMayor)
        self.actionDevelopers.setObjectName("actionDevelopers")
        self.menuAbout.addAction(self.actionDevelopers)
        self.menubar.addAction(self.menuMain.menuAction())
        self.menubar.addAction(self.menuAbout.menuAction())
        self.retranslateUi(MainMayor)
        QtCore.QMetaObject.connectSlotsByName(MainMayor)

    def retranslateUi(self, MainMayor):
        """Set all user-visible strings and (re)fill the schedule table."""
        _translate = QtCore.QCoreApplication.translate
        MainMayor.setWindowTitle(_translate("MainMayor", "mayor main"))
        self.label.setText(_translate("MainMayor", "Mayor"))
        item = self.tableWidget.horizontalHeaderItem(0)
        item.setText(_translate("MainMayor", "ComplaintId"))
        item = self.tableWidget.horizontalHeaderItem(1)
        item.setText(_translate("MainMayor", "RoadLoc"))
        item = self.tableWidget.horizontalHeaderItem(2)
        item.setText(_translate("MainMayor", "Start Loc"))
        item = self.tableWidget.horizontalHeaderItem(3)
        item.setText(_translate("MainMayor", "End Loc"))
        item = self.tableWidget.horizontalHeaderItem(4)
        item.setText(_translate("MainMayor", "statistics"))
        __sortingEnabled = self.tableWidget.isSortingEnabled()
        # Disable sorting while rows are inserted so row order stays aligned
        # with self.schedule; restore the previous setting afterwards.
        self.tableWidget.setSortingEnabled(False)
        self.updateTable(MainMayor)
        self.tableWidget.setSortingEnabled(__sortingEnabled)
        self.label_2.setText(_translate("MainMayor", "Road Repair Schedule:"))
        self.menuMain.setTitle(_translate("MainMayor", "Main"))
        self.menuAbout.setTitle(_translate("MainMayor", "About"))
        self.actionDevelopers.setText(_translate("MainMayor", "Developers"))

    def updateTable(self,win):
        """Fill the table from the DB schedule, one stats button per row."""
        _translate = QtCore.QCoreApplication.translate
        self.schedule = scheduleTable.getSchedule()
        self.tableWidget.setRowCount(len(self.schedule))
        getitems = (0,1,2,3)  # schedule columns rendered as plain cells
        for i,t in enumerate(self.schedule):
            item = QtWidgets.QTableWidgetItem()
            item.setText(str(i+1))
            self.tableWidget.setVerticalHeaderItem(i, item)
            for j,idx in enumerate(getitems):
                item = QtWidgets.QTableWidgetItem()
                # Fixed: the translation context was "MainSupervisor", a
                # copy-paste from the supervisor UI; this window's context
                # is "MainMayor".
                item.setText(_translate("MainMayor", str(t[idx])))
                self.tableWidget.setItem(i, j, item)
            item = QtWidgets.QPushButton()
            item.setText('show stats of id: ' + str(t[0]))
            # Fixed late-binding closure bug: the original
            # `lambda: self.showStats(win, t[0], t[9])` captured the loop
            # variable `t`, so EVERY button opened the last row's stats.
            # Binding t as a default freezes the current row; `checked`
            # absorbs the bool that QPushButton.clicked emits.
            # Assumes schedule rows have >= 10 fields (stats at index 9) —
            # TODO confirm against scheduleTable.getSchedule().
            item.clicked.connect(lambda checked=False, t=t: self.showStats(win, t[0], t[9]))
            self.tableWidget.setCellWidget(i, 4, item)

    def showStats(self,win,id,stats):
        """Open the statistics dialog for schedule entry `id`."""
        showStatus = QtWidgets.QDialog(win)
        ui = Ui_showStatus(id,stats)
        ui.setupUi(showStatus)
        showStatus.show()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainMayor = QtWidgets.QMainWindow()
ui = Ui_MainMayor()
ui.setupUi(MainMayor)
MainMayor.show()
sys.exit(app.exec_())
|
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 500)  # show wide performance tables in full
from tqdm import tqdm_notebook as tqdm
from IPython.display import display
import os
import sys
# Make the project package importable when running from Colab/Drive.
sys.path.append('/content/drive/My Drive/ไธญ็ ้ข/repo/')
from falldetect.utilities import *
from falldetect.models import *
from falldetect.dataset_util import *
from falldetect.training_util import *
import time
import datetime
# NOTE(review): this import shadows the `datetime` module imported on the
# previous line — `datetime` now names the class, so `datetime.datetime.*`
# fails (see modification_date below).
from datetime import datetime
import json
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import pyplot
matplotlib.rc( 'savefig', facecolor = 'white' )
# matplotlib.rc( 'savefig', transparent=True )
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.nn.functional as F
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 unused import
# Default DPI for every diagnostic figure in this module.
dpi = 80
# def baseline_learning_diagnosis(num_epochs, train_performance_dict_list, val_src_performance_dict_list, val_tgt_performance_dict_list, PAD_list, i_CV, outputdir):
# train_performance_epochs = pd.DataFrame(train_performance_dict_list)
# val_src_performance_epochs = pd.DataFrame(val_src_performance_dict_list)
# val_tgt_performance_epochs = pd.DataFrame(val_tgt_performance_dict_list)
# # metric_list = ['loss', 'acc', 'sensitivity', 'precision', 'F1']
# metric_list = ['loss', 'acc', 'sensitivity', 'precision', 'F1', 'PAD']
# if not os.path.exists(outputdir):
# os.makedirs(outputdir)
# print('outputdir for baseline_learning_diagnosis output:', outputdir)
# fig = plt.figure(figsize=(5*len(metric_list), 3), dpi=dpi)
# for i, metric_name in enumerate(metric_list):
# ax1 = fig.add_subplot(1, len(metric_list), i+1)
# ax1.set_title('{}_epochs'.format(metric_name))
# ax1.set_xlabel('epoch')
# if metric_name=='PAD':
# ax1.plot(np.arange(num_epochs), PAD_list, color='blue', label='PAD_val')
# else:
# ax1.plot(np.arange(num_epochs), train_performance_epochs['{}'.format(metric_name)].values, color='blue', label='train')
# ax1.plot(np.arange(num_epochs), val_src_performance_epochs['src_{}'.format(metric_name)].values, color='red', label='val_src')
# ax1.plot(np.arange(num_epochs), val_tgt_performance_epochs['tgt_{}'.format(metric_name)].values, color='green', label='val_tgt')
# ax1.legend(loc="upper right")
# # plt.show()
# fig.savefig(outputdir+'learning_curve_CV{}'.format(i_CV))
# def dann_learning_diagnosis(num_epochs, train_performance_dict_list, val_performance_dict_list, PAD_list, i_CV, outputdir):
def dann_learning_diagnosis(num_epochs, train_performance_dict_list, val_performance_dict_list, PAD_list, i_CV, epoch_optimal, outputdir):
    """Plot per-epoch DANN learning curves (one subplot per metric) and save
    the figure as `learning_curve_CV{i_CV}` under `outputdir`.

    Args:
        num_epochs: number of epochs (x-axis length).
        train_performance_dict_list / val_performance_dict_list: one metrics
            dict per epoch, converted to DataFrames below.
        PAD_list: per-epoch proxy A-distance values (validation split).
        i_CV: cross-validation fold index, used in the file name.
        epoch_optimal: checkpoint epoch, drawn as a dashed vertical line.
        outputdir: output directory (created if missing).
    """
    train_performance_epochs = pd.DataFrame(train_performance_dict_list)
    val_performance_epochs = pd.DataFrame(val_performance_dict_list)
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    print('outputdir for dann_learning_diagnosis output:', outputdir)
    # metric_list = ['src_class_loss', 'src_class_acc', 'tgt_class_acc', 'tgt_sensitivity', 'tgt_precision', 'tgt_F1', 'domain_acc']
    # metric_list = ['src_class_loss', 'src_acc', 'tgt_acc', 'tgt_sensitivity', 'tgt_precision', 'tgt_F1', 'domain_acc', 'PAD']
    metric_list = ['total_loss', 'class_loss', 'domain_loss', 'acc', 'sensitivity', 'precision', 'F1', 'PAD']
    fig = plt.figure(figsize=(5*len(metric_list), 3), dpi=dpi)
    for i, metric_name in enumerate(metric_list):
        ax1 = fig.add_subplot(1, len(metric_list), i+1)
        ax1.set_title('{}_epochs'.format(metric_name))
        ax1.set_xlabel('epoch')
        if metric_name=='PAD':
            # PAD is a single validation-only curve.
            ax1.plot(np.arange(num_epochs), PAD_list, color='blue', label='PAD_val')
        elif metric_name=='total_loss':
            # total_loss is logged without a src_/tgt_ prefix.
            ax1.plot(np.arange(num_epochs), train_performance_epochs['{}'.format(metric_name)].values, color='blue', label='train')
            ax1.plot(np.arange(num_epochs), val_performance_epochs['{}'.format(metric_name)].values, color='red', label='val')
        else:
            # Remaining metrics exist per-domain with src_/tgt_ prefixes.
            ax1.plot(np.arange(num_epochs), train_performance_epochs['src_{}'.format(metric_name)].values, color='blue', label='train')
            ax1.plot(np.arange(num_epochs), val_performance_epochs['src_{}'.format(metric_name)].values, color='red', label='val_src')
            ax1.plot(np.arange(num_epochs), val_performance_epochs['tgt_{}'.format(metric_name)].values, color='green', label='val_tgt')
        ax1.axvline(epoch_optimal, linestyle='--', color='gray', alpha=0.7, label='checkpoint')
        ax1.legend(loc="upper right")
    fig.savefig(outputdir+'learning_curve_CV{}'.format(i_CV))
    pyplot.close(fig)
def model_output_diagnosis(model, src_loader, tgt_loader, device, fig, col_name, ax_idx):
    """Plot class- and domain-head sigmoid outputs for one (src, tgt) pair.

    Draws four subplots (src class, src domain, tgt class, tgt domain) into
    `fig` at grid positions `ax_idx`, and returns two (n, 2) arrays pairing
    each sample's positive-class sigmoid with its DataNameList index
    (source first, target second).
    """
    model.eval()
    src_data = src_loader.dataset.data.to(device)
    src_labels = src_loader.dataset.labels.to(device).long()
    src_DataNameList_idx = src_loader.dataset.DataNameList_idx
    tgt_data = tgt_loader.dataset.data.to(device)
    tgt_labels = tgt_loader.dataset.labels.to(device).long()
    tgt_DataNameList_idx = tgt_loader.dataset.DataNameList_idx
    # Forward pass; each head returns logits.
    # (Removed the original's torch-tensor domain labels here: they were
    # never used and were immediately shadowed by the numpy versions below.)
    src_feature, src_class_out, src_domain_out = model(src_data)
    tgt_feature, tgt_class_out, tgt_domain_out = model(tgt_data)
    # Class predictions from the sigmoid of the logits (adl=0, fall=1).
    src_class_sigmoid = torch.sigmoid(src_class_out).data.detach().cpu().numpy()
    src_class_pred = np.argmax(src_class_sigmoid, 1)
    tgt_class_sigmoid = torch.sigmoid(tgt_class_out).data.detach().cpu().numpy()
    tgt_class_pred = np.argmax(tgt_class_sigmoid, 1)
    # Domain predictions (src=0, tgt=1).
    src_domain_sigmoid = torch.sigmoid(src_domain_out).data.detach().cpu().numpy()
    src_domain_pred = np.argmax(src_domain_sigmoid, 1)
    tgt_domain_sigmoid = torch.sigmoid(tgt_domain_out).data.detach().cpu().numpy()
    tgt_domain_pred = np.argmax(tgt_domain_sigmoid, 1)
    src_domain_labels = np.zeros(src_domain_pred.shape[0])
    tgt_domain_labels = np.ones(tgt_domain_pred.shape[0])
    # Debug accuracies. Fixed: the original divided every accuracy by the
    # SOURCE set size, so the tgt_* values were wrong whenever the two
    # loaders differed in length.
    src_size = src_class_pred.shape[0]
    tgt_size = tgt_class_pred.shape[0]
    src_class_acc = (src_class_pred==src_labels.data.detach().cpu().numpy()).sum()/src_size
    src_domain_acc = (src_domain_pred==src_domain_labels).sum()/src_size
    tgt_class_acc = (tgt_class_pred==tgt_labels.data.detach().cpu().numpy()).sum()/tgt_size
    tgt_domain_acc = (tgt_domain_pred==tgt_domain_labels).sum()/tgt_size
    # print('acc performance:', src_class_acc, src_domain_acc, tgt_class_acc, tgt_domain_acc)
    ax1 = fig.add_subplot(4, 2, ax_idx[0])
    ax1.plot(src_class_sigmoid[:,1],'.b', label='src_class_sigmoid', markersize=3)
    ax1.plot(src_class_sigmoid[:,1].round(),'b', alpha=0.5, label='src_class_decision')
    ax1.plot(src_labels.data.detach().cpu().numpy(),'r', alpha=0.5, label='src_class_labels')
    ax1.legend(loc='upper right')
    ax1.set_title(col_name, fontsize=20)
    ax2 = fig.add_subplot(4, 2, ax_idx[1])
    ax2.plot(src_domain_sigmoid[:,0],'.b', label='src_domain_sigmoid', markersize=3)
    ax2.plot(src_domain_labels,'r', alpha=0.5, label='src_domain_labels')
    ax2.legend(loc='upper right')
    ax3 = fig.add_subplot(4, 2, ax_idx[2])
    ax3.plot(tgt_class_sigmoid[:,1],'.b', label='tgt_class_sigmoid', markersize=3)
    ax3.plot(tgt_class_sigmoid[:,1].round(),'b', alpha=0.5, label='tgt_class_decision')
    ax3.plot(tgt_labels.data.detach().cpu().numpy(),'r', alpha=0.5, label='tgt_class_labels')
    ax3.legend(loc='upper right')
    ax4 = fig.add_subplot(4, 2, ax_idx[3])
    ax4.plot(tgt_domain_sigmoid[:,0],'.b', label='tgt_domain_sigmoid', markersize=3)
    ax4.plot(tgt_domain_labels,'r', alpha=0.5, label='tgt_domain_labels')
    ax4.legend(loc='upper right')
    return np.stack((src_class_sigmoid[:,1], src_DataNameList_idx), axis=1), np.stack((tgt_class_sigmoid[:,1], tgt_DataNameList_idx), axis=1)
def model_output_diagnosis_trainval(model, src_train_loader, tgt_train_loader, src_val_loader, tgt_val_loader, device, plt_title, i_CV, outputdir):
    """Run model_output_diagnosis on train (left column) and val (right
    column), save the figure, and dump the val-split class sigmoids to disk
    via data_saver."""
    model.eval()
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    print('outputdir for model_output_diagnosis_trainval output:', outputdir)
    fig = plt.figure(figsize=(10, 10), dpi=dpi)
    # Train split fills subplot positions 1,3,5,7; val fills 2,4,6,8.
    _, _ = model_output_diagnosis(model, src_train_loader, tgt_train_loader, device, fig, 'train'+plt_title, ax_idx=[1,3,5,7])
    src_class_sigmoid, tgt_class_sigmoid = model_output_diagnosis(model, src_val_loader, tgt_val_loader, device, fig, 'val'+plt_title, ax_idx=[2,4,6,8])
    ax_list = fig.axes
    ax_list[0].set_ylabel('src_class', size='large')
    ax_list[1].set_ylabel('src_domain', size='large')
    ax_list[2].set_ylabel('tgt_class', size='large')
    ax_list[3].set_ylabel('tgt_domain', size='large')
    fig.tight_layout()
    plt.show()
    fig.savefig(outputdir+'class_out_diagnosis_CV{}{}'.format(i_CV, plt_title))
    pyplot.close(fig)
    # Persist the validation sigmoids (value + sample index) for later analysis.
    data_saver(src_class_sigmoid, 'src_class_sigmoid_CV{}'.format(i_CV), outputdir)
    data_saver(tgt_class_sigmoid, 'tgt_class_sigmoid_CV{}'.format(i_CV), outputdir)
def model_features_diagnosis(model, src_loader, tgt_loader, device, ax, col_name, DR_mode='PCA'):
    """Scatter-plot the feature space of src+tgt samples on `ax`, reduced to
    2-D by PCA or t-SNE (`DR_mode`), colour/marker-coded by class and domain.

    NOTE(review): the PCA and tSNE branches duplicate the plotting code
    almost verbatim — a candidate for extraction into a shared helper.
    """
    model.eval()
    src_data = src_loader.dataset.data
    src_labels = src_loader.dataset.labels
    tgt_data = tgt_loader.dataset.data
    tgt_labels = tgt_loader.dataset.labels
    src_data = src_data.to(device)
    src_labels = src_labels.to(device).long()
    tgt_data = tgt_data.to(device)
    tgt_labels = tgt_labels.to(device).long()
    src_domain_labels = torch.zeros(src_data.size()[0]).to(device).long()
    tgt_domain_labels = torch.ones(tgt_data.size()[0]).to(device).long()
    src_feature, src_class_out, src_domain_out = model(src_data)
    tgt_feature, tgt_class_out, tgt_domain_out = model(tgt_data)
    # Stack src and tgt features/labels into single arrays for the reducer.
    feature_np = torch.cat([src_feature, tgt_feature], dim=0).data.detach().cpu().numpy()
    labels_np = torch.cat([src_labels, tgt_labels], dim=0).data.detach().cpu().numpy()
    domain_np = np.concatenate((src_domain_labels.data.detach().cpu().numpy(), tgt_domain_labels.data.detach().cpu().numpy()), axis=0)
    feature_np = StandardScaler().fit_transform(feature_np) # normalizing the features
    print('show standardize mean and std:', np.mean(feature_np),np.std(feature_np))
    if DR_mode == 'PCA':
        pca_features = PCA(n_components=10)
        principalComponents_features = pca_features.fit_transform(feature_np)
        var_pca = np.cumsum(np.round(pca_features.explained_variance_ratio_, decimals=3)*100)
        print('PCA var:', var_pca)
        # Cumulative variance explained by the two plotted components.
        explained_var = var_pca[1]
        ax.set_xlabel('Principal Component - 1',fontsize=12)
        ax.set_ylabel('Principal Component - 2',fontsize=12)
        ax.set_title('{} (explained_var: {:.2f}%)'.format(col_name, explained_var),fontsize=15)
        # ax.set_title('PCA of features extracted by Gf ({})'.format(col_name),fontsize=15)
        ax.tick_params(axis='both', which='major', labelsize=12)
        class_ids = [0, 1] # adl, fall
        domain_ids = [0, 1] # src, tgt
        colors = ['r', 'g']
        markers = ['o', 'x']
        legend_dict = {
            '00': 'adl_src',
            '01': 'adl_tgt',
            '10': 'fall_src',
            '11': 'fall_tgt',
        }
        pt_label = ['']
        # One scatter call per (class, domain) combination; class picks the
        # marker, domain picks the colour.
        for class_id, marker in zip(class_ids,markers):
            for domain_id, color in zip(domain_ids,colors):
                indicesToKeep = np.where((labels_np==class_id) & (domain_np==domain_id))[0]
                if class_id == 1:
                    alpha = 0.3
                    ax.scatter(principalComponents_features[indicesToKeep, 0],
                               principalComponents_features[indicesToKeep, 1],
                               s = 50, marker=marker, c=color, alpha=alpha,
                               label=legend_dict[str(class_id)+str(domain_id)])
                else:
                    alpha = 0.3
                    ax.scatter(principalComponents_features[indicesToKeep, 0],
                               principalComponents_features[indicesToKeep, 1],
                               s = 50, marker=marker, edgecolors=color, facecolors='None', alpha=alpha,
                               label=legend_dict[str(class_id)+str(domain_id)])
        ax.legend(loc='upper right', prop={'size': 15})
    elif DR_mode == 'tSNE':
        # pca_features = PCA(n_components=10)
        # principalComponents_features = pca_features.fit_transform(feature_np)
        RANDOM_STATE = 0
        tsne = TSNE(n_components=2, perplexity=30, random_state=RANDOM_STATE)
        tsne_features = tsne.fit_transform(feature_np)
        print(feature_np.shape, tsne_features.shape)
        # var_pca = np.cumsum(np.round(pca_features.explained_variance_ratio_, decimals=3)*100)
        # print('PCA var:', var_pca)
        # explained_var = var_pca[1]
        # NOTE(review): axis labels still say "Principal Component" in the
        # tSNE branch — copied from the PCA branch.
        ax.set_xlabel('Principal Component - 1',fontsize=12)
        ax.set_ylabel('Principal Component - 2',fontsize=12)
        # ax.set_title('{} (explained_var: {:.2f}%)'.format(col_name, explained_var),fontsize=15)
        ax.set_title('{}'.format(col_name),fontsize=15)
        # ax.set_title('PCA of features extracted by Gf ({})'.format(col_name),fontsize=15)
        ax.tick_params(axis='both', which='major', labelsize=12)
        class_ids = [0, 1] # adl, fall
        domain_ids = [0, 1] # src, tgt
        colors = ['r', 'g']
        markers = ['o', 'x']
        legend_dict = {
            '00': 'adl_src',
            '01': 'adl_tgt',
            '10': 'fall_src',
            '11': 'fall_tgt',
        }
        pt_label = ['']
        for class_id, marker in zip(class_ids,markers):
            for domain_id, color in zip(domain_ids,colors):
                indicesToKeep = np.where((labels_np==class_id) & (domain_np==domain_id))[0]
                if class_id == 1:
                    alpha = 0.3
                    ax.scatter(tsne_features[indicesToKeep, 0],
                               tsne_features[indicesToKeep, 1],
                               s = 50, marker=marker, c=color, alpha=alpha,
                               label=legend_dict[str(class_id)+str(domain_id)])
                else:
                    alpha = 0.3
                    ax.scatter(tsne_features[indicesToKeep, 0],
                               tsne_features[indicesToKeep, 1],
                               s = 50, marker=marker, edgecolors=color, facecolors='None', alpha=alpha,
                               label=legend_dict[str(class_id)+str(domain_id)])
        ax.legend(loc='upper right', prop={'size': 15})
def model_features_diagnosis_trainval(model, src_train_loader, tgt_train_loader, src_val_loader, tgt_val_loader, device, plt_title, i_CV, outputdir):
    """Run model_features_diagnosis on the train and val splits side by side
    and save the combined figure as `feature_diagnosis_CV{i_CV}{plt_title}`."""
    # DR_mode = 'PCA'
    DR_mode = 'tSNE'  # dimensionality-reduction method used for both panels
    model.eval()
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    print('outputdir for model_features_diagnosis_trainval output:', outputdir)
    fig = plt.figure(figsize=(13, 5), dpi=dpi)
    fig.suptitle('{} of features extracted by Gf'.format(DR_mode), fontsize=18)
    ax1 = fig.add_subplot(1, 2, 1)
    ax2 = fig.add_subplot(1, 2, 2)
    # model_features_diagnosis(model, src_train_loader, tgt_train_loader, device, ax1, 'train'+plt_title)
    # model_features_diagnosis(model, src_val_loader, tgt_val_loader, device, ax2, 'val'+plt_title)
    model_features_diagnosis(model, src_train_loader, tgt_train_loader, device, ax1, 'train'+plt_title, DR_mode=DR_mode)
    model_features_diagnosis(model, src_val_loader, tgt_val_loader, device, ax2, 'val'+plt_title, DR_mode=DR_mode)
    plt.show()
    fig.savefig(outputdir+'feature_diagnosis_CV{}{}'.format(i_CV, plt_title))
    pyplot.close(fig)
def get_mean(mean_std):
    """Parse the mean out of a 'meanยฑstd' formatted string."""
    mean_text = mean_std.split('ยฑ')[0]
    return float(mean_text)
def get_std(mean_std):
    """Parse the std out of a 'meanยฑstd' formatted string."""
    std_text = mean_std.split('ยฑ')[1]
    return float(std_text)
def get_PAD(src_train_loader, tgt_train_loader, src_val_loader, tgt_val_loader, model, device, c=3000):
    """Proxy A-Distance (PAD) between source and target feature distributions.

    An SVM is trained to separate src (label 0) from tgt (label 1) features
    extracted from the train split; PAD = 2*(1 - 2*MSE) of its predicted
    probabilities on the val split. Higher PAD means the domains are easier
    to tell apart (i.e. less domain-invariant features).

    Args:
        *_loader: DataLoaders whose .dataset.data feeds the feature extractor.
        model: network returning (feature, class_out, domain_out).
        device: torch device for the forward passes.
        c: SVM regularization strength C.
    """
    # Bug fix: `svm` and `mean_squared_error` were used without being
    # imported anywhere in this module (NameError at call time). sklearn is
    # already a dependency of this file, so import locally here.
    from sklearn import svm
    from sklearn.metrics import mean_squared_error

    model.eval()
    # Extract features for the training split of both domains.
    data = src_train_loader.dataset.data.to(device)
    src_domain_labels = np.zeros(data.shape[0])
    src_feature_out, _, _ = model(data)
    data = tgt_train_loader.dataset.data.to(device)
    tgt_domain_labels = np.ones(data.shape[0])
    tgt_feature_out, _, _ = model(data)
    train_data = np.concatenate((src_feature_out.data.detach().cpu().numpy(),tgt_feature_out.data.detach().cpu().numpy()),axis=0)
    train_label = np.concatenate((src_domain_labels,tgt_domain_labels))
    # Train the domain classifier.
    svm_model = svm.SVC(C=c, probability=True)
    svm_model.fit(train_data, train_label)
    # Extract features for the validation split.
    data = src_val_loader.dataset.data.to(device)
    src_domain_labels = np.zeros(data.shape[0])
    src_feature_out, _, _ = model(data)
    data = tgt_val_loader.dataset.data.to(device)
    tgt_domain_labels = np.ones(data.shape[0])
    tgt_feature_out, _, _ = model(data)
    val_data = np.concatenate((src_feature_out.data.detach().cpu().numpy(),tgt_feature_out.data.detach().cpu().numpy()),axis=0)
    val_label = np.concatenate((src_domain_labels,tgt_domain_labels))
    svm_out = svm_model.predict_proba(val_data)
    mse = mean_squared_error(val_label, svm_out[:,1])
    PAD = 2. * (1. - 2. * mse)
    return PAD
def modification_date(filename):
    """Return `filename`'s last-modification time as a datetime.

    Bug fix: this module does `from datetime import datetime` (after
    `import datetime`), so the name `datetime` is the *class*;
    `datetime.datetime.fromtimestamp` raised AttributeError.
    """
    t = os.path.getmtime(filename)
    return datetime.fromtimestamp(t)
def get_rep_stats(df_performance_table_agg, rep_n):
    """Append a 'rep_avg' column summarizing repeated runs.

    Accuracy-like rows hold 'meanยฑstd' strings: their means are averaged
    across columns and re-formatted as 'meanยฑstd'. Parameter rows are
    averaged numerically.
    NOTE(review): `rep_n` is unused in the visible code — confirm it can
    be dropped.
    """
    df_acc = df_performance_table_agg.loc[ ['source', 'DANN', 'target', 'domain', 'PAD_source', 'PAD_DANN'] , ].copy()
    df_params = df_performance_table_agg.loc[ ['channel_n', 'batch_size', 'learning_rate', 'time_elapsed', 'num_params'], ].copy()
    # accs: parse the mean out of every 'meanยฑstd' cell, then summarize.
    df_performance_table_all_mean = df_acc.applymap(get_mean)
    df_performance_table_means = df_performance_table_all_mean.mean(axis=1)
    df_performance_table_stds = df_performance_table_all_mean.std(axis=1)
    df_performance_table_all_mean['mean'] = df_performance_table_means
    df_performance_table_all_mean['std'] = df_performance_table_stds
    df_performance_table_all_mean['rep'] = df_performance_table_all_mean[['mean', 'std']].apply(lambda x : '{:.3f}ยฑ{:.3f}'.format(x[0],x[1]), axis=1)
    # params: simple numeric average across columns.
    df_params_means = df_params.mean(axis=1)
    df_performance_table_agg['rep_avg'] = ''
    df_performance_table_agg.loc[ ['source','DANN','target','domain','PAD_source','PAD_DANN'] , ['rep_avg']] = df_performance_table_all_mean.loc[:, 'rep']
    df_performance_table_agg.loc[ ['channel_n','batch_size','learning_rate','time_elapsed','num_params'], ['rep_avg']] = df_params_means
    return df_performance_table_agg
def get_optimal_v0(df_performance_table_agg):
    """Pick the best hyper-parameters: within each candidate-column group,
    the column with the highest 'DANN' score wins and supplies one value.

    Returns (batch_size, channel_n, learning_rate).
    """
    table = df_performance_table_agg.copy()

    def _best(columns, row):
        # Rank the candidate columns by their 'DANN' row (descending) and
        # read `row` from the winning column.
        ranked = table[columns].sort_values(by='DANN', ascending=False, axis=1)
        return ranked.loc[row].iloc[0]

    batch_size_optimal = _best(['HP_i0', 'HP_i1', 'HP_i2'], 'batch_size')
    channel_n_optimal = _best(['HP_i3', 'HP_i3_1', 'HP_i4'], 'channel_n')
    learning_rate_optimal = _best(['HP_i5', 'HP_i5_1', 'HP_i6'], 'learning_rate')
    return int(batch_size_optimal), int(channel_n_optimal), learning_rate_optimal
def get_optimal_v1(df_performance_table_agg):
    """Return the channel_n of the column with the highest 'DANN' score."""
    ranked = df_performance_table_agg.copy().sort_values(by='DANN', ascending=False, axis=1)
    return int(ranked.loc['channel_n'].iloc[0])
import csv
import os
from django.core.management.base import BaseCommand
from responses.models import NewsOrgType, Response, Tool, ToolTask
class Command(BaseCommand):
    """Imports the initial survey responses from the exported Google-sheet TSV."""

    help = "Parses initial responses from Google spreadsheet"

    def parse_response(self, response):
        """Upsert one spreadsheet row as a Response plus its M2M relations."""
        news_org_type = self.parse_news_org_type(response["newsOrgType"])
        tools_used = self.parse_tools(response["toolsUsed"])
        most_important_tool = self.parse_most_important_tool(
            response["mostImportantTool"]
        )
        tasks_used = self.parse_tool_tasks(response["tasksUsed"])
        stopped_using = self.parse_boolean(response["stoppedUsing"])
        talk_more = self.parse_boolean(response["talkMore"])
        obj = Response.objects.update_or_create(
            # "jobTItle" is the (misspelled) header in the source
            # spreadsheet — do not "fix" the key.
            job_title=response["jobTItle"],
            job_duties=response["jobDuties"],
            news_org_type=news_org_type,
            news_org_age=response["newsOrgAge"],
            most_important_tool=most_important_tool,
            tool_satisfaction=response["toolSatisfaction"],
            tool_recommendation=response["toolRecommendation"],
            stopped_using=stopped_using,
            why_stopped_using=response["whyStoppedUsing"],
            org_struggle=response["orgStruggles"],
            org_comparison=response["orgComparison"],
            org_communication=response["orgCommunication"],
            org_sustainability=response["orgSustainability"],
            talk_more=talk_more,
            email=response["email"],
        )[0]
        obj.tools_used.set(tools_used)
        obj.tasks_used.set(tasks_used)

    def parse_news_org_type(self, news_org_type):
        """Get-or-create the NewsOrgType row for this answer."""
        return NewsOrgType.objects.update_or_create(name=news_org_type)[0]

    def parse_tools(self, tools):
        """Split a comma-separated tool list into Tool rows.

        Fix: names are stripped — the original kept the leading space from
        "a, b" lists, creating duplicate " Tool" rows alongside "Tool".
        """
        return [
            Tool.objects.update_or_create(name=tool.strip())[0]
            for tool in tools.split(",")
        ]

    def parse_most_important_tool(self, tool):
        """Get-or-create the single most-important Tool (whitespace-stripped)."""
        return Tool.objects.update_or_create(name=tool.strip())[0]

    def parse_tool_tasks(self, tool_tasks):
        """Split a comma-separated task list into ToolTask rows (stripped)."""
        return [
            ToolTask.objects.update_or_create(name=task.strip())[0]
            for task in tool_tasks.split(",")
        ]

    def parse_boolean(self, answer):
        """Map the spreadsheet's yes/no answer to bool ("Yes" is the only truthy value)."""
        return answer == "Yes"

    def handle(self, *args, **options):
        """Entry point: stream the bundled TSV and upsert every row."""
        print("Bootstrapping tool tasks")
        cmd_path = os.path.dirname(os.path.realpath(__file__))
        data_path = os.path.join(cmd_path, "../data/news_tools_census.tsv")
        with open(data_path) as f:
            reader = csv.DictReader(f, dialect="excel-tab")
            for row in reader:
                self.parse_response(row)
|
import dash_bootstrap_components as dbc
from dash import html
# A button plus a popover anchored to it: the popover is shown/hidden by
# clicking the button (trigger="click"), matched via the shared id
# "component-target".
popovers = html.Div(
    [
        dbc.Button(
            "Click Me",
            id="component-target",
            n_clicks=0,
        ),
        dbc.Popover(
            [
                dbc.PopoverHeader("Popover header"),
                dbc.PopoverBody("And here's some amazing content. Cool!"),
            ],
            target="component-target",  # id of the element to attach to
            trigger="click",
        ),
    ]
)
|
import mysql.connector
from mysql.connector import errorcode
import confidential
import constants
import time
from datetime import datetime
class Database:
    """A class that represents a database connection.

    This class represents a database connection and manages all
    queries to the database.

    Attributes:
        cnx: The sql connection object, or None if connecting failed.
        is_connected: True while a connection is open.
        connection_established: Timestamp of the last successful connect.
        connection_lost: Timestamp of the last disconnect.
    """

    def __init__(self):
        """Inits Database class and immediately attempts to connect."""
        self.is_connected = False
        self.connection_established = datetime.now()
        self.connection_lost = datetime.now()
        self.cnx = self.connect()

    def __del__(self):
        """Closes connection if database object gets deleted."""
        self.disconnect()

    def disconnect(self):
        """Closes connection to the database."""
        if self.is_connected:
            print('Connection closed.')
            self.connection_lost = datetime.now()
            self.is_connected = False
            try:
                self.cnx.close()
            except Exception:
                # The server may already have dropped the link; the
                # connection is considered closed either way.
                pass
        else:
            print('Connection is closed already.')

    def __str__(self) -> str:
        """Method that prints the server status"""
        if self.is_connected:
            return f'[Connected]: to {confidential.SQLHOSTNAME} since {self.connection_established}.'
        else:
            return f'[Disconnected]: since {self.connection_lost}.'

    def connect(self):
        """Method that tries to establish a sql connection.

        Makes up to constants.SQLCONNECTIONTRYS attempts, sleeping
        constants.SQLCONNECTIONSLEEP seconds between failures.

        Returns:
            The connection object on success, None when every attempt
            failed, or False when a connection is already open.
        """
        if self.is_connected:
            print('Already connected.')
            return False
        for attempt in range(constants.SQLCONNECTIONTRYS):
            try:
                cnx = mysql.connector.connect(user=confidential.SQLUSERNAME,
                                              database=confidential.SQLDATABASENAME,
                                              host=confidential.SQLHOSTNAME,
                                              password=confidential.SQLPASSWORD,
                                              port=confidential.SQLPORT)
            except mysql.connector.Error as err:
                if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
                    print("Identity could not be verified.")
                elif err.errno == errorcode.ER_BAD_DB_ERROR:
                    print("Database does not exist.")
                else:
                    print(err)
                print("Could not establish connection. Connection closed.")
                # BUG FIX: the original called cnx.close() here, but cnx is
                # never bound when connect() itself raised, which caused an
                # UnboundLocalError instead of a retry.
                if attempt < constants.SQLCONNECTIONTRYS - 1:
                    time.sleep(constants.SQLCONNECTIONSLEEP)
            else:
                self.is_connected = True
                self.connection_established = datetime.now()
                print(
                    f'Connection to {confidential.SQLDATABASENAME}@{confidential.SQLHOSTNAME} established.')
                return cnx
        # BUG FIX: the original fell through to `return cnx`, raising
        # UnboundLocalError when no attempt ever succeeded.
        return None
# testing implementation
# Ad-hoc smoke test: open a connection, keep it alive for 5 seconds,
# then close it. Runs on import of this module.
db = Database()
time.sleep(5)
db.disconnect()
|
# -*- coding: utf-8 -*-
'''
Salt modules to work with the Architect service.
'''
# Import python libs
from __future__ import absolute_import
import yaml
import logging
from architect_client.libarchitect import ArchitectClient
__virtualname__ = 'architect'
logger = logging.getLogger(__name__)
def __virtual__():
    # Always load this module under the 'architect' virtual name.
    return __virtualname__
def _client():
    """Return a new ArchitectClient instance."""
    return ArchitectClient()
def inventory():
    '''
    Get the Architect metadata inventory

    CLI Examples:

    .. code-block:: bash

        salt-call architect.inventory
    '''
    # BUG FIX: yaml.load() without an explicit Loader is unsafe on untrusted
    # input and raises a TypeError on PyYAML >= 6; safe_load only constructs
    # plain Python objects.
    data = yaml.safe_load(_client().get_data())
    return data
def node_pillar(name):
    '''
    Get the Architect node pillar for given Salt master.

    CLI Examples:

    .. code-block:: bash

        salt-call architect.node_pillar node.domain
    '''
    # BUG FIX: use safe_load; yaml.load without a Loader is unsafe and a
    # TypeError on PyYAML >= 6.
    data = yaml.safe_load(_client().get_data(name))
    return {
        name: data
    }
def node_classify(name, data=None):
    '''
    Classify node by given dictionary of parameters

    CLI Examples:

    .. code-block:: bash

        salt-call architect.node_classify minion.net {'param1': 'value2'}
    '''
    # BUG FIX: the default used to be a mutable dict ({}), which is shared
    # across calls; use None and create a fresh dict per call.
    if data is None:
        data = {}
    output = _client().classify_node({
        'name': name,
        'data': data
    })
    return output
def node_info():
    '''
    Get Salt minion metadata and forward it to the Architect master.

    CLI Examples:

    .. code-block:: bash

        salt-call architect.minion_info
    '''
    # __salt__ is injected into the module namespace by the Salt loader.
    data = {
        'pillar': __salt__['pillar.data'](),
        'grain': __salt__['grains.items'](),
        'lowstate': __salt__['state.show_lowstate'](),
    }
    return data
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import argparse
import os
import math
# 16x16 quantization matrix, written as whitespace-separated text and
# parsed into a numpy array below.
s = '''
16 8 7 6 6 1 1 1 1 1 1 1 1 1 1 1
7 7 6 6 1 1 1 1 1 1 1 1 1 1 1 30
7 6 6 1 1 1 1 1 1 1 1 1 1 1 30 28
6 8 1 1 1 1 1 1 1 1 1 1 1 32 35 29
8 1 1 1 1 1 1 1 1 1 1 1 32 35 32 28
1 1 1 1 1 1 1 1 1 1 1 35 40 42 40 35
1 1 1 1 1 1 1 1 1 1 35 44 42 40 35 31
1 1 1 1 1 1 1 1 1 35 44 44 50 53 52 45
1 1 1 1 1 1 1 1 31 34 44 55 53 52 45 39
1 1 1 1 1 1 1 31 34 40 41 47 52 45 52 50
1 1 1 1 1 1 30 32 36 41 47 52 54 57 50 46
1 1 1 1 1 36 32 36 44 47 52 57 60 60 55 50
1 1 1 1 36 39 42 44 48 52 57 61 60 60 55 51
1 1 1 39 42 47 48 46 59 57 56 55 52 51 54 51
1 1 41 46 47 48 48 49 53 56 53 50 51 52 51 50
1 43 47 47 48 48 49 57 57 56 50 52 52 51 50 50
'''
# Parse the tokens directly into an integer array, then shape it 16x16.
l = np.array(s.split(), dtype=int)
table = np.resize(l, (16, 16))
def compress(file, factor):
    """JPEG-style compression of a grayscale image.

    Splits the image into 16x16 blocks, applies a DCT to each block,
    quantizes by ``table * factor``, zigzag-scans the coefficients, strips
    trailing zeros, and writes both the quantized image
    (result<factor>.bmp) and a text encoding (result<factor>.txt).
    Appends summary statistics (average zero count, file size, PSNR)
    to static.txt.
    """
    #Check the file existence
    if os.path.isfile(file):
        image = cv2.imread(file, 0)  # 0 -> load as grayscale
        #plt.imshow(image)
        #plt.show()
        n = 16
        # Zigzag visiting order for a 16x16 block: sort by anti-diagonal,
        # alternating direction on odd/even diagonals.
        indexorder = sorted(((x,y) for x in range(n) for y in range(n)), key = lambda p: (p[0]+p[1], -p[1] if (p[0]+p[1]) % 2 else p[1]) )
        #print(table)
        h, w = image.shape[:2]
        newH = h
        newW = w
        compress_list = []  # per-block encoded coefficient strings
        count_list = []     # per-block count of remaining (interior) zeros
        #Extend image's size to multiple of 16
        while newH % 16 != 0:
            newH += 1
        while newW % 16 != 0:
            newW += 1
        whole = np.zeros((newH, newW), np.uint8)
        newImage = np.zeros((newH, newW), np.uint8)
        #Assign each value to new image
        for i in range(h):
            for j in range(w):
                newImage[i, j] = image[i, j]
        for i in range(0, newH, 16):
            for j in range(0, newW, 16):
                #Take 16*16 block
                imsub = np.zeros((16, 16))
                imsub[:16, :16] = newImage[i: i + 16, j: j + 16]
                vis = np.zeros((16,16), np.float32)
                vis[:16, :16] = imsub
                #print(imsub)
                #DCT process
                dct = cv2.dct(vis)
                #Divide process (quantization scaled by the user factor)
                result = np.divide(dct, table * factor)
                result = np.round(result)
                result = result.astype(int)
                #print(result)
                zigzag = [result[m, n] for m, n in indexorder]
                #Count the last consecutive zeros
                c = countZero(zigzag)
                c = [str(int(s)) for s in c]
                cnt = c.count("0")
                count_list.append(cnt)
                # "$" marks end-of-block; the interior zero count follows it.
                c += ["$", str(cnt)]
                compress_list.append(c)
                whole[i: i + 16, j: j + 16] = result
        plt.imshow(whole)
        plt.show()
        cv2.imwrite('result{}.bmp'.format(factor), whole)
        with open('result{}.txt'.format(factor), 'w') as f:
            for l in compress_list:
                for c in l:
                    f.write(c + " ")
                f.write("\n")
            f.close()  # NOTE(review): redundant -- the with-block closes f
        avg = sum(count_list) / len(count_list)
        PSNR = calculate(image, whole)
        with open('static.txt', 'a') as f:
            f.write("{}:\n".format(file))
            f.write("Factor: {}, Average zero: {}, File size: {} bytes, PSNR: {}\n".format(factor, avg, os.stat('result{}.txt'.format(factor)).st_size, PSNR))
def countZero(l):
    """Return *l* with its trailing zeros removed.

    Scans from the end for the first non-zero element and slices the list
    just after it. Returns an empty list when *l* is empty or all zeros.
    """
    for i, bit in enumerate(reversed(l)):
        if int(bit) != 0:
            # i is the distance of the last non-zero element from the end.
            return l[: len(l) - i]
    # BUG FIX: the original left `find = None` for an all-zero input and then
    # computed l[: len(l) - None], raising TypeError.
    return []
def calculate(frame1, frame2):
    """Compute the PSNR (in dB) between two equally sized grayscale frames.

    Returns float('inf') when the frames are identical (MSE == 0).
    """
    h, w = frame1.shape[:2]
    size = h * w
    total = 0
    for hi in range(h):
        for wi in range(w):
            # BUG FIX: convert to Python int BEFORE subtracting. The original
            # subtracted the uint8 pixels first, so e.g. 5 - 10 wrapped
            # around to 251 and corrupted the MSE.
            value = int(frame1[hi, wi]) - int(frame2[hi, wi])
            total += value ** 2
    MSE = total / size
    if MSE == 0:
        # BUG FIX: identical frames previously raised ZeroDivisionError.
        return float('inf')
    PSNR = 10 * math.log10(255 * 255 / MSE)
    return PSNR
#compress('./test1.bmp', 1)
if __name__ == "__main__":
    # Compress the image given on the command line at quantization
    # factors 1, 2 and 4, producing result{1,2,4}.{bmp,txt}.
    parser = argparse.ArgumentParser(description='Process an image.')
    parser.add_argument('file', metavar='N', type=str,
    help='an file for compress')
    args = parser.parse_args()
    for i in [1, 2, 4]:
        compress(args.file, i)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-27 05:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the Momserahterima (handover
    minutes-of-meeting) model with foreign keys to Project and to the
    Lintasartaperson roles involved (car, delivery, sa, sales)."""

    dependencies = [
        ('project', '0012_auto_20181113_1337'),
    ]

    operations = [
        migrations.CreateModel(
            name='Momserahterima',
            fields=[
                ('mom_id', models.AutoField(primary_key=True, serialize=False)),
                ('meeting_location', models.CharField(blank=True, max_length=100)),
                ('mom_date_time', models.DateTimeField()),
                ('attendees', models.TextField(max_length=1000)),
                ('start_project', models.DateField()),
                ('end_project', models.DateField()),
                ('catatan_durasi_project', models.TextField(max_length=1000)),
                ('uraian_pekerjaan_pm', models.TextField(max_length=1000)),
                ('waktu_pelaksanaan_pm', models.TextField(max_length=1000)),
                ('lokasi_pm', models.TextField(max_length=1000)),
                ('daftar_perangkat_pm', models.TextField(max_length=1000)),
                ('catatan_pm', models.TextField(max_length=1000)),
                ('sla_to_customer', models.CharField(blank=True, max_length=100)),
                ('sla_to_partner', models.CharField(blank=True, max_length=100)),
                ('catatan_sla', models.TextField(max_length=1000)),
                ('anggaran_opex_perbulan_or_pertahun', models.CharField(blank=True, max_length=100)),
                ('anggaran_capex_perbulan_or_pertahun', models.CharField(blank=True, max_length=100)),
                ('total_anggaran_operational', models.CharField(blank=True, max_length=100)),
                ('catatan_anggaran_operational', models.TextField(max_length=1000)),
                ('scope_of_work', models.TextField(max_length=1000)),
                ('catatan_tambahan', models.TextField(max_length=1000)),
                ('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Momserahterima_car', to='project.Lintasartaperson')),
                ('delivery', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Momserahterima_delivery', to='project.Lintasartaperson')),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Project')),
                ('sa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Momserahterima_sa', to='project.Lintasartaperson')),
                ('sales', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Momserahterima_sales', to='project.Lintasartaperson')),
            ],
        ),
    ]
|
#Print hello world
#print ('Hello world')
#Creating a variable to store first and last name
first_name = "Abdul"
last_name = "Al-Basith"
age = 26
#string concatenation
full_name = first_name + " " + last_name  # NOTE: only used by the commented-out prints below
#print (first_name)
#print (full_name)
#Use format string to print
#print ("Hello, my name is " + first_name + " and I am " + str(age) + " years old")
#format string - Easy way to format a lot of string with variables in it
#print(f"Hello, my name is {first_name} and I am {age} years old")
#create variables to store the names of all your classmates
#print all the names in the following message
# The triple-quoted block below is a disabled earlier version that used one
# variable per name; the list-based version underneath supersedes it.
'''
name1 = "Justin"
name2 ="Harrison"
name3 = "Abdul"
name4 = "Long"
name5 = "Griffen"
name6 = "Thanujan"
name7 = "Amelia"
print (f"The members of my cohort are {name1}, {name2}, {name3}, {name4}, {name5}, {name6}, and {name7}, and they are all awesome!")
'''
member_list = ["Justin","Harrison","Abdul","Long","Griffen", "Thanujan", "Amelia", "Claudia", "Nima"]
print (f"The members of my cohort are {', '.join(member_list)} and they are all awesome!")
#!/usr/bin/env python
# This file right now will upload a file named by the user, to the container
# The contents will contain 100 lines with the same message
# NOTE(review): Python 2 script (print statements, raw_input) -- it will not
# run under Python 3 without porting.
import os
import pyrax
import pyrax.exceptions as exc
import pyrax.utils as utils

# Credentials: read from ~/.rackspace_cloud_credentials
pyrax.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyrax.set_credential_file(creds_file)
cf = pyrax.cloudfiles

# List current Containers and ask which one to upload into
print "Your current containers are: ", cf.get_all_containers()
contName = raw_input("What container would you like to upload to?: ")
cont = cf.get_container(contName)
print
# Lets user choose name of file
fileName = raw_input("What is to be the name of this file?: ")
#Create contents of file (the same line repeated 100 times)
testContent = "This is a test file...\n"*100
#Store the file
obj = cf.store_object(cont, fileName, testContent)
print "Stored object:",obj
|
import pandas as pd
import re
from . import api
from datetime import datetime
from typing import Optional, List, Dict
from io import StringIO
class AppsFlyerReporter:
    """Pulls aggregate and raw-data reports from the AppsFlyer export API
    and returns them as pandas DataFrames."""

    api: api.AppsFlyerAPI
    verbose: bool = False
    timezone: str = 'GMT'

    def __init__(self, api: api.AppsFlyerAPI, verbose: bool=False, timezone: str='GMT'):
        self.api = api
        self.verbose = verbose
        self.timezone = timezone
        # Date format expected by the AppsFlyer 'from'/'to' query parameters.
        self._date_format = '%Y-%m-%d'

    def _convert_response_to_data_frame(self, response: str, columns: Optional[List[str]]) -> pd.DataFrame:
        """Parse a CSV response body, optionally restricted to *columns*."""
        stream = StringIO(response)
        df = pd.read_csv(stream, dtype='object')
        if columns is not None:
            df = df[columns]
        return df

    def get_installs_report(self, start_date: datetime, end_date: datetime, columns: Optional[List[str]]=None) -> pd.DataFrame:
        """Fetch the raw installs report for the date range."""
        report = self.get_report(
            report_endpoint='installs_report',
            start_date=start_date,
            end_date=end_date,
            columns=columns
        )
        return report

    def get_events_report(self, start_date: datetime, end_date: datetime, event_names: List[str], columns: Optional[List[str]]=None) -> pd.DataFrame:
        """Fetch the raw in-app events report, filtered to *event_names*."""
        report = self.get_report(
            report_endpoint='in_app_events_report',
            start_date=start_date,
            end_date=end_date,
            report_parameters={'event_name': ','.join(event_names)},
            columns=columns
        )
        return report

    def get_master_report(self, start_date: datetime, end_date: datetime, groupings: List[str], kpis: List[str], app_ids: Optional[List[str]]=None, columns: Optional[List[str]]=None) -> pd.DataFrame:
        """Fetch the aggregate master report for the given groupings/KPIs."""
        parameters = {
            'from': start_date.strftime(self._date_format),
            'to': end_date.strftime(self._date_format),
            'app_id': ','.join(app_ids) if app_ids is not None else self.api.app_id,
            'groupings': ','.join(groupings),
            'kpis': ','.join(kpis),
            # NOTE(review): hardcoded 'GMT' here while get_report uses
            # self.timezone -- confirm this is intentional.
            'timezone': 'GMT',
        }
        endpoint = 'export/master_report'
        response = self.api.get(
            endpoint=endpoint,
            parameters=parameters,
            verbose=self.verbose
        )
        data_frame = self._convert_response_to_data_frame(response=response, columns=columns)
        return data_frame

    def get_report(self, report_endpoint: str, start_date: datetime, end_date: datetime, report_parameters: Optional[Dict[str, str]]=None, columns: Optional[List[str]]=None) -> pd.DataFrame:
        """Fetch a raw-data report from export/{app_id}/<report_endpoint>.

        BUG FIX: report_parameters previously defaulted to a shared mutable
        dict ({}) annotated with the builtin `any` instead of a real type.
        """
        parameters = {
            'from': start_date.strftime(self._date_format),
            'to': end_date.strftime(self._date_format),
            'timezone': self.timezone,
        }
        if report_parameters:
            parameters.update(report_parameters)
        endpoint = 'export/{app_id}/' + report_endpoint
        response = self.api.get(
            endpoint=endpoint,
            parameters=parameters,
            verbose=self.verbose
        )
        data_frame = self._convert_response_to_data_frame(response=response, columns=columns)
        return data_frame
class AppsFlyerDataLockerReporter:
    """Reads hourly Data Locker part files via the Data Locker API."""

    api: api.AppsFlyerDataLockerAPI

    def __init__(self, api: api.AppsFlyerDataLockerAPI):
        self.api = api

    def get_hour_part_report(self, event_name: str, date: datetime, hour_value: int, part: int) -> pd.DataFrame:
        """Download and parse one gzipped CSV part file for the given hour."""
        key = f'{self.api.prefix(event_name=event_name, date=date, hour_value=hour_value)}part-{part:05}.gz'
        part_object = self.api.get_object(key=key)
        df = pd.read_csv(part_object['Body'], compression='gzip', dtype='object')
        return df

    def get_hour_part_metadata(self, event_name: str, date: datetime, hour_value: int) -> Dict[str, int]:
        """Return {'completed': flag, 'part_count': n} for the given hour.

        'completed' is True when a _SUCCESS marker object exists.
        BUG FIX: the return annotation was `-> int` although the method
        returns a dict.
        """
        bucket_contents = self.api.get_contents(event_name=event_name, date=date, hour_value=hour_value)
        if bucket_contents is None:
            return {'completed': False, 'part_count': 0}
        filtered_contents = [content for content in bucket_contents if re.match(r'.*/part-\d+.gz$', content['Key'])]
        completed = len([content for content in bucket_contents if re.match(r'.*/_SUCCESS$', content['Key'])]) > 0
        return {'completed': completed, 'part_count': len(filtered_contents)}
from panda3d.core import NodePath, CollisionBox, CollisionNode, Vec4, ModelNode, BoundingBox, Vec3
from panda3d.core import Point3, CKeyValues, BitMask32, RenderState, ColorAttrib, CullBinAttrib
from panda3d.core import PStatCollector
from .MapWritable import MapWritable
from bsp.leveleditor import LEGlobals
from .TransformProperties import OriginProperty, AnglesProperty, ScaleProperty, ShearProperty, TransformProperty
from . import MetaData
from .ObjectProperty import ObjectProperty
from bsp.leveleditor.math.Line import Line
from bsp.leveleditor.geometry.Box import Box
from bsp.leveleditor.geometry.GeomView import GeomView
from bsp.leveleditor.viewport.ViewportType import VIEWPORT_2D_MASK, VIEWPORT_3D_MASK
from enum import IntEnum
# Render state for the selection-bounds wireframe in the 3D viewport
# (flat yellow).
BoundsBox3DState = RenderState.make(
    ColorAttrib.makeFlat(Vec4(1, 1, 0, 1))
)
# Render state for the bounds wireframe in the 2D viewports: flat red,
# drawn in the "selected-foreground" cull bin.
BoundsBox2DState = RenderState.make(
    ColorAttrib.makeFlat(Vec4(1, 0, 0, 1)),
    CullBinAttrib.make("selected-foreground", 0)
)
# PStats collector timing MapObject construction.
MapObjectInit = PStatCollector("Arch:CreateSolid:MapObjInit")

# Base class for any object in the map (brush, entity, etc)
class MapObject(MapWritable):
    """Base class for any object in the map (brush, entity, etc).

    Owns a NodePath in the scene graph, a dict of child MapObjects keyed by
    id, a dict of ObjectProperty instances, and selection/bounds
    visualization state.
    """

    ObjectName = "object"

    def __init__(self, id):
        MapObjectInit.start()
        MapWritable.__init__(self, base.document)
        self.temporary = False
        self.id = id
        self.selected = False
        self.classname = ""
        self.parent = None
        self.children = {}
        self.boundingBox = BoundingBox(Vec3(-0.5, -0.5, -0.5), Vec3(0.5, 0.5, 0.5))
        self.boundsBox = Box()
        self.boundsBox.addView(GeomView.Lines, VIEWPORT_3D_MASK, state = BoundsBox3DState)
        self.boundsBox.addView(GeomView.Lines, VIEWPORT_2D_MASK, state = BoundsBox2DState)
        self.boundsBox.generateGeometry()
        self.collNp = None
        self.group = None
        self.properties = {}
        # All MapObjects have transform
        self.addProperty(OriginProperty(self))
        self.addProperty(AnglesProperty(self))
        self.addProperty(ScaleProperty(self))
        self.addProperty(ShearProperty(self))
        self.np = NodePath(ModelNode(self.ObjectName + ".%i" % self.id))
        self.np.setPythonTag("mapobject", self)
        self.applyCollideMask()
        # Test bounding volume at this node and but nothing below it.
        self.np.node().setFinal(True)
        MapObjectInit.stop()

    def shouldWriteTransform(self):
        """Whether transform properties are serialized for this object."""
        return True

    def getClassName(self):
        return self.classname

    def isWorld(self):
        """True only for the world root object (overridden there)."""
        return False

    def r_findAllParents(self, parents, type):
        # Walk up the tree, collecting parents of the given type (world excluded).
        if not self.parent or self.parent.isWorld():
            return
        if type is None or isinstance(self.parent, type):
            parents.append(self.parent)
        self.parent.r_findAllParents(parents, type)

    def findAllParents(self, type = None):
        """Return every ancestor (nearest first), optionally filtered by type."""
        parents = []
        self.r_findAllParents(parents, type)
        return parents

    def findTopmostParent(self, type = None):
        """Return the most distant ancestor matching type, or None."""
        parents = self.findAllParents(type)
        if len(parents) == 0:
            return None
        return parents[len(parents) - 1]

    def r_findAllChildren(self, children, type):
        for child in self.children.values():
            if type is None or isinstance(child, type):
                children.append(child)
            child.r_findAllChildren(children, type)

    def findAllChildren(self, type = None):
        """Return every descendant (depth-first), optionally filtered by type."""
        children = []
        self.r_findAllChildren(children, type)
        return children

    def applyCollideMask(self):
        self.np.setCollideMask(LEGlobals.ObjectMask)

    def setTemporary(self, flag):
        # Temporary objects skip child deletion bookkeeping in delete().
        self.temporary = flag

    # Returns the bounding volume of the object itself, not including children objects.
    def getObjBounds(self, other = None):
        if not other:
            other = self.np.getParent()
        return self.np.getTightBounds(other)

    # Returns the min and max points of the bounds of the object, not including children.
    def getBounds(self, other = None):
        if not other:
            other = self.np.getParent()
        mins = Point3()
        maxs = Point3()
        self.np.calcTightBounds(mins, maxs, other)
        return [mins, maxs]

    def findChildByID(self, id):
        """Recursively search self and descendants for the object with *id*."""
        if id == self.id:
            return self
        if id in self.children:
            return self.children[id]
        for child in self.children.values():
            ret = child.findChildByID(id)
            if ret is not None:
                return ret
        return None

    def hasChildWithID(self, id):
        return id in self.children

    def copy(self, generator):
        raise NotImplementedError

    def paste(self, o, generator):
        raise NotImplementedError

    def clone(self):
        raise NotImplementedError

    def unclone(self, o):
        raise NotImplementedError

    #
    # Base copy and paste functions shared by all MapObjects.
    # Each specific MapObject must implement the functions above for their
    # specific functionality.
    #

    def copyProperties(self, props):
        # Clone each property onto self, preserving current values.
        newProps = {}
        for key, prop in props.items():
            newProp = prop.clone(self)
            newProp.setValue(prop.getValue())
            newProps[key] = newProp
        self.updateProperties(newProps)

    def copyBase(self, other, generator, clone = False):
        """Copy this object's id (when cloning), children, classname,
        properties and selection state onto *other*."""
        if clone and other.id != self.id:
            parent = other.parent
            setPar = other.parent is not None and other.parent.hasChildWithID(other.id) and other.parent.children[other.id] == other
            if setPar:
                # Detach before changing the id so the parent's child dict
                # stays keyed correctly.
                other.reparentTo(NodePath())
            other.id = self.id
            if setPar:
                other.reparentTo(parent)
            other.parent = self.parent
        for child in self.children.values():
            if clone:
                newChild = child.clone()
            else:
                newChild = child.copy(generator)
            newChild.reparentTo(other)
        other.setClassname(self.classname)
        other.copyProperties(self.properties)
        other.selected = self.selected

    def pasteBase(self, o, generator, performUnclone = False):
        """Inverse of copyBase: restore id, children, classname, properties
        and selection state from *o* onto self."""
        if performUnclone and o.id != self.id:
            parent = self.parent
            setPar = self.parent is not None and self.parent.hasChildWithID(self.id) and self.parent.children[self.id] == self
            if setPar:
                self.reparentTo(NodePath())
            self.id = o.id
            if setPar:
                self.reparentTo(parent)
        for child in o.children.values():
            if performUnclone:
                newChild = child.clone()
            else:
                newChild = child.copy(generator)
            newChild.reparentTo(self)
        self.setClassname(o.classname)
        self.copyProperties(o.properties)
        self.selected = o.selected

    def getName(self):
        return "Object"

    def getDescription(self):
        return "Object in a map."

    def addProperty(self, prop):
        """Register an ObjectProperty on this object, keyed by its name."""
        self.properties[prop.name] = prop
        if isinstance(prop, TransformProperty):
            prop.setWritable(self.shouldWriteTransform())

    # Returns list of property names with the specified value types.
    def getPropsWithValueType(self, types):
        if isinstance(types, str):
            types = [types]
        props = []
        for propName, prop in self.properties.items():
            if prop.valueType in types:
                props.append(propName)
        return props

    def getPropNativeType(self, key):
        prop = self.properties.get(key, None)
        if not prop:
            return str
        return prop.getNativeType()

    def getPropValueType(self, key):
        prop = self.properties.get(key, None)
        if not prop:
            return "string"
        return prop.valueType

    def getPropDefaultValue(self, prop):
        # Accepts either a property name or a property object.
        if isinstance(prop, str):
            prop = self.properties.get(prop, None)
        if not prop:
            return ""
        return prop.defaultValue

    def getPropertyValue(self, key, asString = False, default = ""):
        prop = self.properties.get(key, None)
        if not prop:
            return default
        if asString:
            return prop.getSerializedValue()
        else:
            return prop.getValue()

    def getProperty(self, name):
        return self.properties.get(name, None)

    def updateProperties(self, data):
        """Apply a dict of {name: value} updates and/or {name: ObjectProperty}
        additions, clamping values to each property's min/max range."""
        for key, value in data.items():
            if not isinstance(value, ObjectProperty):
                # If only a value was specified and not a property object itself,
                # this is an update to an existing property.
                prop = self.properties.get(key, None)
                if not prop:
                    continue
                oldValue = prop.getValue()
                val = prop.getUnserializedValue(value)
                # If the property has a min/max range, ensure the value we want to
                # set is within that range.
                if (not prop.testMinValue(val)) or (not prop.testMaxValue(val)):
                    # Not within range. Use the default value
                    val = prop.defaultValue
                prop.setValue(val)
            else:
                # A property object was given, simply add it to the dict of properties.
                prop = value
                oldValue = None
                val = prop.getValue()
                self.properties[prop.name] = prop
            self.propertyChanged(prop, oldValue, val)

    def propertyChanged(self, prop, oldValue, newValue):
        if oldValue != newValue:
            self.send('objectPropertyChanged', [self, prop, newValue])

    def setAbsOrigin(self, origin):
        self.np.setPos(base.render, origin)
        self.transformChanged()

    def setOrigin(self, origin):
        self.np.setPos(origin)
        self.transformChanged()

    def getAbsOrigin(self):
        return self.np.getPos(base.render)

    def getOrigin(self):
        return self.np.getPos()

    def setAngles(self, angles):
        self.np.setHpr(angles)
        self.transformChanged()

    def setAbsAngles(self, angles):
        self.np.setHpr(base.render, angles)
        self.transformChanged()

    def getAbsAngles(self):
        return self.np.getHpr(base.render)

    def getAngles(self):
        return self.np.getHpr()

    def setScale(self, scale):
        self.np.setScale(scale)
        self.transformChanged()

    def setAbsScale(self, scale):
        self.np.setScale(base.render, scale)
        self.transformChanged()

    def getAbsScale(self):
        return self.np.getScale(base.render)

    def getScale(self):
        return self.np.getScale()

    def setShear(self, shear):
        self.np.setShear(shear)
        self.transformChanged()

    def setAbsShear(self, shear):
        self.np.setShear(base.render, shear)
        self.transformChanged()

    def getAbsShear(self):
        return self.np.getShear(base.render)

    def getShear(self):
        return self.np.getShear()

    def transformChanged(self):
        # Keep the cached bounds in sync and notify listeners.
        self.recalcBoundingBox()
        self.send('objectTransformChanged', [self])

    def showBoundingBox(self):
        self.boundsBox.np.reparentTo(self.np)

    def hideBoundingBox(self):
        self.boundsBox.np.reparentTo(NodePath())

    def select(self):
        self.selected = True
        self.showBoundingBox()
        #self.np.setColorScale(1, 0, 0, 1)

    def deselect(self):
        self.selected = False
        self.hideBoundingBox()
        #self.np.setColorScale(1, 1, 1, 1)

    def setClassname(self, classname):
        self.classname = classname

    def fixBounds(self, mins, maxs):
        # Ensures that the bounds are not flat on any axis
        sameX = mins.x == maxs.x
        sameY = mins.y == maxs.y
        sameZ = mins.z == maxs.z
        invalid = False
        if sameX:
            # Flat horizontal
            if sameY and sameZ:
                invalid = True
            elif not sameY:
                mins.x = mins.y
                maxs.x = maxs.y
            elif not sameZ:
                mins.x = mins.z
                maxs.x = maxs.z
        if sameY:
            # Flat forward/back
            if sameX and sameZ:
                invalid = True
            elif not sameX:
                mins.y = mins.x
                maxs.y = maxs.x
            elif not sameZ:
                mins.y = mins.z
                maxs.y = maxs.z
        if sameZ:
            if sameX and sameY:
                invalid = True
            elif not sameX:
                mins.z = mins.x
                maxs.z = maxs.x
            elif not sameY:
                mins.z = mins.y
                maxs.z = maxs.y
        return [invalid, mins, maxs]

    def recalcBoundingBox(self):
        """Recompute self.boundingBox / boundsBox / pick box from geometry."""
        if not self.np:
            return
        # Don't have the picker box or selection visualization contribute to the
        # calculation of the bounding box.
        if self.collNp:
            self.collNp.stash()
        self.hideBoundingBox()
        # Calculate a bounding box relative to ourself
        mins, maxs = self.getBounds(self.np)
        invalid, mins, maxs = self.fixBounds(mins, maxs)
        if invalid:
            # Degenerate geometry: fall back to a fixed-size box.
            mins = Point3(-8)
            maxs = Point3(8)
        self.boundingBox = BoundingBox(mins, maxs)
        self.boundsBox.setMinMax(mins, maxs)
        if self.selected:
            self.showBoundingBox()
        if self.collNp:
            self.collNp.unstash()
            self.collNp.node().clearSolids()
            self.collNp.node().addSolid(CollisionBox(mins, maxs))
            self.collNp.hide(~VIEWPORT_3D_MASK)
        self.send('mapObjectBoundsChanged', [self])

    def removePickBox(self):
        if self.collNp:
            self.collNp.removeNode()
            self.collNp = None

    def delete(self):
        """Destroy this object, its children and all associated scene nodes."""
        if not self.temporary:
            # Take the children with us
            for child in list(self.children.values()):
                child.delete()
            self.children = None
            # if we are selected, deselect
            base.selectionMgr.deselect(self)
        if self.boundsBox:
            self.boundsBox.cleanup()
            self.boundsBox = None
        self.removePickBox()
        if not self.temporary:
            self.reparentTo(NodePath())
        self.np.removeNode()
        self.np = None
        self.properties = None
        self.metaData = None
        self.temporary = None

    def __clearParent(self):
        if self.parent:
            self.parent.__removeChild(self)
            self.np.reparentTo(NodePath())
            self.parent = None

    def __setParent(self, other):
        if isinstance(other, NodePath):
            # We are reparenting directly to a NodePath, outside of the MapObject tree.
            self.parent = None
            self.np.reparentTo(other)
        else:
            self.parent = other
            if self.parent:
                self.parent.__addChild(self)
            # NOTE(review): the node is parented to base.render here rather
            # than to self.parent.np -- confirm this flat scene graph is
            # intentional.
            self.np.reparentTo(base.render)

    def reparentTo(self, other):
        # If a NodePath is passed to this method, the object will be placed under the specified node
        # in the Panda3D scene graph, but will be taken out of the MapObject tree. If None is passed,
        # the object will be parented to base.render and taken out of the MapObject tree.
        #
        # Use reparentTo(NodePath()) to place the object outside of both the scene graph and the
        # MapObject tree.
        self.__clearParent()
        self.__setParent(other)

    def __addChild(self, child):
        self.children[child.id] = child
        #self.recalcBoundingBox()

    def __removeChild(self, child):
        if child.id in self.children:
            del self.children[child.id]
            #self.recalcBoundingBox()

    def getEditorValues(self):
        # Overridden by subclasses to expose editor-only key/values.
        return {}

    def readEditorValues(self, kv):
        pass

    def writeEditorValues(self, parent):
        values = self.getEditorValues()
        if len(values) > 0:
            kv = CKeyValues("editor", parent)
            # BUG FIX: the original iterated `values.items` (the bound method
            # object) instead of calling it, raising a TypeError whenever a
            # subclass returned a non-empty dict.
            for key, value in values.items():
                kv.setKeyValue(key, value)

    def doWriteKeyValues(self, parent):
        """Serialize self (and recursively all children) under *parent*."""
        kv = CKeyValues(self.ObjectName, parent)
        self.writeKeyValues(kv)
        for child in self.children.values():
            child.doWriteKeyValues(kv)
        self.writeEditorValues(kv)

    def writeKeyValues(self, keyvalues):
        keyvalues.setKeyValue("id", str(self.id))
        # Write out our object properties
        for name, prop in self.properties.items():
            if prop.isWritable():
                prop.writeKeyValues(keyvalues)

    def readKeyValues(self, keyvalues):
        """Populate properties from serialized key/values."""
        for i in range(keyvalues.getNumKeys()):
            key = keyvalues.getKey(i)
            value = keyvalues.getValue(i)
            if MetaData.isPropertyExcluded(key):
                continue
            # Find the property with this name.
            prop = self.properties.get(key, None)
            if not prop:
                # Prop wasn't explicit or part of FGD metadata (if it's an Entity)
                continue
            nativeValue = prop.getUnserializedValue(value)
            # Set the value!
            self.updateProperties({prop.name: nativeValue})
|
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from .models import Post
# Create your views here.
def get_blog(request):
    """Render the blog index with all posts, newest published first."""
    posts = Post.objects.all().order_by('-published_date')
    return render(request, "blog/blog.html", {'posts': posts})
def post_detail(request, pk):
    """Render a single post (404 if pk unknown) and bump its view counter."""
    post = get_object_or_404(Post, pk=pk)
    # NOTE(review): read-modify-write is not atomic; concurrent requests can
    # lose counts. Consider F('views') + 1 if accuracy matters.
    post.views += 1
    post.save()
    return render(request, "blog/postdetail.html", {'post': post})
from django.db import connections
from django.core.management import call_command
from django.test import TransactionTestCase, TestCase
# Older Django versions have no SimpleTestCase; fall back to TestCase so
# the issubclass() check in is_django_unittest still works.
try:
    from django.test import SimpleTestCase as DjangoBaseTestCase
    DjangoBaseTestCase # Avoid pyflakes warning about redefinition of import
except ImportError:
    DjangoBaseTestCase = TestCase
def is_django_unittest(item):
    """
    Returns True if the item is a Django test case, otherwise False.
    """
    # NOTE(review): `im_class` exists only on Python 2 bound methods; under
    # Python 3 this check always fails -- confirm the intended Python support.
    return hasattr(item.obj, 'im_class') and issubclass(item.obj.im_class, DjangoBaseTestCase)
def get_django_unittest(item):
    """
    Returns a Django unittest instance that can have _pre_setup() or
    _post_teardown() invoked to setup/teardown the database before a test run.
    """
    if 'transaction_test_case' in item.keywords:
        cls = TransactionTestCase
    elif item.config.option.no_db:
        cls = TestCase
        # NOTE(review): this rebinds _fixture_setup on the TestCase CLASS
        # itself (not an instance), disabling fixture setup globally for the
        # rest of the process.
        cls._fixture_setup = lambda self: None
    else:
        cls = TestCase
    # methodName='__init__' is a dummy so TestCase.__init__ accepts it; the
    # instance is only used for _pre_setup()/_post_teardown().
    return cls(methodName='__init__')
def django_setup_item(item):
    """Run Django's per-test database setup before the item executes."""
    if 'transaction_test_case' in item.keywords:
        # Nothing needs to be done
        pass
    else:
        # Use the standard TestCase teardown
        get_django_unittest(item)._pre_setup()
def django_teardown_item(item):
    """Run Django's per-test database teardown after the item executes."""
    if 'transaction_test_case' in item.keywords:
        # Flush the database and close database connections
        # Django does this by default *before* each test instead of after
        for db in connections:
            call_command('flush', verbosity=0, interactive=False, database=db)
        for conn in connections.all():
            conn.close()
    else:
        # Use the standard TestCase teardown
        get_django_unittest(item)._post_teardown()
|
#!/usr/bin/env python3
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate a spatial analysis against an arbitrary library.
To use, build the 'binary_size_tool' target. Then run this tool, passing
in the location of the library to be analyzed along with any other options
you desire.
"""
import json
import logging
import multiprocessing
import optparse
import os
import re
import shutil
import struct
import subprocess
import sys
import tempfile
import time
import binary_size_utils
import elf_symbolizer
# Node dictionary keys. These are output in json read by the webapp so
# keep them short to save file size.
# Note: If these change, the webapp must also change.
NODE_TYPE_KEY = 'k'  # Node kind: 'p' = path, 'b' = bucket, 's' = symbol.
NODE_NAME_KEY = 'n'
NODE_CHILDREN_KEY = 'children'
NODE_SYMBOL_TYPE_KEY = 't'
NODE_SYMBOL_SIZE_KEY = 'value'
NODE_MAX_DEPTH_KEY = 'maxDepth'
NODE_LAST_PATH_ELEMENT_KEY = 'lastPathElement'

# The display name of the bucket where we put symbols without path.
NAME_NO_PATH_BUCKET = '(No Path)'

# Try to keep data buckets smaller than this to avoid killing the
# graphing lib.
BIG_BUCKET_LIMIT = 3000
def _MkChild(node, name):
    """Return the child of *node* named *name*, creating it on first use."""
    children = node[NODE_CHILDREN_KEY]
    if name not in children:
        children[name] = {NODE_NAME_KEY: name, NODE_CHILDREN_KEY: {}}
    return children[name]
def SplitNoPathBucket(node):
    """NAME_NO_PATH_BUCKET can be too large for the graphing lib to
    handle. Split it into sub-buckets in that case.

    The bucket is split into numbered subgroups of at most
    BIG_BUCKET_LIMIT symbols each; *node* (the tree root) is mutated in
    place.
    """
    root_children = node[NODE_CHILDREN_KEY]
    if NAME_NO_PATH_BUCKET in root_children:
        no_path_bucket = root_children[NAME_NO_PATH_BUCKET]
        old_children = no_path_bucket[NODE_CHILDREN_KEY]
        # Count the symbols across all symbol-type buckets.
        count = 0
        for symbol_type, symbol_bucket in old_children.items():
            count += len(symbol_bucket[NODE_CHILDREN_KEY])
        if count > BIG_BUCKET_LIMIT:
            new_children = {}
            no_path_bucket[NODE_CHILDREN_KEY] = new_children
            current_bucket = None
            index = 0
            for symbol_type, symbol_bucket in old_children.items():
                for symbol_name, value in symbol_bucket[
                        NODE_CHILDREN_KEY].items():
                    if index % BIG_BUCKET_LIMIT == 0:
                        # BUG FIX: use floor division; '/' is true division
                        # on Python 3 and yields a float group number.
                        group_no = (index // BIG_BUCKET_LIMIT) + 1
                        current_bucket = _MkChild(
                            no_path_bucket,
                            '%s subgroup %d' % (NAME_NO_PATH_BUCKET, group_no))
                        assert not NODE_TYPE_KEY in node or node[
                            NODE_TYPE_KEY] == 'p'
                        node[NODE_TYPE_KEY] = 'p'  # p for path
                    index += 1
                    symbol_size = value[NODE_SYMBOL_SIZE_KEY]
                    AddSymbolIntoFileNode(current_bucket, symbol_type,
                                          symbol_name, symbol_size)
def MakeChildrenDictsIntoLists(node):
    """Recursively convert every NODE_CHILDREN_KEY dict into a list.

    Returns the length of the longest child list found anywhere in the
    subtree (used by the caller to warn about oversized sections).
    """
    deepest = 0
    if NODE_CHILDREN_KEY in node:
        converted = []
        for child in node[NODE_CHILDREN_KEY].values():
            deepest = max(deepest, MakeChildrenDictsIntoLists(child))
            converted.append(child)
        deepest = max(deepest, len(converted))
        node[NODE_CHILDREN_KEY] = converted
    return deepest
def AddSymbolIntoFileNode(node, symbol_type, symbol_name, symbol_size):
    """Puts symbol into the file path node |node|.

    Creates (or reuses) a symbol-type bucket under the file node and hangs
    the symbol leaf beneath it. Always returns 2, the number of levels
    added to the tree (bucket + leaf).
    """
    node[NODE_LAST_PATH_ELEMENT_KEY] = True
    # Level 1: the symbol-type bucket under the file node.
    bucket = _MkChild(node, symbol_type)
    assert not NODE_TYPE_KEY in bucket or bucket[NODE_TYPE_KEY] == 'b'
    bucket[NODE_SYMBOL_TYPE_KEY] = symbol_type
    bucket[NODE_TYPE_KEY] = 'b'  # b for bucket
    # Level 2: the symbol leaf itself.
    leaf = _MkChild(bucket, symbol_name)
    if NODE_CHILDREN_KEY in leaf:
        if leaf[NODE_CHILDREN_KEY]:
            logging.warning(
                'A container node used as symbol for %s.' % symbol_name)
        # This is going to be used as a leaf so no use for child list.
        del leaf[NODE_CHILDREN_KEY]
    leaf[NODE_SYMBOL_SIZE_KEY] = symbol_size
    leaf[NODE_SYMBOL_TYPE_KEY] = symbol_type
    leaf[NODE_TYPE_KEY] = 's'  # s for symbol
    return 2  # Depth of the added subtree.
def MakeCompactTree(symbols, symbol_path_origin_dir):
    """Build the nested path/bucket/symbol tree consumed by the webapp.

    *symbols* is an iterable of (name, type, size, file_path, address)
    tuples; *symbol_path_origin_dir* is the directory relative paths are
    resolved against. Returns the root node dict, with NODE_MAX_DEPTH_KEY
    recording the deepest path in the tree.
    """
    result = {
        NODE_NAME_KEY: '/',
        NODE_CHILDREN_KEY: {},
        NODE_TYPE_KEY: 'p',
        NODE_MAX_DEPTH_KEY: 0
    }
    seen_symbol_with_path = False
    cwd = os.path.abspath(os.getcwd())
    for symbol_name, symbol_type, symbol_size, file_path, _address in symbols:
        if 'vtable for ' in symbol_name:
            symbol_type = '@'  # hack to categorize these separately
        # Take path like '/foo/bar/baz', convert to ['foo', 'bar', 'baz']
        if file_path and file_path != "??":
            file_path = os.path.abspath(
                os.path.join(symbol_path_origin_dir, file_path))
            # Let the output structure be relative to $CWD if inside $CWD,
            # otherwise relative to the disk root. This is to avoid
            # unnecessary click-through levels in the output.
            if file_path.startswith(cwd + os.sep):
                file_path = file_path[len(cwd):]
            if file_path.startswith('/'):
                file_path = file_path[1:]
            seen_symbol_with_path = True
        else:
            # No usable path: lump the symbol into the shared bucket.
            file_path = NAME_NO_PATH_BUCKET
        path_parts = file_path.split('/')
        # Find preexisting node in tree, or update if it already exists
        node = result
        depth = 0
        while len(path_parts) > 0:
            path_part = path_parts.pop(0)
            if len(path_part) == 0:
                continue  # Skip empty components from leading/double slashes.
            depth += 1
            node = _MkChild(node, path_part)
            assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'p'
            node[NODE_TYPE_KEY] = 'p'  # p for path
        # The symbol adds two more levels (type bucket + leaf).
        depth += AddSymbolIntoFileNode(node, symbol_type, symbol_name,
                                       symbol_size)
        result[NODE_MAX_DEPTH_KEY] = max(result[NODE_MAX_DEPTH_KEY], depth)
    if not seen_symbol_with_path:
        logging.warning('Symbols lack paths. Data will not be structured.')
    # The (no path) bucket can be extremely large if we failed to get
    # path information. Split it into subgroups if needed.
    SplitNoPathBucket(result)
    largest_list_len = MakeChildrenDictsIntoLists(result)
    if largest_list_len > BIG_BUCKET_LIMIT:
        logging.warning('There are sections with %d nodes. '
                        'Results might be unusable.' % largest_list_len)
    return result
def DumpCompactTree(symbols, symbol_path_origin_dir, outfile):
    """Write the treemap data ('var tree_data=...') to *outfile* for the
    HTML report."""
    tree = MakeCompactTree(symbols, symbol_path_origin_dir)
    with open(outfile, 'w') as out:
        out.write('var tree_data=')
        # Use separators without whitespace to get a smaller file.
        json.dump(tree, out, separators=(',', ':'))
    print('Writing %d bytes json' % os.path.getsize(outfile))
def MakeSourceMap(symbols):
    """Aggregate symbol size and count per normalized source path.

    Returns a dict keyed by normalized path (or '[no path]' when a symbol
    has none), each value a {'path', 'symbol_count', 'size'} record.
    """
    sources = {}
    for _sym, _symbol_type, size, path, _address in symbols:
        key = os.path.normpath(path) if path else '[no path]'
        record = sources.setdefault(
            key, {'path': path, 'symbol_count': 0, 'size': 0})
        record['size'] += size
        record['symbol_count'] += 1
    return sources
# Regex for parsing "nm" output. A sample line looks like this:
# 0167b39c 00000018 t ACCESS_DESCRIPTION_free /path/file.c:95
#
# The fields are: address, size, type, name, source location
# Regular expression explained ( see also: https://xkcd.com/208 ):
# ([0-9a-f]{8,}+) The address
# [\s]+ Whitespace separator
# ([0-9a-f]{8,}+) The size. From here on out it's all optional.
# [\s]+ Whitespace separator
# (\S?) The symbol type, which is any non-whitespace char
# [\s*] Whitespace separator
# ([^\t]*) Symbol name, any non-tab character (spaces ok!)
# [\t]? Tab separator
# (.*) The location (filename[:linennum|?][ (discriminator n)]
# Compiled once at import time; applied to every line of `nm` output.
# Groups: 1=address, 2=size, 3=symbol type, 4=name, 5=source location.
sNmPattern = re.compile(
    r'([0-9a-f]{8,})[\s]+([0-9a-f]{8,})[\s]*(\S?)[\s*]([^\t]*)[\t]?(.*)')
class Progress():
    """Mutable counters tracking symbol-lookup progress for status output."""

    def __init__(self):
        # Symbols processed so far, and lines skipped as unparseable.
        self.count = 0
        self.skip_count = 0
        # Distinct symbols that mapped to an already-seen address.
        self.collisions = 0
        # Throttling state for the once-per-second status line.
        self.time_last_output = time.time()
        self.count_last_output = 0
        # Disambiguation statistics.
        self.disambiguations = 0
        self.was_ambiguous = 0
def RunElfSymbolizer(outfile, library, addr2line_binary, nm_binary, jobs,
                     disambiguate, src_path):
    """Symbolize *library* with nm + parallel addr2line, writing results
    to *outfile*.

    Runs nm over the library, asynchronously resolves each address to a
    source location via elf_symbolizer, and writes the original nm lines
    annotated with 'path:line' where a symbol was resolved. *jobs* bounds
    the addr2line pool; *disambiguate*/*src_path* control the symbolizer's
    path-disambiguation feature. Ctrl-C degrades gracefully to partial
    output.
    """
    nm_output = RunNm(library, nm_binary)
    nm_output_lines = nm_output.splitlines()
    nm_output_lines_len = len(nm_output_lines)
    address_symbol = {}
    progress = Progress()

    def map_address_symbol(symbol, addr):
        # Record one resolved symbol, keeping first-wins semantics on
        # address collisions, then refresh the status line.
        progress.count += 1
        if addr in address_symbol:
            # 'Collision between %s and %s.' % (str(symbol.name),
            # str(address_symbol[addr].name))
            progress.collisions += 1
        else:
            if symbol.disambiguated:
                progress.disambiguations += 1
            if symbol.was_ambiguous:
                progress.was_ambiguous += 1
            address_symbol[addr] = symbol
        progress_output()

    def progress_output():
        # Emit a carriage-return status line, at most once per second and
        # only every 100th symbol to keep overhead low.
        progress_chunk = 100
        if progress.count % progress_chunk == 0:
            time_now = time.time()
            time_spent = time_now - progress.time_last_output
            if time_spent > 1.0:
                # Only output at most once per second.
                progress.time_last_output = time_now
                chunk_size = progress.count - progress.count_last_output
                progress.count_last_output = progress.count
                if time_spent > 0:
                    speed = chunk_size / time_spent
                else:
                    speed = 0
                progress_percent = (100.0 * (
                    progress.count + progress.skip_count) /
                    nm_output_lines_len)
                disambiguation_percent = 0
                if progress.disambiguations != 0:
                    disambiguation_percent = (
                        100.0 * progress.disambiguations /
                        progress.was_ambiguous)
                sys.stdout.write(
                    '\r%.1f%%: Looked up %d symbols (%d collisions, '
                    '%d disambiguations where %.1f%% succeeded)'
                    ' - %.1f lookups/s.' %
                    (progress_percent, progress.count, progress.collisions,
                     progress.disambiguations, disambiguation_percent, speed))

    # In case disambiguation was disabled, we remove the source path (which
    # upon being set signals the symbolizer to enable disambiguation)
    if not disambiguate:
        src_path = None
    symbolizer = elf_symbolizer.ELFSymbolizer(
        library,
        addr2line_binary,
        map_address_symbol,
        max_concurrent_jobs=jobs,
        source_root_path=src_path)
    user_interrupted = False
    try:
        for binary_line in nm_output_lines:
            line = binary_line.decode()
            match = sNmPattern.match(line)
            if match:
                location = match.group(5)
                if not location:
                    # nm gave no source location; ask addr2line.
                    addr = int(match.group(1), 16)
                    size = int(match.group(2), 16)
                    if addr in address_symbol:  # Already looked up, shortcut
                        # ELFSymbolizer.
                        map_address_symbol(address_symbol[addr], addr)
                        continue
                    elif size == 0:
                        # Save time by not looking up empty symbols (do they
                        # even exist?)
                        print('Empty symbol: ' + line)
                    else:
                        symbolizer.SymbolizeAsync(addr, addr)
                        continue
            progress.skip_count += 1
    except KeyboardInterrupt:
        user_interrupted = True
        print('Interrupting - killing subprocesses. Please wait.')

    try:
        # Wait for the addr2line workers to drain their queues.
        symbolizer.Join()
    except KeyboardInterrupt:
        # Don't want to abort here since we will be finished in a few
        # seconds.
        user_interrupted = True
        print('Patience you must have my young padawan.')

    print('')

    if user_interrupted:
        print('Skipping the rest of the file mapping. '
              'Output will not be fully classified.')

    symbol_path_origin_dir = os.path.dirname(os.path.abspath(library))

    # Second pass: re-emit every nm line, appending resolved path:line
    # where the symbolizer produced one.
    with open(outfile, 'w') as out:
        for binary_line in nm_output_lines:
            line = binary_line.decode()
            match = sNmPattern.match(line)
            if match:
                location = match.group(5)
                if not location:
                    addr = int(match.group(1), 16)
                    symbol = address_symbol.get(addr)
                    if symbol is not None:
                        path = '??'
                        if symbol.source_path is not None:
                            path = os.path.abspath(
                                os.path.join(symbol_path_origin_dir,
                                             symbol.source_path))
                        line_number = 0
                        if symbol.source_line is not None:
                            line_number = symbol.source_line
                        out.write('%s\t%s:%d\n' % (line, path, line_number))
                        continue
            out.write('%s\n' % line)

    print('%d symbols in the results.' % len(address_symbol))
def RunNm(binary, nm_binary):
    """Run nm over *binary* and return its stdout (bytes).

    Symbols are demangled (-C), printed with sizes, and sorted
    largest-first. Raises Exception on a non-zero exit, preferring
    stderr for the message when it is non-empty.
    """
    command = [
        nm_binary, '-C', '--print-size', '--size-sort', '--reverse-sort',
        binary
    ]
    process = subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process.communicate()
    if process.returncode != 0:
        raise Exception(err if err else out)
    return out
def GetNmSymbols(nm_infile, outfile, library, jobs, verbose, addr2line_binary,
                 nm_binary, disambiguate, src_path):
    """Return the parsed symbol list for the analysis.

    When *nm_infile* is provided it is parsed directly; otherwise nm and
    addr2line are run over *library*, dumping to *outfile* (a temp file
    when None), and that dump is parsed instead.
    """
    if nm_infile is not None:
        if verbose:
            print('Using nm input from ' + nm_infile)
    else:
        if outfile is None:
            outfile = tempfile.NamedTemporaryFile(delete=False).name
        if verbose:
            print('Running parallel addr2line, dumping symbols to ' + outfile)
        RunElfSymbolizer(outfile, library, addr2line_binary, nm_binary, jobs,
                         disambiguate, src_path)
        nm_infile = outfile
    with open(nm_infile, 'r') as infile:
        return list(binary_size_utils.ParseNm(infile))
# Maps numeric pak resource ids to human-readable names. The 'inited'
# sentinel records whether resource headers have already been scanned.
PAK_RESOURCE_ID_TO_STRING = {"inited": False}


def LoadPakIdsFromResourceFile(filename):
    """Given a file name, it loads everything that looks like a resource id
    into PAK_RESOURCE_ID_TO_STRING.

    Only `#define NAME <int>` lines are considered; anything whose value
    is not an integer is silently skipped.
    """
    with open(filename) as resource_header:
        for line in resource_header:
            if not line.startswith("#define "):
                continue
            parts = line.split()
            if len(parts) != 3:
                continue
            try:
                PAK_RESOURCE_ID_TO_STRING[int(parts[2])] = parts[1]
            except ValueError:
                pass  # Value was not an integer.
def GetReadablePakResourceName(pak_file, resource_id):
    """Pak resources have a numeric identifier. It is not helpful when
    trying to locate where footprint is generated. This does its best to
    map the number to a usable string.

    On first call, scans grit-generated '*resources.h' headers under the
    pak file's 'gen' directory; later calls reuse the cached mapping.
    """
    if not PAK_RESOURCE_ID_TO_STRING['inited']:
        # Try to find resource header files generated by grit when
        # building the pak file. We'll look for files named *resources.h"
        # and lines of the type:
        #    #define MY_RESOURCE_JS 1234
        PAK_RESOURCE_ID_TO_STRING['inited'] = True
        gen_dir = os.path.join(os.path.dirname(pak_file), 'gen')
        if os.path.isdir(gen_dir):
            for dirname, _dirs, files in os.walk(gen_dir):
                for header in files:
                    if header.endswith('resources.h'):
                        LoadPakIdsFromResourceFile(
                            os.path.join(dirname, header))
    return PAK_RESOURCE_ID_TO_STRING.get(resource_id,
                                         'Pak Resource %d' % resource_id)
def AddPakData(symbols, pak_file):
    """Adds pseudo-symbols from a pak file.

    Each resource is appended to *symbols* as a
    (name, type, size, path, address) 5-tuple, matching the shape of
    nm-derived entries.
    """
    pak_file = os.path.abspath(pak_file)
    with open(pak_file, 'rb') as pak:
        data = pak.read()
    PAK_FILE_VERSION = 4
    HEADER_LENGTH = 2 * 4 + 1  # Two uint32s. (file version, number of entries)
    # and one uint8 (encoding of text resources)
    INDEX_ENTRY_SIZE = 2 + 4  # Each entry is a uint16 and a uint32.
    version, num_entries, _encoding = struct.unpack('<IIB',
                                                    data[:HEADER_LENGTH])
    assert version == PAK_FILE_VERSION, (
        'Unsupported pak file '
        'version (%d) in %s. Only '
        'support version %d' % (version, pak_file, PAK_FILE_VERSION))
    if num_entries > 0:
        # Read the index and data.
        data = data[HEADER_LENGTH:]
        for _ in range(num_entries):
            resource_id, offset = struct.unpack('<HI', data[:INDEX_ENTRY_SIZE])
            data = data[INDEX_ENTRY_SIZE:]
            # Peek at the next index entry to compute this resource's size.
            _next_id, next_offset = struct.unpack('<HI',
                                                  data[:INDEX_ENTRY_SIZE])
            resource_size = next_offset - offset
            symbol_name = GetReadablePakResourceName(pak_file, resource_id)
            symbol_path = pak_file
            symbol_type = 'd'  # Data. Approximation.
            symbol_size = resource_size
            # BUG FIX: append a 5-tuple. Consumers (MakeCompactTree,
            # MakeSourceMap) unpack five fields, so the original 4-tuple
            # raised ValueError whenever --pak was used. The resource
            # offset stands in for the (ignored) address field.
            symbols.append(
                (symbol_name, symbol_type, symbol_size, symbol_path, offset))
def _find_in_system_path(binary):
"""Locate the full path to binary in the system path or return None
if not found."""
system_path = os.environ["PATH"].split(os.pathsep)
for path in system_path:
binary_path = os.path.join(path, binary)
if os.path.isfile(binary_path):
return binary_path
return None
def CheckDebugFormatSupport(library, addr2line_binary):
    """Kills the program if debug data is in an unsupported format.

    There are two common versions of the DWARF debug formats and
    since we are right now transitioning from DWARF2 to newer formats,
    it's possible to have a mix of tools that are not compatible. Detect
    that and abort rather than produce meaningless output."""
    tool_output = subprocess.check_output([addr2line_binary,
                                           '--version']).decode()
    # Parse "GNU addr2line (...) X.Y..." for the binutils version; escape
    # the dot so only a literal '.' separates major and minor.
    version_re = re.compile(r'^GNU [^ ]+ .* (\d+)\.(\d+).*?$', re.M)
    parsed_output = version_re.match(tool_output)
    major = int(parsed_output.group(1))
    minor = int(parsed_output.group(2))
    # DWARF4 support arrived after binutils 2.22.
    supports_dwarf4 = major > 2 or major == 2 and minor > 22
    if supports_dwarf4:
        return
    print('Checking version of debug information in %s.' % library)
    # BUG FIX: check_output returns bytes; decode before applying a str
    # regex, which otherwise raises TypeError on Python 3.
    debug_info = subprocess.check_output(
        ['readelf', '--debug-dump=info', '--dwarf-depth=1',
         library]).decode()
    dwarf_version_re = re.compile(r'^\s+Version:\s+(\d+)$', re.M)
    parsed_dwarf_format_output = dwarf_version_re.search(debug_info)
    version = int(parsed_dwarf_format_output.group(1))
    if version > 2:
        print(
            'The supplied tools only support DWARF2 debug data but the binary\n'
            + 'uses DWARF%d. Update the tools or compile the binary\n' % version
            + 'with -gdwarf-2.')
        sys.exit(1)
def main():
    """Parse command-line options, run the analysis, and emit the report."""
    usage = """%prog [options]

  Runs a spatial analysis on a given library, looking up the source locations
  of its symbols and calculating how much space each directory, source file,
  and so on is taking. The result is a report that can be used to pinpoint
  sources of large portions of the binary, etceteras.

  Under normal circumstances, you only need to pass two arguments, thusly:

      %prog --library /path/to/library --destdir /path/to/output

  In this mode, the program will dump the symbols from the specified library
  and map those symbols back to source locations, producing a web-based
  report in the specified output directory.

  Other options are available via '--help'.
  """
    parser = optparse.OptionParser(usage=usage)
    parser.add_option(
        '--nm-in',
        metavar='PATH',
        help='if specified, use nm input from <path> instead of '
        'generating it. Note that source locations should be '
        'present in the file; i.e., no addr2line symbol lookups '
        'will be performed when this option is specified. '
        'Mutually exclusive with --library.')
    parser.add_option(
        '--destdir',
        metavar='PATH',
        help='write output to the specified directory. An HTML '
        'report is generated here along with supporting files; '
        'any existing report will be overwritten.')
    parser.add_option(
        '--library',
        metavar='PATH',
        help='if specified, process symbols in the library at '
        'the specified path. Mutually exclusive with --nm-in.')
    parser.add_option(
        '--pak',
        metavar='PATH',
        help='if specified, includes the contents of the '
        'specified *.pak file in the output.')
    parser.add_option(
        '--nm-binary',
        help='use the specified nm binary to analyze library. '
        'This is to be used when the nm in the path is not for '
        'the right architecture or of the right version.')
    parser.add_option(
        '--addr2line-binary',
        help='use the specified addr2line binary to analyze '
        'library. This is to be used when the addr2line in '
        'the path is not for the right architecture or '
        'of the right version.')
    parser.add_option(
        '--jobs',
        type='int',
        help='number of jobs to use for the parallel '
        'addr2line processing pool; defaults to 1. More '
        'jobs greatly improve throughput but eat RAM like '
        'popcorn, and take several gigabytes each. Start low '
        'and ramp this number up until your machine begins to '
        'struggle with RAM. '
        'This argument is only valid when using --library.')
    parser.add_option(
        '-v',
        '--verbose',
        dest='verbose',
        action='store_true',
        help='be verbose, printing lots of status information.')
    parser.add_option(
        '--nm-out',
        metavar='PATH',
        help='(deprecated) No-op. nm.out is stored in --destdir.')
    parser.add_option(
        '--no-nm-out',
        action='store_true',
        help='do not keep the nm output file. This file is useful '
        'if you want to see the fully processed nm output after '
        'the symbols have been mapped to source locations, or if '
        'you plan to run explain_binary_size_delta.py. By default '
        'the file \'nm.out\' is placed alongside the generated '
        'report. The nm.out file is only created when using '
        '--library.')
    parser.add_option(
        '--disable-disambiguation',
        action='store_true',
        help='disables the disambiguation process altogether,'
        ' NOTE: this may, depending on your toolchain, produce'
        ' output with some symbols at the top layer if addr2line'
        ' could not get the entire source path.')
    parser.add_option(
        '--source-path',
        default='./',
        help='the path to the source code of the output binary, '
        'default set to current directory. Used in the'
        ' disambiguation process.')
    opts, _args = parser.parse_args()

    if ((not opts.library) and
            (not opts.nm_in)) or (opts.library and opts.nm_in):
        parser.error('exactly one of --library or --nm-in is required')
    if opts.nm_out:
        print('WARNING: --nm-out is deprecated and has no effect.',
              file=sys.stderr)
    if (opts.nm_in):
        if opts.jobs:
            print('WARNING: --jobs has no effect when used with --nm-in',
                  file=sys.stderr)
    if not opts.destdir:
        parser.error('--destdir is a required argument')
    if not opts.jobs:
        # Use the number of processors but cap between 2 and 4 since raw
        # CPU power isn't the limiting factor. It's I/O limited, memory
        # bus limited and available-memory-limited. Too many processes and
        # the computer will run out of memory and it will be slow.
        opts.jobs = max(2, min(4, multiprocessing.cpu_count()))

    # Resolve the binutils tools, falling back to whatever is on $PATH.
    if opts.addr2line_binary:
        assert os.path.isfile(opts.addr2line_binary)
        addr2line_binary = opts.addr2line_binary
    else:
        addr2line_binary = _find_in_system_path('addr2line')
        assert addr2line_binary, 'Unable to find addr2line in the path. '\
            'Use --addr2line-binary to specify location.'
    if opts.nm_binary:
        assert os.path.isfile(opts.nm_binary)
        nm_binary = opts.nm_binary
    else:
        nm_binary = _find_in_system_path('nm')
        assert nm_binary, 'Unable to find nm in the path. Use --nm-binary '\
            'to specify location.'

    if opts.pak:
        # BUG FIX: the assertion message lacked a %s placeholder, so a
        # missing pak file raised TypeError instead of a readable
        # AssertionError.
        assert os.path.isfile(opts.pak), 'Could not find %s' % opts.pak

    print('addr2line: %s' % addr2line_binary)
    print('nm: %s' % nm_binary)

    if opts.library:
        CheckDebugFormatSupport(opts.library, addr2line_binary)

    # Prepare output directory and report guts
    if not os.path.exists(opts.destdir):
        os.makedirs(opts.destdir, 0o755)
    nm_out = os.path.join(opts.destdir, 'nm.out')
    if opts.no_nm_out:
        nm_out = None

    # Copy report boilerplate into output directory. This also proves that
    # the output directory is safe for writing, so there should be no
    # problems writing the nm.out file later.
    data_js_file_name = os.path.join(opts.destdir, 'data.js')
    d3_out = os.path.join(opts.destdir, 'd3')
    if not os.path.exists(d3_out):
        os.makedirs(d3_out, 0o755)
    d3_src = os.path.join(os.path.dirname(__file__), '..', '..', 'd3', 'src')
    template_src = os.path.join(os.path.dirname(__file__), 'template')
    shutil.copy(os.path.join(d3_src, 'LICENSE'), d3_out)
    shutil.copy(os.path.join(d3_src, 'd3.js'), d3_out)
    shutil.copy(os.path.join(template_src, 'index.html'), opts.destdir)
    shutil.copy(os.path.join(template_src, 'D3SymbolTreeMap.js'), opts.destdir)

    # Run nm and/or addr2line to gather the data
    symbols = GetNmSymbols(opts.nm_in, nm_out, opts.library, opts.jobs,
                           opts.verbose is True, addr2line_binary, nm_binary,
                           opts.disable_disambiguation is None,
                           opts.source_path)

    # Post-processing
    if opts.pak:
        AddPakData(symbols, opts.pak)

    if opts.library:
        symbol_path_origin_dir = os.path.dirname(os.path.abspath(opts.library))
    else:
        # Just a guess. Hopefully all paths in the input file are absolute.
        symbol_path_origin_dir = os.path.abspath(os.getcwd())

    # Dump JSON for the HTML report.
    DumpCompactTree(symbols, symbol_path_origin_dir, data_js_file_name)
    print('Report saved to ' + opts.destdir + '/index.html')
if __name__ == '__main__':
sys.exit(main())
|
import unittest
import config
from fpgen import Book
from fpgen import userOptions
class TestBookVarious(unittest.TestCase):
    """Unit tests for Book version parsing and version-compatibility checks."""

    def setUp(self):
        # Minimal Book instance; only version-related methods are exercised.
        self.book = Book(None, None, 0, 't')

    def tearDown(self):
        # Reset the shared user options so state does not leak between tests.
        config.uopt = userOptions()

    # Test the method Book.parseVersion
    def test_book_version(self):
        """parseVersion splits 'major.minor<letter>' into three strings."""
        major, minor, letter = Book.parseVersion("4.55d")
        self.assertEqual(major, "4")
        self.assertEqual(minor, "55")
        self.assertEqual(letter, "d")

    def test_book_version_noletter(self):
        """parseVersion returns an empty letter when none is present."""
        major, minor, letter = Book.parseVersion("4.55")
        self.assertEqual(major, "4")
        self.assertEqual(minor, "55")
        self.assertEqual(letter, "")

    def test_book_version_check_equal(self):
        """versionCheck passes when generator and tool versions match."""
        self.book.umeta.add("generator", "4.55a")
        config.VERSION = "4.55a"
        self.book.versionCheck()

    def test_book_version_check_more_letter(self):
        """versionCheck passes when the tool has a newer letter revision."""
        self.book.umeta.add("generator", "4.55a")
        config.VERSION = "4.55f"
        self.book.versionCheck()

    def test_book_version_check_less_letter(self):
        """versionCheck exits with code 1 when the tool letter is older."""
        self.book.umeta.add("generator", "4.55f")
        config.VERSION = "4.55a"
        with self.assertRaises(SystemExit) as cm:
            self.book.versionCheck()
        self.assertEqual(cm.exception.code, 1)

    def test_book_version_check_more_minor(self):
        """versionCheck passes when the tool minor version is newer."""
        self.book.umeta.add("generator", "4.54")
        config.VERSION = "4.56"
        self.book.versionCheck()

    def test_book_version_check_less_minor(self):
        """versionCheck exits with code 1 when the tool minor is older."""
        self.book.umeta.add("generator", "4.55")
        config.VERSION = "4.54a"
        with self.assertRaises(SystemExit) as cm:
            self.book.versionCheck()
        self.assertEqual(cm.exception.code, 1)
|
import json

# NOTE(review): `unirest` is used below but never imported, and the Mashape
# API key is hard-coded in source -- move the key into configuration or an
# environment variable before shipping.

# output 1: fund summary
# BUG FIX: '//' is not a Python comment; the original '//output N' lines
# were SyntaxErrors and are rewritten as '#' comments.
response = unirest.get("https://stock.p.mashape.com/v1/fund/AAPL",
                       headers={
                           "X-Mashape-Key": "aHGqgGEXU8mshPl017c9cFPupR7Cp1gTJJ5jsndLHL6NxfgJ9y",
                           "Accept": "application/json"
                       }
                       )

# output 2: prediction
response = unirest.get("https://stock.p.mashape.com/v1/prediction/AAPL",
                       headers={
                           "X-Mashape-Key": "aHGqgGEXU8mshPl017c9cFPupR7Cp1gTJJ5jsndLHL6NxfgJ9y",
                           "Accept": "application/json"
                       }
                       )

# output 3: extended fund data
response = unirest.get("https://stock.p.mashape.com/v1/extended-fund/aaple",
                       headers={
                           "X-Mashape-Key": "aHGqgGEXU8mshPl017c9cFPupR7Cp1gTJJ5jsndLHL6NxfgJ9y",
                           "Accept": "application/json"
                       }
                       )
|
import csv
from googletrans import Translator
translator = Translator()
def translate(row, column):
    """Return *row* extended with the Korean translation of row[column]."""
    translated = translator.translate(row[column], dest="ko")
    return row + [translated.text]
def convert(input_file, output_file):
    """Copy a CSV, appending a 'Translated' column with the Korean
    translation of each row's 'Abstract' cell.

    The header row gains a 'Translated' column; every data row's Title is
    printed as progress. The output starts with a BOM so spreadsheet apps
    detect UTF-8.
    """
    with open(input_file, 'r', encoding='utf-8') as fi:
        with open(output_file, 'w', encoding='utf-8', newline='') as fo:
            fo.write('\ufeff')  # UTF-8 BOM.
            reader = csv.reader(fi)
            writer = csv.writer(fo)
            rows = iter(reader)
            header_row = next(rows, None)
            if header_row is not None:
                column_abstract = header_row.index('Abstract')
                column_title = header_row.index('Title')
                writer.writerow(header_row + ['Translated'])
                for row in rows:
                    print(row[column_title])  # Progress indicator.
                    writer.writerow(translate(row, column_abstract))
convert('scopus.csv', 'converted.csv')
|
"""udislist URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.http.response import HttpResponse, HttpResponseBadRequest
def multiply(request, x, y):
    """Render "x * y = product", rejecting a non-numeric x with HTTP 400.

    The URL pattern lets x be any word characters, so it is validated
    here; y is digits-only by its pattern.
    """
    if not x.isnumeric():
        return HttpResponseBadRequest("bad request")
    product = int(x) * int(y)
    return HttpResponse("{} * {} = {}".format(x, y, product))
def age(request, name, value):  # view function
    """Render "<Name>, you are <value> years old." with the name title-cased."""
    message = "{}, you are {} years old.".format(name.title(), value)
    return HttpResponse(message)
def boom(request):
    # Deliberately failing view: raises AssertionError so error handling
    # (500 / debug page) can be exercised via the /boom/ URL.
    assert False, "BOOM!!!!"
# TODO: http://127.0.0.1:8000/x/23/45/ ==> "23 x 45 = 1035"
urlpatterns = [
    # Arithmetic demo: x is \w+ so the view validates it; y is digits-only.
    url(r'^x/(?P<x>\w+)/(?P<y>\d+)/$', multiply),
    # Both argument orders reach the same view via named groups.
    url(r'^age/(?P<name>\w+)/(?P<value>\d+)/$', age),
    url(r'^age/(?P<value>\d+)/(?P<name>\w+)/$', age),
    # Fixed-age variants supplied through kwargs.
    url(r'^age/(?P<name>\w+)/$', age, kwargs={'value': '33'}),
    url(r'^kid/(?P<name>\w+)/$', age, kwargs={'value': '2'}),
    # Deliberate-failure endpoint (see boom()).
    url(r'^boom/', boom),
    url(r'^admin/', admin.site.urls),
]
|
#!/usr/bin/env python
"""
Author: Patrick Monnahan
Purpose: This script creates a bed file of NON-genic regions to be excluded in structural variant discovery
Takes the following arguments:
-gff : Full path to gff file containing gene locations (can contain other elements as well...these will just be ignored)
-b : Buffer on either side of gene boundary. E.g Gene1 ends at 100bp and Gene2 starts at 500bp. If b=10, then the non-genic region will be 110-490')
"""
# Import necessary arguments
import argparse

# Specify arguments to be read from the command line
parser = argparse.ArgumentParser()
parser.add_argument('-gff', type=str, metavar='gff_path', required=True, help='path to gff file')
parser.add_argument('-b', type=int, metavar='buffer', default=2000, help='Buffer on either side of gene boundary. E.g Gene1 ends at 100bp and Gene2 starts at 500bp. If b=10, then the non-genic region will be 110-490')
# -r limits output to the main (numbered) chromosomes only.
parser.add_argument('-r', action='store_true', help='restrictTo_mainChroms')
args = parser.parse_args()
def merge_intervals(intervals):
    """Collapse overlapping (start, stop) intervals into disjoint ones.

    Intervals are sorted by start; touching or overlapping neighbors are
    merged. Adapted from
    https://codereview.stackexchange.com/questions/69242/merging-overlapping-intervals
    """
    merged = []
    for start, stop in sorted(intervals, key=lambda tup: tup[0]):
        if merged and start <= merged[-1][1]:
            # Overlaps (or touches) the previous interval: extend it.
            prev_start, prev_stop = merged[-1]
            merged[-1] = (prev_start, max(prev_stop, stop))
        else:
            merged.append((start, stop))
    return merged
included_chrs = ['1','2','3','4','5','6','7','8','9','10','chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr01','chr02','chr03','chr04','chr05','chr06','chr07','chr08','chr09','chr10']

# Collect buffered (start, stop) gene intervals per chromosome.
Unmerged_Intervals = {}

# Begin looping over lines in the gff file
with open(args.gff, 'r') as gff:
    for line in gff:
        if line.startswith("#"):
            continue  # Ignore all lines in the header of the gff
        fields = line.strip("\n").split()
        if fields[1] == "wareLab":
            continue  # Catch for the B73 gff's odd first line.
        if fields[2] != "gene":
            continue  # Only gene entries contribute intervals.
        chrom = fields[0].replace("M", "chr")
        if args.r and chrom not in included_chrs:
            continue  # Restricted to main chromosomes.
        # Apply the buffer to the lower bound, clamping at zero.
        # NOTE(review): the buffer is only subtracted from start, not
        # added to stop -- confirm that matches the intended "either
        # side" behavior described in -b's help text.
        start = max(int(fields[3]) - args.b, 0)
        stop = int(fields[4])
        # BUG FIX: use dict membership instead of comparing against the
        # previous line's chromosome. The old_chrom heuristic overwrote a
        # chromosome's earlier intervals whenever its entries were not
        # contiguous in the gff.
        Unmerged_Intervals.setdefault(chrom, []).append((start, stop))

# Merge overlapping intervals and print one "chrom:start-stop" per line.
for chrom in Unmerged_Intervals:
    for start, stop in merge_intervals(Unmerged_Intervals[chrom]):
        print(f"{chrom}:{start}-{stop}")
|
from datetime import datetime
import backtrader as bt
import backtrader.indicators as btind
class MySignal(bt.Indicator):
    # One output line named 'signal'; positive values mean price is above
    # its moving average, negative below (consumed by add_signal below).
    lines = ('signal',)
    # SMA lookback window, overridable at instantiation.
    params = (('period', 30),)

    def __init__(self):
        # Signal is the gap between price and its simple moving average.
        self.lines.signal = self.data - bt.indicators.SMA(period=self.p.period)
cerebro = bt.Cerebro()  # create a "Cerebro" engine instance

# Create a data feed: daily AAPL bars from Yahoo Finance.
data = bt.feeds.YahooFinanceData(dataname='AAPL',
                                 fromdate=datetime(2016, 1, 1),
                                 todate=datetime(2019, 5, 31))

cerebro.adddata(data)
# Go long when MySignal is positive, short when negative.
cerebro.add_signal(bt.SIGNAL_LONGSHORT, MySignal)
# Record the run (data, trades, equity) as CSV.
cerebro.addwriter(bt.WriterFile, csv=True, out='test_file.csv')
result = cerebro.run()
# print(result)
cerebro.plot()
|
"""Unit test for netutils.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import socket
import sys
import unittest
from treadmill import netutils
@unittest.skipUnless(sys.platform.startswith('linux'), 'Requires Linux')
class NetutilsTest(unittest.TestCase):
    """Tests for treadmill.netutils
    """

    def test_netstat_listen_0_0_0_0(self):
        """netstat reports ports listening on all interfaces."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.bind(('0.0.0.0', 0))
            sock.listen(1)
            port = sock.getsockname()[1]
            self.assertIn(port, netutils.netstat(os.getpid()))
        finally:
            # BUG FIX: close in finally -- the original leaked the socket
            # whenever the assertion failed.
            sock.close()

    def test_netstat_listen_127_0_0_1(self):
        """netstat does not report ports bound to loopback only."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.bind(('127.0.0.1', 0))
            sock.listen(1)
            port = sock.getsockname()[1]
            # Do not report ports listening on 127.0.0.1
            self.assertNotIn(port, netutils.netstat(os.getpid()))
        finally:
            sock.close()
if __name__ == '__main__':
unittest.main()
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Copyright (c) 2014 by Delphix. All rights reserved.
#
from types import *
from string import upper
import threading
class SimpleStorage():
"""
A simple interface to a storage system. You might imageine that behind this
would be a database of some sort. However for ease of setting up and using
this demo app, we just store the objects in dictionaries.
"""
lock = threading.Lock()
# a dictionary of dictionaries, where each sub-dictionary is keyed by the
# rooted type of the objects that may be stored in it
collections = {}
# A list of unique id's for each type.
nextUniqueInt = {}
# object to notify when we've created, updated or deleted an object
notificationSystem = None
# cached value of a function to give us the right type
_rootedTypeForType = None
    def __init__(self, rootedTypeForType, notificationSystem):
        """Create storage.

        rootedTypeForType -- callable mapping a type name to its rooted type
        notificationSystem -- object to notify of create/update/delete events
        """
        self.notificationSystem = notificationSystem;
        self._rootedTypeForType = rootedTypeForType  # function to look up the rooted type for a type
def _getCollection(self, rootedTypeName):
""" Get a specific collection """
if type(rootedTypeName) is not StringType:
raise Exception('rootedTypeName parameter must be a string')
if rootedTypeName not in self.collections:
self.collections[rootedTypeName] = {}
return self.collections[rootedTypeName]
def getAll(self, rootedTypeName):
"""
Returns a list of all objects of the specified type. Note that the type
must be a rooted type, and that the returned list may be empty
Raises exception if parameters are bad
"""
if type(rootedTypeName) is not StringType:
raise Exception('rootedTypeName parameter must be a string')
self.lock.acquire()
if rootedTypeName not in self.collections:
collection = {}
else:
collection = self.collections[rootedTypeName];
results = []
for key in collection:
results.append(collection[key])
self.lock.release()
return results
def get(self, rootedTypeName, reference):
"""
Returns an objct of type typeName with reference reference, or None
The type name must be a rooted type
Raises exception if parameters are bad
"""
if type(rootedTypeName) is not StringType:
raise Exception('rootedTypeName parameter must be a string.')
if type(reference) is not StringType:
raise Exception('reference parameter must be a string.')
self.lock.acquire()
collection = self._getCollection(rootedTypeName)
if reference in collection:
result = collection[reference]
else:
result = None
self.lock.release()
return result
def add(self, obj):
"""
Adds the specified object to the storage system
The type name must be a rooted type
Raises exception if parameters are bad
"""
if type(obj) is not InstanceType:
raise Exception('obj must be an instantiated object.')
if not hasattr(obj, 'reference'):
raise Exception('Can not store an object that has no reference attribute.')
if obj.reference is not None:
raise Exception('Can not add an object that already has a reference.')
rootedTypeName = self._rootedTypeForType(obj.type);
self.lock.acquire()
if rootedTypeName not in self.nextUniqueInt:
self.nextUniqueInt[rootedTypeName] = 0
newReference = upper(rootedTypeName) + '-' + str(self.nextUniqueInt[rootedTypeName])
self.nextUniqueInt[rootedTypeName] = self.nextUniqueInt[rootedTypeName] + 1
obj.reference = newReference
collection = self._getCollection(rootedTypeName)
collection[newReference] = obj
self.notificationSystem.create(rootedTypeName, newReference)
self.lock.release()
return obj
def update(self, obj):
""" Updates an object with the same reference and type as the one passed in """
if type(obj) is not InstanceType:
raise Exception('obj must be an instantiated object.')
if not hasattr(obj, 'reference'):
raise Exception('Can not update an object that has no reference attribute.')
if obj.reference is None:
raise Exception('Can not update an object if the new copy has no reference value.')
rootedTypeName = self._rootedTypeForType(obj.type);
self.lock.acquire()
collection = self._getCollection(rootedTypeName)
collection[obj.reference] = obj
self.notificationSystem.update(rootedTypeName, obj.reference)
self.lock.release()
return obj
def delete(self, rootedTypeName, reference):
""" Deletes the specified object """
if type(rootedTypeName) is not StringType:
raise Exception('typeName parameter must be a string.')
if type(reference) is not StringType:
raise Exception('reference parameter must be a string.')
self.lock.acquire()
collection = self._getCollection(rootedTypeName)
result = collection.pop(reference, None)
if result is not None:
self.notificationSystem.delete(rootedTypeName, reference)
self.lock.release()
|
from contextlib import contextmanager
import os
from dotenv import load_dotenv
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from sqlalchemy.exc import IntegrityError, ProgrammingError
from sqlalchemy.orm import sessionmaker, Session
from bitcoin_acks.logging import log
# Load environment variables from a .env file (if present) at import time,
# so the POSTGRES_* / PG* settings read below are available.
load_dotenv()

# NOTE(review): `is_test` is never read in this module; presumably a flag
# toggled by test harnesses elsewhere -- confirm before removing.
is_test = False
def get_url():
    """Build the PostgreSQL connection URL from environment variables.

    Raises KeyError if any of the required POSTGRES_* / PG* variables
    is missing from the environment.
    """
    env = os.environ
    url = URL(
        drivername='postgresql+psycopg2',
        username=env['POSTGRES_USER'],
        password=env['POSTGRES_PASSWORD'],
        host=env['PGHOST'],
        port=env['PGPORT'],
        database=env['POSTGRES_DB'],
    )
    log.debug('get PG url', pg_url=url)
    return url
pg_url = get_url()
@contextmanager
def session_scope(echo=False,
                  raise_integrity_error=True,
                  raise_programming_error=True) -> Session:
    """
    Provide a transactional scope around a series of database operations.

    Commits on clean exit; rolls back on any exception.  IntegrityError and
    ProgrammingError are rolled back and swallowed when the corresponding
    raise_* flag is False; every other exception always propagates.

    NOTE: because of @contextmanager, callers receive a context manager that
    *yields* a Session (use `with session_scope() as session:`).
    """
    # A fresh engine (and connection pool) is created on every call; it is
    # disposed in the finally block below -- previously the pool was never
    # released, leaking connections until garbage collection.
    engine = create_engine(pg_url, echo=echo,
                           connect_args={'sslmode': 'prefer'})
    session_maker = sessionmaker(bind=engine)
    session = session_maker()
    try:
        yield session
        session.commit()
    except IntegrityError:
        session.rollback()
        if raise_integrity_error:
            raise
    except ProgrammingError:
        session.rollback()
        if raise_programming_error:
            raise
    except BaseException:
        # Explicit spelling of the original bare `except:` -- still rolls
        # back on KeyboardInterrupt/SystemExit before re-raising.
        session.rollback()
        raise
    finally:
        session.close()
        engine.dispose()
|
# Function: Permutations
# Dependency: itertools (standard library)
# Input: list such as ['e', 'a', 'e', 'o', 'r', 't', 's', 'm', 'n', 'z'], and a integer such as 10
# Output: set of permutations of your input
# Description: previously an empty stub returning None; now implemented.
def Permutations(input_list, number):
    """Return the set of all length-`number` permutations of `input_list`.

    Each permutation is a tuple.  Duplicate elements in the input collapse
    naturally because the result is a set.  Returns an empty set when
    `number` exceeds len(input_list) (matching itertools semantics).
    """
    # Local import: this sub-file has no top-level import block.
    from itertools import permutations
    return set(permutations(input_list, number))


# Test Codes
if __name__ == "__main__":
    Letters = ['e', 'a', 'e', 'o', 'r', 't', 's', 'm', 'n', 'z']
    number = 10
    Permutations(Letters, number)
# -*- coding: utf-8 -*-
from openerp import models, fields, api
import datetime
class check_vaucher_do(models.Model):
    # Voucher concept line: one monetary concept attached to an
    # account.voucher record.
    _name = 'check.vaucher_do'

    # Optional date of the concept line.
    date_con = fields.Date(string="Fecha", required=False, )
    # Human-readable description of the concept (mandatory).
    description = fields.Char(string="Descripcion", required=True, )
    # Monetary amount of this concept line (mandatory).
    amount_check = fields.Float(string="Monto", required=True, )
    # Back-reference to the parent voucher; the inverse side of
    # check_vaucher.concepts_ids defined in this same file.
    concept_id = fields.Many2one(comodel_name="account.voucher", string="", required=False, )
class check_vaucher(models.Model):
    # Extends the standard account.voucher model with a list of concept
    # lines (check.vaucher_do records linked via their concept_id field).
    _inherit = 'account.voucher'

    concepts_ids = fields.One2many(comodel_name="check.vaucher_do", inverse_name="concept_id", string="Conceptos", required=False, )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.