import pytest
from deep_lyric_visualizer.generator.generator_object import (GeneratorObject)
from deep_lyric_visualizer.generator.generation_environment import (GenerationEnvironment,
WikipediaBigGANGenerationEnviornment)
from deep_lyric_visualizer.generator.generatorio import GeneratorIO, PickleGeneratorIO, YAMLGeneratorIO
from unittest.mock import Mock
class TestGenerator:
def test_initialization(self):
mock_pickle_wiki = Mock(WikipediaBigGANGenerationEnviornment)
mock_yaml_wiki = Mock(WikipediaBigGANGenerationEnviornment)
mock_pickle_wiki.SAVE_FILETYPE = 'pickle'
mock_yaml_wiki.SAVE_FILETYPE = 'yaml'
mock_pickle_some_other_gen_env = Mock(GenerationEnvironment)
mock_yaml_some_other_gen_env = Mock(GenerationEnvironment)
mock_yaml_some_other_gen_env.SAVE_FILETYPE = 'yaml'
mock_pickle_some_other_gen_env.SAVE_FILETYPE = 'pickle'
not_a_gen_env = Mock(str)
gen = GeneratorObject(mock_pickle_wiki)
assert isinstance(gen.env, GenerationEnvironment)
assert isinstance(gen.env, WikipediaBigGANGenerationEnviornment)
assert isinstance(gen.genio, GeneratorIO)
assert isinstance(gen.genio, PickleGeneratorIO)
gen = GeneratorObject(mock_yaml_wiki)
assert isinstance(gen.env, GenerationEnvironment)
assert isinstance(gen.env, WikipediaBigGANGenerationEnviornment)
assert isinstance(gen.genio, GeneratorIO)
assert isinstance(gen.genio, YAMLGeneratorIO)
gen = GeneratorObject(mock_yaml_some_other_gen_env)
assert isinstance(gen.env, GenerationEnvironment)
assert not isinstance(gen.env, WikipediaBigGANGenerationEnviornment)
assert isinstance(gen.genio, GeneratorIO)
assert isinstance(gen.genio, YAMLGeneratorIO)
gen = GeneratorObject(mock_pickle_some_other_gen_env)
assert isinstance(gen.env, GenerationEnvironment)
assert not isinstance(gen.env, WikipediaBigGANGenerationEnviornment)
assert isinstance(gen.genio, GeneratorIO)
assert isinstance(gen.genio, PickleGeneratorIO)
with pytest.raises(ValueError):
gen = GeneratorObject(not_a_gen_env)
|
# -*- coding: utf-8 -*-
from scipy import signal
import numpy as np
import matplotlib.pyplot as plt
fig_path = "/Users/gabriel/Documents/Research/USGS_Work/gmprocess/figs/spectra/"
#%%
def get_fft(series, fs, nfft):
    ft = np.fft.fft(series, nfft)
    freq = np.fft.fftfreq(nfft, d=1 / fs)
return freq, ft
def get_ifft(series, fs, nfft, real=True):
if real:
        ift = np.fft.ifft(series, nfft).real
else:
        ift = np.fft.ifft(series, nfft)
return ift
def ps_psd_difference(ps, psd):
    """Element-wise ratio psd/ps (despite the name, a ratio rather than a difference)."""
    diff = []
if len(ps) == len(psd):
diff = np.zeros(len(ps))
for i in range(len(ps)):
diff[i] = psd[i]/ps[i]
else:
print("Spectra must be of the same size")
return diff
#%%
# Make a test signal
np.random.seed(0)
delta = .01
fs = 1/ delta
time_vec = np.arange(0, 70, delta)
#%% Sine wave
delta = .01
fs = 1/ delta
t = np.arange(0,256,delta)
wndw_factor=500
overlap_factor=2
nfft = len(t)
nperseg = len(t) // wndw_factor
noverlap = nperseg // overlap_factor
# filename = "sine_wave_input-del_.01-nfft_256k"
# plt.plot(t,np.sin(t))
# plt.title("Test sine wave, ∆=0.01, N=256000")
# plt.savefig(fname=fig_path + filename + ".png", dpi=500)
# plt.show()
#%% Calculate PSD of test signal with Welch's Method
freqs_psd, psd = signal.welch(np.sin(t),fs=fs, nfft=nfft, nperseg=nperseg, noverlap=noverlap, scaling="density")
# freqs_psd, psd = signal.welch(np.sin(t),fs=fs, nfft=nfft, nperseg=nperseg, scaling="density")
# freqs_psd, psd = signal.welch(np.sin(t),fs=fs, nfft=nfft, scaling="density")
# freqs_psd, psd = signal.welch(np.sin(t),fs=fs, scaling="density")
# freqs_psd, psd = signal.welch(np.sin(t), scaling="density")
freqs_ps, ps = signal.welch(np.sin(t),fs=fs, nfft=nfft, nperseg=nperseg, noverlap=noverlap, scaling="spectrum")
# freqs_ps, ps = signal.welch(np.sin(t),fs=fs, nfft=nfft, nperseg=nperseg, scaling="spectrum")
# freqs_ps, ps = signal.welch(np.sin(t),fs=fs, nfft=nfft, scaling="spectrum")
# freqs_ps, ps = signal.welch(np.sin(t),fs=fs,scaling="spectrum")
# freqs_ps, ps = signal.welch(np.sin(t),scaling="spectrum")
diff = ps_psd_difference(ps,psd)
# Note: freqs are the same for both psd and ps
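# The two Welch outputs above differ only by a constant scale factor: for a
# window w, PS = PSD * ENBW, where ENBW = fs * sum(w**2) / sum(w)**2 is the
# window's equivalent noise bandwidth in Hz. The check below is a small sketch
# of that relationship, assuming the default Hann window used by signal.welch.
w = signal.get_window("hann", int(nperseg))
enbw = fs * np.sum(w ** 2) / np.sum(w) ** 2
# diff holds psd/ps, so away from numerical noise it should sit near 1/ENBW
print("1/ENBW =", 1.0 / enbw, " median(psd/ps) =", np.median(diff[np.isfinite(diff)]))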
# filename = "sine_wave-default_fs_and_nfft"
# filename = "sine_wave-default_fs_and_nfft_nperseg"
# filename = "sine_wave-set_fs_and_default_nfft_nperseg"
filename = "sine_wave-using_fs_and_nfft_nperseg->" + str(wndw_factor) + "->" + str(100/overlap_factor) + "perc_overlap"
# No Scaling
plt.plot(freqs_psd, psd, label="PSD")
plt.plot(freqs_ps, ps, "--", label="PS")
plt.title("Parameter Testing for Welch's Method")
plt.legend()
plt.savefig(fname=fig_path + filename + "-no_scaling.png", dpi=500)
plt.show()
# Log x
plt.semilogx(freqs_psd, psd, label="PSD")
plt.semilogx(freqs_ps, ps, "--", label="PS")
plt.title("Parameter Testing for Welch's Method")
plt.legend()
plt.savefig(fname=fig_path + filename + "-logx.png", dpi=500)
plt.show()
# Log Y
plt.semilogy(freqs_psd, psd, label="PSD")
plt.semilogy(freqs_ps, ps, "--", label="PS")
plt.title("Parameter Testing for Welch's Method")
plt.legend()
plt.savefig(fname=fig_path + filename + "-logy.png", dpi=500)
plt.show()
#%% Scaling differences
# PSD / PS scaling ratios
# plt.figure(figsize=(4, 6))
plt.semilogy(freqs_psd, diff, label="PSD/PS ratio")
ax = plt.gca()
# plt.annotate("Variation due to rounding error", xy=(2, 1), xytext=(3, 1.5))
plt.title("Differencing PSD and PS")
plt.legend()
plt.savefig(fname=fig_path + "diff-" + filename + "-logy.png", dpi=500)
plt.show()
#%% Calculate PSD of test signal with just a periodogram
# freqs_ps, ps = signal.periodogram(np.sin(t),fs)
# freqs_psd, psd = signal.periodogram(np.sin(t),fs,scaling="density")
# # No Scaling
# plt.plot(freqs_psd, psd, label="PSD")
# plt.plot(freqs_ps, ps, "--", label="PS")
# plt.legend()
# plt.show()
# # Log x
# plt.semilogx(freqs_psd, psd, label="PSD")
# plt.semilogx(freqs_ps, ps, "--", label="PS")
# plt.legend()
# plt.show()
# # Log Y
# plt.semilogy(freqs_psd, psd, label="PSD")
# plt.semilogy(freqs_ps, ps, "--", label="PS")
# plt.legend()
# plt.show()
#%% Forward FFT
# # Forward transform
# t = np.arange(256)
# sp = np.fft.fft(np.sin(t))
# freq = np.fft.fftfreq(t.shape[-1])
# # plt.plot(freq, sp.real, freq, sp.imag)
# plt.plot(freq, sp.real)
# plt.show()
# # Forward transform, ortho normalization
# t = np.arange(256)
# sp = np.fft.fft(np.sin(t),norm='ortho')
# freq = np.fft.fftfreq(t.shape[-1])
# # plt.plot(freq, sp.real, freq, sp.imag)
# plt.plot(freq, sp.real)
# plt.show()
# Forward transform finer spacing (larger NFFT)
t = np.arange(0,256,0.1)
sp = np.fft.fft(np.sin(t))
freq = np.fft.fftfreq(t.shape[-1],0.1)
# plt.plot(freq, sp.real, freq, sp.imag)
# plt.plot(freq, sp.real)
# plt.show()
# # Forward transform, finer spacing, ortho normalization
# t = np.arange(0,256,0.1)
# sp = np.fft.fft(np.sin(t),norm='ortho')
# freq = np.fft.fftfreq(t.shape[-1],0.1)
# # plt.plot(freq, sp.real, freq, sp.imag)
# plt.plot(freq, sp.real)
# plt.show()
#
# Inverse FFT
#
# # Inverse transform
# t = np.arange(256)
# sig = np.fft.ifft(sp)
# plt.plot(t, sig)
# # plt.show()
# # Inverse transform, ortho normalization
# t = np.arange(256)
# sig = np.fft.ifft(sp, norm='ortho')
# plt.plot(t, sig)
# plt.show()
# Inverse transform, finer spacing (larger NFFT)
t = np.arange(0,256,0.1)
sig = np.fft.ifft(sp)
plt.plot(t, sig)
plt.show()
# Inverse transform, finer spacing, ortho normalization
# t = np.arange(0,256,0.1)
# sig = np.fft.ifft(sp, norm='ortho')
# plt.plot(t, sig)
# plt.show()
|
#!/usr/bin/env python3
import numpy as np
import pandas as pd
ORIGINAL_MODEL = 'purePursuitUSCity'
THRESHOLD_KEYS = {
'TTD': ['Time to destination (Source)', 'Time to destination (FollowUp)'],
'TTO': ['Balancing (Source)', 'Balancing (FollowUp)'],
}
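# A rough sketch of the input files main() below expects (column names are
# taken from the code; the values here are made up):
#
#   thresholds file  -> one row, one column per THRESHOLD_KEYS key:
#       TTD,TTO
#       1.5,2.0
#
#   test-distances file -> maps each test case id to its distance value:
#       testcase,distance
#       1,1200.0
#       2,870.5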
def evaluate_results(results, test_distances, thresholds):
false_positives = set()
possible_failures = 0
failures = 0
mutants_killed = set()
skipped = set()
for i in range(len(results)):
model = results.loc[i, "Model"]
        if pd.notna(model):
source = str(int(results.loc[i, "Test Case"]))
followup = f'{source}:{results.loc[i, "MRIP"]}'
distance = test_distances[source]
failure = [False, False]
for threshold_key in THRESHOLD_KEYS:
threshold = thresholds[threshold_key]
for test_num, results_col in enumerate(THRESHOLD_KEYS[threshold_key]):
test_id = (source, followup)[test_num]
if test_id not in skipped:
value = results.loc[i, results_col]
if model == ORIGINAL_MODEL and value >= 99999:
                            # value >= 99999 means the car did not reach the destination; skip this test case
skipped.add(test_id)
continue
if model != ORIGINAL_MODEL:
possible_failures += 1
if value / distance > threshold:
failure[test_num] = True
if model == ORIGINAL_MODEL:
if failures:
raise Exception('Original system results appeared after mutant results')
if failure[0]:
false_positives.add(source)
if failure[1]:
false_positives.add(followup)
else:
if failure[0] and source not in false_positives:
failures += 1
mutants_killed.add(model)
if failure[1] and followup not in false_positives:
failures += 1
mutants_killed.add(model)
return false_positives, possible_failures, failures, mutants_killed, skipped
def main():
import sys
if len(sys.argv) != 4:
print(f'./Evaluate.py <RESULTS_FILE> <TEST_DISTANCES_FILE> <THRESHOLDS_FILE>')
exit(0)
results_file = sys.argv[1]
test_distances_file = sys.argv[2]
thresholds_file = sys.argv[3]
results = None
if results_file.endswith('.xlsx'):
results = pd.read_excel(results_file, engine='openpyxl')
else:
results = pd.read_csv(results_file)
test_distances = None
if test_distances_file.endswith('.xlsx'):
test_distances = pd.read_excel(test_distances_file, engine='openpyxl')
else:
test_distances = pd.read_csv(test_distances_file)
test_distances = {
str(test_distances.loc[i, "testcase"]): test_distances.loc[i, "distance"]
for i in range(len(test_distances))
}
thresholds = None
if thresholds_file.endswith('.xlsx'):
thresholds = pd.read_excel(thresholds_file, engine='openpyxl')
else:
thresholds = pd.read_csv(thresholds_file)
thresholds = { key: thresholds.loc[0, key] for key in THRESHOLD_KEYS }
false_positives, possible_failures, failures, mutants_killed, skipped = evaluate_results(results=results, test_distances=test_distances, thresholds=thresholds)
print(f'DetectedFailures={failures}/{possible_failures}')
print(f'FPsCount={len(false_positives)}')
print(f'MutantsKilledCount={len(mutants_killed)}')
print(f'FPs={false_positives}')
print(f'MutantsKilled={mutants_killed}')
print(f'Skipped={skipped}')
if __name__ == '__main__':
main()
|
from django.contrib import admin
from .models import Coins
# Register your models here.
@admin.register(Coins)
class CoinAdmin(admin.ModelAdmin):
list_display = ['id', 'username', 'coins']
|
class User():
def __init__(self, username, password, nickname):
self._username = username
self._password = password
self._nickname = nickname
self._winRate = None
self._wins = 0
self._loses = 0
self._gamesPlayed = 0
self._doraemonPlayed = 0
self._doraemonWins = 0
self._doraemonLoses = 0
self._doraemonWinRate = 0
self.currentIP = None
def loginFrom(self, ip):
self.currentIP = ip
def userLogout(self):
self.currentIP = None
@property
def username(self):
return self._username
@username.setter
def username(self, value):
if not isinstance(value , str):
raise TypeError("Value must be a string type")
elif len(value) < 6:
raise ValueError("Value must be at least 6 characters")
self._username = value
@property
def password(self):
return self._password
@password.setter
def password(self, value):
if not isinstance(value, str):
raise TypeError("Value must be a string type")
elif len(value) < 8:
raise ValueError("Value must be at least 8 characters")
self._password = value
@property
def nickname(self):
return self._nickname
@nickname.setter
def nickname(self, value):
if not isinstance(value, str):
raise TypeError("Value must be string type")
elif len(value) < 4:
raise ValueError("Value must be at least 4 characters")
self._nickname = value
@property
def winRate(self):
return self._winRate
@property
def wins(self):
return self._wins
@wins.setter
def wins(self, value):
if not isinstance(value, int):
raise TypeError("Value must be an integer type")
elif value < self._wins:
raise ValueError("Value can only be incremented")
self._wins = value
        self._winRate = self.wins / self.gamesPlayed
@property
def loses(self):
return self._loses
@loses.setter
def loses(self, value):
if not isinstance(value, int):
raise TypeError("Value must be an integer type")
elif value < self._loses:
raise ValueError("Value can only be incremented")
self._loses = value
@property
def gamesPlayed(self):
return self._gamesPlayed
@gamesPlayed.setter
def gamesPlayed(self, value):
if not isinstance(value, int):
raise TypeError("Value must be an integer type")
elif value < self._gamesPlayed:
raise ValueError("Value can only be incremented")
self._gamesPlayed = value
@property
def doraemonPlayed(self):
return self._doraemonPlayed
@doraemonPlayed.setter
def doraemonPlayed(self, value):
if not isinstance(value, int):
raise TypeError("Value must be an integer type")
        elif value < self._doraemonPlayed:
raise ValueError("Value can only be incremented")
self._doraemonPlayed = value
@property
def doraemonWins(self):
return self._doraemonWins
@doraemonWins.setter
def doraemonWins(self, value):
if not isinstance(value, int):
raise TypeError("Value must be an integer type")
        elif value < self._doraemonWins:
raise ValueError("Value can only be incremented")
self._doraemonWins = value
self._doraemonWinRate = self.doraemonWins / self.doraemonPlayed
@property
def doraemonLoses(self):
return self._doraemonLoses
@doraemonLoses.setter
def doraemonLoses(self, value):
if not isinstance(value, int):
raise TypeError("Value must be an integer type")
        elif value < self._doraemonLoses:
raise ValueError("Value can only be incremented")
self._doraemonLoses = value
@property
def doraemonWinRate(self):
return self._doraemonWinRate
def gameWonDoraemon(self):
self.doraemonPlayed += 1
self.doraemonWins += 1
def gameLostDoraemon(self):
self.doraemonPlayed += 1
self.doraemonLoses += 1
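# Minimal usage sketch (not part of the original module). gamesPlayed is
# bumped before wins so the win-rate division has a non-zero denominator.
if __name__ == "__main__":
    u = User("player_one", "s3cretpass", "Nobi")
    u.gamesPlayed += 1
    u.wins += 1
    print(u.winRate)          # 1.0
    u.gameWonDoraemon()
    print(u.doraemonWinRate)  # 1.0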
|
from scripts.register_fragments import *
class RefinePair(RegisterFragment):
def __init__(self, setting_path, ply_files, s, t, transform):
super().__init__(setting_path, ply_files, s, t)
self._transform = transform
def refine(self, source, target, init_transform):
""" multiscale icp refinement
:param source: source fragment
:param target: target fragment
:param init_transform: initial estimation of transformation matrix
:return: [success, transformation, information]
"""
(transformation, information) = self.multiscale_icp(
source, target,
[self._voxel_len, self._voxel_len / 2.0, self._voxel_len / 4.0], [50, 30, 14],
init_transform)
# TODO: debug mode visualization
return True, transformation, information
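    # The refinement above runs ICP at three resolutions, halving the voxel
    # size each pass ([v, v/2, v/4]) while shrinking the iteration budget
    # ([50, 30, 14]), so the coarse passes are cheap and the final pass only
    # polishes an already-close estimate.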
def run(self):
"""refine registration of a pair of fragment
:return: [success, transformation, information]
"""
self.configure()
print("reading %s ..." % self._ply_files[self._s])
source = o3d.io.read_point_cloud(self._ply_files[self._s])
print("reading %s ..." % self._ply_files[self._t])
target = o3d.io.read_point_cloud(self._ply_files[self._t])
(success, transformation, information) = self.refine(source, target, self._transform)
if self._debug:
utils.print_m(transformation)
utils.print_m(information)
return success, transformation, information
class RefineRegistration(RegisterFragments):
def __init__(self, config):
super().__init__(config)
def build_posegraph(self):
"""build posegraph
:return: None
"""
static_io = self.config.setting.io.static_io
self.posegraph = o3d.io.read_pose_graph(os.path.join(self.scene_path(), static_io.scene.posegraph_optimized))
num_files = len(self._ply_files)
match_results = {}
for edge in self.posegraph.edges:
s = edge.source_node_id
t = edge.target_node_id
match_results[s * num_files + t] = MatchResult(s, t, edge.transformation)
self.config.export(self.setting_path())
if self.parallel():
from joblib import Parallel, delayed
import multiprocessing as mp
refines = [RefinePair(self.setting_path(), self._ply_files, match_results[r].s, match_results[r].t,
match_results[r].transformation)
for r in match_results]
num_processes = min(mp.cpu_count(), max(len(match_results), 1))
num_processes = min(num_processes, self.config.setting.parameters.cpu_num)
results = Parallel(n_jobs=num_processes)(delayed(wrap_run)(refine) for refine in refines)
for i, r in enumerate(match_results):
match_results[r].success = results[i][0]
match_results[r].transformation = results[i][1]
match_results[r].information = results[i][2]
else:
for r in match_results:
refine = RefinePair(
self.setting_path(), self._ply_files, match_results[r].s, match_results[r].t,
match_results[r].transformation)
(match_results[r].success, match_results[r].transformation, match_results[r].information) \
= refine.run()
self.posegraph = o3d.pipelines.registration.PoseGraph()
self.odometry = np.identity(4)
self.posegraph.nodes.append(o3d.pipelines.registration.PoseGraphNode(self.odometry))
for r in match_results:
if match_results[r].success:
self.update_posegraph(
match_results[r].s, match_results[r].t,
match_results[r].transformation,
match_results[r].information)
o3d.io.write_pose_graph(
os.path.join(self.scene_path(), self.config.setting.io.static_io.scene.refined_posegraph),
self.posegraph)
def optimize_posegraph(self):
""" optimize pose graph
:return: None
"""
static_io = self.config.setting.io.static_io
parameters = self.config.setting.parameters
posegraph_path = os.path.join(self.scene_path(), static_io.scene.refined_posegraph)
optimized_posegraph_path = os.path.join(self.scene_path(), static_io.scene.refined_optimized_posegraph)
global_optimization(posegraph_path, optimized_posegraph_path,
max_cor_dist= parameters.integration.voxel_len_coarse * 1.4,
pref_loop_closure=parameters.optimization.pref_loop_closure_register)
def run(self):
"""refine fragments registration results
:return: None
"""
print("Start refine rough registration of fragments.")
o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)
self._ply_files = utils.get_file_list(self.frag_path(), ".ply")
self.build_posegraph()
self.optimize_posegraph()
|
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_user
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower user.
description:
- Create, update, or destroy Ansible Tower users. See
U(https://www.ansible.com/tower) for an overview.
options:
username:
description:
- The username of the user.
required: True
first_name:
description:
- First name of the user.
last_name:
description:
- Last name of the user.
email:
description:
- Email address of the user.
required: True
password:
description:
- Password of the user.
superuser:
description:
        - User is a system wide administrator.
type: bool
default: 'no'
auditor:
description:
- User is a system wide auditor.
type: bool
default: 'no'
state:
description:
- Desired state of the resource.
default: "present"
choices: ["present", "absent"]
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: Add tower user
tower_user:
username: jdoe
password: foobarbaz
email: jdoe@example.org
first_name: John
last_name: Doe
state: present
tower_config_file: "~/tower_cli.cfg"
'''
from ansible.module_utils.ansible_tower import tower_argument_spec, tower_auth_config, tower_check_mode, HAS_TOWER_CLI
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def main():
argument_spec = tower_argument_spec()
argument_spec.update(dict(
username=dict(required=True),
first_name=dict(),
last_name=dict(),
password=dict(no_log=True),
email=dict(required=True),
superuser=dict(type='bool', default=False),
auditor=dict(type='bool', default=False),
state=dict(choices=['present', 'absent'], default='present'),
))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_TOWER_CLI:
module.fail_json(msg='ansible-tower-cli required for this module')
username = module.params.get('username')
first_name = module.params.get('first_name')
last_name = module.params.get('last_name')
password = module.params.get('password')
email = module.params.get('email')
superuser = module.params.get('superuser')
auditor = module.params.get('auditor')
state = module.params.get('state')
json_output = {'username': username, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
user = tower_cli.get_resource('user')
try:
if state == 'present':
result = user.modify(username=username, first_name=first_name, last_name=last_name,
email=email, password=password, is_superuser=superuser,
is_auditor=auditor, create_on_missing=True)
json_output['id'] = result['id']
elif state == 'absent':
result = user.delete(username=username)
except (exc.ConnectionError, exc.BadRequest) as excinfo:
module.fail_json(msg='Failed to update the user: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
"Base definitions for transports based on IP networking."
import os
import socket
import logging
from thespian.actors import ActorAddress
_localAddresses = set(['', '127.0.0.1', 'localhost', None])
def _probeAddrInfo(usage, useAddr, af, socktype, proto):
try:
return socket.getaddrinfo(useAddr, 0, af, socktype, proto, usage)
except Exception as ex:
logging.warning('Unable to get address info'
' for address %s (%s, %s, %s, %s): %s %s',
useAddr, af, socktype, proto, usage, type(ex), ex)
return [None]
def getLocalAddresses():
# Use a quick UDP socket to get this system's INET addresses
af = socket.AF_INET
socktype = socket.SOCK_DGRAM
proto = socket.IPPROTO_UDP
try:
hostname = socket.gethostname()
except Exception:
logging.warning('Unable to determine hostname')
hostname = None
try:
fqdn = socket.getfqdn()
except Exception:
logging.warning('Unable to determine fqdn')
fqdn = None
return set(rslt[4][0]
for usage in [0, socket.AI_PASSIVE]
for useAddr in [None, hostname, fqdn]
for rslt in _probeAddrInfo(usage, useAddr, af, socktype, proto)
if rslt).union(_localAddresses)
class ThisSystem(object):
def __init__(self):
self._myAddresses = getLocalAddresses()
userspec = os.getenv('THESPIAN_BASE_IPADDR', None)
if userspec:
self.add_local_addr(userspec)
def cmpIP2Tuple(self, t1, t2):
"""Function to compare two IP 2-tuple addresses. Direct equality is
easiest, but there are several additional equalities for the
first element: '', '0.0.0.0', '127.0.0.1', any localIP address.
Also, a port of 0 or None should match any other port.
"""
if t1 == t2:
return True # easiest
# Start by comparing ports, and if they are a match, check all
# possible addresses.
return (t1[1] == t2[1] or
t1[1] in [None, 0] or
t2[1] in [None, 0]) and \
self.isSameSystem(t1, t2)
def isSameSystem(self, t1, t2):
"""Function to compare two IP 2-tuple addresses ignoring ports to see
if they exist on the same system. Direct equality is
easiest, but there are several additional equalities for
        the local system: '', '0.0.0.0', '127.0.0.1', any localIP
address.
"""
if t1[0] == t2[0]:
return True
# The local system has several alternative references: if both
# addresses refer to the local system with one of the
# references then they are equal.
localIDs = self._myAddresses
return t1[0] in localIDs and t2[0] in localIDs
def add_local_addr(self, newaddr):
if newaddr not in self._myAddresses:
self._myAddresses.add(newaddr)
def isLocalAddr(self, addr):
return addr in self._myAddresses
@staticmethod
def _isLocalReference(addr): return addr in _localAddresses
thisSystem = ThisSystem()
class IPActorAddress(object):
def __init__(self, af, socktype, proto, baseaddr, port, external=False):
"""If external is "truthy", this should be an address that is reachable
from external nodes. If external is false, this is usually
an address that is going to be listened on locally. For
example, "0.0.0.0" can be used for a non-external
listen-any address, but cannot be used for sending messages.
A "truthy" value of external can be an external address to
try. Using the address of the Convention Leader (if any)
is recommended to ensure that the address chosen is
        appropriate for the network supporting the Convention. By
        default, a well-known public address (8.8.8.8, Google's public
        DNS) is used as the probe target.
"""
self.af = af
self.socktype = socktype
self.proto = proto
if baseaddr and external and baseaddr == '0.0.0.0':
baseaddr = None
if baseaddr == '':
baseaddr = None if external else '127.0.0.1'
base2 = os.getenv('THESPIAN_BASE_IPADDR', None)
if base2:
baseaddr = base2
if external and not baseaddr:
# Trick to get the "public" IP address... doesn't work so
# well if there are multiple routes, or if the public site
# is not accessible. (needs work)
remoteAddr = (
external
if isinstance(external, tuple)
else ((external, 80)
if isinstance(external, str)
else (external.bindname
if isinstance(external, IPActorAddress)
else (external.addressDetails.sockname
if (isinstance(external, ActorAddress) and
isinstance(external.addressDetails,
IPActorAddress))
else ('8.8.8.8', 80)))))
if thisSystem._isLocalReference(remoteAddr[0]):
remoteAddr = ('8.8.8.8', remoteAddr[1])
try:
# Use a UDP socket: no actual connection is made
s = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP)
try:
s.connect(remoteAddr)
baseaddr = s.getsockname()[0]
finally:
s.close()
except TypeError:
# Probably specified the Admin Port as a string...
print('Error connecting to %s' % (str(remoteAddr)))
import traceback
traceback.print_exc()
except OSError as er:
if er.errno == 101 \
and (not isinstance(external, tuple) or not external[0]):
# Network unreachable, ENETUNREACH, but no specific
# network requested, so run local-only with loopback
# addresses.
baseaddr = '127.0.0.1'
except socket.error as er:
if er.errno == 101 \
and (not isinstance(external, tuple) or not external[0]):
# Network unreachable, ENETUNREACH, but no specific
# network requested, so run local-only with loopback
# addresses.
baseaddr = '127.0.0.1'
except Exception:
pass
if not baseaddr:
raise RuntimeError('Unable to determine valid external socket address.')
thisSystem.add_local_addr(baseaddr)
res = socket.getaddrinfo(baseaddr, port, af, socktype, proto,
socket.AI_PASSIVE
if baseaddr is None and not external else 0)
af, socktype, proto, canonname, sa = res[0]
self.sockname = sa
self.bindname = ('', sa[1]) if external else sa
def __eq__(self, o):
return self.af == o.af and self.socktype == o.socktype and \
self.proto == o.proto and \
thisSystem.cmpIP2Tuple(self.sockname, o.sockname)
def __ne__(self, o):
return not self.__eq__(o)
def __hash__(self):
return hash((self.socketArgs, self.connectArgs))
def isLocalAddr(self):
return thisSystem.isLocalAddr(self.sockname[0])
def __str__(self):
if not hasattr(self, '_str_form'):
self._str_form = ''.join(['(',
self._str_kind(),
'|',
self._str_aps(),
')'])
return self._str_form
def __getstate__(self):
# Removed the cached str() form when pickling so that it is
# regenerated on the (possibly) remote end.
if not hasattr(self, '_str_form'):
return self.__dict__
odict = self.__dict__.copy()
del odict['_str_form']
return odict
def _str_kind(self):
# n.b. ignores self.socktype of SOCK_STREAM or SOCK_DGRAM
if self.proto == socket.IPPROTO_TCP:
return ('TCP' if self.af == socket.AF_INET
else ('TCP6' if self.af == socket.AF_INET6
else 'TCP?'))
        return 'UDP' if self.proto == socket.IPPROTO_UDP \
else '%s.%s.%s' % (self.af, self.socktype, self.proto)
def _str_addr(self):
return '' if self.isLocalAddr() else self.sockname[0]
def _str_port(self):
return ':%d' % self.sockname[1]
def _str_suffix(self):
return ''
def _str_aps(self):
return ''.join([self._str_addr(),
self._str_port(),
self._str_suffix()])
@property
def socketArgs(self): return (self.af, self.socktype, self.proto)
@property
def bindArgs(self): return self.bindname,
@property
def connectArgs(self): return self.sockname,
def isSameSystem(self, other_addr):
if isinstance(other_addr, ActorAddress):
other_addr = other_addr.addressDetails
do_cmp = lambda A: thisSystem.isSameSystem(self.sockname, A)
if isinstance(other_addr, IPActorAddress):
return do_cmp(other_addr.sockname)
if isinstance(other_addr, socket.socket):
return do_cmp(other_addr.getsockname())
return do_cmp(other_addr)
class UDPv4ActorAddress(IPActorAddress):
def __init__(self, initialIPAddr=None, initialIPPort=0, external=False):
super(UDPv4ActorAddress, self).__init__(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
initialIPAddr,
initialIPPort,
external)
def _str_kind(self):
return 'UDP'
class TCPv4ActorAddress(IPActorAddress):
def __init__(self, initialIPAddr=None, initialIPPort=0, external=False):
super(TCPv4ActorAddress, self).__init__(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
initialIPAddr,
initialIPPort,
external)
def _str_kind(self):
return 'T'
class TCPv6ActorAddress(IPActorAddress):
def __init__(self, initialIPAddr=None, initialIPPort=0, external=False):
super(TCPv6ActorAddress, self).__init__(socket.AF_INET6,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
initialIPAddr,
initialIPPort,
external)
def _str_kind(self):
return 'TCP6'
def _str_addr(self):
return '[%s]' % self.sockname[0]
def _str_port(self):
return ':%d.%d.%d' % self.sockname[1:]
class RoutedTCPv4ActorAddress(TCPv4ActorAddress):
def __init__(self, anIPAddr, anIPPort, adminAddr, txOnly, external=False):
super(RoutedTCPv4ActorAddress, self).__init__(anIPAddr, anIPPort,
external=external)
self.routing = [None, adminAddr] if txOnly else [adminAddr]
def _str_suffix(self):
return '~' + '~'.join(['A' if A is None else
(A.addressDetails._str_aps()
if hasattr(A.addressDetails, '_str_aps')
else str(A))
for A in self.routing])
class TXOnlyAdminTCPv4ActorAddress(TCPv4ActorAddress):
# Only assigned to the Admin; allows remote admins to know to wait
# for a connection instead of trying to initiate one.
def __init__(self, anIPAddr, anIPPort, external):
super(TXOnlyAdminTCPv4ActorAddress, self).__init__(anIPAddr, anIPPort,
external=external)
self.routing = [None] # remotes must communicate via their local admin
def _str_suffix(self):
return '>'
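# Illustrative address string forms produced by __str__/_str_aps above
# (hosts and ports are made up):
#   TCPv4ActorAddress            -> "(T|:1900)" for a local address,
#                                   "(T|10.1.2.3:1900)" for a remote one
#   UDPv4ActorAddress            -> "(UDP|:1029)"
#   RoutedTCPv4ActorAddress      -> as TCPv4 plus a "~<admin aps>" routing suffix
#   TXOnlyAdminTCPv4ActorAddress -> as TCPv4 with a trailing ">"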
|
#!/usr/bin/env python3
# Copyright 2020 Salesforce Research (Aadyot Bhatnagar)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
from distutils.util import strtobool
import logging
import kaldiio
import tqdm
from speech_datasets.transform import Transformation
from speech_datasets.utils.io_utils import get_commandline_args, consolidate_utt_info
from speech_datasets.utils.types import str_or_none, humanfriendly_or_none
from speech_datasets.utils.writers import file_writer_helper
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="read .wav files & dump them (optionally transformed "
                    "into features) to the specified archive format")
parser.add_argument("--feature-config", default=None, type=str_or_none,
help="YAML file for feature extraction (if extracting any features)")
parser.add_argument("--text-file", default=None,
help="file mapping utterance ID to transcript")
parser.add_argument("--utt2spk-file", default=None,
help="file mapping utterance ID to speaker ID")
parser.add_argument("--archive-format", type=str, default="hdf5", choices=["mat", "hdf5"],
help="Specify the file format for output. \"mat\" is the matrix format in kaldi")
parser.add_argument("--sample-frequency", type=humanfriendly_or_none, default=None,
help="If the sampling rate is specified, resample the input.")
parser.add_argument("--compress", type=strtobool, default=False, help="Save in compressed format")
parser.add_argument("--compression-method", type=int, default=2,
help="Specify the method(if mat) or " "gzip-level(if hdf5)")
parser.add_argument("--verbose", "-V", default=0, type=int, help="Verbose option")
parser.add_argument("--segments", type=str,
help="segments-file format: each line is either"
"<segment-id> <recording-id> <start-time> <end-time>"
"e.g. call-861225-A-0050-0065 call-861225-A 5.0 6.5")
parser.add_argument("rspecifier", type=str, help="WAV scp file")
parser.add_argument("wspecifier", type=str, help="Write specifier")
return parser.parse_args()
def main():
args = parse_args()
logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
if args.verbose > 0:
logging.basicConfig(level=logging.INFO, format=logfmt)
else:
logging.basicConfig(level=logging.WARN, format=logfmt)
logger.info(get_commandline_args())
utt_text_speaker = consolidate_utt_info(
scp=None, text=args.text_file, utt2spk=args.utt2spk_file)
with kaldiio.ReadHelper(
args.rspecifier, segments=args.segments
) as reader, file_writer_helper(
args.wspecifier,
filetype=args.archive_format,
compress=args.compress,
compression_method=args.compression_method,
sample_frequency=args.sample_frequency,
transform=Transformation(args.feature_config)
) as writer:
for utt_id, (rate, wave) in tqdm.tqdm(reader, miniters=100, maxinterval=30):
utt_dict = {"x": wave, "rate": rate}
utt_dict.update(utt_text_speaker.get(utt_id, {}))
try:
writer[utt_id] = utt_dict
except Exception as e:
logger.warning(
f"Failed to process utterance {utt_id} with exception:\n{str(e)}")
continue
if __name__ == "__main__":
main()
|
#!/usr/bin/python3
import os
def main():
os.system('rm dist/* -rf')
os.system('python3 setup.py sdist')
os.system('python3 setup.py bdist_wheel --universal')
os.system('twine upload dist/*')
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
#
# tcpv4tracer Trace TCP connections.
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: tcpv4tracer [-h] [-v] [-p PID] [-N NETNS]
#
# You should generally try to avoid writing long scripts that measure multiple
# functions and walk multiple kernel structures, as they will be a burden to
# maintain as the kernel changes.
# The following code should be replaced, and simplified, when static TCP probes
# exist.
#
# Copyright 2017 Kinvolk GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License")
from bcc import BPF
import argparse as ap
from socket import inet_ntop, AF_INET, AF_INET6
from struct import pack
bpf_text = """
#define KBUILD_MODNAME "foo"
#include <uapi/linux/ptrace.h>
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wtautological-compare"
#include <linux/skbuff.h>
#pragma clang diagnostic pop
#include <net/ip.h>
#include <net/tcp.h>
#include <bcc/proto.h>
#define DATA_BUFF_SIZE 128
struct data_t {
char payload[DATA_BUFF_SIZE];
unsigned int size;
};
BPF_PERF_OUTPUT(tcp_payload);
static struct tcphdr *skb_to_tcphdr(const struct sk_buff *skb)
{
// unstable API. verify logic in tcp_hdr() -> skb_transport_header().
return (struct tcphdr *)(skb->head + skb->transport_header);
}
static inline struct iphdr *skb_to_iphdr(const struct sk_buff *skb)
{
// unstable API. verify logic in ip_hdr() -> skb_network_header().
return (struct iphdr *)(skb->head + skb->network_header);
}
static inline unsigned char *payload_pointer(const struct tcphdr *tcp, const u8 offset) {
return (unsigned char *)tcp + (offset * 4);
}
static inline unsigned char *tail_pointer(const struct sk_buff *skb) {
return skb->head + skb->tail;
}
int tcp_sniff(struct pt_regs *ctx, struct sk_buff *skb) {
u8 offset = 5;
struct tcphdr *tcp = skb_to_tcphdr(skb);
unsigned char *payload = NULL;
struct data_t data;
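    // Byte 12 of the TCP header carries the 4-bit "data offset" (header length
    // in 32-bit words) in its high nibble; reading that byte and shifting right
    // by 4 below locates the start of the TCP payload (tcp + offset * 4).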
if (bpf_probe_read(&offset, 1, ((u_int8_t *)tcp) + 12) != 0)
return 0;
offset = offset >> 4;
payload = payload_pointer(tcp, offset);
data.size = tail_pointer(skb) - payload;
if (data.size < 1)
return 0;
bpf_probe_read(&data.payload, DATA_BUFF_SIZE, payload);
tcp_payload.perf_submit(ctx, &data, sizeof(struct data_t));
return 0;
}
"""
def print_tcp_payload(cpu, data, size):
event = b["tcp_payload"].event(data)
print(event.size, event.payload)
# initialize BPF
b = BPF(text=bpf_text)
b.attach_kprobe(event="tcp_v4_rcv", fn_name="tcp_sniff")
print("Tracing TCP payload. Ctrl-C to end.")
b["tcp_payload"].open_perf_buffer(print_tcp_payload)
while True:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
|
from flask import *
import requests
import sqlite3
import random
import json
## Configuration
api_url1 = 'http://127.0.0.1:5700/send_msg'
api_url2 = "http://127.0.0.1:5700/delete_msg"
qq_group=["723174283"]
## Initialization (one table per managed group)
for a in qq_group:
db = sqlite3.connect("qq.db")
cur=db.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS qq"+a+"(qq_id TEXT,confirm TEXT)")
db.commit()
cur.close()
db.close()
## Database: insert
def inc(db_name = "", qq_id = "",con_id = ""):
db = sqlite3.connect("qq.db")
cur=db.cursor()
cur.execute("INSERT INTO qq"+db_name+" values(?,?)",(str(qq_id),str(con_id)))
db.commit()
cur.close()
db.close()
return ''
## Database: delete
def delqq(db_name = "", qq_id = ""):
db = sqlite3.connect("qq.db")
cur=db.cursor()
n=cur.execute("DELETE FROM qq"+db_name+" WHERE qq_id="+qq_id+"")
db.commit()
cur.close()
db.close()
return ''
## Database: query
def check(db_name = "", qq_id = ""):
db = sqlite3.connect("qq.db")
cur=db.cursor()
cur.execute("SELECT * FROM qq"+db_name+" where qq_id="+qq_id+"")
result = cur.fetchone()
cur.close()
db.close()
return result
## Recall (delete) a message
def del_msg(msg_id = 0):
msg = {
"message_id":msg_id
}
msg_re = requests.post(api_url2,data=msg)
print(msg_re)
return ''
## Send a group message
def group_msg(group_id = 0 , message = ""):
msg = {
'group_id':group_id,
'message':message,
'auto_escape':False
}
msg_re = requests.post(api_url1,data=msg)
print(msg_re)
return ''
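## The handler below only reads a handful of fields from the incoming event
## JSON. A rough sketch of the two shapes it expects (field names come from
## the code, values are made up):
##   {"post_type": "notice", "notice_type": "group_increase",
##    "group_id": 723174283, "user_id": 10001}
##   {"post_type": "message", "group_id": 723174283, "user_id": 10001,
##    "message": "abcdefgh", "message_id": 42}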
## Main program
bot_server = Flask(__name__)
@bot_server.route('/',methods=['POST'])
def server():
data = request.get_data().decode('utf-8')
data = json.loads(data)
print(data)
## Group-join notification
if data["post_type"] == "notice" and data["notice_type"] == "group_increase":
        con_id = ''.join(random.sample('zyxwvutsrqponmlkjihgfedcba', 8))
inc(str(data["group_id"]),str(data["user_id"]),str(con_id))
group_msg(data["group_id"],"请在群内发送以下字符串\n"+str(con_id)+"\n然后您将可以在本群发言")
if data["post_type"] == "message":
if str(data["group_id"]) in qq_group:
result = check(str(data["group_id"]),str(data["user_id"]))
if result:
if result[1] in data["message"]:
group_msg(data["group_id"],"恭喜您通过验证!!!")
delqq(str(data["group_id"]), str(data["user_id"]))
else:
del_msg(data["message_id"])
group_msg(data["group_id"],"请完成验证")
return ''
if __name__ == '__main__':
bot_server.run(host="127.0.0.1",port=5701,debug=True)
|
# -*- coding: utf-8 -*-
from django.contrib import admin, messages
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from civil.apps.search.models import SavedSearch
#==============================================================================
def save_search_action(description=_("Save search")):
    """
    Build an admin action that saves the admin's current search (queryset) as a SavedSearch.
    """
def save_search(modeladmin, request, queryset):
SavedSearch.from_action(request, queryset)
messages.success(request, _('Successfully saved search'))
return HttpResponseRedirect(request.get_full_path())
save_search.short_description = description
return save_search
|
# -*- coding: utf-8 -*-
from bot.dfs.bridge.data import Data
from gevent import monkey
monkey.patch_all()
import logging.config
from datetime import datetime
from gevent import spawn, sleep
from bot.dfs.bridge.workers.base_worker import BaseWorker
from bot.dfs.bridge.utils import business_date_checker, generate_doc_id
logger = logging.getLogger(__name__)
class RequestForReference(BaseWorker):
""" Edr API XmlData Bridge """
def __init__(self, reference_queue, request_to_sfs, request_db, services_not_available, sleep_change_value,
delay=15):
super(RequestForReference, self).__init__(services_not_available)
self.start_time = datetime.now()
self.delay = delay
self.request_to_sfs = request_to_sfs
self.request_db = request_db
# init queues for workers
self.reference_queue = reference_queue
# blockers
self.sleep_change_value = sleep_change_value
def sfs_checker(self):
"""Get request ids from redis, check date, check quantity of documents"""
while not self.exit:
self.services_not_available.wait()
if business_date_checker():
try:
request_ids = self.request_db.get_pending_requests()
logger.info(u"got pending requests: {}".format(request_ids))
except Exception as e:
logger.warning(u'Fail to get pending requests. Message {}'.format(e.message))
else:
self.check_incoming_correspondence(request_ids)
sleep(15)
def check_incoming_correspondence(self, request_ids):
for request_id, request_data in request_ids.items():
code = request_data['code']
ca_name = ''
try:
cert = self.request_to_sfs.sfs_get_certificate_request(ca_name)
except Exception as e:
logger.warning(u'Fail to get certificate. Message {}'.format(e.message))
sleep()
else:
try:
quantity_of_docs = self.request_to_sfs.sfs_check_request(code)
except Exception as e:
logger.warning(
u'Fail to check for incoming correspondence. Message {}'.format(e.message))
sleep()
else:
if int(quantity_of_docs) != 0:
self.sfs_receiver(request_id, code, ca_name, cert)
def sfs_receiver(self, request_id, code, ca_name, cert):
"""Get documents from SFS, put request id with received documents to queue"""
try:
received_docs = self.request_to_sfs.sfs_receive_request(code, ca_name, cert)
except Exception as e:
logger.warning(u'Fail to check for incoming correspondence. Message {}'.format(e.message))
sleep()
else:
try:
logger.info('Put request_id {} to process...'.format(request_id))
all_the_data = self.request_db.get_tenders_of_request(request_id)
yamled_data = {"meta": {"id": "123"}} # TODO: placeholder; presume it will contain stuff needed
for data in all_the_data:
data.file_content['meta'].update(yamled_data['meta'])
self.reference_queue.put((yamled_data, all_the_data))
except Exception as e:
logger.exception(u"Message: {}".format(e.message))
else:
logger.info(
u'Received docs with request_id {} is already in process or was processed.'.format(request_id))
def _start_jobs(self):
return {'sfs_checker': spawn(self.sfs_checker)}
|
import datetime
import logging
from osf.models import AbstractNode
from api.caching.tasks import update_storage_usage_cache
from django.core.management.base import BaseCommand
from django.utils import timezone
from framework.celery_tasks import app as celery_app
from django.db import transaction
logger = logging.getLogger(__name__)
DAYS = 1
@celery_app.task(name='management.commands.update_storage_usage')
def update_storage_usage(dry_run=False, days=DAYS):
with transaction.atomic():
modified_limit = timezone.now() - timezone.timedelta(days=days)
recently_modified = AbstractNode.objects.filter(modified__gt=modified_limit)
for modified_node in recently_modified:
file_op_occurred = modified_node.logs.filter(action__contains='file', created__gt=modified_limit).exists()
if not modified_node.is_quickfiles and file_op_occurred:
update_storage_usage_cache(modified_node.id, modified_node._id)
if dry_run:
raise RuntimeError('Dry run -- Transaction rolled back')
class Command(BaseCommand):
help = '''Updates the storage usage for all nodes modified in the last day'''
def add_arguments(self, parser):
parser.add_argument(
'--dry_run',
type=bool,
default=False,
help='Run script but do not commit',
)
parser.add_argument(
'--days',
type=int,
default=DAYS,
help='How many days to backfill',
)
def handle(self, *args, **options):
script_start_time = datetime.datetime.now()
logger.info('Script started time: {}'.format(script_start_time))
logger.debug(options)
dry_run = options['dry_run']
days = options['days']
if dry_run:
logger.info('DRY RUN')
update_storage_usage(dry_run, days)
script_finish_time = datetime.datetime.now()
logger.info('Script finished time: {}'.format(script_finish_time))
logger.info('Run time {}'.format(script_finish_time - script_start_time))
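# Invocation sketch (argument names as declared in add_arguments above):
#   python manage.py update_storage_usage --days 3
# Note that --dry_run is parsed with type=bool, so any non-empty string
# (including "False") is treated as True; only pass it when a rolled-back
# dry run is actually wanted.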
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as fcnal
from sklearn.pipeline import Pipeline
from .metrics import eval_target_model
# Determine device to run network on (runs on gpu if available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def label_to_onehot(labels, num_classes=10):
""" Converts label into a vector.
Args:
labels (int): Class label to convert to tensor.
num_classes (int): Number of classes for the model.
Returns:
(torch.tensor): Torch tensor with 0's everywhere except for 1 in
correct class.
"""
one_hot = torch.eye(num_classes)
return one_hot[labels.long()]
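# Example (small sketch): label_to_onehot(torch.tensor([2, 0]), num_classes=3)
# returns tensor([[0., 0., 1.],
#                 [1., 0., 0.]])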
def train(model=None, data_loader=None, test_loader=None,
optimizer=None, criterion=None, n_epochs=0,
classes=None, verbose=False):
"""
Function to train a model provided
specified train/test sets and associated
training parameters.
Parameters
----------
model : Module
PyTorch conforming nn.Module function
data_loader : DataLoader
PyTorch dataloader function
test_loader : DataLoader
PyTorch dataloader function
optimizer : opt object
PyTorch conforming optimizer function
criterion : loss object
PyTorch conforming loss function
n_epochs : int
number of training epochs
classes : list
list of classes
verbose : boolean
flag for verbose print statements
"""
losses = []
for epoch in range(n_epochs):
model.train()
for i, batch in enumerate(data_loader):
data, labels = batch
data, labels = data.to(device), labels.to(device)
optimizer.zero_grad()
outputs = model(data)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
losses.append(loss.item())
if verbose:
print("[{}/{}][{}/{}] loss = {}"
.format(epoch, n_epochs, i,
len(data_loader), loss.item()))
# evaluate performance on testset at the end of each epoch
print("[{}/{}]".format(epoch, n_epochs))
print("Training:")
train_acc = eval_target_model(model, data_loader, classes=classes)
print("Test:")
test_acc = eval_target_model(model, test_loader, classes=classes)
# plt.plot(losses)
# plt.show()
return train_acc, test_acc
def train_attacker(attack_model=None, shadow_model=None,
shadow_train=None, shadow_out=None,
optimizer=None, criterion=None, n_epochs=0, k=0,
verbose=False):
"""
Trains attack model (classifies a sample as in or
out of training set) using shadow model outputs
(probabilities for sample class predictions).
The type of shadow model used can vary.
Parameters
----------
attack_model : Module
PyTorch conforming nn.Module function
shadow_model : Module
PyTorch conforming nn.Module function
shadow_train : DataLoader
PyTorch dataloader function
shadow_out : DataLoader
PyTorch dataloader function
optimizer : opt object
PyTorch conforming optimizer function
criterion : loss object
PyTorch conforming loss function
n_epochs : int
number of training epochs
k : int
Value at which to end using train data list
"""
in_predicts = []
out_predicts = []
if type(shadow_model) is not Pipeline:
shadow_model = shadow_model
shadow_model.eval()
for epoch in range(n_epochs):
total = 0
correct = 0
train_top = np.empty((0, 2))
out_top = np.empty((0, 2))
for i, ((train_data, train_lbls),
(out_data, out_lbls)) in enumerate(zip(shadow_train,
shadow_out)):
# out_data = torch.randn(out_data.shape)
mini_batch_size = train_data.shape[0]
out_mini_batch_size = out_data.shape[0]
if mini_batch_size != out_mini_batch_size:
continue
'''if mini_batch_size != out_mini_batch_size:
break'''
if type(shadow_model) is not Pipeline:
train_data = train_data.to(device).detach()
out_data = out_data.to(device).detach()
train_posteriors = fcnal.softmax(shadow_model(train_data),
dim=1)
out_posteriors = fcnal.softmax(shadow_model(out_data),
dim=1)
else:
traininputs = train_data.view(train_data.shape[0], -1)
outinputs = out_data.view(out_data.shape[0], -1)
in_preds = shadow_model.predict_proba(traininputs)
train_posteriors = torch.from_numpy(in_preds).float()
# for p in in_preds:
# in_predicts.append(p.max())
out_preds = shadow_model.predict_proba(outinputs)
out_posteriors = torch.from_numpy(out_preds).float()
# for p in out_preds:
# out_predicts.append(p.max())
train_sort, _ = torch.sort(train_posteriors, descending=True)
train_top_k = train_sort[:, :k].clone().to(device)
for p in train_top_k:
in_predicts.append((p.max()).item())
out_sort, _ = torch.sort(out_posteriors, descending=True)
out_top_k = out_sort[:, :k].clone().to(device)
for p in out_top_k:
out_predicts.append((p.max()).item())
train_top = np.vstack((train_top,
train_top_k[:, :2].cpu().detach().numpy()))
out_top = np.vstack((out_top,
out_top_k[:, :2].cpu().detach().numpy()))
train_lbl = torch.ones(mini_batch_size).to(device)
out_lbl = torch.zeros(out_mini_batch_size).to(device)
optimizer.zero_grad()
train_predictions = torch.squeeze(attack_model(train_top_k))
out_predictions = torch.squeeze(attack_model(out_top_k))
loss_train = criterion(train_predictions, train_lbl)
loss_out = criterion(out_predictions, out_lbl)
loss = (loss_train + loss_out) / 2
if type(shadow_model) is not Pipeline:
loss.backward()
optimizer.step()
correct += (train_predictions >= 0.5).sum().item()
correct += (out_predictions < 0.5).sum().item()
total += train_predictions.size(0) + out_predictions.size(0)
if verbose:
print("[{}/{}][{}/{}] loss = {:.2f}, accuracy = {:.2f}"
.format(epoch, n_epochs, i, len(shadow_train),
loss.item(), 100 * correct / total))
# Plot distributions for target predictions
# in training set and out of training set
"""
fig, ax = plt.subplots(2,1)
plt.subplot(2,1,1)
plt.hist(in_predicts, bins='auto')
plt.title('In')
plt.subplot(2,1,2)
plt.hist(out_predicts, bins='auto')
plt.title('Out')
"""
'''
plt.scatter(out_top.T[0,:], out_top.T[1,:], c='b')
plt.scatter(train_top.T[0,:], train_top.T[1,:], c='r')
plt.show()
'''
class softCrossEntropy(torch.nn.Module):
def __init__(self, alpha=0.95):
"""
:param alpha: Strength (0-1) of influence from soft labels in training
"""
super(softCrossEntropy, self).__init__()
self.alpha = alpha
return
def forward(self, inputs, target, true_labels):
"""
:param inputs: predictions
:param target: target (soft) labels
:param true_labels: true (hard) labels
:return: loss
"""
KD_loss = self.alpha
KD_loss *= nn.KLDivLoss(size_average=False)(
fcnal.log_softmax(inputs, dim=1),
fcnal.softmax(target, dim=1)
)
KD_loss += (1-self.alpha)*fcnal.cross_entropy(inputs, true_labels)
return KD_loss
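# Usage sketch for softCrossEntropy (variable names here are illustrative):
#   criterion = softCrossEntropy(alpha=0.95)
#   loss = criterion(student_logits, teacher_logits, hard_labels)
# distill_training below passes the learner outputs, the teacher's soft
# labels, and the true labels in exactly this order.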
def distill_training(teacher=None, learner=None, data_loader=None,
test_loader=None, optimizer=None,
criterion=None, n_epochs=0, verbose=False):
"""
:param teacher: network to provide soft labels in training
:param learner: network to distill knowledge into
:param data_loader: data loader for training data set
    :param test_loader: data loader for validation data
:param optimizer: optimizer for training
:param criterion: objective function, should allow for soft labels.
We suggest softCrossEntropy
:param n_epochs: epochs for training
:param verbose: verbose == True will print loss at each batch
    :return: (train_acc, test_acc); the learner model is trained in place
"""
losses = []
for epoch in range(n_epochs):
teacher.eval()
learner.train()
for i, batch in enumerate(data_loader):
with torch.set_grad_enabled(False):
data, labels = batch
data, labels = data.to(device), labels.to(device)
soft_lables = teacher(data)
with torch.set_grad_enabled(True):
optimizer.zero_grad()
outputs = learner(data)
loss = criterion(outputs, soft_lables, labels)
loss.backward()
optimizer.step()
losses.append(loss.item())
if verbose:
print("[{}/{}][{}/{}] loss = {}"
.format(epoch, n_epochs, i,
len(data_loader), loss.item()))
# evaluate performance on testset at the end of each epoch
print("[{}/{}]".format(epoch, n_epochs))
print("Training:")
train_acc = eval_target_model(learner, data_loader, classes=None)
print("Testing:")
test_acc = eval_target_model(learner, test_loader, classes=None)
return train_acc, test_acc
def inf_adv_train(target_model=None, inf_model=None, train_set=None,
test_set=None, inf_in_set=None, target_optim=None,
target_criterion=None, inf_optim=None, inf_criterion=None,
n_epochs=0, privacy_theta=0, verbose=False):
"""Method to run adversarial training during membership inference
Args:
target_model (nn.Module): Target classifier to adversarially train.
inf_model (nn.Module): Adversary attacking the target during training.
        train_set (DataLoader): DataLoader pointing to the classifier training
set (split[0]).
        test_set (DataLoader): DataLoader pointing to the validation set. Also
used as out-of-set for the inference (split[1]).
inf_in_set (DataLoader): Data loader pointing to a subset of the
train_set used for inference in-set (split[4])
target_optim (torch.optim): Target optimizer.
target_criterion (nn.Module): Target loss criterion.
inf_optim (torch.optim): Adversary optimizer.
inf_criterion (nn.Module): Adversary loss criterion.
privacy_theta (float): Regularization constant. Sets relative
importance of classification loss vs. adversarial loss.
        verbose (bool): If True will print the loss at each step in training.
Returns:
Example:
Todos:
Include example.
"""
# inf_losses = []
# losses = []
inf_model.train()
target_model.train()
for epoch in range(n_epochs):
train_top = np.array([])
out_top = np.array([])
train_p = np.array([])
out_p = np.array([])
total_inference = 0
total_correct_inference = 0
for k_count, ((in_data, _), (out_data, _)) in enumerate(zip(inf_in_set,
test_set)):
# train inference network
in_data, out_data = in_data.to(device), out_data.to(device)
mini_batch_size = in_data.shape[0]
out_mini_batch_size = out_data.shape[0]
train_lbl = torch.ones(mini_batch_size).to(device)
out_lbl = torch.zeros(out_mini_batch_size).to(device)
train_posteriors = fcnal.softmax(target_model(in_data), dim=1)
out_posteriors = fcnal.softmax(target_model(out_data), dim=1)
train_sort, _ = torch.sort(train_posteriors, descending=True)
out_sort, _ = torch.sort(out_posteriors, descending=True)
t_p = train_sort[:, :4].cpu().detach().numpy().flatten()
o_p = out_sort[:, :4].cpu().detach().numpy().flatten()
train_p = np.concatenate((train_p, t_p))
out_p = np.concatenate((out_p, o_p))
train_top = np.concatenate((train_top,
train_sort[:, 0].cpu().
detach().numpy()))
out_top = np.concatenate((out_top,
out_sort[:, 0].cpu().detach().numpy()))
inf_optim.zero_grad()
train_inference = inf_model(train_posteriors,
label_to_onehot(train_lbl).to(device))
train_inference = torch.squeeze(train_inference)
#
out_inference = inf_model(out_posteriors,
label_to_onehot(out_lbl).to(device))
out_inference = torch.squeeze(out_inference)
#
total_inference += 2*mini_batch_size
total_correct_inference += torch.sum(train_inference > 0.5).item()
total_correct_inference += torch.sum(out_inference < 0.5).item()
loss_train = inf_criterion(train_inference, train_lbl)
loss_out = inf_criterion(out_inference, out_lbl)
loss = privacy_theta * (loss_train + loss_out)/2
loss.backward()
inf_optim.step()
# train classifiction network
train_imgs, train_lbls = iter(train_set).next()
train_imgs, train_lbls = train_imgs.to(device), train_lbls.to(device)
target_optim.zero_grad()
outputs = target_model(train_imgs)
train_posteriors = fcnal.softmax(outputs, dim=1)
loss_classification = target_criterion(outputs, train_lbls)
train_lbl = torch.ones(mini_batch_size).to(device)
train_inference = inf_model(train_posteriors,
label_to_onehot(train_lbls).to(device))
train_inference = torch.squeeze(train_inference)
loss_infer = inf_criterion(train_inference, train_lbl)
loss = loss_classification - privacy_theta * loss_infer
loss.backward()
target_optim.step()
if verbose:
print("[{}/{}] loss = {}"
.format(epoch, n_epochs, loss.item()))
|
import torch
from pytorch3d.structures import Meshes
from pytorch3d.renderer import look_at_view_transform, FoVPerspectiveCameras, RasterizationSettings, PointLights, \
MeshRenderer, MeshRasterizer, SoftPhongShader, TexturesVertex
from vedo.mesh import Mesh
from vedo.io import screenshot
from vedo.plotter import show, Plotter
import matplotlib.pyplot as plt
import time
from multiprocessing import Pool
from pathlib import Path
from Utils.mesh_utils import MeshCreator
def show_vedo_mesh_old(verts, faces, filling_factors):
start = time.time()
mesh = Mesh([verts, faces])
mesh.backColor('blue').lineColor('white').lineWidth(0)
# retrieve them as numpy arrays
# printc('points():\n', mesh.points(), c=3)
# printc('faces(): \n', mesh.faces(), c=3)
# show(mesh, labs, __doc__, viewup='z', axes=1)
colors = []
all_cells = mesh.faces()
for i in range(mesh.NCells()):
points = all_cells[i]
ff_sum = 0
for p in points:
ff_sum += filling_factors[p]
c = int((ff_sum / 3) * 200)
colors.append((c, 0, 0))
mesh.cellIndividualColors(colors)
# show(mesh, __doc__, viewup='z', interactive=False, camera={'pos': (-1, -1, 2)}) # isometric: 2 2 2
plotter = Plotter(size=(1024, 1024), interactive=False)
plotter += mesh
plotter += __doc__
plotter.show(viewup='z', camera={'pos': (-1, -1, 2)})
screenshot()
end = time.time()
print(f"Calculation took {end - start} seconds.")
class VedoMeshSaver:
""" First Attempt on parallelizing the texture calculation. Is currently slower than before :( """
def __init__(self, verts, faces, filling_factors):
mesh = Mesh([verts, faces])
self.filling_factors = filling_factors
self.verts = verts
self.faces = faces
self.all_cells = mesh.faces()
self.n_cells = mesh.NCells()
def calc_color(self, cell):
points = self.all_cells[cell]
ff_sum = 0
for p in points:
ff_sum += self.filling_factors[p]
c = int((ff_sum / 3) * 200)
return (c, 0, 0)
def show_vedo_mesh(self):
start = time.time()
# retrieve them as numpy arrays
# printc('points():\n', mesh.points(), c=3)
# printc('faces(): \n', mesh.faces(), c=3)
# show(mesh, labs, __doc__, viewup='z', axes=1)
with Pool(processes=8) as pool:
colors = pool.map(self.calc_color, range(self.n_cells))
mesh = Mesh([self.verts, self.faces])
mesh.backColor('blue').lineColor('white').lineWidth(0)
mesh.cellIndividualColors(colors)
show(mesh, __doc__, viewup='z', interactive=False, camera={'pos': (-1, -1, 2)}) # isometric: 2 2 2
screenshot()
end = time.time()
print(f"Calculation took {end - start} seconds.")
def save_p3d_mesh(verts, faces, filling_factors):
features = [(int(i * 255), 0, 0) for i in filling_factors]
features = torch.unsqueeze(torch.Tensor(features), 0)
if torch.cuda.is_available():
device = torch.device("cuda:0")
torch.cuda.set_device(device)
else:
device = torch.device("cpu")
texture = TexturesVertex(features)
    mesh = Meshes(torch.unsqueeze(torch.Tensor(verts), 0), torch.unsqueeze(torch.Tensor(faces), 0), texture).to(device)
# Initialize a camera.
# Rotate the object by increasing the elevation and azimuth angles
R, T = look_at_view_transform(dist=2.0, elev=-50, azim=-90)
cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
# Define the settings for rasterization and shading. Here we set the output image to be of size
# 512x512. As we are rendering images for visualization purposes only we will set faces_per_pixel=1
# and blur_radius=0.0. We also set bin_size and max_faces_per_bin to None which ensure that
# the faster coarse-to-fine rasterization method is used. Refer to rasterize_meshes.py for
# explanations of these parameters. Refer to docs/notes/renderer.md for an explanation of
# the difference between naive and coarse-to-fine rasterization.
raster_settings = RasterizationSettings(
image_size=1024,
blur_radius=0.0,
faces_per_pixel=1,
)
    # Place a point light in front of the object, along the -z direction it is facing.
lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])
# Create a phong renderer by composing a rasterizer and a shader. The textured phong shader will
# interpolate the texture uv coordinates for each vertex, sample from a texture image and
# apply the Phong lighting model
renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=cameras,
raster_settings=raster_settings
),
shader=SoftPhongShader(
device=device,
cameras=cameras,
lights=lights
)
)
img = renderer(mesh)
plt.figure(figsize=(10, 10))
    plt.imshow(img[0, ..., :3].cpu().numpy())
plt.show()
if __name__ == '__main__':
file = Path("/home/lukas/rtm/rtm_files_3d/2020-08-24_11-20-27_111_RESULT.erfh5")
from Pipeline.data_loader_mesh import DataLoaderMesh
sensor_verts_path = Path("/home/lukas/rtm/sensor_verts_3d_272_subsampled.dump")
dl = DataLoaderMesh(sensor_verts_path=sensor_verts_path)
data = dl.get_sensor_flowfront_mesh(file)
sample = data[150][1]
mc = MeshCreator(file)
verts, faces, _ = mc.get_mesh_components()
show_vedo_mesh_old(verts, faces, sample)
# save_p3d_mesh(verts, faces, sample)
pass
|
import numpy as np
from ultramodule import divstd_prime, crossco, cc_multiorde
import h5py
from tqdm import tqdm
from tqdm import tnrange
###################################################################################################
len_vared=10
vmr_min=5
vmr_max=12
segment=10.
mol="vo"
specdir="./rainbow/"
exotempdir="./spec/"
whichdata="kawa"
opt_tech="-fin"
red_mode="sysrem"
###################################################################################################
ccd_list_2breduced=["blue","red"]
h5f_techni = h5py.File("techni-"+str(whichdata)+str(opt_tech)+".info", 'r')
drvs=h5f_techni["Doppler shifts variation"][:]
output_tmp="rainbow-wasp33"+str(whichdata)+"all_"+str(mol)+"_segment-"+str(segment)+"pix_template"+str(opt_tech)+".h5"
h5f_temp = h5py.File(specdir+"template/"+output_tmp, 'r')
output_cc="rainbow-wasp33"+str(whichdata)+"all_"+str(mol)+"_segment-"+str(segment)+"pix_cc"+str(opt_tech)+".h5"
h5f_cc = h5py.File(specdir+"post_"+str(red_mode)+"/cc_result/"+str(output_cc), 'w')
h5f_reduced = h5py.File(specdir+"post_"+str(red_mode)+"/rainbow"+str(whichdata)+"_postSYSREM_fullmatrix.h5", 'r')
for ccd in ccd_list_2breduced:
if ccd=="blue":
len_order=19
else:
len_order=13
if red_mode=="svd":
redvar="sv"
elif red_mode=="sysrem":
redvar="systematics"
# h5f_reduced = h5py.File(specdir+"post_"+str(red_mode)+"/"+str(ccd)+str(whichdata)+"_postsvd_fullmatrix.h5", 'r')
for vmr in tqdm(range(vmr_min,vmr_max),ascii="True",desc=str(ccd)+" CCD"):
cc_map_full_SA=[]
for varreduc in tqdm(range(len_vared),ascii="True",desc="VMR= 10^-"+str(vmr)+" RSV"):
cc_svd_collect_SA=[] #Different SV are saved in this matrix
for order in tqdm (range(1,len_order),ascii="True",desc="Order"):
# yobs= h5f_reduced[str(ccd)+"-flux-order-"+str(order)+"sv-"+str(varreduc)][:]
yobs= h5f_reduced[str(ccd)+"-flux-order-"+str(order)+"sysrem"][:][varreduc]
std_frames=[]
std_pix=[]
for wvbin in range (len(yobs[1])):
std_pix.append(np.std(yobs[:,wvbin]))
for filenum in range (len(yobs)):
std_frames.append(np.std(yobs[filenum,:]))
std_frames=np.array(std_frames,dtype="float")
yobs=divstd_prime(yobs,std_pix,std_frames)
cc_order_SA=np.zeros((len(yobs),len(drvs)),dtype="float")
for numspec in range(len(yobs)):
cc_numspec=[]
for seg_num in range (int(segment)):
y_temp_order=h5f_temp[str(ccd)+"vmr-"+str(vmr)+"-order-"+str(order)+"-segment-"+str(seg_num)][:] #Loading template of vmr, v= -55., 355
low_lim=int(((seg_num)/segment)*len(yobs[0]))
up_lim=int(((seg_num+1.)/segment)*len(yobs[0]))
fluxsegment=yobs[numspec][low_lim:up_lim]
cc_each_seg=[]
for rv in range(len(drvs)):
cc_rv=crossco(fluxsegment,y_temp_order[rv])
cc_each_seg.append(cc_rv)
cc_numspec.append(np.array(cc_each_seg))
cc_numspec=np.array(cc_numspec)[~np.isnan(np.mean(cc_numspec,axis=1))]
cc_numspec_SA= np.mean(cc_numspec,axis=0)
cc_order_SA[numspec]=cc_numspec_SA
cc_svd_collect_SA.append(cc_order_SA)
cc_map_full_SA.append(cc_svd_collect_SA)
        h5f_cc.create_dataset(str(ccd)+"_cc_vmr_"+str(vmr), data=cc_map_full_SA)  # Write the cross-correlation result for this VMR
# h5f_reduced.close()
h5f_reduced.close()
h5f_cc.close()
h5f_temp.close()
h5f_techni.close()
|
from os import listdir
from os.path import isfile, join, isdir
import numpy as np
import scipy.io as io
import torch
from torch.utils.data import Dataset, DataLoader, random_split
from torchvision import transforms
from PIL import Image
from pytorch_lightning.core.datamodule import LightningDataModule
import os
"""
2K filler images were chosen in equal proportions from the same scene categories (94-105 images per category).
In a single session, a participant would see a sequence of about 1000 images, of which about 157-158 were targets
randomly sampled from the 630 target images. These targets repeated a total of 3 times throughout the image
sequence, spaced 50-60 filler images apart. Filler images only occurred once throughout the entire image sequence.
image: http://figrim.mit.edu/Fillers.zip
annot: http://figrim.mit.edu/FIXATIONMAPS_fillers.zip
fixlc: http://figrim.mit.edu/FIXATIONLOCS_fillers.zip
dataset structure:
dataset/Fillers/
-FIXATIONLOCS_fillers/[airport_terminal, amusement_park, badlands, ..., tower]
-FIXATIONMAPS_fillers/[airport_terminal, amusement_park, badlands, ..., tower]
-Fillers/[airport_terminal, amusement_park, badlands, ..., tower]
"""
class FILLER(Dataset):
@staticmethod
def load_data(data_directory, error_list=[]):
images = []
for directory in sorted(os.listdir(data_directory)):
# ignore any hidden folder like .DS_Store
if directory[0] == ".":
continue
sub_directory = join(data_directory, directory)
images.extend(
sorted([join(sub_directory, f) for f in listdir(sub_directory) if
(isfile(join(sub_directory, f)) and f not in error_list)]))
return images
def __init__(self, args, mode="test"):
self.resize = args.resize
self.normalize = args.normalize
self.norm_mean = [float(i) for i in args.norm_mean.split('+')]
self.norm_std = [float(i) for i in args.norm_std.split('+')]
self.SLICING_INDEX = -65
# FIGRIM dataset contains some image inside Targets folder without any fixation map and loc
self.FILLER_DATASET_ERROR = ['zwompf2.edf', '.DS_Store']
data_root = args.data_root
# transform for image and annotation/fixation map, respectively
transform_list = {"image": [], "annotation": []}
if self.resize:
transform_list["image"].append(transforms.Resize((args.height, args.width)))
transform_list["annotation"].append(transforms.Resize((args.height, args.width), Image.NEAREST))
        # ToTensor must come before Normalize, since Normalize expects a tensor rather than a PIL image
        transform_list["image"].append(transforms.ToTensor())
        transform_list["annotation"].append(transforms.ToTensor())
        if self.normalize:
            transform_list["image"].append(transforms.Normalize(self.norm_mean, self.norm_std))
self.transform = {"image": transforms.Compose(transform_list["image"]),
"annotation": transforms.Compose(transform_list["annotation"])}
if mode == "test":
self.images = FILLER.load_data(join(data_root, "Fillers"), self.FILLER_DATASET_ERROR)
self.images = self.images[self.SLICING_INDEX:]
self.annotations = FILLER.load_data(join(data_root, "FIXATIONMAPS_fillers"), self.FILLER_DATASET_ERROR)
self.annotations = self.annotations[self.SLICING_INDEX:]
self.fixations = FILLER.load_data(join(data_root, "FIXATIONLOCS_fillers"), self.FILLER_DATASET_ERROR)
self.fixations = self.fixations[self.SLICING_INDEX:]
else:
self.images = FILLER.load_data(join(data_root, "Fillers"), self.FILLER_DATASET_ERROR)
self.images = self.images[:self.SLICING_INDEX]
self.annotations = FILLER.load_data(join(data_root, "FIXATIONMAPS_fillers"), self.FILLER_DATASET_ERROR)
self.annotations = self.annotations[:self.SLICING_INDEX]
self.fixations = FILLER.load_data(join(data_root, "FIXATIONLOCS_fillers"), self.FILLER_DATASET_ERROR)
self.fixations = self.fixations[:self.SLICING_INDEX]
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
img_path = self.images[idx]
annotation_path = self.annotations[idx]
fixation_path = self.fixations[idx]
image = Image.open(img_path)
annotation = Image.open(annotation_path)
fixation = io.loadmat(fixation_path)["fixLocs"]
fixation = Image.fromarray(fixation)
image = self.transform["image"](image)
# handling grayscale images
if image.shape[0] == 1:
image = torch.cat((image, image, image), dim=0)
annotation = self.transform["annotation"](annotation)
fixation = self.transform["annotation"](fixation)
sample = {'image': image, 'annotation': annotation, 'fixation': fixation}
return sample
class FILLERDataModule(LightningDataModule):
def prepare_data(self, *args, **kwargs):
pass
def __init__(self, args):
super().__init__()
self.args = args
self.SPLIT = 200
def setup(self, stage):
# split dataset
if stage == 'fit':
data_train = FILLER(self.args, 'train')
self.data_train, self.data_val = random_split(data_train, [len(data_train) - self.SPLIT, self.SPLIT])
if stage == 'test':
self.data_test = FILLER(self.args, 'test')
def train_dataloader(self):
data_train = DataLoader(self.data_train,
batch_size=self.args.batch_size,
shuffle=self.args.shuffle,
num_workers=self.args.dataloader_workers,
pin_memory=True,
drop_last=False,
prefetch_factor=self.args.prefetch
)
return data_train
def val_dataloader(self):
data_val = DataLoader(self.data_val,
batch_size=self.args.batch_size,
shuffle=self.args.shuffle,
num_workers=self.args.dataloader_workers,
pin_memory=True,
drop_last=False,
prefetch_factor=self.args.prefetch
)
return data_val
def test_dataloader(self):
data_test = DataLoader(self.data_test,
batch_size=self.args.batch_size,
shuffle=self.args.shuffle,
num_workers=self.args.dataloader_workers,
pin_memory=True,
drop_last=False,
prefetch_factor=self.args.prefetch
)
return data_test
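# Illustrative usage sketch: the argument values and dataset path below are placeholders
# chosen for this example, not project defaults.
if __name__ == '__main__':
    from argparse import Namespace
    example_args = Namespace(
        data_root='dataset/Fillers',
        resize=True, height=240, width=320,
        normalize=True,
        norm_mean='0.485+0.456+0.406', norm_std='0.229+0.224+0.225',
        batch_size=8, shuffle=True, dataloader_workers=4, prefetch=2,
    )
    dm = FILLERDataModule(example_args)
    dm.setup('fit')
    batch = next(iter(dm.train_dataloader()))
    # each batch is a dict of image, annotation (fixation map) and fixation (location) tensors
    print(batch['image'].shape, batch['annotation'].shape, batch['fixation'].shape)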
|
from dogapi import dog_http_api as api
api.api_key = '9775a026f1ca7d1c6c5af9d94d9595a4'
api.application_key = '87ce4a24b5553d2e482ea8a8500e71b8ad4554ff'
# Get an alert's details
api.get_alert(525)
|
# vim: fdm=indent
'''
author: Fabio Zanini
date: 02/03/18
content: IO functions.
'''
# Modules
import numpy as np
import pandas as pd
# Functions
def read_10X(fdict, make_dense=False):
import scipy.io
mat = scipy.io.mmread(fdict['matrix'])
genes = pd.read_csv(
fdict['genes'],
sep='\t',
squeeze=True,
index_col=0,
header=None,
).index
barcodes = pd.read_csv(
fdict['barcodes'],
sep='\t',
squeeze=True,
index_col=0,
header=None,
).index
if not make_dense:
return {
'matrix': mat,
'featurenames': genes,
'samplenames': barcodes,
}
else:
df = pd.DataFrame(
data=mat.todense(),
index=genes,
columns=barcodes,
)
return df
def read_dataframe(fn):
return pd.read_csv(fn, sep='\t', index_col=0)
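# Example call (the paths below follow the usual CellRanger-style 10X output layout
# and are only illustrative):
#
#   fdict = {
#       'matrix': 'filtered_gene_bc_matrices/hg19/matrix.mtx',
#       'genes': 'filtered_gene_bc_matrices/hg19/genes.tsv',
#       'barcodes': 'filtered_gene_bc_matrices/hg19/barcodes.tsv',
#   }
#   counts = read_10X(fdict, make_dense=True)   # genes x barcodes DataFrame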
|
'''
In Section 2.3.3, we note that our Vector class supports a syntax such as
v = u + [5, 3, 10, −2, 1], in which the sum of a vector and list returns
a new vector. However, the syntax v = [5, 3, 10, −2, 1] + u is illegal.
Explain how the Vector class definition can be revised so that this syntax
generates a new vector.
'''
def __add__(self, other):
'''Return sum of two vectors.'''
if len(self) != len(other): # relies on len method
raise ValueError('dimensions must agree')
    result = Vector(len(self))  # start with a vector of zeros
for j in range(len(self)):
#print('self[{}]+other[{}]='.format(j,j),self[j]+other[j])
result[j] = self[j] + other[j]
#print('result=',result)
return result
__radd__ = __add__
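# A minimal usage sketch, assuming the Vector class from Section 2.3.3 around these
# methods (with __len__, __getitem__ and __setitem__ defined as in the textbook):
#
#   u = Vector(5)
#   for j in range(len(u)):
#       u[j] = j                      # u is now <0, 1, 2, 3, 4>
#   v = u + [5, 3, 10, -2, 1]         # __add__ handles Vector + list
#   w = [5, 3, 10, -2, 1] + u         # __radd__ makes list + Vector legal too
#
# Python first tries list.__add__, which does not know how to add a Vector, and then
# falls back to Vector.__radd__; aliasing __radd__ = __add__ is enough because
# element-wise addition is commutative.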
|
""" Bootstrapping the yield curve """
from math import log, exp
from typing import List
from securities.bond import Bond
from term_structures.curve import Curve
class BootstrappedCurve(Curve):
"""
Bootstrap a curve of bond instruments to zero coupon rates
"""
def __init__(self):
self._instruments = {} # Map each maturity to an instrument
super().__init__({})
def add_instrument(self, bond):
""" Save instrument info by maturity """
self._curve[bond.maturity_term] = None
self._instruments[bond.maturity_term] = bond
@property
def rates(self) -> List:
""" Calculate list of available zero rates """
for bond in self._instruments.values():
if bond.maturity_term not in self._curve:
raise KeyError("term {} not found".format(bond.maturity_term))
self._curve[bond.maturity_term] = self.bond_spot_rate(bond)
return [self._curve[t] for t in self.times]
def bond_spot_rate(self, bond: Bond) -> float:
""" return the spot rate for the input bond """
if bond.coupon == 0:
# get rate of a zero coupon bond
spot_rate = log(bond.par / bond.price) / bond.maturity_term
else:
# calculate spot rate of a bond
number_of_periods = \
int(bond.maturity_term * bond.compounding_frequency)
value = bond.price
cpn_payment = bond.coupon * bond.par / bond.compounding_frequency
for i in range(1, number_of_periods):
term = i / float(bond.compounding_frequency)
rate = self._curve[term]
discounted_coupon_value = cpn_payment * exp(-rate * term)
value -= discounted_coupon_value
last = number_of_periods / float(bond.compounding_frequency)
spot_rate = -log(value / (bond.par + cpn_payment)) / last
return spot_rate
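# Hypothetical usage sketch; the Bond constructor arguments below are assumed for
# illustration and may not match securities.bond exactly.
#
#   curve = BootstrappedCurve()
#   curve.add_instrument(Bond(price=97.5, par=100, maturity_term=0.5, coupon=0))
#   curve.add_instrument(Bond(price=99.0, par=100, maturity_term=1.0,
#                             coupon=0.06, compounding_frequency=2))
#   print(curve.rates)   # continuously compounded zero rates, ordered by self.times
#
# For the zero-coupon bond the rate is simply log(par / price) / T. For the coupon
# bond every earlier coupon is discounted with the zero rates already bootstrapped,
# and the residual price solves -log(value / (par + coupon)) / T for the final term.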
|
import enum
class OrderStatusEdit(enum.Enum):
PENDING_STORE = 'PENDING_STORE'
PREPARING = 'PREPARING'
READY = 'READY'
ON_IT = 'ON_IT'
DONE = 'DONE'
class OrderStatus(enum.Enum):
COMPLETED = 'COMPLETED'
PREPARING = 'PREPARING'
PENDING_PICKUP = 'PENDING_PICKUP'
PICKED_UP = 'PICKED_UP'
CANCELLED = 'CANCELLED'
DELIVERING = 'DELIVERING'
PENDING_STORE = 'PENDING_STORE'
PENDING_HELPER = 'PENDING_HELPER'
class OrderGroupStatus(enum.Enum):
COMPLETED = 'COMPLETED'
DELIVERING = 'DELIVERING'
PENDING_PICKUP = 'PENDING_PICKUP'
PENDING_HELPER = 'PENDING_HELPER'
|
#!/bin/python3
class Class1:
    @staticmethod
    def collinear(x1, y1, x2, y2, x3, y3):
        # twice the signed area of the triangle; zero means the three points are collinear
        a = x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)
        if a == 0:
            print('Yes')
        else:
            print('No')
def main():
    Class1.collinear(1, 1, 1, 1, 4, 5)
if __name__ == "__main__":
    main()
|
from setuptools import setup
setup(
# Needed to silence warnings (and to be a worthwhile package)
name='DA_Functions',
url='https://github.com/armandotoledo/ibv_da_functions',
author='Armando Toledo',
author_email='armando.toledo.v@hotmail.com',
# Needed to actually package something
packages=['da_functions'],
# Needed for dependencies
install_requires=['numpy', 'pandas'],
# *strongly* suggested for sharing
version='0.1',
# The license can be anything you like
license='MIT',
description='Basic tools/functions for ibvogt Data Analytics',
# long_description=open('README.txt').read(),
)
|
"""Common loss functions package"""
from typing import Callable
# Type hinting
Loss = Callable[[float, float], float]
def difference(result: float, target: float) -> float:
return target - result
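# Small usage sketch: any callable with the (result, target) -> float signature
# satisfies the Loss alias and can be swapped in wherever a Loss is expected.
def absolute_difference(result: float, target: float) -> float:
    return abs(target - result)
def apply_loss(loss: Loss, result: float, target: float) -> float:
    """Evaluate an arbitrary loss function, e.g. difference or absolute_difference."""
    return loss(result, target)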
|
#!/usr/bin/python
#
# Copyright (c) 2016 Red Hat
# Luke Hinds (lhinds@redhat.com)
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1: OpenSCAP paramiko connection functions
import os
import socket
import paramiko
import functest.utils.functest_logger as ft_logger
# add installer IP from env
INSTALLER_IP = os.getenv('INSTALLER_IP')
# Set up loggers
logger = ft_logger.Logger("security_scan").getLogger()
paramiko.util.log_to_file("/var/log/paramiko.log")
class SetUp:
def __init__(self, *args):
self.args = args
def keystonepass(self):
com = self.args[0]
client = paramiko.SSHClient()
privatekeyfile = os.path.expanduser('/root/.ssh/id_rsa')
selectedkey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
client.connect(INSTALLER_IP, port=22, username='stack',
pkey=selectedkey)
except paramiko.SSHException:
logger.error("Password is invalid for "
"undercloud host: {0}".format(INSTALLER_IP))
except paramiko.AuthenticationException:
logger.error("Authentication failed for "
"undercloud host: {0}".format(INSTALLER_IP))
except socket.error:
logger.error("Socker Connection failed for "
"undercloud host: {0}".format(INSTALLER_IP))
        stdin, stdout, stderr = client.exec_command(com)
        output = stdout.read()
        client.close()
        return output
def getockey(self):
remotekey = self.args[0]
localkey = self.args[1]
privatekeyfile = os.path.expanduser('/root/.ssh/id_rsa')
selectedkey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
transport = paramiko.Transport((INSTALLER_IP, 22))
transport.connect(username='stack', pkey=selectedkey)
try:
sftp = paramiko.SFTPClient.from_transport(transport)
except paramiko.SSHException:
logger.error("Authentication failed for "
"host: {0}".format(INSTALLER_IP))
except paramiko.AuthenticationException:
logger.error("Authentication failed for "
"host: {0}".format(INSTALLER_IP))
except socket.error:
logger.error("Socker Connection failed for "
"undercloud host: {0}".format(INSTALLER_IP))
sftp.get(remotekey, localkey)
sftp.close()
transport.close()
class ConnectionManager:
def __init__(self, host, port, user, localkey, *args):
self.host = host
self.port = port
self.user = user
self.localkey = localkey
self.args = args
def remotescript(self):
localpath = self.args[0]
remotepath = self.args[1]
com = self.args[2]
client = paramiko.SSHClient()
privatekeyfile = os.path.expanduser('/root/.ssh/id_rsa')
selectedkey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Connection to undercloud
try:
client.connect(INSTALLER_IP, port=22, username='stack',
pkey=selectedkey)
except paramiko.SSHException:
logger.error("Authentication failed for "
"host: {0}".format(self.host))
except paramiko.AuthenticationException:
logger.error("Authentication failed for "
"host: {0}".format(self.host))
except socket.error:
logger.error("Socker Connection failed for "
"undercloud host: {0}".format(self.host))
transport = client.get_transport()
local_addr = ('127.0.0.1', 0)
channel = transport.open_channel("direct-tcpip",
(self.host, int(self.port)),
(local_addr))
remote_client = paramiko.SSHClient()
remote_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Tunnel to overcloud
try:
remote_client.connect('127.0.0.1', port=22, username=self.user,
key_filename=self.localkey, sock=channel)
sftp = remote_client.open_sftp()
sftp.put(localpath, remotepath)
except paramiko.SSHException:
logger.error("Authentication failed for "
"host: {0}".format(self.host))
except paramiko.AuthenticationException:
logger.error("Authentication failed for "
"host: {0}".format(self.host))
except socket.error:
logger.error("Socker Connection failed for "
"undercloud host: {0}".format(self.host))
output = ""
stdin, stdout, stderr = remote_client.exec_command(com)
stdout = stdout.readlines()
# remove script
sftp.remove(remotepath)
remote_client.close()
client.close()
# Pipe back stout
for line in stdout:
output = output + line
if output != "":
return output
def remotecmd(self):
com = self.args[0]
client = paramiko.SSHClient()
privatekeyfile = os.path.expanduser('/root/.ssh/id_rsa')
selectedkey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Connection to undercloud
try:
client.connect(INSTALLER_IP, port=22, username='stack',
pkey=selectedkey)
except paramiko.SSHException:
logger.error("Authentication failed for "
"host: {0}".format(self.host))
except paramiko.AuthenticationException:
logger.error("Authentication failed for "
"host: {0}".format(self.host))
except socket.error:
logger.error("Socker Connection failed for "
"undercloud host: {0}".format(self.host))
transport = client.get_transport()
local_addr = ('127.0.0.1', 0) # 0 denotes choose random port
channel = transport.open_channel("direct-tcpip",
(self.host, int(self.port)),
(local_addr))
remote_client = paramiko.SSHClient()
remote_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Tunnel to overcloud
try:
remote_client.connect('127.0.0.1', port=22, username=self.user,
key_filename=self.localkey, sock=channel)
except paramiko.SSHException:
logger.error("Authentication failed for "
"host: {0}".format(self.host))
except paramiko.AuthenticationException:
logger.error("Authentication failed for "
"host: {0}".format(self.host))
except socket.error:
logger.error("Socker Connection failed for "
"undercloud host: {0}".format(self.host))
chan = remote_client.get_transport().open_session()
chan.get_pty()
feed = chan.makefile()
chan.exec_command(com)
        print(feed.read())
remote_client.close()
client.close()
def download_reports(self):
dl_folder = self.args[0]
reportfile = self.args[1]
reportname = self.args[2]
resultsname = self.args[3]
client = paramiko.SSHClient()
privatekeyfile = os.path.expanduser('/root/.ssh/id_rsa')
selectedkey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        # Connection to undercloud
try:
client.connect(INSTALLER_IP, port=22, username='stack',
pkey=selectedkey)
except paramiko.SSHException:
logger.error("Authentication failed for "
"host: {0}".format(self.host))
except paramiko.AuthenticationException:
logger.error("Authentication failed for "
"host: {0}".format(self.host))
except socket.error:
logger.error("Socker Connection failed for "
"undercloud host: {0}".format(self.host))
transport = client.get_transport()
local_addr = ('127.0.0.1', 0) # 0 denotes choose random port
channel = transport.open_channel("direct-tcpip",
(self.host, int(self.port)),
(local_addr))
remote_client = paramiko.SSHClient()
remote_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Tunnel to overcloud
try:
remote_client.connect('127.0.0.1', port=22, username=self.user,
key_filename=self.localkey, sock=channel)
except paramiko.SSHException:
logger.error("Authentication failed for "
"host: {0}".format(self.host))
except paramiko.AuthenticationException:
logger.error("Authentication failed for "
"host: {0}".format(self.host))
except socket.error:
logger.error("Socker Connection failed for "
"undercloud host: {0}".format(self.host))
# Download the reports
sftp = remote_client.open_sftp()
logger.info("Downloading \"{0}\"...".format(reportname))
sftp.get(reportfile, ('{0}/{1}'.format(dl_folder, reportname)))
logger.info("Downloading \"{0}\"...".format(resultsname))
sftp.get(reportfile, ('{0}/{1}'.format(dl_folder, resultsname)))
sftp.close()
transport.close()
|
from django.conf.urls import include, url
from rest_framework.routers import SimpleRouter
from . import views
discovery = SimpleRouter()
discovery.register('discovery', views.DiscoveryViewSet, base_name='discovery')
urlpatterns = [
url(r'', include(discovery.urls)),
]
|
from django.http import HttpResponseRedirect, JsonResponse,HttpResponse
from django.shortcuts import get_object_or_404, render,redirect
from django.urls import reverse, reverse_lazy
from django.contrib.auth.models import User
from datetime import datetime
from django.core import serializers
import requests
import random
from django.views.generic import ListView, DetailView, CreateView
from .models import Schools, Classes, Courses, Questions ,Answers,MyAnswers
from .forms import SchoolsForm, ClassesForm, CoursesForm, QuestionsForm, AnswersForm, MyAnswersForm
class IndexView(ListView):
template_name = 'education/index.html'
def get_queryset(self):
return Schools.objects.all()
class SchoolsIndexView(ListView):
template_name = 'education/schools_index.html'
context_object_name = 'schools_list'
def get_queryset(self):
return Schools.objects.all()
class ClassesIndexView(ListView):
template_name = 'education/class_index.html'
    def get_queryset(self):
        return Classes.objects.all()
def get_context_data(self, **kwargs):
context = super(ClassesIndexView, self).get_context_data(**kwargs)
classes = Classes.objects.all()
newClassObj = []
for classroom in classes:
schools = Schools.objects.filter(schools_id=classroom.schools_id)
newClassObj.append({"schoolsList":schools,"class":classroom})
context['classes_list'] = newClassObj
return context
class CoursesIndexView(ListView):
template_name = 'education/courses_index.html'
def get_queryset(self):
return Courses.objects.all()
def get_context_data(self, **kwargs):
context = super(CoursesIndexView, self).get_context_data(**kwargs)
courses = Courses.objects.all()
newClassObj = []
for course in courses:
schools = Schools.objects.filter(schools_id=course.schools_id)
            classes = Classes.objects.filter(id=course.classes_id)
newClassObj.append({"schoolsList":schools,"classList":classes,"courses":course})
context['courses_list'] = newClassObj
return context
def schools_create(request):
if request.method == 'POST':
form = SchoolsForm(request.POST)
if form.is_valid():
form.instance.userId = request.user.id
form.instance.pub_date = datetime.now()
form.save()
return redirect('education:schools_index')
form = SchoolsForm()
return render(request,'education/schools_create.html',{'form': form})
def class_create(request):
if request.method == 'POST':
form = ClassesForm(request.POST)
if form.is_valid():
form.instance.userId = request.user.id
form.instance.pub_date = datetime.now()
form.save()
return redirect('education:class_index')
form = ClassesForm()
return render(request,'education/class_create.html',{'form': form})
def schools_edit(request, pk, template_name='education/schools_edit.html'):
school = get_object_or_404(Schools, pk=pk)
form = SchoolsForm(request.POST or None, instance=school)
if form.is_valid():
form.instance.userId = request.user.id
form.save()
return redirect('education:schools_index')
return render(request, template_name, {'form':form})
def class_edit(request, pk, template_name='education/class_edit.html'):
classes = get_object_or_404(Classes, pk=pk)
form = ClassesForm(request.POST or None, instance=classes)
if form.is_valid():
form.instance.userId = request.user.id
form.save()
return redirect('education:class_index')
return render(request, template_name, {'form':form})
def courses_create(request):
if request.method == 'POST':
form = CoursesForm(request.POST)
if form.is_valid():
form.instance.userId = request.user.id
form.instance.pub_date = datetime.now()
form.save()
return redirect('education:courses_index')
form = CoursesForm()
return render(request,'education/courses_create.html',{'form': form})
def courses_edit(request, pk, template_name='education/courses_edit.html'):
courses = get_object_or_404(Courses, pk=pk)
form = CoursesForm(request.POST or None, instance=courses)
if form.is_valid():
form.instance.userId = request.user.id
form.save()
return redirect('education:courses_index')
return render(request, template_name, {'form':form})
class CoursesDetailsView(DetailView):
model = Courses
template_name = 'education/courses_details.html'
def get_context_data(self, **kwargs):
context = super(CoursesDetailsView, self).get_context_data(**kwargs)
context['question_datails'] = Questions.objects.filter(courses_id=self.kwargs.get('pk'))
return context
def schools_delete(request, pk, template_name='education/schools_confirm_delete.html'):
school = get_object_or_404(Schools, pk=pk)
if request.method=='POST':
school.delete()
return redirect('education:schools_index')
return render(request, template_name, {'object':school})
def class_delete(request, pk, template_name='education/class_confirm_delete.html'):
classes = get_object_or_404(Classes, pk=pk)
if request.method=='POST':
classes.delete()
return redirect('education:class_index')
return render(request, template_name, {'object':classes})
def courses_delete(request, pk, template_name='education/courses_confirm_delete.html'):
courses = get_object_or_404(Courses, pk=pk)
if request.method=='POST':
courses.delete()
return redirect('education:courses_index')
return render(request, template_name, {'object':courses})
def set_user_type(request):
request.session["name"] = request.POST['accessType']
request.session["test_session"] = random.randint(2345678909800, 9923456789000)
return redirect('education:index')
# Questions section
class QuestionsIndexView(ListView):
template_name = 'education/questions_index.html'
context_object_name = 'questions_list'
def get_queryset(self):
return Questions.objects.all()
class QuestionsCreateView(CreateView):
model = Questions
form_class = QuestionsForm
template_name = 'education/questions_create.html'
def get_success_url(self):
return reverse_lazy('education:courses_details', kwargs={'pk': self.kwargs['courses_id']})
def form_valid(self, form):
form.instance.userId = self.request.user.id
form.instance.pub_date = datetime.now()
form.instance.courses_id = self.kwargs['courses_id']
return super().form_valid(form)
class QuestionsDetailsView(DetailView):
model = Questions
template_name = 'education/questions_details.html'
def get_context_data(self, **kwargs):
context = super(QuestionsDetailsView, self).get_context_data(**kwargs)
context['answers_datails'] = Answers.objects.filter(questions_id=self.kwargs.get('pk'))
return context
def questions_edit(request, pk, template_name='education/questions_edit.html'):
questions = get_object_or_404(Questions, pk=pk)
form = QuestionsForm(request.POST or None, instance=questions)
if form.is_valid():
form.instance.userId = request.user.id
form.save()
return redirect(reverse_lazy('education:courses_details', kwargs={'pk': questions.courses_id}))
return render(request, template_name, {'form':form})
def questions_Correct_answer(request, pk, template_name='education/questions_correctanswer.html'):
questions = get_object_or_404(Questions, pk=pk)
obj = Questions.objects.get(id=questions.id)
obj.correct_answer = request.POST["correct_answer"]
obj.save()
return redirect(reverse_lazy('education:questions_details', kwargs={'pk': questions.id}))
def questions_delete(request, pk, template_name='education/questions_confirm_delete.html'):
questions = get_object_or_404(Questions, pk=pk)
if request.method=='POST':
questions.delete()
return redirect(reverse_lazy('education:courses_details', kwargs={'pk': questions.courses_id}))
return render(request, template_name, {'object':questions})
# Answers section
class AnswersIndexView(ListView):
template_name = 'education/answers_index.html'
context_object_name = 'answers_list'
def get_queryset(self):
return Answers.objects.all()
class AnswersCreateView(CreateView):
model = Answers
form_class = AnswersForm
template_name = 'education/answers_create.html'
def get_success_url(self):
return reverse_lazy('education:questions_details', kwargs={'pk': self.kwargs['questions_id']})
def form_valid(self, form):
form.instance.userId = self.request.user.id
form.instance.pub_date = datetime.now()
form.instance.questions_id = self.kwargs['questions_id']
return super().form_valid(form)
def answers_edit(request, pk, template_name='education/answers_edit.html'):
answers = get_object_or_404(Answers, pk=pk)
form = AnswersForm(request.POST or None, instance=answers)
if form.is_valid():
form.instance.userId = request.user.id
form.save()
return redirect(reverse_lazy('education:questions_details', kwargs={'pk': answers.questions_id}))
return render(request, template_name, {'form':form})
def answers_delete(request, pk, template_name='education/answers_confirm_delete.html'):
answers = get_object_or_404(Answers, pk=pk)
if request.method=='POST':
answers.delete()
return redirect(reverse_lazy('education:questions_details', kwargs={'pk': answers.questions_id}))
return render(request, template_name, {'object':answers})
#Get Exams settings
def get_schools(request):
if request.is_ajax and request.method == "GET":
school_query = "SELECT * from education_schools "
schools = Schools.objects.raw(school_query)
context = {}
schools_data = serializers.serialize('json', schools)
return JsonResponse({'schools_data': schools_data})
return JsonResponse(data={'error':'error'})
def get_classes(request, pk):
schools = get_object_or_404(Schools, pk=pk)
if request.is_ajax and request.method == "GET":
classes = Classes.objects.filter(schools_id=schools.schools_id);
context = {}
classes_data = serializers.serialize('json', classes)
return JsonResponse({'classes_data': classes_data})
return JsonResponse(data={'error':'error'})
def get_courses(request, classes_pk, schools_pk):
schools = get_object_or_404(Schools, pk=schools_pk)
classes = get_object_or_404(Classes, pk=classes_pk)
if request.is_ajax and request.method == "GET":
courses = Courses.objects.filter(classes_id=classes.id, schools_id=schools.schools_id);
context = {}
courses_data = serializers.serialize('json', courses)
return JsonResponse({'courses_data': courses_data})
return JsonResponse(data={'error':'error'})
class MyExamsView(ListView):
template_name = 'education/my_exams.html'
context_object_name = 'questions_list'
def get_queryset(self):
        if not self.request.session.get('test_session'):
self.request.session["test_session"] = random.randint(2345678909800, 9923456789000)
return Questions.objects.filter(courses_id=self.kwargs.get('courses_pk'))
def get_context_data(self, **kwargs):
context = super(ListView,self).get_context_data(**kwargs)
context['courses'] = Courses.objects.filter(id=self.kwargs.get('courses_pk'))
return context
def get_answers(request, questions_pk):
questions = get_object_or_404(Questions, pk=questions_pk)
if request.is_ajax and request.method == "GET":
if request.session.get("finished"):
return JsonResponse(data={'error':'error'})
else:
answers = Answers.objects.filter(questions_id=questions.id);
context = {}
answers_data = serializers.serialize('json', answers)
return JsonResponse({'answers_data': answers_data})
else:
return JsonResponse(data={'error':'error'})
def save_answers(request):
if request.method == 'POST':
form = MyAnswersForm(request.POST)
if form.is_valid():
form.instance.answers_id = request.POST['answers_id']
form.instance.questions_id = request.POST['questions_id']
form.instance.test_session = request.session['test_session']
form.instance.userId = request.user.id
form.instance.pub_date = datetime.now()
updated_rows = MyAnswers.objects.filter(test_session=request.session['test_session'],questions_id=request.POST['questions_id'])
if not updated_rows:
form.save()
return HttpResponse("success")
else:
MyAnswers.objects.filter(test_session=request.session['test_session'],questions_id=request.POST['questions_id']).update(answers_id=request.POST['answers_id'])
return HttpResponse("Already exist")
else:
            errors = form.errors
            return HttpResponse(errors.as_json(), content_type='application/json')
else:
return HttpResponse("error")
def finish_answers(request, schools_id,classes_id,courses_id,session_id, template_name='education/finish.html'):
schools = get_object_or_404(Schools, pk=schools_id)
if request.method=='GET':
request.session["finished"] = "true";
courses = Courses.objects.filter(id=courses_id)
questions = Questions.objects.filter(courses_id=courses_id)
myanswers = MyAnswers.objects.filter(test_session=session_id)
newObj = []
correctAnswers = 0
wrongAnswers = 0
for question in questions:
count = 1
isCorrect = "false"
for myanswer in myanswers:
if int(question.correct_answer) == int(myanswer.answers_id):
newObj.append({"question":question.questions_name, "status":"Correct"})
isCorrect = "true"
correctAnswers+=1
else:
if int(question.correct_answer) != int(myanswer.answers_id) and int(count) == len(myanswers) and isCorrect=="false":
newObj.append({"question":question.questions_name, "status":"Wrong"})
wrongAnswers+=1
count+=1
scoretotal = 0
score = (correctAnswers/len(questions))*100
if score > 0:
scoretotal = score
context = {}
context['score'] = int(scoretotal)
context['newObj'] = newObj
context['questions'] = questions
context['myanswers'] = myanswers
context['courses'] = courses
return render(request, template_name, {'context':context})
return render(request, template_name, {'object':schools})
def print_certificate(request, schools_id,classes_id,courses_id,session_id,fullname, template_name='education/print_certificate.html'):
schools = get_object_or_404(Schools, pk=schools_id)
if request.method=='GET':
courses = Courses.objects.filter(id=courses_id)
questions = Questions.objects.filter(courses_id=courses_id)
myanswers = MyAnswers.objects.filter(test_session=session_id)
newObj = []
correctAnswers = 0
wrongAnswers = 0
for question in questions:
count = 1
isCorrect = "false"
for myanswer in myanswers:
if int(question.correct_answer) == int(myanswer.answers_id):
newObj.append({"question":question.questions_name, "status":"Correct"})
isCorrect = "true"
correctAnswers+=1
else:
if int(question.correct_answer) != int(myanswer.answers_id) and int(count) == len(myanswers) and isCorrect=="false":
newObj.append({"question":question.questions_name, "status":"Wrong"})
wrongAnswers+=1
count+=1
scoretotal = 0
score = (correctAnswers/len(questions))*100
if score > 0:
scoretotal = score
context = {}
context['score'] = int(scoretotal)
context['name'] = fullname
context['newObj'] = newObj
context['questions'] = questions
context['myanswers'] = myanswers
context['courses'] = courses
context['schools'] = schools
return render(request, template_name, {'context':context})
return render(request, template_name, {'object':schools})
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from umd import api
from umd.base.security import utils as sec_utils
from umd.common import qc
from umd import config
from umd import utils
class Security(object):
def __init__(self):
self.cfgtool = config.CFG["cfgtool"]
self.need_cert = config.CFG["need_cert"]
# exceptions
# 'known_worldwritable_filelist': already-known world writable files
self.exceptions = config.CFG["exceptions"]
@qc.qcstep("QC_SEC_2", "SHA-2 Certificates Support")
def qc_sec_2(self):
"""SHA-2 Certificates Support."""
if self.need_cert:
if not self.cfgtool:
api.warn(("SHA-2 management not tested: configuration tool "
"not defined."))
else:
if not self.cfgtool.has_run:
r = self.cfgtool.run()
if r.failed:
api.fail("Configuration failed with SHA-2 certs",
stop_on_error=True)
else:
api.ok("Product services can manage SHA-2 certs.")
else:
api.na("Product does not need certificates.")
@qc.qcstep("QC_SEC_5", "World Writable Files")
def qc_sec_5(self):
"""World Writable Files check."""
_logfile = "qc_sec_5"
r = utils.runcmd(("find / -not \\( -path \"/proc\" -prune \\) "
"-not \\( -path \"/sys\" -prune \\) "
"-type f -perm -002 -exec ls -l {} \;"),
log_to_file=_logfile)
if r:
ww_filelist = sec_utils.get_filelist_from_find(r)
try:
known_ww_filelist = self.exceptions[
"known_worldwritable_filelist"]
except KeyError:
known_ww_filelist = []
if set(ww_filelist).difference(set(known_ww_filelist)):
api.fail("Found %s world-writable file/s." % len(ww_filelist),
logfile=r.logfile)
else:
api.warn("Found world-writable file/s required for operation.",
logfile=r.logfile)
else:
api.ok("Found no world-writable file.")
# if self.pkgtool.os == "sl5":
# pkg_wwf_files = local(("rpm -qalv | egrep '^[-d]([-r][-w][-xs])"
# "{2}[-r]w'"))
# if pkg_wwf_files:
# print(yellow("Detected package world-writable files:\n%s"
# % pkg_wwf_files))
@qc.qcstep_request
def run(self, steps, *args, **kwargs):
if steps:
for method in steps:
method()
else:
self.qc_sec_2()
self.qc_sec_5()
|
#====================================================================
# Driver program
#====================================================================
import sys,os
sys.path.append('../../src/Python/Postprocessing/')
sys.path.append('../../src/Python/Stage_0/')
sys.path.append('../../src/Python/Stage_1/')
sys.path.append('../../src/Python/Stage_1/afdd/')
sys.path.append('../../src/Python/Stage_1/engines/')
sys.path.append('../../src/Python/Stage_3/')
sys.path.append(os.getcwd())
from noise_generator import noise
#====================================================================
# get design ID
#====================================================================
if len(sys.argv) < 2:
print(" -- Error: noise_prediction.py requires a design ID")
print(" -- Usage: 'python noise_prediction.py <ID> '")
quit()
else:
n = sys.argv[1]
filename = 'output/logs/log'+n+'.txt'
footprint = noise(filename)
#====================================================================
# draw noise footprint
#====================================================================
|
import os
import redis
from rq import Worker, Queue, Connection
listen = ['default']
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
conn = redis.from_url(redis_url)
q = Queue(connection=conn)
if __name__ == '__main__':
with Connection(conn):
worker = Worker(list(map(Queue, listen)))
worker.work()
|
#des_reg_BSTree.py
#DEVELOPER: Israel Bond
#implementation of classes needed for assignment
class BSTNode:
def __init__(self,number):
self.left = None
self.right = None
self.num = number
class BSTree:
def __init__(self):
self.root = None
def insert(self,number):
if(self.root == None):
self.root = BSTNode(number)
else:
self._insert(number, self.root)
    def _insert(self, number, node):
        # recurse down the tree and attach a new BSTNode at the first empty slot
        if number < node.num:
            if node.left is not None:
                self._insert(number, node.left)
            else:
                node.left = BSTNode(number)
        else:
            if node.right is not None:
                self._insert(number, node.right)
            else:
                node.right = BSTNode(number)
def deleteBSTree(self):
self.root = None
def displayBSTree(self):
if(self.root != None):
self._displayBSTree(self.root)
def _displayBSTree(self, node):
if(node != None):
self._displayBSTree(node.left)
print(str(node.num) + ' ')
self._displayBSTree(node.right)
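# Quick usage example (not part of the original assignment): build a small tree,
# print it in sorted (in-order) order, then clear it.
if __name__ == '__main__':
    tree = BSTree()
    for value in [8, 3, 10, 1, 6, 14]:
        tree.insert(value)
    tree.displayBSTree()   # prints 1 3 6 8 10 14, one number per line
    tree.deleteBSTree()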
|
import numpy as np
from kipoi.data import Dataset
class SeqDataloader(Dataset):
"""
Args:
fasta_file: file path; Protein sequence(s)
"""
def __init__(self, fasta_file, split_char=' ', id_field=0):
seq_dict = self.read_fasta(fasta_file, split_char, id_field)
self.length = len(seq_dict)
sequences = sorted(seq_dict.items(), key=lambda kv: len(seq_dict[kv[0]]))
self.identifier, self.seqs = zip(*sequences)
self.seqs = [np.asarray([seq]) for seq in self.seqs]
def read_fasta(self, fasta_file, split_char, id_field):
'''
Reads in fasta file containing multiple sequences.
        Returns a dictionary holding multiple sequences or only a single
        sequence, depending on the input file.
'''
sequences = dict()
with open(fasta_file, 'r') as fasta_f:
for line in fasta_f:
# get uniprot ID from header and create new entry
if line.startswith('>'):
uniprot_id = line.replace('>', '').strip().split(split_char)[id_field]
sequences[uniprot_id] = ''
else:
                        # replace all white-space chars and join seqs spanning multiple lines
sequences[uniprot_id] += ''.join(line.split()).upper()
return sequences
def __len__(self):
return self.length
def __getitem__(self, idx):
return {
"inputs": self.seqs[idx],
"metadata": {
"id": self.identifier[idx]
}
}
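# Minimal usage sketch: the FASTA content below is made up for illustration only.
if __name__ == '__main__':
    import tempfile
    fasta = ">P12345 example protein\nMKTAYIAKQR\nQISFVKSHFS\n>Q67890\nMSEQNNTEMT\n"
    with tempfile.NamedTemporaryFile('w', suffix='.fasta', delete=False) as tmp:
        tmp.write(fasta)
        tmp_path = tmp.name
    dl = SeqDataloader(tmp_path)
    for i in range(len(dl)):
        sample = dl[i]
        # each sample holds a length-1 array with the raw sequence plus its identifier
        print(sample["metadata"]["id"], sample["inputs"][0])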
|
#!/usr/bin/env python
import sys
import yaml
with open(sys.argv[1]) as fd:
print(yaml.safe_load(fd))
|
import cv2
import numpy as np
from scipy.ndimage import label
from src.tools.hog_window_search import generate_heatmap, combined_window_search, draw_labeled_bboxes
class VideoProcessor:
def __init__(self, svc, X_scaler):
self.heatmap_history = []
self.svc = svc
self.scaler = X_scaler
def heatmap(self, image):
image = np.copy(image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
rects_1, rects_2, rects_3, rects_4, rects_5, rects = combined_window_search(image, self.svc, self.scaler)
heatmap = generate_heatmap(rects)
heatmap[heatmap <= 4] = 0
# print(heatmap.shape)
self.heatmap_history.append(heatmap)
m = 255 / np.max(heatmap)
return cv2.cvtColor(np.uint8(heatmap.reshape(720, 1280, 1) * m), cv2.COLOR_GRAY2RGB)
def labeled(self, image):
image = np.copy(image)
labels = label(self.heatmap_history_combined())
labeled_img = draw_labeled_bboxes(image, labels)
return labeled_img
def heatmap_history_combined(self, window=10, required=6):
        # trim everything except the last N values to avoid a memory overload
self.heatmap_history = self.heatmap_history[-window:]
combined = np.zeros(self.heatmap_history[0].shape)
for heatmap in self.heatmap_history:
combined[heatmap == 0] -= 1
combined[combined < 0] = 0
combined[heatmap > 0] += 1
combined[combined <= required] = 0
return combined
|
# import code from your package here to make them available outside the module like:
from cythonbuilder.cython_builder import cy_init, cy_clean, cy_build, cy_list, cy_interface
from cythonbuilder.logs import LoggerSettings, create_logger, set_logger_debug_mode
__version__ = "0.1.13"
__last_build_datetime__ ="2022-06-01 12:08:26"
__last_publish_version__ ="0.1.13"
__last_publish_datetime__ ="2022-06-01 12:08:36"
|
from spaceone.core.error import *
class ERROR_JOB_STATE(ERROR_UNKNOWN):
_message = 'Only running jobs can be canceled. (job_state = {job_state})'
class ERROR_DUPLICATE_JOB(ERROR_UNKNOWN):
_message = 'The same job is already running. (data_source_id = {data_source_id})'
|
# Put whatever you want in this module and do whatever you want with it.
# It exists here as a place where you can "try out" things without harm.
# Joseph Law
##LOCK LABEL: 112
##Combo: 03-28-33
|
from datetime import date, timedelta
# from PyQt5.QtCore import QDate
from pandas_datareader import data
from pandas_datareader._utils import RemoteDataError
from numpy import polyval
from PyQt5.QtWidgets import QApplication, QMainWindow, QSizePolicy
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
# from scipy import stats
import random, sys
# import statsmodels.formula.api as sm
from PyQt5 import uic
# MainUI = "../UI/testMainUI.ui"
## Main class
from testMainUI import Ui_MainWindow
# Note:
# pyinstaller --onefile -p _dllfiles testMain_old.py
class Main(QMainWindow, Ui_MainWindow):
# class Main(QMainWindow):
def __init__(self):
super().__init__()
# uic.loadUi(MainUI,self)
self.setupUi(self)
self.iniUI()
self.mainState = 0
self.setUI()
def iniUI(self):
self.m = PlotCanvas(self, width=5, height=4)
self.m.move(20, 200)
self.show()
    # Console output helper
    @staticmethod
    def ConsolePrint(slope=0, intersecept=0, r_value=0, p_value=0):
        print('Slope:', slope)
        print('Intercept:', intersecept)
        print('Correlation coefficient:', r_value)
        print('p-value:', p_value)
    # Graph plotting helper
    @staticmethod
    def MPlot(plt, tsd, csd, slope, intersecept):
# print(tsd , csd, slope, intersecept)
ry = polyval([slope, intersecept], tsd)
plt.plot(tsd, csd, 'k.')
plt.plot(tsd, ry, 'r')
# plt.title('{}/{}'.format(targetStockCode, compStockCode))
# plt.xlabel(targetStockCode)
# plt.ylabel(compStockCode)
# plt.legend(['price', 'polyval'])
# plt.show()
#===============================================================================
def setUI(self):
now = date.today()
print(type(now ))
self.dateEdit.setDate(date.today() + timedelta(days=-30))
#time2 + timedelta(days=-3)
self.dateEdit_2.setDate(date.today() )
self.btn_start.setEnabled(True)
self.lineEdit_1.setText('AMD')
self.lineEdit_2.setText('AAPL')
self.lineEdit_1.setFocus()
## UI slots
def aStart(self):
self.btn_start.setEnabled(False)
self.lineEdit_1.setEnabled(False)
self.lineEdit_2.setEnabled(False)
print(self.lineEdit_1.text())
print(self.lineEdit_2.text())
input1 = self.lineEdit_1.text()
input2 = self.lineEdit_2.text()
# d = date.today()
# start = date(2019, 1, 1)
# end = date(d.year, d.month, d.day)
qstart = self.dateEdit.date()
start = qstart.toPyDate()
qend = self.dateEdit_2.date()
end = qend.toPyDate()
        if input1 and input2:
            self.inputData(input1, input2, start, end)
        else:
            print('Invalid ticker input')
def cancel(self):
self.btn_start.setEnabled(True)
self.lineEdit_1.setEnabled(True)
self.lineEdit_2.setEnabled(True)
self.lineEdit_1.setText('AMD')
self.lineEdit_2.setText('AAPL')
self.lineEdit_1.setFocus()
self.m.plotClear()
def inputData(self, targetStockCode, compStockCode, start, end ):
print('inputData')
if targetStockCode == None :
targetStockCode = 'AMD'
if compStockCode == None :
compStockCode = 'AAPL'
try:
            targetStock_df = data.DataReader(targetStockCode, "yahoo", start, end)  # from start to end
            compStock_df = data.DataReader(compStockCode, "yahoo", start, end)  # from start to end
except RemoteDataError as ede:
            self.textEdit_info.append('Error occurred: {}'.format(ede))
            print('Error occurred: {}'.format(ede))
return
except Exception as err:
print(type(err))
return
if targetStock_df is None or compStock_df is None:
            self.textEdit_info.append('An error occurred!')
return
tsd = targetStock_df['Close']
csd = compStock_df['Close']
#===================================================================================
        ## when using: from scipy import stats
# slope, intersecept, r_value, p_value, stderr = stats.linregress(tsd, csd)
# ConsolePrint(slope, intersecept, r_value, p_value)
# MPlot(plt, tsd, csd, slope, intersecept)
# print(tsd, type(tsd))
# ry = polyval([slope, intersecept], tsd)
# print(targetStock_df['Close'])
#========================================================================
# print('tsd',tsd)
# print('csd',csd)
if len(tsd) != len(csd):
            self.textEdit_info.append('The two data series have different lengths!')
return
corr = tsd.corr(csd)
print('corr:',corr)
        self.textEdit_info.append('Tickers: {}/{}'.format(targetStockCode, compStockCode))
        self.textEdit_info.append('Period: {}/{}'.format(start, end))
        if corr > 0:
            self.textEdit_info.append('Correlation: {}'.format(corr))
self.m.plot(list(tsd), list(csd), markup='k.')
# self.m.plot(tsd, ry, markup='r')
class PlotCanvas(FigureCanvas):
def __init__(self, parent=None, width=5, height=4, dpi=100):
fig = Figure(figsize=(width, height), dpi=dpi)
# self.axes = fig.add_subplot(111)
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QSizePolicy.Expanding,
QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
self.ax = self.figure.add_subplot(111)
###
def plot(self, *data, markup = None, title = 'NoTitle', legend = None, xLabel = None, yLabel = None):
if len(data) == 0:
data = [random.random() for i in range(25)]
if markup is None:
markup = 'k.'
# ax = self.figure.add_subplot(111)
self.ax.plot(data[0], data[1], markup)
self.ax.set_title(title)
        if legend is not None and len(legend) > 0:
            self.ax.legend(legend)
self.draw()
    def plotClear(self):
        self.ax.clear()
        self.ax.set_title("NoTitle")
        self.draw()
app = QApplication([])
ex = Main()
# ex.show()
sys.exit(app.exec_())
|
from typing import Dict, List, Optional, Union
from urllib.parse import urljoin
import json
import requests
from tqdm import tqdm
HOST = "http://lookup-service-prod.mlb.com"
def get_teams(
season: Union[int, str],
sport_code: Optional[str] = "'mlb'",
all_star_sw: Optional[str] = "'N'",
sort_order: Optional[str] = None,
) -> List[Dict[str, str]]:
full_path = urljoin(HOST, "/json/named.team_all_season.bam")
r = requests.get(
full_path,
params=dict(
sport_code=sport_code,
season=season,
all_star_sw=all_star_sw,
sort_order=sort_order,
),
)
return r.json()["team_all_season"]["queryResults"]["row"]
def get_roster(team_id: Union[int, str], **kwargs) -> List[Dict[str, str]]:
full_path = urljoin(HOST, "/json/named.roster_40.bam")
kwargs["team_id"] = team_id
kwargs["roster_40.col_in"] = kwargs.pop("col_in", None)
kwargs["roster_40.col_ex"] = kwargs.pop("col_ex", None)
r = requests.get(full_path, params=kwargs)
return r.json()["roster_40"]["queryResults"]["row"]
def get_season_hitting_stats(
player_id: Union[int, str],
season: Union[int, str],
game_type: Optional[str] = "'R'",
league_list_id: Optional[str] = "'mlb'",
**kwargs,
):
full_path = urljoin(HOST, "/json/named.sport_hitting_tm.bam")
kwargs["sport_hitting_tm.col_in"] = kwargs.pop("col_in", None)
kwargs["sport_hitting_tm.col_ex"] = kwargs.pop("col_ex", None)
kwargs.update(
dict(
player_id=player_id,
season=season,
game_type=game_type,
league_list_id=league_list_id,
)
)
r = requests.get(full_path, params=kwargs)
return r.json()["sport_hitting_tm"]["queryResults"].get("row", {})
def get_season_pitching_stats(
player_id: Union[int, str],
season: Union[int, str],
game_type: Optional[str] = "'R'",
league_list_id: Optional[str] = "'mlb'",
**kwargs,
):
full_path = urljoin(HOST, "/json/named.sport_pitching_tm.bam")
kwargs["sport_pitching_tm.col_in"] = kwargs.pop("col_in", None)
kwargs["sport_pitching_tm.col_ex"] = kwargs.pop("col_ex", None)
kwargs.update(
dict(
player_id=player_id,
season=season,
game_type=game_type,
league_list_id=league_list_id,
)
)
r = requests.get(full_path, params=kwargs)
return r.json()["sport_pitching_tm"]["queryResults"].get("row", {})
def main():
SEASON = 2019
teams = get_teams(SEASON)
players = []
for team in tqdm(teams):
roster = get_roster(
team["team_id"],
col_in=[
"name_full",
"team_name",
"team_id",
"primary_position",
"player_id",
],
)
players.extend(roster)
hitting_stats = []
pitching_stats = []
for player in tqdm(players):
try:
position_code = int(player["primary_position"])
except ValueError:
# non numeric position codes ("O" = outfield, "D" = designated hitter)
position_code = 10
if position_code == 1:
stats = get_season_pitching_stats(
player["player_id"],
season=SEASON,
col_in=[
"w",
"sv",
"hld",
"so",
"er",
"bb",
"h",
"ip",
"g",
"gs",
"team_full",
],
)
if isinstance(stats, list):
for team_stats in stats:
team_stats.update(player)
pitching_stats.append(team_stats)
else:
player.update(stats)
pitching_stats.append(player)
else:
stats = get_season_hitting_stats(
player["player_id"],
season=SEASON,
col_in=["r", "hr", "rbi", "h", "ab", "sb", "team_full"],
)
if isinstance(stats, list):
for team_stats in stats:
team_stats.update(player)
hitting_stats.append(team_stats)
else:
player.update(stats)
hitting_stats.append(player)
with open(f"data/hitting-stats-{SEASON}.json", "w") as f:
json.dump(hitting_stats, f)
with open(f"data/pitching-stats-{SEASON}.json", "w") as f:
json.dump(pitching_stats, f)
if __name__ == "__main__":
main()
|
import logging
from abc import ABC, abstractmethod
import torch.nn
logger = logging.getLogger(__name__)
class ArchitectureFactory(ABC):
""" Factory object that returns architectures (untrained models) for training. """
@abstractmethod
def new_architecture(self, **kwargs) -> torch.nn.Module:
"""
Returns a new architecture (untrained model)
:return: an untrained torch.nn.Module
"""
pass
def __eq__(self, other):
"""
Compares two Architecture factories by comparing the string representations of the Architectures
returned by the new_architecture() function
:param other: the ArchitectureFactory to compare against
:return: boolean indicating whether the architectures are the same or not
"""
my_arch_instance = self.new_architecture()
other_arch_instance = other.new_architecture()
# only keep the unique elements that are not part of the nn.Module
dir_nn_module = set(dir(torch.nn.Module))
dir_my_arch = set(dir(my_arch_instance)) - dir_nn_module
dir_other_arch = set(dir(other_arch_instance)) - dir_nn_module
if len(dir_my_arch) == len(dir_other_arch):
for item in dir_my_arch:
if item in dir_other_arch:
if item[0] != '_':
# compare the actual objects
my_item = getattr(my_arch_instance, item)
other_item = getattr(other_arch_instance, item)
# NOTE: here, we check whether the arch-factory is the same based on the string representation
# of a returned architecture.
# this could easily be error-prone, need to revisit how to make this more robust
if str(my_item) != str(other_item):
return False
else:
return False
else:
return False
return True
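# A minimal concrete factory sketch (assumption: the two-layer network below is purely
# illustrative and not part of the original module).
class _TinyMLPFactory(ArchitectureFactory):
    def new_architecture(self, **kwargs) -> torch.nn.Module:
        # Returns a fresh, untrained two-layer perceptron on every call.
        return torch.nn.Sequential(
            torch.nn.Linear(16, 32),
            torch.nn.ReLU(),
            torch.nn.Linear(32, 2)
        )
# Two factories that produce structurally identical architectures should compare equal
# under the string-based check in ArchitectureFactory.__eq__ above:
#     _TinyMLPFactory() == _TinyMLPFactory()   # expected: True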
|
import json
import sys
from pins.rsconnect.api import _HackyConnect
OUT_FILE = sys.argv[1]
def get_api_key(user, password, email):
rsc = _HackyConnect("http://localhost:3939")
return rsc.create_first_admin(user, password, email).api_key
api_keys = {
"admin": get_api_key("admin", "admin0", "admin@example.com"),
"susan": get_api_key("susan", "susan", "susan@example.com"),
"derek": get_api_key("derek", "derek", "derek@example.com"),
}
with open(OUT_FILE, "w") as f:
    json.dump(api_keys, f)
|
import logging
import os
from time import sleep
from urllib.parse import urljoin
import requests
import telegram
from dotenv import load_dotenv
logger = logging.getLogger('Devman notify logger')
class MyLogsHandler(logging.Handler):
def __init__(self, bot, tg_chat_id):
super().__init__()
self.bot = bot
self.tg_chat_id = tg_chat_id
def emit(self, record):
log_entry = self.format(record)
self.bot.send_message(chat_id=self.tg_chat_id, text=log_entry)
def get_response(url, params=None, headers=None):
response = requests.get(url, params=params, headers=headers)
response.raise_for_status()
return response
def check_verified_work(dvmn_token, tg_chat_id, bot, logger):
dvmn_api_url = 'https://dvmn.org/api/long_polling/'
headers = {
'Authorization': f'Token {dvmn_token}',
}
params = {
'timestamp': None,
}
while True:
try:
response = get_response(dvmn_api_url, headers=headers, params=params)
response_detail = response.json()
status = response_detail['status']
if status == 'found':
params['timestamp'] = response_detail['last_attempt_timestamp']
verified_works = response_detail['new_attempts']
for work in verified_works:
is_negative = work['is_negative']
lesson_title = work['lesson_title']
lesson_path = work['lesson_url']
send_tg_message(bot,
tg_chat_id,
is_negative,
lesson_title,
lesson_path)
elif status == 'timeout':
params['timestamp'] = response_detail['timestamp_to_request']
except Exception:
            logger.exception(msg='The bot crashed with an error:')
sleep(30)
def send_tg_message(bot, tg_chat_id, is_negative, lesson_title, lesson_path):
base_url = 'https://dvmn.org/modules/'
lesson_url = urljoin(base_url, lesson_path)
    if is_negative:
        text = f'Your work ["{lesson_title}"]({lesson_url}) has been reviewed\n' \
               'Unfortunately, mistakes were found in it\\.'
    else:
        text = f'Your work ["{lesson_title}"]({lesson_url}) has been reviewed\n' \
               'The teacher liked everything, ' \
               'you can move on to the next lesson\\!'
bot.send_message(
chat_id=tg_chat_id,
text=text,
parse_mode='MarkdownV2',
disable_web_page_preview=True
)
def main():
load_dotenv()
dvmn_token = os.getenv('DEVMAN_TOKEN')
tg_token = os.getenv('TG_NOTIFY_BOT_TOKEN')
tg_chat_id = os.getenv('TG_CHAT_ID')
bot = telegram.Bot(token=tg_token)
logger.setLevel(logging.INFO)
logger.addHandler(MyLogsHandler(bot, tg_chat_id))
    logger.info('The bot has started')
check_verified_work(dvmn_token, tg_chat_id, bot, logger)
if __name__ == '__main__':
main()
|
import os
import sys
import numpy as np
from flask import Flask, request, Response
sys.path.append("..")
from MCTS import MCTS
from dotsandboxes.DotsAndBoxesGame import DotsAndBoxesGame
from dotsandboxes.keras.NNet import NNetWrapper
from dotsandboxes.DotsAndBoxesPlayers import GreedyRandomPlayer
from utils import dotdict
app = Flask(__name__)
mcts = None
g = None
# curl -d "board=0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0" -X POST http://localhost:8888/predict
@app.route('/predict', methods=['POST'])
def predict():
board = np.fromstring(request.form['board'], sep=',').reshape(g.getBoardSize())
use_alpha_zero = True
if use_alpha_zero:
action = np.argmax(mcts.getActionProb(board, temp=0))
else:
action = GreedyRandomPlayer(g).play(board)
resp = Response(str(action))
# https://stackoverflow.com/questions/5584923/a-cors-post-request-works-from-plain-javascript-but-why-not-with-jquery
# https://stackoverflow.com/questions/25860304/how-do-i-set-response-headers-in-flask
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp
if __name__ == '__main__':
g = DotsAndBoxesGame(n=3)
n1 = NNetWrapper(g)
mcts = MCTS(g, n1, dotdict({'numMCTSSims': 50, 'cpuct': 1.0}))
n1.load_checkpoint(os.path.join('..', 'pretrained_models', 'dotsandboxes', 'keras', '3x3'), 'best.pth.tar')
app.run(debug=False, host='0.0.0.0', port=8888)
|
import os.path
from datetime import datetime
from whoosh.index import create_in
from whoosh.fields import Schema, TEXT, ID, DATETIME
schema = Schema(title=TEXT(stored=True), url=TEXT(stored=True), date=DATETIME(stored=True), content=TEXT, hash=ID(stored=True, unique=True))
if not os.path.exists("index"):
os.mkdir("index")
ix = create_in("index", schema)
import re
import requests
from bs4 import BeautifulSoup as bs4
with open('Safari Bookmarks.html') as f:
r = f.read()
h = bs4(r, "lxml")
links = h.find_all('a', href=re.compile('^http'))
print(len(links))
writer = ix.writer()
for i, link in enumerate(links):
    url = link.get('href')
    try:
        print(i, url)
        r = requests.get(url, timeout=5)
        writer.add_document(title=link.text, url=url, date=datetime.utcnow(), content=r.text)
    except Exception as exc:
        # Skip bookmarks that fail to download or index, but keep going.
        print('FAIL', i, url, exc)
writer.commit()
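# A follow-up sketch showing how the index built above could be queried (assumption: the
# search term "python" is illustrative and not part of the original script; a later
# session would reopen the index with whoosh.index.open_dir("index")).
from whoosh.qparser import QueryParser
with ix.searcher() as searcher:
    query = QueryParser("content", ix.schema).parse("python")
    for hit in searcher.search(query, limit=10):
        print(hit["title"], hit["url"])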
|
from enum import IntEnum
from tests.testcase import BaseTestCase
from clickhouse_driver import errors
class A(IntEnum):
hello = -1
world = 2
class B(IntEnum):
foo = -300
bar = 300
class EnumTestCase(BaseTestCase):
def test_simple(self):
columns = (
"a Enum8('hello' = -1, 'world' = 2), "
"b Enum16('foo' = -300, 'bar' = 300)"
)
data = [(A.hello, B.bar), (A.world, B.foo), (-1, 300), (2, -300)]
with self.create_table(columns):
self.client.execute(
'INSERT INTO test (a, b) VALUES', data
)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(
inserted, (
'hello\tbar\n'
'world\tfoo\n'
'hello\tbar\n'
'world\tfoo\n'
)
)
inserted = self.client.execute(query)
self.assertEqual(
inserted, [
('hello', 'bar'), ('world', 'foo'),
('hello', 'bar'), ('world', 'foo')
]
)
def test_enum_by_string(self):
columns = "a Enum8('hello' = 1, 'world' = 2)"
data = [('hello', ), ('world', )]
with self.create_table(columns):
self.client.execute(
'INSERT INTO test (a) VALUES', data
)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(
inserted, (
'hello\n'
'world\n'
)
)
inserted = self.client.execute(query)
self.assertEqual(inserted, data)
def test_errors(self):
columns = "a Enum8('test' = 1, 'me' = 2)"
data = [(A.world, )]
with self.create_table(columns):
with self.assertRaises(errors.LogicalError):
self.client.execute(
'INSERT INTO test (a) VALUES', data
)
columns = "a Enum8('test' = 1, 'me' = 2)"
data = [(3, )]
with self.create_table(columns):
with self.assertRaises(errors.LogicalError):
self.client.execute(
'INSERT INTO test (a) VALUES', data
)
def test_quote_in_name(self):
columns = "a Enum8(' \\' t = ' = -1, 'test' = 2)"
data = [(-1, ), (" \\' t = ", )]
with self.create_table(columns):
self.client.execute(
'INSERT INTO test (a) VALUES', data
)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(
inserted, (
" \\' t = \n"
" \\' t = \n"
)
)
inserted = self.client.execute(query)
self.assertEqual(inserted, [(" \\' t = ", ), (" \\' t = ", )])
def test_nullable(self):
columns = "a Nullable(Enum8('hello' = -1, 'world' = 2))"
data = [(None, ), (A.hello, ), (None, ), (A.world, )]
with self.create_table(columns):
self.client.execute(
'INSERT INTO test (a) VALUES', data
)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(
inserted, (
'\\N\nhello\n\\N\nworld\n'
)
)
inserted = self.client.execute(query)
self.assertEqual(
inserted, [
(None, ), ('hello', ), (None, ), ('world', ),
]
)
|
"""
Example website in Flask, with web forms.
This is a fully working example showing how to use web forms on a Flask-based website.
I've found this basic structure very useful in setting up annotation tools or demos.
"""
# Flask-related imports:
from flask import Flask, request, render_template
# Other imports go here:
from collections import Counter
from nltk.tokenize import sent_tokenize, word_tokenize
import csv
################################################################################
# Load data
with open('./static/data/Concreteness_ratings_Brysbaert_et_al_BRM.txt') as f:
reader = csv.DictReader(f, delimiter='\t')
concreteness_values = {entry['Word']: float(entry['Conc.M']) for entry in reader}
################################################################################
# Set up general functions
def type_token_ratio(text):
"Computes the type-token ratio and returns a dictionary with the results."
c = Counter()
for sentence in sent_tokenize(text.lower()):
c.update([word for word in word_tokenize(sentence) if len(word) > 1])
num_tokens = sum(c.values())
num_types = len(c)
return {'Types': num_types,
'Tokens': num_tokens,
'Ratio': float(num_types)/num_tokens}
def average_concreteness(text):
"""
Measures the average concreteness of a text by taking the average concreteness
of the word forms. Probably not a very good idea!
"""
words = [word for sentence in sent_tokenize(text.lower())
for word in word_tokenize(sentence)
if word in concreteness_values]
value = sum(concreteness_values[w] for w in words)/float(len(words))
return {'concreteness': value,
'words': set(words)}
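# A quick sanity-check sketch for the two helpers above (assumption: the sample text is
# illustrative; this function is not part of the original demo and is never called by Flask).
def _demo_text_stats():
    sample = "The quick brown fox jumps over the lazy dog. The dog sleeps."
    print(type_token_ratio(sample))      # type/token counts and their ratio
    print(average_concreteness(sample))  # mean concreteness of the recognised word forms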
################################################################################
# Set up app:
app = Flask(__name__)
################################################################################
# Webpage-related functions
@app.route('/', methods=['GET'])
def main_page():
"""
Function to display the main page. This is the first thing you see when you
load the website.
"""
return render_template('index.html')
@app.route('/type-token-ratio/', methods=['GET'])
def type_token_ratio_form():
"""
Display webpage with a form.
"""
return render_template('ttr.html')
@app.route('/type-token-results/', methods=['POST'])
def type_token_results():
"""
Show the page with submitted information.
"""
text = request.form['textfield']
ttr_dict = type_token_ratio(text)
    return render_template('ttr-results.html', ttr_data=ttr_dict)
@app.route('/concreteness/', methods=['GET'])
def concreteness_form():
"""
Display webpage with a form.
"""
return render_template('concreteness.html')
@app.route('/concreteness-results/', methods=['POST'])
def concreteness_results():
"""
Show the page with submitted information.
"""
text = request.form['textfield']
results = average_concreteness(text)
    return render_template('concreteness-results.html', concreteness_data=results)
################################################################################
# Running the website
if __name__ == '__main__':
app.debug = True
app.run()
|
from django.apps import apps
from django.db.models.signals import post_migrate
from django.utils.translation import ugettext_lazy as _
from mayan.apps.acls.classes import ModelPermission
from mayan.apps.acls.links import link_acl_list
from mayan.apps.acls.permissions import permission_acl_edit, permission_acl_view
from mayan.apps.common.apps import MayanAppConfig
from mayan.apps.common.classes import (
MissingItem, ModelCopy, ModelField, ModelFieldRelated, ModelProperty,
ModelQueryFields
)
from mayan.apps.common.menus import (
menu_facet, menu_list_facet, menu_main, menu_object, menu_return,
menu_secondary, menu_setup, menu_multi_item
)
from mayan.apps.common.signals import signal_post_initial_setup
from mayan.apps.converter.classes import AppImageErrorImage
from mayan.apps.converter.layers import layer_decorations
from mayan.apps.converter.links import link_transformation_list
from mayan.apps.converter.permissions import (
permission_transformation_create,
permission_transformation_delete, permission_transformation_edit,
permission_transformation_view,
)
from mayan.apps.dashboards.dashboards import dashboard_main
from mayan.apps.events.classes import EventModelRegistry, ModelEventType
from mayan.apps.events.permissions import permission_events_view
from mayan.apps.file_caching.links import link_cache_partition_purge
from mayan.apps.file_caching.permissions import permission_cache_partition_purge
from mayan.apps.navigation.classes import SourceColumn
from mayan.apps.rest_api.fields import DynamicSerializerField
from mayan.apps.templating.classes import AJAXTemplate
from mayan.apps.views.html_widgets import TwoStateWidget
from .dashboard_widgets import (
DashboardWidgetDocumentFilePagesTotal, DashboardWidgetDocumentsInTrash,
DashboardWidgetDocumentsNewThisMonth,
DashboardWidgetDocumentsPagesNewThisMonth, DashboardWidgetDocumentsTotal,
DashboardWidgetDocumentsTypesTotal,
)
# Documents
from .events import (
event_document_created, event_document_edited, event_document_viewed,
event_trashed_document_deleted, event_trashed_document_restored
)
# Document files
from .events import (
event_document_file_created, event_document_file_deleted,
event_document_file_downloaded, event_document_file_edited
)
# Document types
from .events import (
event_document_type_changed, event_document_type_edited,
event_document_type_quick_label_created,
event_document_type_quick_label_deleted,
event_document_type_quick_label_edited
)
# Document versions
from .events import (
event_document_version_created, event_document_version_deleted,
event_document_version_edited, event_document_version_exported,
event_document_version_page_created, event_document_version_page_deleted,
event_document_version_page_edited,
)
from .handlers import (
handler_create_default_document_type,
handler_create_document_file_page_image_cache,
handler_create_document_version_page_image_cache
)
from .html_widgets import ThumbnailWidget
from .links.document_links import (
link_document_type_change, link_document_properties_edit,
link_document_list, link_document_recently_accessed_list,
link_document_recently_created_list, link_document_multiple_type_change,
link_document_preview, link_document_properties
)
from .links.document_file_links import (
link_document_file_delete, link_document_file_delete_multiple,
link_document_file_download_quick, link_document_file_edit,
link_document_file_list, link_document_file_preview,
link_document_file_print_form, link_document_file_properties,
link_document_file_return_to_document, link_document_file_return_list,
link_document_file_transformations_clear,
link_document_file_multiple_transformations_clear,
link_document_file_transformations_clone
)
from .links.document_file_page_links import (
link_document_file_multiple_page_count_update,
link_document_file_page_count_update, link_document_file_page_list,
link_document_file_page_navigation_first,
link_document_file_page_navigation_last,
link_document_file_page_navigation_next,
link_document_file_page_navigation_previous,
link_document_file_page_return_to_document,
link_document_file_page_return_to_document_file,
link_document_file_page_return_to_document_file_page_list,
link_document_file_page_rotate_left, link_document_file_page_rotate_right,
link_document_file_page_view, link_document_file_page_view_reset,
link_document_file_page_zoom_in, link_document_file_page_zoom_out
)
from .links.document_type_links import (
link_document_type_create, link_document_type_delete,
link_document_type_edit, link_document_type_filename_create,
link_document_type_filename_delete, link_document_type_filename_edit,
link_document_type_filename_list, link_document_type_filename_generator,
link_document_type_list, link_document_type_policies,
link_document_type_setup
)
from .links.document_version_links import (
link_document_version_active, link_document_version_create,
link_document_version_delete, link_document_version_edit,
link_document_version_export, link_document_version_list,
link_document_version_multiple_delete, link_document_version_return_list,
link_document_version_return_to_document, link_document_version_preview,
link_document_version_print_form,
link_document_version_transformations_clear,
link_document_version_multiple_transformations_clear,
link_document_version_transformations_clone
)
from .links.document_version_page_links import (
link_document_version_page_delete, link_document_version_page_list,
link_document_version_page_list_remap,
link_document_version_page_list_reset,
link_document_version_page_navigation_first,
link_document_version_page_navigation_last,
link_document_version_page_navigation_next,
link_document_version_page_navigation_previous,
link_document_version_page_return_to_document,
link_document_version_page_return_to_document_version,
link_document_version_page_return_to_document_version_page_list,
link_document_version_page_rotate_left,
link_document_version_page_rotate_right, link_document_version_page_view,
link_document_version_page_view_reset,
link_document_version_page_zoom_in, link_document_version_page_zoom_out
)
from .links.favorite_links import (
link_document_favorites_add, link_document_favorites_remove,
link_document_list_favorites, link_document_multiple_favorites_add,
link_document_multiple_favorites_remove
)
from .links.trashed_document_links import (
link_document_delete, link_document_list_deleted,
link_document_multiple_delete, link_document_multiple_restore,
link_document_multiple_trash, link_document_restore, link_document_trash,
link_trash_can_empty
)
from .literals import (
IMAGE_ERROR_NO_ACTIVE_VERSION, IMAGE_ERROR_NO_VERSION_PAGES
)
from .menus import menu_documents
# Documents
from .permissions import (
permission_document_create, permission_document_edit,
permission_document_properties_edit, permission_document_tools,
permission_document_trash, permission_document_view
)
# DocumentFile
from .permissions import (
permission_document_file_delete, permission_document_file_download,
permission_document_file_edit, permission_document_file_new,
permission_document_file_print, permission_document_file_tools,
permission_document_file_view
)
# DocumentType
from .permissions import (
permission_document_type_delete, permission_document_type_edit,
permission_document_type_view
)
# DocumentVersion
from .permissions import (
permission_document_version_create, permission_document_version_delete,
permission_document_version_edit, permission_document_version_export,
permission_document_version_print, permission_document_version_view
)
# TrashedDocument
from .permissions import (
permission_trashed_document_delete, permission_trashed_document_restore
)
from .statistics import * # NOQA
class DocumentsApp(MayanAppConfig):
app_namespace = 'documents'
app_url = 'documents'
has_rest_api = True
has_tests = True
name = 'mayan.apps.documents'
verbose_name = _('Documents')
def ready(self):
super().ready()
Document = self.get_model(model_name='Document')
DocumentSearchResult = self.get_model(
model_name='DocumentSearchResult'
)
DocumentFile = self.get_model(model_name='DocumentFile')
DocumentFilePage = self.get_model(model_name='DocumentFilePage')
DocumentFileSearchResult = self.get_model(
model_name='DocumentFileSearchResult'
)
DocumentFilePageSearchResult = self.get_model(
model_name='DocumentFilePageSearchResult'
)
DocumentType = self.get_model(model_name='DocumentType')
DocumentTypeFilename = self.get_model(
model_name='DocumentTypeFilename'
)
DocumentVersion = self.get_model(model_name='DocumentVersion')
DocumentVersionSearchResult = self.get_model(
model_name='DocumentVersionSearchResult'
)
DocumentVersionPage = self.get_model(model_name='DocumentVersionPage')
DocumentVersionPageSearchResult = self.get_model(
model_name='DocumentVersionPageSearchResult'
)
DownloadFile = apps.get_model(
app_label='storage', model_name='DownloadFile'
)
FavoriteDocument = self.get_model(
model_name='FavoriteDocument'
)
RecentlyAccessedDocument = self.get_model(
model_name='RecentlyAccessedDocument'
)
RecentlyCreatedDocument = self.get_model(
model_name='RecentlyCreatedDocument'
)
TrashedDocument = self.get_model(model_name='TrashedDocument')
AppImageErrorImage(
name=IMAGE_ERROR_NO_ACTIVE_VERSION,
template_name='documents/errors/no_valid_version.html'
)
AppImageErrorImage(
name=IMAGE_ERROR_NO_VERSION_PAGES,
template_name='documents/errors/no_version_pages.html'
)
AJAXTemplate(
name='invalid_document',
template_name='documents/invalid_document.html'
)
link_decorations_list = link_transformation_list.copy(
layer=layer_decorations
)
link_decorations_list.text = _('Decorations')
DownloadFile.objects.register_content_object(model=Document)
DynamicSerializerField.add_serializer(
klass=Document,
serializer_class='mayan.apps.documents.serializers.document_serializers.DocumentSerializer'
)
EventModelRegistry.register(model=Document)
EventModelRegistry.register(model=DocumentFile)
EventModelRegistry.register(model=DocumentFilePage)
EventModelRegistry.register(model=DocumentType)
EventModelRegistry.register(model=DocumentTypeFilename)
EventModelRegistry.register(model=DocumentVersion)
EventModelRegistry.register(model=DocumentVersionPage)
EventModelRegistry.register(model=TrashedDocument)
MissingItem(
label=_('Create a document type'),
description=_(
'Every uploaded document must be assigned a document type, '
'it is the basic way Mayan EDMS categorizes documents.'
), condition=lambda: not DocumentType.objects.exists(),
view='documents:document_type_list'
)
ModelCopy(model=DocumentTypeFilename).add_fields(
field_names=(
'document_type', 'filename', 'enabled'
)
)
ModelCopy(
model=DocumentType, bind_link=True, register_permission=True
).add_fields(
field_names=(
'label', 'trash_time_period', 'trash_time_unit',
'delete_time_period', 'delete_time_unit', 'filenames'
)
)
ModelCopy(
model=DocumentVersion, bind_link=True, register_permission=True
).add_fields(
field_names=(
'document', 'timestamp', 'comment', 'version_pages',
)
)
ModelCopy(
model=DocumentVersionPage, bind_link=True, register_permission=True
).add_fields(
field_names=(
'document_version', 'page_number', 'content_type', 'object_id',
)
)
ModelEventType.register(
model=Document, event_types=(
event_document_edited, event_document_type_changed,
event_document_file_deleted, event_document_version_deleted,
event_document_viewed, event_trashed_document_restored
)
)
ModelEventType.register(
model=DocumentFile, event_types=(
event_document_file_created, event_document_file_downloaded,
event_document_file_edited
)
)
ModelEventType.register(
model=DocumentType, event_types=(
event_document_created,
event_document_type_edited,
event_document_type_quick_label_created,
event_trashed_document_deleted
)
)
ModelEventType.register(
model=DocumentTypeFilename, event_types=(
event_document_type_quick_label_deleted,
event_document_type_quick_label_edited
)
)
ModelEventType.register(
model=DocumentVersion, event_types=(
event_document_version_created,
event_document_version_edited,
event_document_version_exported,
event_document_version_page_created
)
)
ModelEventType.register(
model=DocumentVersionPage, event_types=(
event_document_version_page_deleted,
event_document_version_page_edited
)
)
ModelField(model=Document, name='description')
ModelField(model=Document, name='datetime_created')
ModelField(model=Document, name='trashed_date_time')
ModelField(
model=Document, name='document_type'
)
ModelField(model=Document, name='in_trash')
ModelField(model=Document, name='is_stub')
ModelField(model=Document, name='label')
ModelField(model=Document, name='language')
ModelField(model=Document, name='uuid')
ModelFieldRelated(model=Document, name='document_type__label')
ModelFieldRelated(
model=Document,
name='files__checksum'
)
ModelFieldRelated(
model=Document, label=_('File comments'),
name='files__comment'
)
ModelFieldRelated(
model=Document, label=_('File encodings'),
name='files__encoding'
)
ModelFieldRelated(
model=Document, label=_('File mime types'),
name='files__mimetype'
)
ModelFieldRelated(
model=Document, label=_('File timestamps'),
name='files__timestamp'
)
ModelField(
model=DocumentFilePage, label=_('Document file'),
name='document_file'
)
ModelField(
model=DocumentFilePage, label=_('Page number'),
name='page_number'
)
ModelProperty(
description=_('Return the latest file of the document.'),
model=Document, label=_('Latest file'), name='latest_file'
)
ModelProperty(
description=_('Return the document instance.'),
model=DocumentFilePage, label=_('Document'), name='document'
)
ModelPermission.register(
model=Document, permissions=(
permission_acl_edit, permission_acl_view,
permission_document_edit, permission_document_file_new,
permission_document_properties_edit,
permission_document_tools,
permission_document_trash, permission_document_view,
permission_document_version_create, permission_events_view,
permission_trashed_document_delete,
permission_trashed_document_restore,
)
)
ModelPermission.register(
model=DocumentFile, permissions=(
permission_acl_edit, permission_acl_view,
permission_cache_partition_purge,
permission_document_file_delete,
permission_document_file_download,
permission_document_file_edit,
permission_document_file_print,
permission_document_file_tools,
permission_document_file_view,
permission_events_view, permission_transformation_create,
permission_transformation_delete,
permission_transformation_edit,
permission_transformation_view
)
)
ModelPermission.register(
model=DocumentType, permissions=(
permission_document_create, permission_document_type_delete,
permission_document_type_edit, permission_document_type_view,
permission_acl_edit, permission_acl_view,
permission_events_view,
)
)
ModelPermission.register(
model=DocumentVersion, permissions=(
permission_acl_edit, permission_acl_view,
permission_cache_partition_purge,
permission_document_version_delete,
permission_document_version_edit,
permission_document_version_export,
permission_document_version_print,
permission_document_version_view,
permission_events_view, permission_transformation_create,
permission_transformation_delete,
permission_transformation_edit,
permission_transformation_view
)
)
ModelPermission.register_inheritance(
model=Document, related='document_type',
)
ModelPermission.register_inheritance(
model=DocumentFile, related='document',
)
ModelPermission.register_inheritance(
model=DocumentFile, related='document__document_type',
)
ModelPermission.register_inheritance(
model=DocumentFilePage, related='document_file',
)
ModelPermission.register_inheritance(
model=DocumentVersion, related='document',
)
ModelPermission.register_inheritance(
model=DocumentVersion, related='document__document_type',
)
ModelPermission.register_inheritance(
model=RecentlyAccessedDocument, related='document',
)
ModelPermission.register_inheritance(
model=DocumentVersionPage, related='document_version',
)
ModelPermission.register_inheritance(
model=DocumentTypeFilename, related='document_type',
)
ModelPermission.register_inheritance(
model=FavoriteDocument, related='document',
)
model_query_fields_document = ModelQueryFields(model=Document)
model_query_fields_document.add_prefetch_related_field(
field_name='files'
)
model_query_fields_document.add_prefetch_related_field(
field_name='files__file_pages'
)
model_query_fields_document.add_select_related_field(
field_name='document_type'
)
model_query_fields_document_file = ModelQueryFields(model=DocumentFile)
model_query_fields_document_file.add_prefetch_related_field(
field_name='file_pages'
)
model_query_fields_document_file.add_select_related_field(
field_name='document'
)
model_query_fields_document_file_page = ModelQueryFields(
model=DocumentFilePage
)
model_query_fields_document_file_page.add_select_related_field(
field_name='document_file'
)
model_query_fields_document_version = ModelQueryFields(
model=DocumentVersion
)
model_query_fields_document_version.add_prefetch_related_field(
field_name='version_pages'
)
model_query_fields_document_version.add_select_related_field(
field_name='document'
)
# Document
SourceColumn(
attribute='get_label', is_object_absolute_url=True,
is_identifier=True, is_sortable=True, sort_field='label',
source=Document
)
SourceColumn(
html_extra_classes='text-center document-thumbnail-list',
label=_('Thumbnail'), order=-99, source=Document,
widget=ThumbnailWidget
)
SourceColumn(
attribute='document_type', include_label=True, is_sortable=True,
label=_('Type'), order=-9, source=Document
)
SourceColumn(
func=lambda context: context['object'].pages.count(),
label=_('Pages'), include_label=True, order=-8, source=Document
)
# RecentlyCreatedDocument
SourceColumn(
attribute='datetime_created', include_label=True,
is_sortable=True, source=RecentlyCreatedDocument
)
# DocumentFile
SourceColumn(
source=DocumentFile, attribute='filename', is_identifier=True,
is_object_absolute_url=True
)
SourceColumn(
html_extra_classes='text-center document-thumbnail-list',
label=_('Thumbnail'), order=-99, source=DocumentFile,
widget=ThumbnailWidget
)
SourceColumn(
func=lambda context: context['object'].pages.count(),
include_label=True, label=_('Pages'), order=-6,
source=DocumentFile
)
SourceColumn(
attribute='comment', is_sortable=True, order=-7,
source=DocumentFile
)
SourceColumn(
attribute='encoding', include_label=True, is_sortable=True,
order=-8, source=DocumentFile
)
SourceColumn(
attribute='mimetype', include_label=True, is_sortable=True,
order=-9, source=DocumentFile
)
# DocumentFilePage
SourceColumn(
attribute='get_label', is_identifier=True,
is_object_absolute_url=True, source=DocumentFilePage,
)
SourceColumn(
html_extra_classes='text-center document-thumbnail-list',
label=_('Thumbnail'), order=-99, source=DocumentFilePage,
widget=ThumbnailWidget
)
# DocumentType
SourceColumn(
attribute='label', is_identifier=True, is_sortable=True,
source=DocumentType
)
SourceColumn(
func=lambda context: context['object'].get_document_count(
user=context['request'].user
), include_label=True, label=_('Documents'), source=DocumentType
)
SourceColumn(
attribute='filename', is_identifier=True, is_sortable=True,
source=DocumentTypeFilename
)
SourceColumn(
attribute='enabled', include_label=True, is_sortable=True,
source=DocumentTypeFilename, widget=TwoStateWidget
)
# DocumentVersion
SourceColumn(
source=DocumentVersion, attribute='get_label', is_identifier=True,
is_object_absolute_url=True
)
SourceColumn(
html_extra_classes='text-center document-thumbnail-list',
label=_('Thumbnail'), order=-99, source=DocumentVersion,
widget=ThumbnailWidget
)
SourceColumn(
func=lambda context: context['object'].pages.count(),
include_label=True, label=_('Pages'), order=-8,
source=DocumentVersion
)
SourceColumn(
attribute='active', include_label=True, is_sortable=True,
order=-9, source=DocumentVersion, widget=TwoStateWidget
)
SourceColumn(
attribute='comment', include_label=True, is_sortable=True,
order=-7, source=DocumentVersion
)
# DocumentVersionPage
SourceColumn(
attribute='get_label', is_identifier=True,
is_object_absolute_url=True, source=DocumentVersionPage,
)
SourceColumn(
html_extra_classes='text-center document-thumbnail-list',
label=_('Thumbnail'), order=-99, source=DocumentVersionPage,
widget=ThumbnailWidget
)
# TrashedDocument
SourceColumn(
attribute='label', is_identifier=True, is_sortable=True,
source=TrashedDocument
)
SourceColumn(
attribute='trashed_date_time', include_label=True, order=99,
source=TrashedDocument
)
dashboard_main.add_widget(
widget=DashboardWidgetDocumentsTotal, order=0
)
dashboard_main.add_widget(
widget=DashboardWidgetDocumentFilePagesTotal, order=1
)
dashboard_main.add_widget(
widget=DashboardWidgetDocumentsInTrash, order=2
)
dashboard_main.add_widget(
widget=DashboardWidgetDocumentsTypesTotal, order=3
)
dashboard_main.add_widget(
widget=DashboardWidgetDocumentsNewThisMonth, order=4
)
dashboard_main.add_widget(
widget=DashboardWidgetDocumentsPagesNewThisMonth, order=5
)
menu_documents.bind_links(
links=(
link_document_recently_accessed_list,
link_document_recently_created_list, link_document_list_favorites,
link_document_list, link_document_list_deleted
)
)
menu_main.bind_links(links=(menu_documents,), position=0)
menu_setup.bind_links(links=(link_document_type_setup,))
# Document
menu_facet.bind_links(
links=(link_acl_list,), sources=(Document,)
)
menu_facet.bind_links(
links=(link_document_preview,), sources=(Document,), position=0
)
menu_facet.bind_links(
links=(link_document_properties,), sources=(Document,), position=2
)
menu_facet.bind_links(
links=(
link_document_file_list, link_document_version_list
), sources=(Document,), position=2
)
menu_object.bind_links(
links=(
link_document_favorites_add, link_document_favorites_remove,
link_document_properties_edit, link_document_type_change,
link_document_trash
), sources=(Document,)
)
menu_multi_item.bind_links(
links=(
link_document_multiple_favorites_add,
link_document_multiple_favorites_remove,
link_document_multiple_trash,
link_document_multiple_type_change
), sources=(Document,)
)
menu_secondary.bind_links(
links=(link_document_version_create,),
sources=(
'documents:document_version_create',
'documents:document_version_list'
)
)
# DocumentFile
menu_list_facet.bind_links(
links=(
link_document_file_page_list, link_document_file_properties,
link_document_file_preview, link_acl_list
), sources=(DocumentFile,)
)
menu_multi_item.bind_links(
links=(
link_document_file_delete_multiple,
link_document_file_multiple_page_count_update,
link_document_file_multiple_transformations_clear,
), sources=(DocumentFile,)
)
menu_object.bind_links(
links=(
link_cache_partition_purge,
link_document_file_delete,
link_document_file_download_quick,
link_document_file_edit,
link_document_file_page_count_update,
link_document_file_print_form,
link_document_file_transformations_clear,
link_document_file_transformations_clone
),
sources=(DocumentFile,)
)
menu_return.bind_links(
links=(
link_document_file_return_list,
link_document_file_return_to_document,
), sources=(DocumentFile,)
)
# DocumentFilePages
menu_facet.add_unsorted_source(source=DocumentFilePage)
menu_facet.bind_links(
links=(
link_document_file_page_rotate_left,
link_document_file_page_rotate_right, link_document_file_page_zoom_in,
link_document_file_page_zoom_out, link_document_file_page_view_reset
), sources=('documents:document_file_page_view',)
)
menu_facet.bind_links(
links=(
link_document_file_page_view,
link_document_file_page_navigation_first,
link_document_file_page_navigation_previous,
link_document_file_page_navigation_next,
link_document_file_page_navigation_last
), sources=(DocumentFilePage,)
)
menu_list_facet.bind_links(
links=(link_decorations_list, link_transformation_list),
sources=(DocumentFilePage,)
)
menu_return.bind_links(
links=(
link_document_file_page_return_to_document,
link_document_file_page_return_to_document_file,
link_document_file_page_return_to_document_file_page_list
),
sources=(DocumentFilePage,)
)
# DocumentType
menu_list_facet.bind_links(
links=(
link_document_type_filename_list,
link_document_type_policies,
link_document_type_filename_generator, link_acl_list
), sources=(DocumentType,)
)
menu_object.bind_links(
links=(
link_document_type_delete, link_document_type_edit
), sources=(DocumentType,)
)
menu_object.bind_links(
links=(
link_document_type_filename_edit,
link_document_type_filename_delete
), sources=(DocumentTypeFilename,)
)
menu_secondary.bind_links(
links=(link_document_type_list, link_document_type_create),
sources=(
DocumentType, 'documents:document_type_create',
'documents:document_type_list'
)
)
menu_secondary.bind_links(
links=(link_document_type_filename_create,),
sources=(
DocumentTypeFilename, 'documents:document_type_filename_list',
'documents:document_type_filename_create'
)
)
menu_secondary.bind_links(
links=(link_trash_can_empty,),
sources=(
'documents:document_list_deleted', 'documents:trash_can_empty'
)
)
# DocumentVersion
menu_list_facet.bind_links(
links=(
link_document_version_page_list,
link_document_version_preview, link_acl_list
),
sources=(DocumentVersion,)
)
menu_multi_item.bind_links(
links=(
link_document_version_multiple_delete,
link_document_version_multiple_transformations_clear,
), sources=(DocumentVersion,)
)
menu_object.bind_links(
links=(
link_document_version_active,
link_cache_partition_purge,
link_document_version_delete, link_document_version_edit,
link_document_version_export,
link_document_version_page_list_remap,
link_document_version_page_list_reset,
link_document_version_print_form,
link_document_version_transformations_clear,
link_document_version_transformations_clone
),
sources=(DocumentVersion,)
)
menu_return.bind_links(
links=(
link_document_version_return_list,
link_document_version_return_to_document,
), sources=(DocumentVersion,)
)
# DocumentVersionPage
menu_facet.add_unsorted_source(source=DocumentVersionPage)
menu_facet.bind_links(
links=(
link_document_version_page_rotate_left,
link_document_version_page_rotate_right, link_document_version_page_zoom_in,
link_document_version_page_zoom_out, link_document_version_page_view_reset
), sources=('documents:document_version_page_view',)
)
menu_facet.bind_links(
links=(link_document_version_page_view,),
sources=(DocumentVersionPage,)
)
menu_facet.bind_links(
links=(
link_document_version_page_navigation_first,
link_document_version_page_navigation_previous,
link_document_version_page_navigation_next,
link_document_version_page_navigation_last
), sources=(DocumentVersionPage,)
)
menu_list_facet.bind_links(
links=(
link_decorations_list, link_transformation_list,
), sources=(DocumentVersionPage, DocumentVersionPageSearchResult)
)
menu_object.bind_links(
links=(
link_document_version_page_delete,
), sources=(DocumentVersionPage, DocumentVersionPageSearchResult)
)
menu_return.bind_links(
links=(
link_document_version_page_return_to_document,
link_document_version_page_return_to_document_version,
link_document_version_page_return_to_document_version_page_list
), sources=(DocumentVersionPage,)
)
# Trashed documents
menu_object.bind_links(
links=(link_document_restore, link_document_delete),
sources=(TrashedDocument,)
)
menu_multi_item.bind_links(
links=(
link_document_multiple_restore, link_document_multiple_delete
), sources=(TrashedDocument,)
)
# RecentlyAccessedDocument
menu_multi_item.add_proxy_inclusions(source=RecentlyAccessedDocument)
# RecentlyCreatedDocument
menu_multi_item.add_proxy_inclusions(source=RecentlyCreatedDocument)
# Search proxies
menu_multi_item.add_proxy_inclusions(source=DocumentSearchResult)
menu_multi_item.add_proxy_inclusions(source=DocumentFileSearchResult)
menu_multi_item.add_proxy_inclusions(source=DocumentFilePageSearchResult)
menu_multi_item.add_proxy_inclusions(source=DocumentVersionSearchResult)
menu_multi_item.add_proxy_inclusions(source=DocumentVersionPageSearchResult)
post_migrate.connect(
dispatch_uid='documents_handler_create_document_file_page_image_cache',
receiver=handler_create_document_file_page_image_cache,
)
post_migrate.connect(
dispatch_uid='documents_handler_create_document_version_page_image_cache',
receiver=handler_create_document_version_page_image_cache,
)
signal_post_initial_setup.connect(
dispatch_uid='documents_handler_create_default_document_type',
receiver=handler_create_default_document_type
)
|
from .slack import KolgaSlackPlugin as Plugin # noqa: F401
|
#!/usr/bin/env python
c = 33675163320727339095014837582281323840051859854289916825117629551395370823659955814681282315463016124619404443441584421368620981293595007310032397095407298209578830182966433145009839000500503505979590280517585607094925442822091170832715599815430442356320114799995161041937865531772135980336997473773429429086
d = 74278208785024315749807300218924629732502923680228619511462236558183776751361343293993621339853496766331791519725013394800726769158182794087434911520481799246725921165954305171248575241522445758705527418961719889196166171796655679663085169489959240948138263073235805073099696838798038921104605272921914036481
n = 109765067287743547347068051916563247396312072633681549458222295819831116303726540744063224914879222024737572906451514190963837522161036906719615364412226129324176248131661408793388229223225663229915266782027908206640058835805543815172008872943671770113736018905952701500189580152812093796295555869648045713209
m = pow(c, d, n)
# Convert the decrypted integer to bytes and decode as text (Python 3 equivalent of the
# original Python 2 hex-string .decode('hex')).
print(m.to_bytes((m.bit_length() + 7) // 8, 'big').decode())
|
from django import forms
class ContactForm(forms.Form):
name = forms.CharField(max_length=20, required=True, label='name', widget=forms.TextInput(attrs={'class':'form-field-control-above', 'placeholder':'Name'}))
email = forms.EmailField(required=True, label='email', widget=forms.TextInput(attrs={'class':'form-field-control-above', 'placeholder':'Email'}))
subject = forms.CharField(max_length=100, required=True, label='subject', widget=forms.TextInput(attrs={'class':'form-field-control-above','placeholder':'Subject'}))
message = forms.CharField(widget=forms.Textarea(attrs={'class':'form-field-control-below','placeholder':'Your message...'}), required=True, label='message')
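# A hedged usage sketch (assumption: the view below, the 'contact.html' template and the
# redirect target name are illustrative and not part of the original module; a view like
# this would normally live in views.py rather than next to the form).
from django.shortcuts import redirect, render
def contact_view(request):
    form = ContactForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        # form.cleaned_data holds the validated name, email, subject and message,
        # e.g. to be passed on to django.core.mail.send_mail.
        return redirect('contact')
    return render(request, 'contact.html', {'form': form})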
|
from sqlalchemy.inspection import inspect
from sqlalchemy.orm import session
from sqlalchemy.sql.expression import null, table
from sqlalchemy.sql.functions import count
class GenericRepo:
def __init__(self, session, table):
self.session = session
self.table = table
def commit(self):
"""
        Commit the session's pending changes to the DB
"""
self.session.commit()
def getAll(self):
"""
        Get all rows of the table \n
Return: list of rows
"""
return self.session.query(self.table).all()
def getById(self, id):
"""
Get one row by the ID \n
        Param: the ID (primary key) \n
Return: one row
"""
return self.session.query(self.table).get(id)
def getAllFilter(self, filter):
"""
        Get the rows matching a filter \n
        Param: the filter \n
        Return: list of rows
"""
return self.session.query(self.table).filter(filter)
def getFirst(self, filter):
"""
Get the first element with a filter \n
Param: the filter \n
Return: one row
"""
return self.session.query(self.table).filter(filter).first()
def count(self):
"""
        Count all rows of the table \n
Return: an integer
"""
return self.session.query(self.table).count()
def countFilter(self, filter):
"""
        Count the rows matching the filter \n
        Param: the filter \n
        Return: an integer
"""
return self.session.query(self.table).filter(filter).count()
def insert(self, object, commit = True):
"""
        Insert the new object into the table \n
Param: the new object \n
\t commit (default `True`) \n
Return: If commit is True, return the new object with the new ID \n
        \t else return None
"""
self.session.add(object)
if(commit):
self.session.commit()
self.session.refresh(object)
return object
return None
def delete(self, filter, commit = True):
"""
        Delete the rows matching the filter \n
        Param: the filter \n
        \t commit (default `True`) \n
        Return: list of deleted rows
"""
del_ = self.session.query(self.table).filter(filter)
for row in del_:
self.session.delete(row)
if (commit):
self.session.commit()
return del_
def deleteById(self, id, commit = True):
"""
        Delete the row with the given ID \n
        Param: the ID (primary key) \n
        \t commit (default `True`) \n
        Return: the deleted object
"""
object = self.getById(id)
self.session.delete(object)
if (commit):
self.session.commit()
return object
def update(self, update, filter = None, commit = True):
"""
        Update the table, optionally restricted by a filter \n
        Param: dict of fields to update \n
        \t the filter (can be None) \n
        \t the commit (default `True`) \n
        Return: list of updated rows
"""
if(filter is None):
up = self.session.query(self.table).all()
else :
up = self.session.query(self.table).filter(filter)
for row in up:
for key in update.keys():
setattr(row, key.key, update[key])
self.session.add(row)
if (commit):
self.session.commit()
return up
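# A minimal usage sketch (assumption: the User model and the in-memory SQLite engine below
# are illustrative and not part of the original module; the declarative_base import path
# assumes SQLAlchemy 1.4+).
if __name__ == '__main__':
    from sqlalchemy import create_engine, Column, Integer, String
    from sqlalchemy.orm import declarative_base, sessionmaker
    Base = declarative_base()
    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        name = Column(String)
    engine = create_engine('sqlite:///:memory:')
    Base.metadata.create_all(engine)
    repo = GenericRepo(sessionmaker(bind=engine)(), User)
    repo.insert(User(name='alice'))                      # commits and refreshes the new row
    print(repo.count())                                  # -> 1
    print(repo.getFirst(User.name == 'alice').name)      # -> alice
    repo.update({User.name: 'bob'}, User.name == 'alice')
    print(repo.getAll()[0].name)                         # -> bob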
|
'''
This module has our implementation of CG with error estimates
'''
import numpy as np
from scipy import linalg
def cgqs(A, b, x, mu = None, tol = 1e-6, max_it = None,\
delay = 5,reorth = False, NormA = None, xTrue = None):
'''Iteratively computes solution to Ax = b with error estimates.
    This program iteratively solves a symmetric positive definite system
    of linear equations with the Conjugate Gradient method.
Additionally, it computes the S statistic mean and standard deviation
and a Gauss-Radau error estimate. If a lower bound to the
smallest eigenvalue of A is provided, the Gauss-Radau error bound is
computed with the CGQ algorithm [1]. If a bound is not provided, then
the approximation to the Gauss-Radau bound is computed [2].
Parameters
----------
A : function
Function that computes matvec of matrix A
b : numpy array
Vector b
x : numpy array
Initial guess for x
mu : float or None, optional, Default is None
Lower bound for smallest eigenvalue of A
If mu is supplied the Gauss-Radau error bound [1] is computed
If mu is None, the Gauss-Radau approximation [2] is computed
tol : float or None, optional, Default is 1e-6
Convergence tolerance.
If None then program iterates until maximum iterations
max_it : int, optional, default is size of A
Maximum iteration count
delay : int, optional, default is 5
Delay for computing error estimates
reorth : bool, optional, default is False
Whether to reorthogonalize
NormA : float or None, optional, default is None
2-norm of A
If supplied, residual is ||r||/(||A|| ||x_m||)
If not, residual is ||r||/||b||
xTrue : numpy array or None, optional, default is None
True solution of linear system
Returns
-------
x : numpy array
Approximate solution
info : dict
Dictionary containing convergence information
Dictionary keys always returned:
'res' : Residual history
'sExp' : Expected value of S statistic at each iteration
'sSD' : Standard deviation of S statistic at each iteration
'GRApprox' : Gauss Radau approximation from [2]
Dictionary keys returned if mu is supplied:
'GaussRadau' : Gauss Radau bound computed with CGQ [1]
Additional keys if xTrue is supplied
'err' : Error history
'actual_res' : Actual residual, b-Ax, history
(as opposed to recursively computed residual)
References
----------
[1] Meurant, G and Tichy, P. "On computing quadrature-based bounds
for the A-norm of the error in conjugate gradients"
DOI = 10.1007/s11075-012-9591-9
[2] Meurant, G and Tichy, P. "Approximating the extreme Ritz values
and upper bounds for the A-norm of the error in CG"
DOI = 10.1007/s11075-018-0634-8
'''
#
#Here we define the variables
#
#Size of the system
N = len(x)
#Default Maximum Iterations
if max_it is None:
max_it = N
else:
max_it = max_it + delay
#Residual and first search direction
r = b - A(x)
s = np.copy(r)
# Residual Norm
rIP = np.zeros(max_it+1)
rIP[0] = np.inner(r,r)
rNorm = np.sqrt(rIP[0])
if reorth:
r_hist = np.zeros((N,max_it+1))
r_hist[:,0] = r/rNorm
# Calculate first search direction length
As = A(s)
sIP = np.abs(np.inner(s,As))
gamma = np.zeros(max_it+1)
gamma[0] = rIP[0]/sIP
# Gauss-Radau values
g = np.zeros(max_it+1)
if mu is not None:
# CGQ
Emu = np.zeros(max_it+1)
gmu = np.zeros(max_it+1)
gmu[0] = rIP[0]/mu
# G-R approximation by estimating Ritz value
Emu_approx = np.zeros(max_it+1)
rho = gamma[0]
tau = gamma[0]
sigma = 0
t = 0
c = 0
phi = np.zeros(max_it+1)
phi[0] = 1
Emu[0] = gamma[0]*rIP[0]
# S Stat Values
SExp = np.zeros(max_it+1)
SSD = np.zeros(max_it+1)
# Convergence Values
res = np.zeros(max_it+1)
if (NormA is None) or (xTrue is None):
bNorm = linalg.norm(b)
res[0] = rNorm/bNorm
if xTrue is not None:
xNorm = linalg.norm(xTrue)
err_hist = np.zeros(max_it+1)
err_hist[0] = np.inner(x-xTrue,A(x-xTrue))
if NormA is not None:
xNormANorm = linalg.norm(xTrue)*NormA
res[0] = rNorm/xNormANorm
res2 = np.copy(res)
i = 0
#
#Iterating Through Conjugate Gradient
#
while i < max_it or i < delay:
x = x+gamma[i]*s
#Calculate New Residual
r = r - gamma[i]*As
if reorth:
# Reorthogonalize Residual
r = r - r_hist[:,:i+1]@(r_hist[:,:i+1].T@r)
r = r - r_hist[:,:i+1]@(r_hist[:,:i+1].T@r)
# Compute Residual Norms
rIP[i+1] = np.inner(r,r)
rNorm = np.sqrt(rIP[i+1])
if xTrue is not None:
err_hist[i+1] = np.inner(x-xTrue,A(x-xTrue))
rTrueNorm = linalg.norm(b-A(x))
if NormA is not None:
res[i+1] = rNorm/xNormANorm
res2[i+1] = rTrueNorm/xNormANorm
else:
res2[i+1] = rTrueNorm/bNorm
if NormA is None:
res[i+1] = rNorm/bNorm
elif xTrue is None:
res[i+1] = rNorm/NormA/linalg.norm(x)
# Store residual vector if reorthogonalizing
if reorth:
r_hist[:,i+1] = r/rNorm
#Calculate next search direction
delta = rIP[i+1]/rIP[i]
s = r+delta*s
#Calculate next search direction length
As = A(s)
sIP = np.inner(s,As)
gamma[i+1] = rIP[i+1]/sIP
# Update Gauss-Radau values
g[i] = gamma[i]*rIP[i]
if mu is not None:
# CGQ
Deltamu = gmu[i]-g[i]
gmu[i+1] = rIP[i+1]*(Deltamu/(mu*Deltamu+rIP[i+1]))
# Ritz Value Estimate Variables
sigma = -1.0*np.sqrt(gamma[i+1]*delta\
/gamma[i])*(t*sigma+c*tau)
tau = gamma[i+1]*(delta*tau/gamma[i]+1)
chi = np.sqrt((rho-tau)**2+4*sigma**2)
c2 = 0.5*(1-(rho-tau)/chi)
rho = rho + chi*c2
t = np.sqrt(1-c2)
c = np.sqrt(np.abs(c))*np.sign(sigma)
phi[i+1] = phi[i]/(phi[i]+delta) # phi is Ritz value
# 1 based indexing for Error Estimates
i = i+1
# Update Error Bound and S Stat Values
if i >= delay:
ila = i - delay
# S Statistic
SExp[ila] = np.sum(g[ila:i])
SSD[ila] = np.sqrt(2*np.sum(g[ila:i]**2))
if mu is not None:
# CGQ
Emu[ila] = SExp[ila]+gmu[i]
# Gauss-Radau approximation
Emu_approx[ila] = phi[ila]*rho*rIP[ila]
# Evaluate convergence condition
if tol is not None and i >= delay:
if res[i] < tol:
break
#
#Return the results
#
info = {'res':res[:ila+1]}
info['sExp'] = SExp[:ila+1]
info['sSD'] = SSD[:ila+1]
info['GRApprox'] = Emu_approx[:ila+1]
if mu is not None:
info['GaussRadau'] = Emu[:ila+1]
if xTrue is not None:
info['err'] = err_hist[:ila+1]
info['actual_res'] = res2[:ila+1]
return x, info
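# A minimal usage sketch (assumption: the random SPD test system below is illustrative
# and not part of the original module).
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    M = rng.standard_normal((50, 50))
    A_mat = M @ M.T + 50.0 * np.eye(50)          # well-conditioned SPD matrix
    b = rng.standard_normal(50)
    x0 = np.zeros(50)
    x_sol, info = cgqs(lambda v: A_mat @ v, b, x0, tol=1e-10, delay=5)
    print('iterations        :', len(info['res']) - 1)
    print('final residual    :', info['res'][-1])
    print('Gauss-Radau approx:', info['GRApprox'][-1])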
|
import argparse
import datetime
import os
import pickle
import sys
import pandas as pd
from sklearn.linear_model import LinearRegression
sys.path.append('..')
from import_data import azure_repos
def save_model(model, filename):
with open(filename, 'wb') as fd:
pickle.dump(model, fd)
def load_model(filename):
with open(filename, 'rb') as fd:
return pickle.load(fd)
def train_model(pull_requests):
X = pd.DataFrame()
X['num_files_changed'] = pull_requests['num_files_changed'].fillna(0)
# Assigned reviewers hack
X['assigned_reviewers'] = pull_requests['num_reviewers'].apply(lambda x: x / 2)
model = LinearRegression()
model.fit(X=X, y=pull_requests['ttl'].apply(lambda td: td.days * 24 + td.seconds / 3600))
return model
def display_timedelta(td):
days = td.days
hours = td.seconds // 3600
return f"{days} day{'' if days == 1 else 's'}, {hours} hour{'' if hours == 1 else 's'}"
help = f'Usage: python {os.path.basename(__file__)} num_files_changed num_reviewers'
if __name__ == '__main__':
if len(sys.argv) != 3:
print(help)
exit(1)
try:
num_files = int(sys.argv[1])
num_reviewers = int(sys.argv[2])
except ValueError:
print(help)
exit(1)
# The directory of this script file
this_dir = os.path.dirname(os.path.realpath(__file__))
    model_filename = os.path.splitext(os.path.basename(__file__))[0] + '.model'
model_path = os.path.join(this_dir, model_filename)
if os.path.exists(model_path):
model = load_model(model_path)
else:
pull_requests = azure_repos.load_data('../../data/pull-requests.json')
model = train_model(pull_requests)
save_model(model, model_path)
estimate = model.predict([[num_files, num_reviewers]])[0]
td = datetime.timedelta(hours=estimate)
print(f"Estimated time for {num_files} files changed and {num_reviewers} reviewers: {display_timedelta(td)}")
|
import tensorflow as tf
# TensorFlow 1.x graph-mode example: fit y = W x with gradient descent.
# Each column of x_data is one sample: a bias term of 1 plus two features.
x_data = [[1., 1., 1., 1., 1.],
          [1., 0., 3., 0., 5.],
          [0., 2., 0., 4., 0.]]
y_data = [1, 2, 3, 4, 5]
# W is a 1x3 weight (row) vector initialised uniformly in [-1, 1).
W = tf.Variable(tf.random_uniform([1, 3], -1, 1))
hypothesis = tf.matmul(W, x_data)
cost = tf.reduce_mean(tf.square(hypothesis - y_data))
learn_rate = 0.1
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learn_rate)
train = optimizer.minimize(cost)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for step in range(2001):
sess.run(train)
if step % 20 == 0:
print(step, sess.run(cost), sess.run(W))
|
from django.db import models
from hknweb.academics.models.base_models import AcademicEntity
class Rating(AcademicEntity):
# reference attributes
rating_question = models.ForeignKey(
"Question", on_delete=models.PROTECT, related_name="rating_question"
)
rating_survey = models.ForeignKey(
"Survey", on_delete=models.PROTECT, related_name="rating_survey"
)
# value attributes
question_text = models.TextField(max_length=500)
inverted = models.BooleanField(default=False)
range_max = models.IntegerField(default=7)
rating_value = models.FloatField()
|
from mldesigner import command_component
from azure.ai.ml import TensorFlowDistribution
distribution = TensorFlowDistribution()
distribution.worker_count = 2
@command_component(distribution=distribution)
def basic_component(
port1: str,
param1: int,
):
""" module run logic goes here """
return port1
|
import __builtin__
# Dummy _() function instead of actual one from gettext.
__builtin__._ = lambda x: x
|
"""
Helper classes for creating maps in any Source Engine game that uses dod.fgd.
This file was auto-generated by import_fgd.py on 2020-01-19 09:11:10.324911.
"""
from vmflib2.vmf import *
class DodBombDispenser(Entity):
"""
Auto-generated from dod.fgd, line 255.
Bomb Dispenser Area
"""
def __init__(self, vmf_map: "ValveMap", StartDisabled=0, targetname: str="", dispense_team=0):
Entity.__init__(self, "dod_bomb_dispenser", vmf_map)
# Start Disabled :
self.StartDisabled = StartDisabled
# Name : The name that other entities refer to this entity by.
self.targetname: str = targetname
# Team to give bombs to :
self.dispense_team = dispense_team
self.auto_properties.extend(["StartDisabled", "targetname", "dispense_team"])
class DodBombTarget(Entity):
"""
Auto-generated from dod.fgd, line 236.
Bomb Target
"""
def __init__(self, vmf_map: "ValveMap", origin: "Origin"="0 0 0", StartDisabled=0, angles: "Origin"="0 0 0", targetname: str="", target_control_point: str="", bombing_team=2, add_timer_seconds: int=0):
Entity.__init__(self, "dod_bomb_target", vmf_map)
# Origin : This entity's location in 3D space.
self.origin: "Origin" = origin
# Start Disabled :
self.StartDisabled = StartDisabled
        # Pitch Yaw Roll (Y Z X) : This entity's orientation in the world. Pitch is rotation around the Y axis, yaw around the Z axis, and roll around the X axis.
self.angles: "Origin" = angles
# Name : The name that other entities refer to this entity by.
self.targetname: str = targetname
# Target CP :
self.target_control_point: str = target_control_point
# Bombing Team :
self.bombing_team = bombing_team
# Add To Timer (sec) :
self.add_timer_seconds: int = add_timer_seconds
self.auto_properties.extend(["origin", "StartDisabled", "angles", "targetname", "target_control_point", "bombing_team", "add_timer_seconds"])
class DodCaptureArea(Entity):
"""
Auto-generated from dod.fgd, line 202.
Capture Area
"""
def __init__(self, vmf_map: "ValveMap", StartDisabled=0, targetname: str="", area_allies_cancap=1, area_axis_cancap=1, area_allies_numcap: int=1, area_axis_numcap: int=1, area_time_to_cap: int=5, area_cap_point: str=""):
Entity.__init__(self, "dod_capture_area", vmf_map)
# Start Disabled :
self.StartDisabled = StartDisabled
# Name : The name that other entities refer to this entity by.
self.targetname: str = targetname
# Can Allies Cap? :
self.area_allies_cancap = area_allies_cancap
# Can Axis Cap? :
self.area_axis_cancap = area_axis_cancap
# Number of Allies to cap :
self.area_allies_numcap: int = area_allies_numcap
# Number of Axis to cap :
self.area_axis_numcap: int = area_axis_numcap
# Time to cap (sec) :
self.area_time_to_cap: int = area_time_to_cap
# Name of the control point this area is linked to :
self.area_cap_point: str = area_cap_point
self.auto_properties.extend(["StartDisabled", "targetname", "area_allies_cancap", "area_axis_cancap", "area_allies_numcap", "area_axis_numcap", "area_time_to_cap", "area_cap_point"])
class DodControlPoint(Entity):
"""
Auto-generated from dod.fgd, line 126.
Control Point
"""
def __init__(self, vmf_map: "ValveMap", origin: "Origin"="0 0 0", StartDisabled=0, targetname: str="", angles: "Origin"="0 0 0", spawnflags="", point_printname: str="TODO", point_timedpoints_allies: int=0, point_timedpoints_axis: int=0, point_default_owner=0, point_axis_capsound: str="", point_allies_capsound: str="", point_resetsound: str="", point_allies_model: str="models/mapmodels/flags.mdl", point_allies_model_bodygroup: int=1, point_axis_model: str="models/mapmodels/flags.mdl", point_axis_model_bodygroup: int=0, point_reset_model: str="models/mapmodels/flags.mdl", point_reset_model_bodygroup: int=3, point_group: int=0, point_index: int=0, point_hud_icon_neutral: str="sprites/obj_icons/icon_obj_neutral", point_hud_icon_axis: str="sprites/obj_icons/icon_obj_axis", point_hud_icon_allies: str="sprites/obj_icons/icon_obj_allies", point_hud_icon_timercap: str="sprites/obj_icons/icon_obj_neutral", point_hud_icon_bombed: str="sprites/obj_icons/icon_obj_neutral", point_num_bombs=0):
Entity.__init__(self, "dod_control_point", vmf_map)
# Origin : This entity's location in 3D space.
self.origin: "Origin" = origin
# Start Disabled :
self.StartDisabled = StartDisabled
# Name : The name that other entities refer to this entity by.
self.targetname: str = targetname
        # Pitch Yaw Roll (Y Z X) : This entity's orientation in the world. Pitch is rotation around the Y axis, yaw around the Z axis, and roll around the X axis.
self.angles: "Origin" = angles
# TODO: Replace this filler. :
self.spawnflags = spawnflags
# LOCALIZED name to print on the hud :
self.point_printname: str = point_printname
# Time based point value for Allies :
self.point_timedpoints_allies: int = point_timedpoints_allies
# Time based point value for Axis :
self.point_timedpoints_axis: int = point_timedpoints_axis
# Default Owner of the control point :
self.point_default_owner = point_default_owner
# Sound Made when Axis captures :
self.point_axis_capsound: str = point_axis_capsound
# Sound Made when Allies captures :
self.point_allies_capsound: str = point_allies_capsound
# Sound Made when point resets :
self.point_resetsound: str = point_resetsound
# Allies Model : Model when Allies own point
self.point_allies_model: str = point_allies_model
# Allies model bodygroup :
self.point_allies_model_bodygroup: int = point_allies_model_bodygroup
# Axis Model : Model when Axis own point
self.point_axis_model: str = point_axis_model
# Axis model bodygroup :
self.point_axis_model_bodygroup: int = point_axis_model_bodygroup
# Reset Model : Model when point reset
self.point_reset_model: str = point_reset_model
# Reset model bodygroup :
self.point_reset_model_bodygroup: int = point_reset_model_bodygroup
# Group Index :
self.point_group: int = point_group
# Index of this point ( unique ) :
self.point_index: int = point_index
        # Hud icon material when no one owns point :
self.point_hud_icon_neutral: str = point_hud_icon_neutral
# Hud icon material when Axis own point :
self.point_hud_icon_axis: str = point_hud_icon_axis
# Hud icon material when Allies own point :
self.point_hud_icon_allies: str = point_hud_icon_allies
# Hud icon - bomb planted :
self.point_hud_icon_timercap: str = point_hud_icon_timercap
# Hud icon - point destroyed :
self.point_hud_icon_bombed: str = point_hud_icon_bombed
# Number of Bombs required to destroy :
self.point_num_bombs = point_num_bombs
self.auto_properties.extend(["origin", "StartDisabled", "targetname", "angles", "spawnflags", "point_printname", "point_timedpoints_allies", "point_timedpoints_axis", "point_default_owner", "point_axis_capsound", "point_allies_capsound", "point_resetsound", "point_allies_model", "point_allies_model_bodygroup", "point_axis_model", "point_axis_model_bodygroup", "point_reset_model", "point_reset_model_bodygroup", "point_group", "point_index", "point_hud_icon_neutral", "point_hud_icon_axis", "point_hud_icon_allies", "point_hud_icon_timercap", "point_hud_icon_bombed", "point_num_bombs"])
class DodControlPointMaster(Entity):
"""
Auto-generated from dod.fgd, line 98.
Control Point Master
"""
def __init__(self, vmf_map: "ValveMap", origin: "Origin"="0 0 0", targetname: str="", StartDisabled=0, cpm_use_timer=0, cpm_timer_length: int=300, cpm_timer_team=0):
Entity.__init__(self, "dod_control_point_master", vmf_map)
# Origin : This entity's location in 3D space.
self.origin: "Origin" = origin
# Name : The name that other entities refer to this entity by.
self.targetname: str = targetname
# Start Disabled :
self.StartDisabled = StartDisabled
# Use round timer? :
self.cpm_use_timer = cpm_use_timer
# Round timer length in seconds :
self.cpm_timer_length: int = cpm_timer_length
# Which team wins when timer expires :
self.cpm_timer_team = cpm_timer_team
self.auto_properties.extend(["origin", "targetname", "StartDisabled", "cpm_use_timer", "cpm_timer_length", "cpm_timer_team"])
class DodLocation(Entity):
"""
Auto-generated from dod.fgd, line 121.
Location
"""
def __init__(self, vmf_map: "ValveMap", origin: "Origin"="0 0 0", targetname: str="", location_name: str=""):
Entity.__init__(self, "dod_location", vmf_map)
# Origin : This entity's location in 3D space.
self.origin: "Origin" = origin
# Name : The name that other entities refer to this entity by.
self.targetname: str = targetname
# Name of this location :
self.location_name: str = location_name
self.auto_properties.extend(["origin", "targetname", "location_name"])
class DodScoring(Entity):
"""
Auto-generated from dod.fgd, line 87.
Custom Scoring
"""
def __init__(self, vmf_map: "ValveMap", origin: "Origin"="0 0 0", targetname: str="", TeamNum=0, point_give_delay: int=60, point_give_amount: int=1, point_give_max_times: int=10):
Entity.__init__(self, "dod_scoring", vmf_map)
# Origin : This entity's location in 3D space.
self.origin: "Origin" = origin
# Name : The name that other entities refer to this entity by.
self.targetname: str = targetname
# Team Number (int) :
self.TeamNum = TeamNum
# Seconds between point gives :
self.point_give_delay: int = point_give_delay
# Points to give each time :
self.point_give_amount: int = point_give_amount
# Max number of times to give points :
self.point_give_max_times: int = point_give_max_times
self.auto_properties.extend(["origin", "targetname", "TeamNum", "point_give_delay", "point_give_amount", "point_give_max_times"])
class FilterActivatorTeam(Entity):
"""
Auto-generated from dod.fgd, line 31.
A filter that filters by the team of the activator.
"""
def __init__(self, vmf_map: "ValveMap", origin: "Origin"="0 0 0", targetname: str="", Negated="Allow entities that match criteria", filterteam=2):
Entity.__init__(self, "filter_activator_team", vmf_map)
# Origin : This entity's location in 3D space.
self.origin: "Origin" = origin
# Name : The name that other entities refer to this entity by.
self.targetname: str = targetname
# Filter mode : If set to Allow, only entities who match the criteria will pass the filter.
self.Negated = Negated
# Filter Team Number : The team number to filter by. If the filter mode is Allow, only entities whose
self.filterteam = filterteam
self.auto_properties.extend(["origin", "targetname", "Negated", "filterteam"])
class FuncLadder(Entity):
"""
Auto-generated from dod.fgd, line 195.
    Ladder. Players will be able to move freely along this brush, as if it were a ladder. If you are using a model prop for the visual representation of the ladder in the map, apply the toolsinvisibleladder material to the func_ladder brush.
"""
def __init__(self, vmf_map: "ValveMap"):
Entity.__init__(self, "func_ladder", vmf_map)
class FuncTeamWall(Entity):
"""
Auto-generated from dod.fgd, line 265.
Team Blocker Wall
"""
def __init__(self, vmf_map: "ValveMap", blockteam=0):
Entity.__init__(self, "func_team_wall", vmf_map)
# Team to block :
self.blockteam = blockteam
self.auto_properties.extend(["blockteam"])
class FuncTeamblocker(Entity):
"""
Auto-generated from dod.fgd, line 275.
Walls that players of a certain team are unable to pass through
"""
def __init__(self, vmf_map: "ValveMap", targetname: str="", parentname: str="", TeamNum=0, spawnflags=""):
Entity.__init__(self, "func_teamblocker", vmf_map)
# Name : The name that other entities refer to this entity by.
self.targetname: str = targetname
# Parent : The name of this entity's parent in the movement hierarchy. Entities with parents move with their parent.
self.parentname: str = parentname
# Team Number (int) :
self.TeamNum = TeamNum
# TODO: Replace this filler. :
self.spawnflags = spawnflags
self.auto_properties.extend(["targetname", "parentname", "TeamNum", "spawnflags"])
class InfoDoddetect(Entity):
"""
Auto-generated from dod.fgd, line 59.
DoD Gamerules
"""
def __init__(self, vmf_map: "ValveMap", origin: "Origin"="0 0 0", StartDisabled=0, detect_allies_respawnfactor: str="1.0", detect_axis_respawnfactor: str="1.0", detect_allies_startroundvoice=0, detect_axis_startroundvoice=0):
Entity.__init__(self, "info_doddetect", vmf_map)
# Origin : This entity's location in 3D space.
self.origin: "Origin" = origin
# Start Disabled :
self.StartDisabled = StartDisabled
# Allies Respawn delay factor :
self.detect_allies_respawnfactor: str = detect_allies_respawnfactor
# Axis Respawn delay factor :
self.detect_axis_respawnfactor: str = detect_axis_respawnfactor
# Start round command for Allies :
self.detect_allies_startroundvoice = detect_allies_startroundvoice
# Start round command for Axis :
self.detect_axis_startroundvoice = detect_axis_startroundvoice
self.auto_properties.extend(["origin", "StartDisabled", "detect_allies_respawnfactor", "detect_axis_respawnfactor", "detect_allies_startroundvoice", "detect_axis_startroundvoice"])
class InfoPlayerAllies(Entity):
"""
Auto-generated from dod.fgd, line 49.
This entity marks the start point for Allied players.
"""
def __init__(self, vmf_map: "ValveMap", origin: "Origin"="0 0 0", angles: "Origin"="0 0 0", targetname: str="", StartDisabled=0):
Entity.__init__(self, "info_player_allies", vmf_map)
# Origin : This entity's location in 3D space.
self.origin: "Origin" = origin
        # Pitch Yaw Roll (Y Z X) : This entity's orientation in the world. Pitch is rotation around the Y axis, yaw around the Z axis, and roll around the X axis.
self.angles: "Origin" = angles
# Name : The name that other entities refer to this entity by.
self.targetname: str = targetname
# Start Disabled :
self.StartDisabled = StartDisabled
self.auto_properties.extend(["origin", "angles", "targetname", "StartDisabled"])
class InfoPlayerAxis(Entity):
"""
Auto-generated from dod.fgd, line 54.
This entity marks the start point for Axis players.
"""
def __init__(self, vmf_map: "ValveMap", origin: "Origin"="0 0 0", angles: "Origin"="0 0 0", targetname: str="", StartDisabled=0):
Entity.__init__(self, "info_player_axis", vmf_map)
# Origin : This entity's location in 3D space.
self.origin: "Origin" = origin
        # Pitch Yaw Roll (Y Z X) : This entity's orientation in the world. Pitch is rotation around the Y axis, yaw around the Z axis, and roll around the X axis.
self.angles: "Origin" = angles
# Name : The name that other entities refer to this entity by.
self.targetname: str = targetname
# Start Disabled :
self.StartDisabled = StartDisabled
self.auto_properties.extend(["origin", "angles", "targetname", "StartDisabled"])
|
import torch
import numpy as np
import torchvision.transforms as trans
import math
from scipy.fftpack import dct, idct
from torch.distributions import Beta
# mean and std for different datasets
IMAGENET_SIZE = 224
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
IMAGENET_TRANSFORM = trans.Compose([
    trans.Resize(256),  # Resize replaces the deprecated (and later removed) transforms.Scale
trans.CenterCrop(224),
trans.ToTensor()])
INCEPTION_SIZE = 299
INCEPTION_TRANSFORM = trans.Compose([
    trans.Resize(342),
trans.CenterCrop(299),
trans.ToTensor()])
CIFAR_SIZE = 32
CIFAR_MEAN = [x / 255 for x in [125.3, 123.0, 113.9]]# [0.4914, 0.4822, 0.4465]
CIFAR_STD = [x / 255 for x in [63.0, 62.1, 66.7]]#[0.2023, 0.1994, 0.2010]
CIFAR_TRANSFORM = trans.Compose([
trans.ToTensor()])
MNIST_SIZE = 28
MNIST_MEAN = [0.5]
MNIST_STD = [1.0]
MNIST_TRANSFORM = trans.Compose([
trans.ToTensor()])
# reverses the normalization transformation
def invert_normalization(imgs, dataset):
if dataset == 'imagenet':
mean = IMAGENET_MEAN
std = IMAGENET_STD
elif dataset == 'cifar':
mean = CIFAR_MEAN
std = CIFAR_STD
elif dataset == 'mnist':
mean = MNIST_MEAN
std = MNIST_STD
imgs_trans = imgs.clone()
if len(imgs.size()) == 3:
for i in range(imgs.size(0)):
imgs_trans[i, :, :] = imgs_trans[i, :, :] * std[i] + mean[i]
else:
for i in range(imgs.size(1)):
imgs_trans[:, i, :, :] = imgs_trans[:, i, :, :] * std[i] + mean[i]
return imgs_trans
# reverses the normalization transformation
def apply_unnormalization(imgs, dataset):
if dataset == 'imagenet':
mean = IMAGENET_MEAN
std = IMAGENET_STD
elif dataset == 'cifar':
mean = CIFAR_MEAN
std = CIFAR_STD
elif dataset == 'mnist':
mean = MNIST_MEAN
std = MNIST_STD
else:
mean = [0, 0, 0]
std = [1, 1, 1]
imgs_tensor = imgs.clone()
    if dataset == 'mnist':
        imgs_tensor = imgs_tensor * std[0] + mean[0]
else:
if imgs.dim() == 3:
for i in range(imgs_tensor.size(0)):
imgs_tensor[i, :, :] = imgs_tensor[i, :, :] * std[i]+ mean[i]
else:
for i in range(imgs_tensor.size(1)):
imgs_tensor[:, i, :, :] = imgs_tensor[:, i, :, :]* std[i]+ mean[i]
return imgs_tensor
# applies the normalization transformations
def apply_normalization(imgs, dataset):
if dataset == 'imagenet':
mean = IMAGENET_MEAN
std = IMAGENET_STD
elif dataset == 'cifar':
mean = CIFAR_MEAN
std = CIFAR_STD
elif dataset == 'mnist':
mean = MNIST_MEAN
std = MNIST_STD
else:
mean = [0, 0, 0]
std = [1, 1, 1]
imgs_tensor = imgs.clone()
if dataset == 'mnist':
imgs_tensor = (imgs_tensor - mean[0]) / std[0]
else:
if imgs.dim() == 3:
for i in range(imgs_tensor.size(0)):
imgs_tensor[i, :, :] = (imgs_tensor[i, :, :] - mean[i]) / std[i]
else:
for i in range(imgs_tensor.size(1)):
imgs_tensor[:, i, :, :] = (imgs_tensor[:, i, :, :] - mean[i]) / std[i]
return imgs_tensor
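# Illustrative self-check (hypothetical tensors; only runs when this module is executed directly):
if __name__ == '__main__':
    demo = torch.rand(8, 3, CIFAR_SIZE, CIFAR_SIZE)           # pixel values in [0, 1]
    normed = apply_normalization(demo, 'cifar')               # (x - mean) / std per channel
    restored = invert_normalization(normed, 'cifar')          # undoes the normalization
    print('max round-trip error:', (demo - restored).abs().max().item())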
|
from config import DB_URI
uri = DB_URI
if uri.startswith("postgres://"):
uri = uri.replace("postgres://", "postgresql://", 1)
# rest of connection code using the connection string `uri`
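# A minimal sketch of what that connection code might look like, assuming the app
# uses SQLAlchemy (hypothetical; any client that accepts a DSN works the same way):
#   from sqlalchemy import create_engine, text
#   engine = create_engine(uri)
#   with engine.connect() as conn:
#       conn.execute(text("SELECT 1"))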
|
from athena import gpu_ops as ad
from athena import initializers as init
def logreg(x, y_):
'''
Logistic Regression model, for MNIST dataset.
Parameters:
x: Variable(athena.gpu_ops.Node.Node), shape (N, dims)
y_: Variable(athena.gpu_ops.Node.Node), shape (N, num_classes)
Return:
loss: Variable(athena.gpu_ops.Node.Node), shape (1,)
y: Variable(athena.gpu_ops.Node.Node), shape (N, num_classes)
'''
print("Build logistic regression model...")
weight = init.zeros((784, 10), name='logreg_weight')
bias = init.zeros((10,), name='logreg_bias')
x = ad.matmul_op(x, weight)
y = x + ad.broadcastto_op(bias, x)
loss = ad.softmaxcrossentropy_op(y, y_)
loss = ad.reduce_mean_op(loss, [0])
return loss, y
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The sentence-meter takes TEI documents, removes unnecessary elements from them,
divides the plain text into sentences, and outputs a graph.
By Andreas Dittrich, 2017
"""
import re,glob,textwrap,statistics,numpy
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
def splitter(text):
""" split text in sentences """
    vorpunkt = r"(?<!\d\.)(?<!\b\w\.)(?<!Mag\.)(?<!Dr\.)(?<!M[rs]\.)(?<!Mrs\.)(?<!usw\.)(?<!etc\.)(?<!bzw\.)(?<!usf\.)(?<!z\.B\.)(?<!\b[SsPp]\.)(?<!\bca\.)(?<!\bsen\.)(?<!Sep\.)(?<!Sept\.)(?<!Nr\.)(?<!\bmin\.)(?<!\bmind\.)(?<!\bmax\.)"
    s_regex = vorpunkt + r"(?<=…|\!|\?|\:|\;|\.|\n|»|«|›|‹) +(?![a-zöäü])"
sentences = re.split(s_regex,text)
return sentences
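# Illustrative behaviour of splitter() on a hypothetical two-sentence string:
#   splitter("Das ist ein Satz. Und noch einer!")
#   -> ['Das ist ein Satz.', 'Und noch einer!']
# (the abbreviation lookbehinds keep e.g. "Dr. Meier" or "z.B. so" inside one sentence)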
def printlongest(sentences):
longest= max(sentences, key=len)
print( "~" * 57)
print( "Longest sentence ("+str(len(longest.split()))+" words):" )
print( textwrap.fill( longest , width=57, subsequent_indent=" ") )
print( "~" * 57)
def printgraph(sentences,filename):
""" print graph """
x=range(0,len(sentences))
y=[len(s.split()) for s in sentences]
plt.title(re.split("/",filename)[-1])
plt.ylabel("words")
# plt.plot(y) # line plot (faster)
plt.bar(x,y) # bar plot
plt.show()
######################################################################
## MAIN
# filename=sorted(glob.glob('*.xml'))
filename=sorted(glob.glob('/home/dia/Dokumente/Studium/TextKopien/Corpus_Bernhard/*.xml'))
filecount=len(filename)
counter=1
for file in filename:
with open(file, "r", encoding="utf-8") as fh:
textinput=re.sub("(\r)?\n","", fh.read())
textinput=re.sub('<lb break="no"/>',"", textinput)
textinput=re.sub('<lb/>'," ", textinput)
soup=BeautifulSoup(textinput, "lxml-xml")
for div in soup.body.find_all("div", type=True):
for match in div.find_all("fw"):
match.decompose() # remove fw-tags
for match in div.find_all("pb"):
match.decompose() # remove pb-tags
for match in div.find_all("speaker"):
match.decompose()
for match in div.find_all("stage"):
match.decompose()
for match in div.find_all("figure"):
match.decompose() # remove figure-tags (paragraphs in it are comments)
for match in div.find_all("note"):
match.decompose() # remove note-tags (footnotes etc)
for match in div.find_all("lb"): # remove lb-tags
try:
if match["break"]:
match.unwrap()
                except KeyError:
match.string=" "
match.unwrap()
head=div.find_next("head").get_text()
text=div.get_text()
text=re.sub("(\r)?\n+"," ",text)
text=re.sub(" +"," ",text)
sentences=splitter( text )
sentencelengths=list()
for s in sentences:
sentencelengths.append(len(s))
# statistics:
satzanzahl =str( len(sentencelengths) )
            medianlength =str( statistics.median(sentencelengths) )
longestlength=str( max(sentencelengths) )
            optitle=re.sub(r"\W","_", head)
plt.close('all')
# plot
plt.bar( range(0,len(sentencelengths)), sentencelengths , edgecolor='blue', color='blue')
plt.title(head)
            plt.suptitle( "Number of sentences: "+satzanzahl+", median sentence length: "+medianlength+", longest sentence: "+longestlength )
plt.xlabel("sentence")
plt.ylabel("characters")
plt.savefig(str(counter)+"_"+optitle + '.pdf')
# plt.show()
print("Done "+ file)
counter+=1
print("Done!")
|
from collections import Counter
import copy
import hashlib
import json
from urllib.parse import quote, unquote, urlparse
from arrow import now
from base64 import b64encode
from boto3 import Session as BotoSession
from botocore.client import Config as BotoConfig
from clarifai.rest import ClarifaiApp
from iiif.request import IIIFRequest
from pysolr import Solr
from redis import Redis
from requests import get, head, post, put
from .celery import app
# NOTE: for documentation of the `message` and `config` arguments of each task,
# see the constructor of `mgap.mgap.MGAP`.
@app.task
def get_image_url(message, config):
'''Returns an image URL that conforms to the IIIF Image API.'''
# The last path component is a URL-encoded identifier.
iiif_image_server_base_url, url_encoded_identifier = message['iiif_image_info_url'].rsplit('/', maxsplit=1)
# The `baseurl` keyword argument must have a trailing slash.
image_api_request = IIIFRequest(baseurl=iiif_image_server_base_url + '/')
image_api_params = {
**config['iiif']['image_api_default_params'],
'size': '640,',
'identifier': unquote(url_encoded_identifier)
}
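    # With hypothetical default params (region 'full', rotation '0', quality 'default',
    # format 'jpg') the returned URL would look roughly like:
    #   https://iiif.example.org/iiif/2/some%2Fidentifier/full/640,/0/default.jpg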
return image_api_request.url(**image_api_params)
@app.task
def send_to_amazon_rekognition(image_url, config, message):
'''Sends an image to Amazon Rekognition.
Args:
image_url: The URL of the image to send to Amazon Rekognition.
Returns:
A dictionary containing the response payload from the computer vision
service, a vendor identifier, and a timestamp.
'''
amazon_rekognition_client = BotoSession(profile_name=config['aws']['profile_name']).client('rekognition')
timestamp = now('US/Pacific').isoformat()
amazon_rekognition_response = amazon_rekognition_client.detect_labels(Image={'Bytes': get(image_url).content})
return {
'results': amazon_rekognition_response,
'vendor': 'amazon_rekognition',
'timestamp': timestamp
}
@app.task
def send_to_clarifai(image_url, config, message):
'''Sends an image to Clarifai.
Args:
image_url: The URL of the image to send to Clarifai.
Returns:
A dictionary containing the response payload from the computer vision
service, a vendor identifier, and a timestamp.
'''
clarifai_client = ClarifaiApp(api_key=config['clarifai']['api_key'])
clarifai_model = clarifai_client.public_models.general_model
timestamp = now('US/Pacific').isoformat()
clarifai_response = clarifai_model.predict_by_url(url=image_url)
return {
'results': clarifai_response,
'vendor': 'clarifai',
'timestamp': timestamp
}
@app.task
def send_to_google_vision(image_url, config, message):
'''Sends an image to Google Vision service.
Args:
image_url: The URL of the image to send to Google.
Returns:
A dictionary containing the response payload from the computer vision
service, a vendor identifier, and a timestamp.
'''
ENDPOINT_URL = 'https://vision.googleapis.com/v1/images:annotate'
api_key = config['google_vision']['api_key']
img_request = []
ctxt = b64encode(get(image_url).content).decode()
img_request.append({
'image': {'content': ctxt},
'features': [{
'type': 'OBJECT_LOCALIZATION',
'maxResults': 50
}]
})
json_data = json.dumps({"requests": img_request }).encode()
timestamp = now('US/Pacific').isoformat()
google_response = post(ENDPOINT_URL,
data=json_data,
params={'key': api_key},
headers={'Content-Type': 'application/json'})
google_json = google_response.json()['responses']
return {
'results': google_json,
'vendor': 'google_vision',
'timestamp': timestamp
}
@app.task
def save_to_redis(computer_vision_results, config, message):
'''Saves computer vision results to Redis as JSON.
Args:
computer_vision_results: A dictionary containing the response payload
from the computer vision service, a vendor identifier, and a
timestamp.
Returns:
The key under which the results were stored in Redis.
'''
# TODO: don't instantiate a Redis client for every task
redis_instance = Redis(
host=config['redis']['host'],
port=config['redis']['port'],
db=config['redis']['db']['computer_vision_results']
)
redis_key = message['iiif_image_info_url'] + '-' + computer_vision_results['vendor']
redis_value = json.dumps({**message, **computer_vision_results})
redis_instance.set(redis_key, redis_value)
return redis_key
@app.task
def collect_computer_vision_results(redis_keys, config, message):
'''
Reads computer vision results from Redis, deserializes them, and stores
them in a dictionary.
Args:
redis_keys: A list of Redis keys under which computer vision service
results for the current image are stored.
Returns:
A dictionary representation of the computer vision results.
'''
redis_instance = Redis(
host=config['redis']['host'],
port=config['redis']['port'],
db=config['redis']['db']['computer_vision_results']
)
return {redis_key: json.loads(redis_instance.get(redis_key)) for redis_key in redis_keys}
@app.task
def construct_annotation(computer_vision_results, config, message):
'''Constructs a dictionary representing a WebAnnotation.
Args:
computer_vision_results: A dictionary representation of the computer
vision results.
Returns:
A dictionary representing an annotation.
'''
# Start with base templates for the annotation and annotation body.
anno = config['web_annotation']['annotation_seed']
anno_body_seed = config['web_annotation']['annotation_body_seed']
# FIXME: set id properly
anno['id'] = 'FIXME'
anno['body'] = []
# Each annotation has multiple bodies, one for each CV service result.
for k, v in computer_vision_results.items():
anno_body = copy.deepcopy(anno_body_seed)
if v['vendor'] == 'amazon_rekognition':
image_tags = list(map(
lambda x: x['Name'],
v['results']['Labels']
))
cv_service_name = 'Amazon Rekognition'
cv_service_homepage = 'https://aws.amazon.com/rekognition'
elif v['vendor'] == 'clarifai':
image_tags = list(map(
lambda x: x['name'],
v['results']['outputs'][0]['data']['concepts']
))
cv_service_name = 'Clarifai Predict'
cv_service_homepage = 'https://www.clarifai.com/predict'
elif v['vendor'] == 'google_vision':
image_tags = list(map(
lambda x: x['name'],
v['results'][0]['localizedObjectAnnotations']
))
cv_service_name = 'Google Computer Vision'
cv_service_homepage = 'https://cloud.google.com/vision/'
anno_body['value'] = json.dumps(image_tags)
# Creator and generator is the same agent.
anno_body['creator']['name'] = anno_body['generator']['name'] = cv_service_name
anno_body['creator']['homepage'] = anno_body['generator']['homepage'] = cv_service_homepage
anno_body['created'] = anno_body['generated'] = v['timestamp']
# TODO: conditionally add modified field if annotation for current image already exists
anno['body'].append(anno_body)
anno['target']['source'] = message['iiif_image_info_url']
anno['target']['selector']['region'] = '640,'
anno['created'] = anno['generated'] = now('US/Pacific').isoformat()
return anno
@app.task
def save_to_elucidate(annotation, config, message):
'''Sends a request to Elucidate to create or update an annotation and its container.
Args:
annotation: A dictionary representation of a WebAnnotation.
Returns:
The URL of the annotation on Elucidate.
'''
elucidate_headers_seed = config['elucidate']['request_headers_seed']
elucidate_base_url = '{}:{}/annotation/{}/'.format(
config['elucidate']['host'],
config['elucidate']['port'],
config['elucidate']['annotation_model']
)
annotation_container_slug = hashlib.md5(message['item_ark'].encode("utf-8")).hexdigest()
annotation_container_url = '{}{}/'.format(
elucidate_base_url,
annotation_container_slug
)
annotation_container_response = get(annotation_container_url)
# If container doesn't exist for the ARK, create it.
if annotation_container_response.status_code != 200:
annotation_container = {
**config['web_annotation']['annotation_container_seed'],
'label': message['item_ark']
}
create_annotation_container_response = post(
elucidate_base_url,
headers={
**elucidate_headers_seed,
'Slug': annotation_container_slug
},
data=json.dumps(annotation_container, indent=4, sort_keys=True)
)
# Annotation couldn't have existed without a container, so create it.
create_annotation_response = post(
annotation_container_url,
headers=elucidate_headers_seed,
data=json.dumps(annotation, indent=4, sort_keys=True)
)
annotation_url = create_annotation_response.json().get('id')
else:
# Annotation container and annotation already exist, so update.
annotation_url = annotation_container_response.json()['first']['items'][0]['id']
# Extract the inner contents of the weak ETag (W/"...").
etag = head(annotation_url).headers['etag'][3:-1]
# FIXME: don't overwrite the annotation! Much of it might not have changed, so retain timestamps, etc.
update_annotation_response = put(
annotation_url,
headers={
**elucidate_headers_seed,
'Content-Type': 'application/ld+json;profile="http://www.w3.org/ns/anno.jsonld"',
'If-Match': etag
},
data=json.dumps(annotation, indent=4, sort_keys=True)
)
return annotation_url
@app.task
def save_to_blacklight_solr(computer_vision_results, config, message):
'''Creates or updates the tags field on a image's Solr doc in each index.
Args:
computer_vision_results: A dictionary representation of the computer
vision results.
Returns:
None
'''
def add_tags_to_solr_doc(index_id, document_id, tags):
'''Adds a list of tags to the Solr document.
Args:
index_id: The name of the Solr index.
document_id: The value of the id field of the Solr document to
update.
tags: A list of strings.
Returns:
None
'''
solr_client = Solr(config['solr']['indexes'][index_id], always_commit=True)
tags_field = config['solr']['tags_field']
copy_fields = config['solr']['copy_fields']
# Get the value of each field to copy to the new document.
src_fields = map(lambda x: x['src'], copy_fields)
src_field_values = solr_client.search('id:{}'.format(document_id), fl=list(src_fields)).docs[0]
# Only set the fields that are already set on the document.
existing_src_fields = src_field_values.keys()
# Add copy fields to the new Solr doc.
solr_doc = {
'id': document_id,
tags_field: tags,
**{copy_field['dst']: src_field_values[copy_field['src']] for copy_field in copy_fields if copy_field['src'] in existing_src_fields}
}
solr_client.add(
[solr_doc],
commitWithin='1000',
fieldUpdates={
tags_field: 'set',
**{copy_field['dst']: 'set' for copy_field in copy_fields if copy_field['src'] in existing_src_fields}
},
overwrite=True
)
# Get the Solr id by transforming the reversed item ARK.
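    # e.g. a hypothetical ARK 'ark:/12345/zz0001abcd' becomes 'dcba1000zz-54321':
    # each path component is reversed, the 'ark:' part is dropped, and the remaining
    # components are joined in reverse order with '-'.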
solr_identifier = '-'.join(list(map(lambda x: x[::-1], message['item_ark'].split('/')))[1:][::-1])
    # Build up a combined list of image tags from all computer vision services.
all_image_tags = []
for k, v in computer_vision_results.items():
# TODO: abstract this repeated code between here and `construct_annotation`
if v['vendor'] == 'amazon_rekognition':
image_tags = list(map(
lambda x: x['Name'],
v['results']['Labels']
))
elif v['vendor'] == 'clarifai':
image_tags = list(map(
lambda x: x['name'],
v['results']['outputs'][0]['data']['concepts']
))
elif v['vendor'] == 'google_vision':
image_tags = list(map(
lambda x: x['name'],
v['results'][0]['localizedObjectAnnotations']
))
# If we're pointing to a service-specific index, write to it.
index_name = v['vendor']
if index_name in config['solr']['indexes']:
add_tags_to_solr_doc(index_name, solr_identifier, image_tags)
all_image_tags += image_tags
# Write a combined list of tags to the combined index (all computer vision services).
index_name = 'combined'
if index_name in config['solr']['indexes']:
add_tags_to_solr_doc(index_name, solr_identifier, list(Counter(all_image_tags)))
|
import os
from flask import Flask, render_template, request, send_file, flash
from flask_uploads import UploadSet, configure_uploads, IMAGES
from mosaic import get_result_image
app = Flask(__name__)
photos = UploadSet('photos', IMAGES)
app.config['UPLOADED_PHOTOS_DEST'] = 'static/img'
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1
app.secret_key = "super secret key"
configure_uploads(app, photos)
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/upload', methods=['POST'])
def upload():
filename = photos.save(request.files['photo'])
full_filename = os.path.join(app.config['UPLOADED_PHOTOS_DEST'], filename)
return send_file(get_result_image(full_filename), as_attachment=True)
if __name__ == '__main__':
app.run(debug=True)
|
from abc import ABC
from typing import TypeVar, Generic, List, Optional
from tudelft.utilities.listener.Listenable import Listenable
from uri.uri import URI
from geniusweb.references.Reference import Reference
INTYPE = TypeVar('INTYPE')
OUTTYPE = TypeVar('OUTTYPE')
class ConnectionEnd(Listenable[INTYPE], Generic[INTYPE,OUTTYPE]):
'''
    One end of a general two-way connection. Incoming data is reported through
    the {@link Listenable} channel, sending events of the INTYPE.
    <p>
    The connection mechanism assumed here is fundamentally asymmetric. One side
    is "Connectable", the other side is a ConnectionEnd created (usually from a
    {@link Reference}). This matches the typical client-server system (web
    architecture).
    <p>
    If an internal error occurs, e.g. a socket failure, timeout, or parser error, a
    null event is sent into the Listenable. {@link #getError()} can be called to
    find out about the error.
@param <INTYPE> the type of incoming messages (incoming for the user of this
connection end). Incoming messages are received through
Listener#not. Incoming messages are usually asynchronous.
@param <OUTTYPE> the type of outgoing messages (outgoing for the user of this
connection end). Outgoing messages can be sent directly with
#send.
'''
def send(self,data:OUTTYPE ):
'''
Send data out (and flush the output so that there are no excessive delays
in sending the data). This call is assumed to return immediately (never
block, eg on synchronized, Thread.sleep, IO, etc). When this is called
multiple times in sequence, the data should arrive at the receiver end in
the same order.
@param data the data to be sent.
@throws ConnectionError if the data failed to be sent.
'''
def getReference(self) -> Reference:
'''
@return Reference that was used to create this connection
'''
def getRemoteURI(self)->URI:
'''
@return the URI of the remote endpoint that makes up the connection. This
is a URI that uniquely identifies the remote object.
'''
def close(self):
'''
Close the connection. Should return immediately (not block). Before
really closing, this should attempt to send out possibly cached messages
before closing the connection.
'''
def getError(self) -> Optional[Exception]:
'''
@return the latest internal error, or null if no error occured. If the
channel is closed, this is set to {@link SocketException} "Socket
closed", even if the close was a "normal" termination eg when
{@link #close()} was called.
'''
|
#!/usr/bin/env python3
import json
import sys
from augmentation_instance import *
from util.instance_parser import *
from util.file_manager import *
from learning_task import *
if __name__ == '__main__':
if len(sys.argv) == 1:
params = json.load(open('params.json'))
else:
params = json.load(open(sys.argv[1]))
learning_data_filename = params['learning_data_filename']
augmentation_learning_data_filename = params['augmentation_learning_data_filename']
augmentation_instances = parse_augmentation_instances(learning_data_filename)
print('Done parsing instances')
learning_features = []
learning_targets = []
i = 0
for instance in augmentation_instances:
learning_features.append(instance.generate_features())
learning_targets.append(instance.compute_gain_in_r2_score())
i += 1
if (i % 100 == 0):
print(i)
dump_learning_instances(augmentation_learning_data_filename, learning_features, learning_targets)
print('done processing augmentation instances and creating data')
|
from PyQt4 import QtCore, QtGui
from gui import Ui_MainWindow
from ImageUpdate import ImageUpdate
from DataProcessing.Writer import Writer
from DataProcessing.Controller import Controller
from multiprocessing import Manager, Event, Queue
from subgui import Ui_Dialog
import sys
from subprocess import Popen, PIPE
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
class MainWindow(QtGui.QMainWindow):
"""
Main GUI class, creates the GUI, its buttons and output and ties them all
to the controller classes. The class inherits from a QT QMainwindow widget
which defines the specifications of the buttons, GUI and other aspects.
"""
def __init__(self):
"""
Creates the GUI itself and loads all of the sections needed for the
program to run properly.
"""
super(MainWindow, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.guiSetup()
self.controller = None
self.state = False
self.live = False
        # Manager that owns the shared multiprocessing variables and prevents synchronization issues between processes
self.manager = Manager()
self.file_available = self.manager.list()
self.play_pause_list = self.manager.list()
self.raw_images_ready, self.lane_images_ready, self.visual_images_ready = self.manager_event_creator()
self.raw_q, self.lane_q, self.visual_q = self.manager_queue_creator()
# Creates the image updaters which will update the image whenever the controller process sends a signal
self.raw_image_updater = ImageUpdate(self.raw_images_ready, self.raw_q, True, self)
self.lane_image_updater = ImageUpdate(self.lane_images_ready, self.lane_q, True, self)
self.visual_image_updater = ImageUpdate(self.visual_images_ready, self.visual_q, False, self)
self.image_updater()
# Creates the writer and the list that the time and dots will be placed in.
self.time_dots = []
self.writer = Writer(self.time_dots)
def guiSetup(self):
"""
Sets up additional necessary parts of the GUI that could not be set natively in Qt
"""
self.ui.playButton.setIcon(self.style().standardIcon(QtGui.QStyle.SP_MediaPlay))
self.setWindowTitle("Lane Detection")
def image_updater(self):
"""
Connects the signals with the functions that are to be called, also starts the image updater processes
"""
self.raw_image_updater.image_signal.connect(self.set_raw_image)
self.lane_image_updater.image_signal.connect(self.set_lane_image)
self.visual_image_updater.image_signal.connect(self.set_visual_image)
self.raw_image_updater.start()
self.lane_image_updater.start()
self.visual_image_updater.start()
def manager_event_creator(self):
"""
Creates the events that the image updater processes use to know when an image is ready to be displayed
"""
raw_images_event = Event()
lane_images_event = Event()
visual_images_event = Event()
return raw_images_event, lane_images_event, visual_images_event,
def manager_queue_creator(self):
"""
Creates a queue which houses the images to be displayed
"""
raw_images_queue = Queue()
lane_images_queue = Queue()
visual_images_queue = Queue()
return raw_images_queue, lane_images_queue, visual_images_queue
def run(self):
"""
Connect buttons and starts the multi-threaded processes
"""
self.select_source()
self.on_play_click()
self.on_quit_click()
self.on_writetobag_click()
self.controller = Controller(self.ui.rosCoreWidget,
self.ui.rosPlayWidget, self.file_available, self.raw_images_ready,
self.lane_images_ready, self.visual_images_ready, self.raw_q, self.lane_q,
self.visual_q, self.time_dots, self.play_pause_list)
def select_source(self):
window = SubWindow(self)
window.run()
window.show()
window.exec_()
def on_writetobag_click(self):
"""
Connects the write to bag button to the Writer write_dots
function.
"""
self.ui.saveMenuButton.triggered.connect(self.write_to_bag)
def write_to_bag(self):
"""
Calls the write dots function in the Writer to write the processed
lanes into the specified bag file
"""
self.controller.kill_terminals()
self.writer.write_lanes(path = self.file_available[0])
self.quit_program()
def on_play_click(self):
"""
Connect the play button to its function
"""
self.ui.playButton.clicked.connect(self.play_file)
def on_quit_click(self):
"""
Connect the quit button in the menu to its function
"""
self.ui.quitMenuButton.triggered.connect(self.quit_program)
def set_visual_image(self, image):
"""
Sets the given image to its corresponding UI element
"""
self.ui.visualisedVideo.setPixmap(QtGui.QPixmap.fromImage(image))
def set_lane_image(self, image):
"""
Sets the given image to its corresponding UI element
"""
self.ui.processedVideo.setPixmap(QtGui.QPixmap.fromImage(image))
def set_raw_image(self, image):
"""
Sets the given image to its corresponding UI element
"""
self.ui.rawVideo.setPixmap(QtGui.QPixmap.fromImage(image))
def quit_program(self):
"""
Closes PyQt in an elegant way, destroys sub-windows
"""
self.controller.kill_terminals()
self.close()
def play_file(self):
"""
Plays the file, will be updated once the GUI works with other things
"""
if len(self.file_available) > 0:
p = Popen(['xdotool', 'search', '--pid', str(self.play_pause_list[0])], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate(b'Xdotool output')
p.wait()
Popen(['xdotool', 'key', '--window', str(output), 'space'])
if self.state:
self.ui.playButton.setIcon(self.style().standardIcon(QtGui.QStyle.SP_MediaPlay))
self.state = False
else:
self.ui.playButton.setIcon(self.style().standardIcon(QtGui.QStyle.SP_MediaPause))
self.state = True
class SubWindow(QtGui.QDialog):
def __init__(self, parent=None):
"""
Sub-window designed to allow the user to select their source for the neural network
"""
super(SubWindow, self).__init__(parent)
self.ui = Ui_Dialog()
self.ui.setupUi(self)
def run(self):
self.ui.liveSource.clicked.connect(self.live)
self.ui.loadBag.clicked.connect(self.bag)
def live(self):
self.parent().live = True
self.close()
def bag(self):
"""
Used when a file is selected, asserts whether the item is a bag file or not.
Sets its parent window value to the file's name
"""
file_opener = QtGui.QFileDialog
file = file_opener.getOpenFileName(self, "Please select a bag file", filter="bag(*.bag)")
assert(str(file).endswith(".bag"))
self.parent().file_available.append(file)
self.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import argparse
import sys
import tarfile
import multiprocessing
import json
import shutil
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
from random import random
from string import ascii_letters as letters
from basic_modules.workflow import Workflow
from basic_modules.metadata import Metadata
from utils import logger
from utils import remap
from tool.common import CommandLineParser
from tool.common import format_utils
from tool.tb_model import tbModelTool
if '/opt/COMPSs/Bindings/python' in sys.path:
sys.path.pop(sys.path.index('/opt/COMPSs/Bindings/python'))
# ------------------------------------------------------------------------------
class tadbit_model(Workflow): # pylint: disable=invalid-name,too-few-public-methods
"""
Wrapper for the VRE form TADbit model.
It has two main sections:
- looks for optimal parameters for modeling a region
- models a region for a given optimal parameters
.
"""
configuration = {}
def __init__(self, configuration=None):
"""
Initialise the tool with its configuration.
Parameters
----------
configuration : dict
a dictionary containing parameters that define how the operation
should be carried out, which are specific to each Tool.
"""
tool_extra_config = json.load(open(os.path.dirname(
os.path.abspath(__file__))+'/tadbit_wrappers_config.json'))
os.environ["PATH"] += os.pathsep + format_utils.convert_from_unicode(
tool_extra_config["bin_path"])
if configuration is None:
configuration = {}
self.configuration.update(format_utils.convert_from_unicode(configuration))
# Number of cores available
num_cores = multiprocessing.cpu_count()
self.configuration["ncpus"] = num_cores
tmp_name = ''.join([letters[int(random()*52)]for _ in range(5)])
if 'execution' in self.configuration:
self.configuration['project'] = self.configuration['execution']
self.configuration['workdir'] = self.configuration['project']+'/_tmp_tadbit_'+tmp_name
if not os.path.exists(self.configuration['workdir']):
os.makedirs(self.configuration['workdir'])
self.configuration["optimize_only"] = "generation:num_mod_comp" not in self.configuration
if "optimization:max_dist" in self.configuration and \
not self.configuration["optimize_only"]:
del self.configuration["optimization:max_dist"]
del self.configuration["optimization:upper_bound"]
del self.configuration["optimization:lower_bound"]
del self.configuration["optimization:cutoff"]
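        # Flatten namespaced keys so that e.g. 'optimization:max_dist' is also
        # available as plain 'max_dist'.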
self.configuration.update(
{(key.split(':'))[-1]: val for key, val in self.configuration.items()}
)
if self.configuration["gen_pos_chrom_name"] == 'all':
self.configuration["gen_pos_chrom_name"] = ""
self.configuration["gen_pos_begin"] = ""
self.configuration["gen_pos_end"] = ""
if "gen_pos_begin" not in self.configuration:
self.configuration["gen_pos_begin"] = ""
if "gen_pos_end" not in self.configuration:
self.configuration["gen_pos_end"] = ""
def run(self, input_files, metadata, output_files):
"""
Parameters
----------
files_ids : list
List of file locations
metadata : list
Required meta data
output_files : list
List of output file locations
Returns
-------
outputfiles : list
List of locations for the output bam files
"""
logger.info(
"PROCESS MODEL - FILES PASSED TO TOOLS: {0}".format(
str(input_files["hic_contacts_matrix_norm"]))
)
m_results_meta = {}
m_results_files = {}
if "norm" in metadata['hic_contacts_matrix_norm'].meta_data:
if metadata['hic_contacts_matrix_norm'].meta_data["norm"] != 'norm':
clean_temps(self.configuration['workdir'])
logger.fatal("Only normalized matrices can be used to build 3D models.\nExiting")
raise ValueError('Missing normalized input matrix.')
input_metadata = remap(self.configuration,
"optimize_only", "gen_pos_chrom_name", "resolution", "gen_pos_begin",
"gen_pos_end", "max_dist", "upper_bound", "lower_bound", "cutoff",
"workdir", "project", "ncpus")
in_files = [format_utils.convert_from_unicode(input_files['hic_contacts_matrix_norm'])]
input_metadata["species"] = "Unknown"
input_metadata["assembly"] = "Unknown"
if "assembly" in metadata['hic_contacts_matrix_norm'].meta_data:
input_metadata["assembly"] = metadata['hic_contacts_matrix_norm'].meta_data["assembly"]
if metadata['hic_contacts_matrix_norm'].taxon_id:
dt_json = json.load(urlopen(
"http://www.ebi.ac.uk/ena/data/taxonomy/v1/taxon/tax-id/" +
str(metadata['hic_contacts_matrix_norm'].taxon_id)))
input_metadata["species"] = dt_json['scientificName']
input_metadata["num_mod_comp"] = self.configuration["num_mod_comp"]
input_metadata["num_mod_keep"] = self.configuration["num_mod_keep"]
tm_handler = tbModelTool()
tm_files, _ = tm_handler.run(in_files, input_metadata, [])
m_results_files["modeling_stats"] = self.configuration['project']+"/model_stats.tar.gz"
tar = tarfile.open(m_results_files["modeling_stats"], "w:gz")
tar.add(tm_files[0], arcname='modeling_files_and_stats')
tar.close()
if not self.configuration["optimize_only"]:
m_results_files["tadkit_models"] = self.configuration['project'] + "/" + \
os.path.basename(tm_files[1])
os.rename(tm_files[1], m_results_files["tadkit_models"])
m_results_meta["tadkit_models"] = Metadata(
data_type="chromatin_3dmodel_ensemble",
file_type="JSON",
file_path=m_results_files["tadkit_models"],
sources=in_files,
meta_data={
"description": "Ensemble of chromatin 3D structures",
"visible": True,
"assembly": input_metadata["assembly"]
},
taxon_id=metadata['hic_contacts_matrix_norm'].taxon_id)
# List of files to get saved
logger.info("TADBIT RESULTS: " + ','.join(
[str(m_results_files[k]) for k in m_results_files]))
m_results_meta["modeling_stats"] = Metadata(
data_type="tool_statistics",
file_type="TAR",
file_path=m_results_files["modeling_stats"],
sources=in_files,
meta_data={
"description": "TADbit modeling statistics and result files",
"visible": True
})
clean_temps(self.configuration['workdir'])
return m_results_files, m_results_meta
# ------------------------------------------------------------------------------
def main(args):
"""
Main function
"""
from apps.jsonapp import JSONApp
app = JSONApp()
result = app.launch(tadbit_model,
args.config,
args.in_metadata,
args.out_metadata)
return result
def clean_temps(working_path):
"""Cleans the workspace from temporal folder and scratch files"""
for the_file in os.listdir(working_path):
file_path = os.path.join(working_path, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except OSError:
pass
try:
os.rmdir(working_path)
except OSError:
pass
logger.info('[CLEANING] Finished')
def make_absolute_path(files, root):
"""Make paths absolute."""
for role, path in files.items():
files[role] = os.path.join(root, path)
return files
# ------------------------------------------------------------------------------
if __name__ == "__main__":
sys._run_from_cmdl = True # pylint: disable=protected-access
# Set up the command line parameters
PARSER = argparse.ArgumentParser(description="TADbit map")
# Config file
PARSER.add_argument("--config", help="Configuration JSON file",
type=CommandLineParser.valid_file, metavar="config", required=True)
# Metadata
PARSER.add_argument("--in_metadata", help="Project metadata",
metavar="in_metadata", required=True)
# Output metadata
PARSER.add_argument("--out_metadata", help="Output metadata",
metavar="output_metadata", required=True)
# Log file
PARSER.add_argument("--log_file", help="Log file",
metavar="log_file", required=True)
IN_ARGS = PARSER.parse_args()
RESULTS = main(IN_ARGS)
|
from SCons.Script import *
from os import path
def GetVars(vars):
vars.Add(ListVariable('arch', 'Build architectures', 'all',
['ppc','i386','x86_64']))
def DoUniversal(env, command, target, source, *args, **kw):
envs = []
builder = env['BUILDERS'][command]
    exe = command in ('LoadableModule', 'SharedLibrary', 'StaticLibrary', 'Program', 'Library')
if env['PLATFORM'] == 'darwin' and exe:
archs = env.subst('${arch}').split()
outs = []
for arch in archs:
newEnv = env.Clone()
newEnv.Append(CCFLAGS="-arch " + arch, LINKFLAGS="-arch " + arch, OBJPREFIX=arch)
outs += builder(newEnv, target=None, source=source, *args, **kw)
p, f = path.split(target)
target = path.join(p, builder.get_prefix(env) + f + builder.get_suffix(env))
ret = env.Command(target, outs, "lipo -create $SOURCES -output $TARGET" )
else:
ret = builder(env, target, source, *args, **kw)
    return ret
AddMethod(Environment, DoUniversal)
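# Illustrative use from an SConstruct (hypothetical target/source names):
#   vars = Variables()
#   GetVars(vars)
#   env = Environment(variables=vars)
#   env.DoUniversal('Program', 'build/mytool', ['src/main.c'])
# On darwin this builds one object set per selected arch and lipo's them into a
# universal binary; on other platforms it falls through to the plain builder.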
|
# [Backward compatibility]: keep importing modules functions
from ..utils.deprecation import deprecation
from ..utils.importlib import func_name
from ..utils.importlib import module_name
from ..utils.importlib import require_modules
deprecation(
name='ddtrace.contrib.util',
message='Use `ddtrace.utils.importlib` module instead',
version='1.0.0',
)
__all__ = [
'require_modules',
'func_name',
'module_name',
]
|
import argparse
import decimal
import logging
from decimal import Decimal, ROUND_DOWN, ROUND_UP
class CLI:
def __init__(self):
self.args = None
self.requirement = None
self.tax_rate = None
self.tolerance = None
logging.basicConfig(level=logging.INFO)
self.init_argparse()
def init_argparse(self):
parser = argparse.ArgumentParser()
parser.add_argument(
'--money_required',
dest='requirement',
required=True,
help='The value required to be raised before taxes, in decimal '
'format with 2 decimal places',
)
parser.add_argument(
'--tax_rate',
dest="tax_rate",
required=True,
help='The tax rate in decimal format',
)
parser.add_argument(
'--tolerance',
dest='tolerance',
required=False,
help='The tolerance of how much extra tax you are willing to pay',
)
self.args = parser.parse_args()
try:
self.requirement = Decimal(self.args.requirement).quantize(
Decimal('.01'), rounding=ROUND_DOWN
)
except decimal.InvalidOperation:
parser.error("The input value for money required is not a number.")
try:
self.tax_rate = Decimal(self.args.tax_rate)
except decimal.InvalidOperation:
parser.error("The input value for tax rate is not a number.")
if self.args.tolerance:
try:
self.tolerance = Decimal(self.args.tolerance)
except decimal.InvalidOperation:
parser.error("The input value for tolerance is not a number.")
else:
            self.tolerance = Decimal('0.01')
def process(self):
initial_tax = self.requirement * (self.tax_rate/100)
total = self.calculate(self.requirement + initial_tax, initial_tax)
        total = total.quantize(
            Decimal('.01'), rounding=ROUND_UP
        )
logging.info(f"You need to take out {'{:.2f}'.format(total)} to have "
f"{self.requirement} to cover {self.tax_rate}% tax rate "
f"and pay less than {'{:.2f}'.format(self.tolerance)} "
f"yourself.")
def calculate(self, total, last_tax):
tax = last_tax * (self.tax_rate/100)
total += tax
if tax > self.tolerance:
return self.calculate(total, tax)
return total
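    # Worked example (hypothetical numbers): --money_required 100 --tax_rate 10.
    # initial_tax = 10.00, then calculate(110, 10) adds 1, 0.1 and 0.01 in turn and
    # stops once the added tax is no longer above the tolerance, returning 111.11,
    # which approximates the closed form requirement / (1 - rate/100) = 111.11...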
if __name__ == '__main__':
cli = CLI()
cli.process()
|
# Generated by Django 2.0.5 on 2018-05-14 13:45
from django.db import migrations
import phonenumber_field.modelfields
class Migration(migrations.Migration):
dependencies = [
('zconnect', '0005_org_device_related_name'),
]
operations = [
migrations.AddField(
model_name='user',
name='phone_number',
field=phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128),
),
]
|
import pygame,sys,time
SCREEN_WIDTH,SCREEN_HEIGHT = 800,680
BOARD_ORDER,BOARD_SIZE = 19,30
BOARD_X0,BOARD_Y0 = 15,15
GRID_NULL,GRID_BLACK,GRID_WHITE = 0,1,2
SPEED_X = [1,0,1,-1]
SPEED_Y = [0,1,1,1]
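# SPEED_X/SPEED_Y pair up into the four line directions checked for a win:
# horizontal (1,0), vertical (0,1) and the two diagonals (1,1) and (-1,1).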
def is_five(grid,x,y,flag):
    """Return True if the stone just placed at (x,y) completes five (or more) in a row."""
    for i in range(4):
        sx,sy = SPEED_X[i],SPEED_Y[i]
        count = 1   # the stone at (x,y) itself
        # walk away from (x,y) in both directions along this line and
        # count consecutive stones of the same colour
        for d in (1,-1):
            cx,cy = x,y
            while True:
                cx += sx*d
                cy += sy*d
                if not (0 <= cx < BOARD_ORDER and 0 <= cy < BOARD_ORDER):
                    break   # off the board (also avoids negative-index wraparound)
                if grid[cy][cx] != flag:
                    break
                count += 1
        if count >= 5:
            return True
    return False
class CLS_gobang(object):
def __init__(self,fPic,bPic,wPic,x0,y0):
self.facePic,self.bMan,self.wMan = fPic,bPic,wPic
self.x0,self.y0 = x0,y0
self.board = pygame.Surface((570,570))
self.draw_board()
self.grid = []
for y in range(BOARD_ORDER):
line = [GRID_NULL]*BOARD_ORDER
self.grid.append(line)
self.flag = GRID_BLACK
self.font = pygame.font.Font(None,32)
return
def draw_board(self):
self.board.fill((240,200,0))
L = BOARD_X0+(BOARD_ORDER-1)*BOARD_SIZE
for i in range(BOARD_X0,SCREEN_HEIGHT,BOARD_SIZE):
pygame.draw.line(self.board,(0,0,0),
(BOARD_X0,i),(L,i),1)
pygame.draw.line(self.board,(0,0,0),
(i,BOARD_Y0),(i,L),1)
pygame.draw.rect(self.board,(0,0,0),(BOARD_X0-1,BOARD_Y0-1,L+3-BOARD_X0,L+3-BOARD_Y0),1 )
return
def draw(self,scr):
scr.fill((180,140,0))
scr.blit(self.facePic,(0,0))
scr.blit(self.board,(self.x0,self.y0))
for y in range(BOARD_ORDER):
for x in range(BOARD_ORDER):
if self.grid[y][x] == GRID_BLACK:
scr.blit(self.bMan,
(self.x0+x*BOARD_SIZE,self.y0+y*BOARD_SIZE))
elif self.grid[y][x] == GRID_WHITE:
scr.blit(self.wMan,
(self.x0+x*BOARD_SIZE,self.y0+y*BOARD_SIZE))
x = self.x0+BOARD_X0+BOARD_SIZE*BOARD_ORDER+50
txt = self.font.render('NEXT',True,(225,220,0))
scr.blit(txt,(x,self.y0+BOARD_Y0+20))
if self.flag == GRID_BLACK:
scr.blit(self.bMan,(x+15,self.y0+BOARD_Y0+50))
else:
scr.blit(self.wMan,(x+15,self.y0+BOARD_Y0+50))
return
def mouse_down(self,mx,my):
# print(self.x0,self.y0)
gx = (mx-self.x0)//BOARD_SIZE
gy = (my-self.y0)//BOARD_SIZE
if 0<=gx<BOARD_ORDER and 0<=gy<BOARD_ORDER:
if self.grid[gy][gx] == GRID_NULL:
self.grid[gy][gx] = self.flag
if is_five(self.grid,gx,gy,self.flag):
print(self.flag,"Win!!!")
self.flag = GRID_BLACK+GRID_WHITE-self.flag
return
# main program
pygame.init()
screen = pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT))
fPic = pygame.image.load('face01.bmp')
fPic.set_colorkey((0,0,0))
wPic = pygame.image.load('WCMan.bmp')
wPic.set_colorkey((255,0,0))
bPic = pygame.image.load('BCMan.bmp')
bPic.set_colorkey((255,0,0))
gobang = CLS_gobang(fPic,bPic,wPic,30,80)
while True:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button !=1:
continue
mx,my = event.pos
gobang.mouse_down(mx,my)
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
gobang.draw(screen)
pygame.display.update()
|
p = float(input('Enter your weight: '))
a = float(input('Enter your height: '))
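# Body mass index (BMI): weight in kilograms divided by the square of height in metres.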
imc = p/a**2
if imc < 18.5:
    print('Underweight!')
elif 18.5 <= imc < 25:
    print('Ideal weight!')
elif 25 <= imc < 30:
    print('Overweight!')
elif 30 <= imc < 40:
    print('Obesity!')
else:
    print('Morbid obesity!')
|
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.template.defaultfilters import safe
from django.utils.translation import ugettext_lazy as _
from backend.plugins.mailchimp.models import MailchimpPluginModel
from backend.plugins.module_name import MODULE_NAME
@plugin_pool.register_plugin
class MailchimpPlugin(CMSPluginBase):
module = MODULE_NAME
model = MailchimpPluginModel
name = _("Newsletter Subscription (Mailchimp)")
render_template = 'mailchimp/mailchimp_plugin.html'
fieldsets = [
(
None,
{
'description': safe(
"""
                    In order to find that data:
                    <ol>
                    <li>Log in to your Mailchimp account</li>
                    <li>In the top menu click <b>Audience</b> and select the <b>Signup forms</b> item</li>
                    <li>Click on <b>Embedded forms</b></li>
                    <li>Find the <b>Copy/paste onto your site</b> section</li>
                    </ol>
                    <p></p>
                    <p>There you will see an HTML snippet; find the &lt;form&gt; tag in it.</p>
                    <p>It will look like &lt;form action='https://effectiefaltruisme.<b>us14</b>.list-manage.com/subscribe/post?u=<b>0d235948217a55858a0e810c4</b>&amp;id=<b>d652eb1a9c</b>'&gt;.
                    Here <b>us14</b> is your server location code, <b>0d235948217a55858a0e810c4</b> is your organization id and <b>d652eb1a9c</b> is your list id.</p>
"""
),
'fields': (
'server_location_code',
'organization_id',
'list_id',
),
},
),
]
|
# -*- coding: utf-8 -*-
# chris 073015
from HTMLParser import HTMLParser
from argparse import ArgumentParser
from contextlib import closing
from os.path import commonprefix
from urllib2 import urlopen
base = 'http://www.fileformat.info/info/unicode/category'
htmlunescape = HTMLParser().unescape
# Get characters.
def getchars(cat):
url = '%s/%s/list.htm' % (base,cat)
chars = []
with closing(urlopen(url)) as page:
for line in page:
if 'U+' not in line: continue
idx = line.index('U+')
idx += 2
idx2 = line.index('<',idx)
point = int(line[idx:idx2],16)
descr = page.next().strip()
# Eliminate <td> and </td>
descr = descr[4:-5]
descr = htmlunescape(descr)
chars.append((point,descr))
return chars
# Consolidate contiguous ranges.
def consolidate(chars):
  chars.sort()
  ranges = []
  last = start = None
  descrs = []
  for point,descr in chars:
    if start is None:
      start = point
    elif point != last + 1:
      end = last
      descr2 = commonprefix(descrs)
      if not descr2: descr2 = '?'
      ranges.append(((start,end),descr2))
      start = point
      descrs = []
    last = point
    descrs.append(descr)
  # Flush the final range, which the loop above never appends.
  if start is not None:
    end = last
    descr2 = commonprefix(descrs)
    if not descr2: descr2 = '?'
    ranges.append(((start,end),descr2))
  return ranges
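# output() prints one line per range in a form usable inside an EBNF grammar,
# e.g. (illustrative): "\U00000041" … "\U0000005a" | // LATIN CAPITAL LETTER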
def output(ranges):
for (start,end),descr in ranges:
out = ''
if start == end:
out += r'"\U%08x"' % start
out += ' ' * 15
else:
out += r'"\U%08x" … "\U%08x"' % (start,end)
out += ' | // %s' % descr
print out
def main():
descr = ('This script takes one or more names of Unicode character '
'categories as command line arguments. It fetches the '
'character listings from fileformat.info and consolidates '
'them into ranges with helpful comments. The comments '
'are generated by computing common prefixes of character '
'names in ranges. If no such common prefix exists, the '
'script will use a question mark character. The output '
'is suitable for use in golang.org/x/exp/ebnf EBNF '
'grammars.')
parser = ArgumentParser(description=descr)
parser.add_argument('cat',nargs='+',help='unicode category name')
args = parser.parse_args()
chars = []
for cat in args.cat: chars.extend(getchars(cat))
ranges = consolidate(chars)
output(ranges)
main()
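# Example invocation (the script name is assumed; pass one or more Unicode
# category abbreviations such as Lu, Zs or Zl):
#   python unicode_ranges.py Zs Zl Zp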
|
# Parameter modes:
# 0 - position mode (parameter is position)
# 1 - immediate mode (parameter is value)
#
# Opcodes:
# 1 - add: Read three next cells (x, y, z) read memory at adresses x and y,
# add them and store at address z, then move instruction pointer
# 2 - multiply: like 1, but multiply instead of add
# 3 - input and store at address given by parameter
# 4 - output from address given by parameter
# 5 - jump-if-true: if first params is non-zero, jump to second param
# 6 - jump-if-false: if first params is zero, jump to second param
# 7 - less than: store logical value of (first param < second param) to position
# given by third param
# 8 - equals: store logical value of (first param == second param) to position
# given by third param
# 99 - end the program
# otherwise, something went wrong
#
#
# ABCDE
# 1002
#
# DE - two-digit opcode, 02 == opcode 2
# C - mode of 1st parameter, 0 == position mode
# B - mode of 2nd parameter, 1 == immediate mode
# A - mode of 3rd parameter, 0 == position mode,
# omitted due to being a leading zero
# Define opcodes
OP_ADD = 1 # Add
OP_MUL = 2 # Multiply
OP_IN = 3 # Input
OP_OUT = 4 # Output
OP_JNZ = 5 # Jump if non-zero (true)
OP_JZR = 6 # Jump if zero (false)
OP_LT = 7 # Less than
OP_EQ = 8 # Equals
OP_END = 99 # End
# Define modes
MODE_POS = 0 # Position
MODE_IMM = 1 # Immediate
class Instruction:
def __init__(self, instruction, program):
# Store raw instruction and opcode
self.raw = str(instruction)
self.opcode = int(self.raw[-2:])
self.auto_inc = True # Whether ins pointer should auto increase after op
self.params_num = 0
# Set params number
if self.opcode in [OP_ADD, OP_MUL, OP_LT, OP_EQ]:
self.params_num = 3
elif self.opcode in [OP_IN, OP_OUT]:
self.params_num = 1
elif self.opcode in [OP_JZR, OP_JNZ]:
self.params_num = 2
self.auto_inc = False
# Set default modes to 0
self.params_modes = [MODE_POS for i in range(0, self.params_num)]
# Create params array for later use
self.params = [None for i in range(0, self.params_num)]
# If params modes specified
if len(self.raw) > 2:
params_raw = str(int(instruction / 100))
for i in range(0, len(params_raw)):
self.params_modes[i] = int(params_raw[len(params_raw) - i - 1])
# Assignment fix
# E.g.
# pvalue = ins.param[0] + ins.param[1]
# mem[ins.param[2]] = pvalue
#
# If we actually treat param[2] in address mode, self.apply_modes is
# going to break this assignment
#
# Fix instructions whose last param is an assignment address
if self.opcode in [OP_ADD, OP_MUL, OP_EQ, OP_LT, OP_IN]:
self.params_modes[self.params_num - 1] = MODE_IMM
self._apply_modes(program)
# Apply modes to params
def _apply_modes(self, program):
params_start = program.ins_ptr + 1
params = program.mem[params_start : params_start + self.params_num]
for i in range(0, self.params_num):
if self.params_modes[i] == MODE_POS:
# Address mode
self.params[i] = program.mem[params[i]]
else:
# Immediate mode
self.params[i] = params[i]
class Program:
def __init__(self, intcode):
self.mem = intcode.copy()
self.mem_size = len(self.mem)
self.ins_ptr = 0
self.finished = False
self.static_input = None
self.static_input_ind = 0
        self.last_output = None
        self.mute_output = False  # checked by OP_OUT; must exist before process() runs
def process(self, verbose=True):
self.ins_ptr = 0
while True:
# Process opcode
ins = Instruction(self.mem[self.ins_ptr], self)
if ins.opcode == OP_ADD:
self._math(ins, lambda x, y: x + y)
elif ins.opcode == OP_MUL:
self._math(ins, lambda x, y: x * y)
elif ins.opcode in [OP_IN, OP_OUT]:
self._io(ins)
elif ins.opcode in [OP_EQ, OP_LT]:
self._logic_cmp(ins)
elif ins.opcode in [OP_JNZ, OP_JZR]:
self._logic_jmp(ins)
elif ins.opcode == OP_END:
self.finished = True
break
else:
print(
"Exception: Unknown instruction %s at address %d"
% (ins.raw, self.ins_ptr)
)
break
# Move instruction pointer forwards
if ins.auto_inc:
self.ins_ptr += ins.params_num + 1
if self.ins_ptr >= self.mem_size:
print("Exception: Reached end of program without OP_END")
break
if verbose:
print("Done.")
print()
def set_input(self, input_arr):
"""
Sets provided array as an input source to read from
"""
self.static_input = input_arr.copy()
def _math(self, ins, op):
self.mem[ins.params[2]] = op(ins.params[0], ins.params[1])
def _io(self, ins):
if ins.opcode == OP_IN:
if self.static_input is None:
# Assume okay input
inp = int(input("IN: "))
else:
inp = self.static_input[self.static_input_ind]
self.static_input_ind += 1
self.mem[ins.params[0]] = inp
else:
# Output and save last output (used for 07/part 1)
if not self.mute_output:
print("OUT >> %d" % ins.params[0])
self.last_output = ins.params[0]
def _logic_jmp(self, ins):
if ins.opcode == OP_JNZ and ins.params[0] != 0:
self.ins_ptr = ins.params[1]
elif ins.opcode == OP_JZR and ins.params[0] == 0:
self.ins_ptr = ins.params[1]
else:
self.ins_ptr += ins.params_num + 1
def _logic_cmp(self, ins):
if ins.opcode == OP_EQ:
self.mem[ins.params[2]] = int(ins.params[0] == ins.params[1])
elif ins.opcode == OP_LT:
self.mem[ins.params[2]] = int(ins.params[0] < ins.params[1])
def print_memory(mem):
mem_size = len(mem)
mem_size_len = len(str(mem_size))
for ins_ptr in range(0, mem_size):
print("%s: %d" % (str(ins_ptr).rjust(mem_size_len), mem[ins_ptr]))
|
from functools import partial
from typing import Any, Dict, Iterable, Union
from sqlalchemy.schema import Table
from todolist.infra.database.models.todo_item import TodoItem
from todolist.infra.database.models.user import User
from todolist.infra.database.sqlalchemy import metadata
ValuesType = Dict[str, Any]
def insert_model(model: Table, values: Union[ValuesType, Iterable[ValuesType]]) -> None:
query = model.insert()
    if isinstance(values, dict):
metadata.bind.execute(query, **values)
else:
metadata.bind.execute(query, list(values))
register_user = partial(insert_model, User)
insert_todo_item = partial(insert_model, TodoItem)
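# Example usage (a minimal sketch; the column names are hypothetical and must
# match the actual User/TodoItem table definitions):
#   register_user({"email": "alice@example.com", "password_hash": "..."})
#   insert_todo_item([{"msg": "buy milk"}, {"msg": "walk the dog"}])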
|
import heapq
from typing import List
class Solution:
def kWeakestRows(self, mat: List[List[int]], k: int) -> List[int]:
ones = [sum(row) for row in mat]
return heapq.nsmallest(k, range(len(mat)), key=lambda x : (ones[x], x))
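# Worked example (a sketch; the matrix is illustrative): for
# mat = [[1,1,0,0,0],[1,1,1,1,0],[1,0,0,0,0],[1,1,0,0,0],[1,1,1,1,1]] and k = 3,
# the row sums are [2, 4, 1, 2, 5], so sorting row indices by
# (soldier count, index) yields [2, 0, 3].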
|
import logging
import numpy as np
from .analysis import Idealizer
from ..constants import CURRENT_UNIT_FACTORS, TIME_UNIT_FACTORS
from ..utils import round_off_tables
debug_logger = logging.getLogger("ascam.debug")
ana_logger = logging.getLogger("ascam.analysis")
class IdealizationCache:
def __init__(
self,
data,
amplitudes,
thresholds=None,
resolution=None,
interpolation_factor=None,
):
self.data = data
self.amplitudes = amplitudes
self.thresholds = thresholds
self.resolution = resolution
self.interpolation_factor = interpolation_factor
@property
def ind_idealized(self):
"""Return the set of numbers of the episodes in the currently selected series
that have been idealized with the current parameters."""
return {
episode.n_episode
for episode in self.data.series
if episode.idealization is not None
}
def idealization(self, n_episode=None):
"""Return the idealization of a given episode or idealize the episode and then return it."""
if n_episode is None:
n_episode = self.data.current_ep_ind
out = [
episode.idealization
for episode in self.data.series
if episode.n_episode == n_episode
]
if out: # if an idealization exists return it
return out[0]
else: # else idealize the episode and then return
self.idealize_episode(n_episode)
            return self.idealization(n_episode)
def time(self, n_episode=None):
"""Return the time vector corresponding to the idealization of the given episode,
if it is not idealized, idealize it first and then return the time."""
if n_episode is None:
n_episode = self.data.current_ep_ind
out = [
episode.id_time
for episode in self.data.series
if episode.n_episode == n_episode
]
if out:
return out[0]
else:
self.idealize_episode(n_episode)
            return self.time(n_episode)
@property
def all_ep_inds(self):
return {e.n_episode for e in self.data.series}
def clear_idealization(self):
for series in self.data.values():
for episode in [
episode for episode in series if episode.idealization is not None
]:
episode.idealization = None
episode.id_time = None
def idealize_episode(self, n_episode=None):
if n_episode is None:
n_episode = self.data.current_ep_ind
if n_episode not in self.ind_idealized:
debug_logger.debug(
f"idealizing episode {n_episode} of "
f"series {self.data.current_datakey}"
)
self.data.episode(n_episode).idealize(
self.amplitudes,
self.thresholds,
self.resolution,
self.interpolation_factor,
)
else:
debug_logger.debug(f"episode number {n_episode} already idealized")
def idealize_series(self):
debug_logger.debug(f"idealizing series {self.data.current_datakey}")
to_idealize = self.all_ep_inds - self.ind_idealized
for i in to_idealize:
self.idealize_episode(i)
def get_events(self, time_unit="s", trace_unit="A"):
if self.all_ep_inds != self.ind_idealized:
self.idealize_series()
event_array = np.zeros((0, 5)).astype(object)
for episode in self.data.series:
# create a column containing the episode number
ep_events = Idealizer.extract_events(
self.idealization(episode.n_episode), self.time()
)
episode_number = episode.n_episode * np.ones(len(ep_events[:, 0]))
# glue that column to the event
ep_events = np.concatenate(
(episode_number[:, np.newaxis], ep_events), axis=1
)
event_array = np.concatenate((event_array, ep_events), axis=0)
event_array[:, 1] *= CURRENT_UNIT_FACTORS[trace_unit]
event_array[:, 2:] *= TIME_UNIT_FACTORS[time_unit]
return event_array
def dwell_time_hist(
self, amp, n_bins=None, time_unit="ms", log_times=True, root_counts=True
):
events = self.get_events(time_unit)
debug_logger.debug(f"getting events for amplitude {amp}")
# np.isclose works best on order of unity (with default tolerances
# rather than figure out tolerances for e-12 multiply the
# amp values by the expected units pA
factor = CURRENT_UNIT_FACTORS["pA"]
mask = np.isclose(
np.asarray(events[:, 1], dtype=np.float) * factor, amp * factor
)
debug_logger.debug(f"multiplied amps by pA, amp={amp*factor}")
data = events[:, 2][mask]
if log_times:
data = np.log10(data.astype(float))
debug_logger.debug(f"there are {len(data)} events")
if n_bins is None:
n_bins = int(self.get_n_bins(data))
heights, bins = np.histogram(data, n_bins)
heights = np.asarray(heights, dtype=np.float)
if root_counts:
heights = np.sqrt(heights)
return heights, bins
@staticmethod
def get_n_bins(data):
n = len(data)
std = np.std(data)
return round(3.49 * std * n ** (1 / 3))
def export_events(self, filepath, time_unit="us", trace_unit="pA"):
"""Export a table of events in the current (idealized) series and
duration to a csv file."""
debug_logger.debug(f"export_events")
import pandas as pd
if not filepath.endswith(".csv"):
filepath += ".csv"
header = [
"Episode Number",
f"Amplitude [{trace_unit}]",
f"Duration [{time_unit}]",
f"t_start [{time_unit}]",
f"t_stop [{time_unit}]",
]
params = (
f"amplitudes = {self.amplitudes} [A];"
+ f"thresholds = {self.thresholds} [A];"
+ f"resolution = {self.resolution} [s];"
+ f"interpolation_factor = {self.interpolation_factor}\n"
)
export_array = self.get_events(time_unit, trace_unit)
export_array = pd.DataFrame(export_array, columns=header)
# truncate floats for duration and timestamps to 1 micro second
export_array = round_off_tables(
export_array, ["int", trace_unit, time_unit, time_unit, time_unit]
)
with open(filepath, "w") as f:
f.write(params)
export_array.to_csv(filepath, mode="a")
|
#!/usr/bin/python
import os
import sys
import xml.etree.ElementTree as ET
def read_xml_labels(labelfn):
tree = ET.parse(labelfn)
labs = tree.getroot()
a = labs[1]
b = a.getchildren()
labs = {"ind": [], "name": []}
for tb in b:
labs["ind"].append(int(tb.attrib["index"]))
labs["name"].append(tb.text)
return labs
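# Expected layout (an assumption inferred from the parsing above: the second
# child of the root holds <label index="N">name</label>-style entries):
#   <annotation>
#     <meta/>
#     <labels>
#       <label index="0">background</label>
#       <label index="1">foreground</label>
#     </labels>
#   </annotation>
# read_xml_labels(path) would then return
#   {"ind": [0, 1], "name": ["background", "foreground"]}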
|
from astropy.timeseries import LombScargle
from hydroDL.post import axplot, figplot
import matplotlib.pyplot as plt
import importlib
import pandas as pd
import numpy as np
import os
import time
# test
xd = np.arange('1990-01-01', '2000-01-01', dtype='datetime64[D]')
xx = (xd-np.datetime64('1990-01-01')).astype(np.float)
x = xx
# x = np.concatenate([x[:700], x[1400:]])
pLst = [0, 0.01, 0.1, 0.5]
# pLst = [0, 0.7, 0.8, 0.9]
fig, axes = plt.subplots(len(pLst), 1, figsize=(8, 6))
for kk, p in enumerate(pLst):
x = x[np.random.rand(len(x)) >= p]
y = 5 * np.cos(x*2*np.pi/365)+4 * np.cos((x+120)*2*np.pi/365*2) + \
4 * np.cos((x+60)*2*np.pi/365/4)
# y = 5*np.sin(x*2*np.pi/365)
y = y-np.mean(y)
ls = LombScargle(x, y)
# freq = np.arange(1, len(xd))/len(xd)
freq = np.fft.fftfreq(len(xd))[1:]
power = ls.power(freq)
ym = np.zeros([len(freq), len(xx)])
yp = np.zeros([len(freq), len(xx)])
for k, f in enumerate(freq):
ym[k, :] = ls.model(xx, f)
# yp[k, :] = ym[k, :]*np.sqrt(ls.power(f))
yp[k, :] = ym[k, :]*ls.power(f)
axes[kk].plot(x, y, '--*')
axes[kk].plot(xx, np.sum(ym/2*(1-p), axis=0), '-r')
axes[kk].set_title('{}% missing data'.format(p*100))
plt.tight_layout()
fig.show()
|
import flask
from groups import *
from constants import *
from sqlalchemy.orm import aliased
@app.route('/plots/statistics', methods=['GET'])
@flask_login.login_required
def get_statistic_plots():
group = flask_login.current_user.group
if group is None:
return 'You are not in a group.'
plots = StatisticBarPlot.query.filter(StatisticBarPlot.group_id==flask_login.current_user.group.id).all()
data = {p.id: {'name': p.name, 'application': p.application_name, 'statistics': p.statistics} for p in plots}
return flask.jsonify(data)
@app.route('/plots/statistics/data', methods=['GET'])
@flask_login.login_required
def get_statistic_plot_data():
if request.args.get('id'):
plot = StatisticBarPlot.query.filter(StatisticBarPlot.id == int(request.args.get('id')),
StatisticBarPlot.group_id == flask_login.current_user.group.id).first()
stats = zip(*db.session.query(Statistic.name.distinct()).filter(Statistic.name.in_(plot.statistics)).all())[0]
if request.args.get('alias_level', ANY) == NONE:
uuids = UUID.query.outerjoin(Alias).filter(UUID.group_id == plot.group_id, Alias.id == None).all()
elif request.args.get('alias_level', ANY) == ONLY:
uuids = UUID.query.join(Alias).filter(UUID.group_id == plot.group_id).all()
else:
uuids = plot.group.uuids
d = []
for s in stats:
d2 = [s]
for u in uuids:
count = db.session.query(Statistic.count).filter(Statistic.uuid_id == u.id, Statistic.name == s).first()
d2.append(count[0] if count else 0)
d.append(d2)
aliases = {a.uuid.id: a.alias for a in plot.group.aliases}
data = {'uuids': [aliases.get(u.id, u.user_identifier) for u in uuids],
'counts': d,
'n_users': len(uuids)}
return flask.jsonify(data)
@app.route('/plots/statistics/add', methods=['POST'])
@flask_login.login_required
def add_statistic_plot():
group = flask_login.current_user.group
if group is None:
return 'You are not in a group.'
data = request.get_json()
fields = data['statistics']
name = data['name']
app = data.get('application', '')
version = data.get('version', '')
q = db.session.query(Statistic.name).filter(Statistic.name.in_(fields))
    if app:
        q = q.filter(Statistic.application_name == app)
    if version:
        q = q.filter(Statistic.application_version == version)
stats = q.group_by(Statistic.name).all()
if stats:
plot = StatisticBarPlot(name,
flask_login.current_user.group, zip(*stats)[0])
db.session.add(plot)
db.session.commit()
flash("New plot '%s' has been created" % plot.name)
return redirect(request.referrer)
@app.route('/plots/statistics/remove', methods=['POST'])
@flask_login.login_required
def remove_statistic_plot():
group = flask_login.current_user.group
if group is None:
return 'You are not in a group.'
plot_id = int(request.args.get('id', -1))
if plot_id > 0:
plot = StatisticBarPlot.query.filter(StatisticBarPlot.id == plot_id).first()
if plot:
db.session.delete(plot)
db.session.commit()
flash("Plot '%s' has been deleted" % plot.name)
return redirect(request.referrer)
@app.route('/plots/states/data', methods=['GET'])
@flask_login.login_required
def get_state_plot_data():
alias = aliased(State.uuid)
if request.args.get('alias_level', False) == NONE:
_aliases_id = set(u.uuid.id for u in flask_login.current_user.group.aliases)
alias_filter = alias.id.notin_(_aliases_id)
elif request.args.get('alias_level', False) == ONLY:
_aliases_id = set(u.uuid.id for u in flask_login.current_user.group.aliases)
alias_filter = alias.id.in_(_aliases_id)
else:
# ANY
alias_filter = True # Do nothing
data = {'name': request.args.get('name'),
'counts': db.session.query(State.state, func.count(State.id)).join(alias)\
.filter(State.name==request.args.get('name'), alias_filter,
State.group_id==flask_login.current_user.group.id)\
.group_by(State.state).all()
}
return flask.jsonify(data)
|
img_file = open('MARG Python Assignment 3 (Data).csv', 'r')
img = img_file.read()
img_file.close()
# Parse the CSV into a 2-D list of values (dropping the trailing newline).
img = img[:-1].split('\n')
img = [x.split(',') for x in img]
# Downsample by keeping every other row and every other column, then
# normalise the 0-255 values to the range 0-1.
img = img[::2]
for i in range(len(img)):
    img[i] = img[i][::2]
    for j in range(len(img[i])):
        img[i][j] = int(img[i][j]) / 255
data = ''
for row in img:
for elem in row:
data += str(elem) + ','
data = data[:-1]
new_img_file = open('output.txt', 'w')
new_img_file.write(data)
new_img_file.close()
|
from __future__ import division
from menpo.transform import AlignmentSimilarity, Similarity
import numpy as np
from menpo.visualize import progress_bar_str, print_dynamic
def name_of_callable(c):
try:
return c.__name__ # function
except AttributeError:
return c.__class__.__name__ # callable class
def is_pyramid_on_features(features):
r"""
True if feature extraction happens once and then a gaussian pyramid
is taken. False if a gaussian pyramid is taken and then features are
extracted at each level.
"""
return callable(features)
def create_pyramid(images, n_levels, downscale, features, verbose=False):
r"""
Function that creates a generator function for Gaussian pyramid. The
pyramid can be created either on the feature space or the original
(intensities) space.
Parameters
----------
images: list of :map:`Image`
The set of landmarked images from which to build the AAM.
n_levels: int
The number of multi-resolution pyramidal levels to be used.
downscale: float
The downscale factor that will be used to create the different
pyramidal levels.
features: ``callable`` ``[callable]``
If a single callable, then the feature calculation will happen once
followed by a gaussian pyramid. If a list of callables then a
gaussian pyramid is generated with features extracted at each level
(after downsizing and blurring).
Returns
-------
list of generators :
The generator function of the Gaussian pyramid.
"""
will_take_a_while = is_pyramid_on_features(features)
pyramids = []
for i, img in enumerate(images):
if will_take_a_while and verbose:
print_dynamic(
'Computing top level feature space - {}'.format(
progress_bar_str((i + 1.) / len(images),
show_bar=False)))
pyramids.append(pyramid_of_feature_images(n_levels, downscale,
features, img))
return pyramids
def pyramid_of_feature_images(n_levels, downscale, features, image):
r"""
Generates a gaussian pyramid of feature images for a single image.
"""
if is_pyramid_on_features(features):
# compute feature image at the top
feature_image = features(image)
# create pyramid on the feature image
return feature_image.gaussian_pyramid(n_levels=n_levels,
downscale=downscale)
else:
# create pyramid on intensities image
# feature will be computed per level
pyramid = image.gaussian_pyramid(n_levels=n_levels,
downscale=downscale)
# add the feature generation here
return feature_images(pyramid, features)
# adds feature extraction to a generator of images
def feature_images(images, features):
for feature, level in zip(reversed(features), images):
yield feature(level)
class DeformableModel(object):
def __init__(self, features):
self.features = features
@property
def pyramid_on_features(self):
return is_pyramid_on_features(self.features)
# TODO: Should this be a method on Similarity? AlignableTransforms?
def noisy_align(source, target, noise_std=0.04, rotation=False):
r"""
Constructs and perturbs the optimal similarity transform between source
to the target by adding white noise to its weights.
Parameters
----------
source: :class:`menpo.shape.PointCloud`
The source pointcloud instance used in the alignment
target: :class:`menpo.shape.PointCloud`
The target pointcloud instance used in the alignment
noise_std: float
The standard deviation of the white noise
Default: 0.04
    rotation: boolean
        If False, the second parameter of the Similarity transform, which
        captures in-plane rotations, is set to 0.
        Default: False
Returns
-------
noisy_transform : :class: `menpo.transform.Similarity`
The noisy Similarity Transform
"""
transform = AlignmentSimilarity(source, target, rotation=rotation)
parameters = transform.as_vector()
parameter_range = np.hstack((parameters[:2], target.range()))
noise = (parameter_range * noise_std *
np.random.randn(transform.n_parameters))
return Similarity.init_identity(source.n_dims).from_vector(parameters + noise)
def build_sampling_grid(patch_shape):
r"""
"""
patch_shape = np.array(patch_shape)
patch_half_shape = np.require(np.round(patch_shape / 2), dtype=int)
start = -patch_half_shape
end = patch_half_shape + 1
sampling_grid = np.mgrid[start[0]:end[0], start[1]:end[1]]
return sampling_grid.swapaxes(0, 2).swapaxes(0, 1)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Automatic update script for Hurricane Electric dynamic DNS
# Tested with Python 3.4 on Windows and Debian
#BSD
#Copyright (c) 2016, Ales Daniel alesdaniel77@gmail.com
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#* Neither the name of pydns nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# 28/03/2016 - Initial version - Daniel.
# 07/04/2016 - Added logging - Daniel.
import sys
import re
import ssl
import logging
import socket
import http.client
import urllib.request, urllib.parse, urllib.error
pagina = ''
ips = ''
def actualiza_ip():
global ips
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename='py_dns.log',
level=logging.ERROR)
datos = {}
    # Data for the dynamic DNS update request
datos['hostname'] = 'rsync.petro-tandil.com.ar'
datos['password'] = 'pass'
datos['myip'] = ips[0]
pag = urllib.parse.urlencode(datos)
print(pag)
url='https://dyn.dns.he.net/nic/update?'
urlc=url + pag
context = ssl._create_unverified_context()
print(urlc)
try:
datos = urllib.request.urlopen(urlc, context=context)
    except urllib.error.HTTPError as e:
        logging.error("actualiza_ip() " + str(e))
        print(e)
    except urllib.error.URLError as e:
        logging.error("actualiza_ip() " + str(e))
        print(e)
    except socket.timeout as e:
        logging.error("actualiza_ip() " + str(e))
        print(e)
    except socket.error as e:
        logging.error("actualiza_ip() " + str(e))
        print(e)
    except UnicodeEncodeError as e:
        logging.error("actualiza_ip() " + str(e))
        print(e)
    except http.client.BadStatusLine as e:
        logging.error("actualiza_ip() " + str(e))
        print(e)
    except http.client.IncompleteRead as e:
        logging.error("actualiza_ip() " + str(e))
        print(e)
# https://dyn.dns.he.net/nic/update?hostname=dyn.example.com&password=password&myip=192.168.0.1
# Compare the newly detected IP against the last one saved to disk
def consulta(ips):
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename='py_dns.log',
level=logging.ERROR)
try:
a = open('ip.txt', 'r+')
except IOError:
a = open('ip.txt', 'w+')
    saved_ip = a.read()
    if saved_ip == ips:
        a.close()
        return True
    else:
        a.close()
        a = open('ip.txt', 'w+')
        a.write(ips)
        logging.error("IP updated: " + ips)
        a.close()
        return False
# Search the returned HTML or text for the IP address
def busca_ip():
global ips
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename='py_dns.log',
level=logging.ERROR)
ips = re.findall(r'[0-9]+(?:\.[0-9]+){3}', pagina)
print(ips[0])
try:
socket.inet_aton(ips[0])
except TypeError:
print("type")
logging.error("busca_ip() type " + ips[0])
exit(1)
except socket.error:
print("sock")
logging.error("busca_ip() sock " + ips[0])
exit(1)
if consulta(ips[0]):
pass
else:
actualiza_ip()
def descarga():
global pagina
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename='py_dns.log',
level=logging.ERROR)
try:
#html = urllib.request.urlopen("http://www.see-my-ip.com/")
html = urllib.request.urlopen("http://checkip.dyndns.org/")
pagina = html.read().decode("latin1", 'ignore')
    except urllib.error.HTTPError as e:
        print(e)
        logging.error("descarga() " + str(e))
        pagina = ''
    except urllib.error.URLError as e:
        print(e)
        logging.error("descarga() " + str(e))
        pagina = ''
    except socket.timeout as e:
        print(e)
        logging.error("descarga() " + str(e))
        pagina = ''
    except socket.error as e:
        print(e)
        logging.error("descarga() " + str(e))
        pagina = ''
    except UnicodeEncodeError as e:
        print(e)
        logging.error("descarga() " + str(e))
        pagina = ''
    except http.client.BadStatusLine as e:
        print(e)
        logging.error("descarga() " + str(e))
        pagina = ''
    except http.client.IncompleteRead as e:
        print(e)
        logging.error("descarga() " + str(e))
        pagina = ''
if len(pagina) > 0:
print(pagina)
else:
logging.error("descarga() len(pagina) = 0")
exit(1)
if __name__ == "__main__":
if sys.version_info < (3, 0, 0):
sys.stderr.write("You need python 3.0 or later to run this script\n")
exit(1)
descarga()
busca_ip()
|
import logging
import re
import xml.etree.ElementTree
import requests
from .csl_item import CSL_Item
from .citekey import regexes
from manubot.util import get_manubot_user_agent
class CSL_Item_arXiv(CSL_Item):
def _set_invariant_fields(self):
# Set journal/publisher to arXiv
self["container-title"] = "arXiv"
self["publisher"] = "arXiv"
# Set CSL type to report for preprint
self["type"] = "report"
return self
def log_journal_doi(self, arxiv_id, journal_ref=None):
if "DOI" not in self:
return
msg = f"arXiv article {arxiv_id} published at https://doi.org/{self['DOI']}"
if journal_ref:
msg += f" — {journal_ref}"
logging.info(msg)
def set_identifier_fields(self, arxiv_id):
self.set_id(f"arxiv:{arxiv_id}")
self["URL"] = f"https://arxiv.org/abs/{arxiv_id}"
self["number"] = arxiv_id
_, version = split_arxiv_id_version(arxiv_id)
if version:
self["version"] = version
def split_arxiv_id_version(arxiv_id: str):
"""
    Return a (versionless_id, version) tuple.
    Version refers to the version suffix like 'v2', or None if absent.
"""
match = re.match(regexes["arxiv"], arxiv_id)
return match.group("versionless_id"), match.group("version")
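# Example (a sketch based on the docstring above and on the versioned-ID format
# used elsewhere in this module):
#   split_arxiv_id_version("1512.00567v2")  ->  ("1512.00567", "v2")
#   split_arxiv_id_version("1512.00567")    ->  ("1512.00567", None)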
def get_arxiv_csl_item(arxiv_id: str):
"""
Return csl_item item for an arXiv identifier.
Chooses which arXiv API to use based on whether arxiv_id
is versioned, since only one endpoint supports versioning.
"""
_, version = split_arxiv_id_version(arxiv_id)
if version:
return get_arxiv_csl_item_export_api(arxiv_id)
return get_arxiv_csl_item_oai(arxiv_id)
def query_arxiv_api(url, params):
headers = {"User-Agent": get_manubot_user_agent()}
response = requests.get(url, params, headers=headers)
xml_tree = xml.etree.ElementTree.fromstring(response.text)
return xml_tree
def get_arxiv_csl_item_export_api(arxiv_id):
"""
Return csl_item item for an arXiv record.
arxiv_id can be versioned, like `1512.00567v2`, or versionless, like
`1512.00567`. If versionless, the arXiv API will return metadata for the
latest version. Legacy IDs, such as `cond-mat/0703470v2`, are also
supported.
If arXiv has an associated DOI for the record, a warning is logged to
alert the user that an alternative version of record exists.
References:
- https://arxiv.org/help/api/index
- http://citeproc-js.readthedocs.io/en/latest/csl-json/markup.html
- https://github.com/citation-style-language/schema/blob/master/csl-data.json
"""
xml_tree = query_arxiv_api(
url="https://export.arxiv.org/api/query",
params={"id_list": arxiv_id, "max_results": 1},
)
# XML namespace prefixes
prefix = "{http://www.w3.org/2005/Atom}"
alt_prefix = "{http://arxiv.org/schemas/atom}"
# Parse XML
(entry,) = xml_tree.findall(prefix + "entry")
# Create dictionary for CSL Item
csl_item = CSL_Item_arXiv()
# Extract versioned arXiv ID
url = entry.findtext(prefix + "id")
pattern = re.compile(r"arxiv.org/abs/(.+)")
match = pattern.search(url)
versioned_id = match.group(1)
csl_item.set_identifier_fields(versioned_id)
    # Extract CSL title field
csl_item["title"] = entry.findtext(prefix + "title")
# Extract CSL date field
published = entry.findtext(prefix + "published")
csl_item.set_date(published, variable="issued")
# Extract authors
authors = list()
for elem in entry.findall(prefix + "author"):
name = elem.findtext(prefix + "name")
author = {"literal": name}
authors.append(author)
csl_item["author"] = authors
csl_item._set_invariant_fields()
# Extract abstract
abstract = entry.findtext(prefix + "summary").strip()
if abstract:
# remove newlines that were added to wrap abstract
abstract = remove_newlines(abstract)
csl_item["abstract"] = abstract
# Check if the article has been published with a DOI
doi = entry.findtext(f"{alt_prefix}doi")
if doi:
csl_item["DOI"] = doi
journal_ref = entry.findtext(alt_prefix + "journal_ref")
csl_item.log_journal_doi(arxiv_id, journal_ref)
return csl_item
def get_arxiv_csl_item_oai(arxiv_id):
"""
Generate a CSL Item for an unversioned arXiv identifier
using arXiv's OAI_PMH v2.0 API <https://arxiv.org/help/oa>.
This endpoint does not support versioned `arxiv_id`.
"""
# XML namespace prefixes
ns_oai = "{http://www.openarchives.org/OAI/2.0/}"
ns_arxiv = "{http://arxiv.org/OAI/arXiv/}"
xml_tree = query_arxiv_api(
url="https://export.arxiv.org/oai2",
params={
"verb": "GetRecord",
"metadataPrefix": "arXiv",
"identifier": f"oai:arXiv.org:{arxiv_id}",
},
)
# Create dictionary for CSL Item
csl_item = CSL_Item_arXiv()
# Extract parent XML elements
(header_elem,) = xml_tree.findall(
f"{ns_oai}GetRecord/{ns_oai}record/{ns_oai}header"
)
(metadata_elem,) = xml_tree.findall(
f"{ns_oai}GetRecord/{ns_oai}record/{ns_oai}metadata"
)
(arxiv_elem,) = metadata_elem.findall(f"{ns_arxiv}arXiv")
# Set identifier fields
response_arxiv_id = arxiv_elem.findtext(f"{ns_arxiv}id")
if arxiv_id != response_arxiv_id:
logging.warning(
f"arXiv oai2 query returned a different arxiv_id:"
" {arxiv_id} became {response_arxiv_id}"
)
csl_item.set_identifier_fields(response_arxiv_id)
# Set title and date
title = arxiv_elem.findtext(f"{ns_arxiv}title")
if title:
csl_item["title"] = " ".join(title.split())
datestamp = header_elem.findtext(f"{ns_oai}datestamp")
csl_item.set_date(datestamp, "issued")
# Extract authors
author_elems = arxiv_elem.findall(f"{ns_arxiv}authors/{ns_arxiv}author")
authors = list()
for author_elem in author_elems:
author = {}
given = author_elem.findtext(f"{ns_arxiv}forenames")
family = author_elem.findtext(f"{ns_arxiv}keyname")
if given:
author["given"] = given
if family:
author["family"] = family
authors.append(author)
csl_item["author"] = authors
csl_item._set_invariant_fields()
abstract = arxiv_elem.findtext(f"{ns_arxiv}abstract")
if abstract:
csl_item["abstract"] = remove_newlines(abstract)
license = arxiv_elem.findtext(f"{ns_arxiv}license")
if license:
csl_item.note_append_dict({"license": license})
doi = arxiv_elem.findtext(f"{ns_arxiv}doi")
if doi:
csl_item["DOI"] = doi
journal_ref = arxiv_elem.findtext(f"{ns_arxiv}journal-ref")
csl_item.log_journal_doi(arxiv_id, journal_ref)
return csl_item
def remove_newlines(text):
return re.sub(pattern=r"\n(?!\s)", repl=" ", string=text)
def get_arxiv_csl_item_zotero(arxiv_id):
"""
Generate CSL JSON Data for an arXiv ID using Zotero's translation-server.
"""
from manubot.cite.zotero import get_csl_item
return get_csl_item(f"arxiv:{arxiv_id}")
|
from rest_framework import viewsets
from .models import Account
from .serializers import AccountSerializer
class AccountViewSet(viewsets.ModelViewSet):
queryset = Account.objects.all()
serializer_class = AccountSerializer
|
'''
Created on Sep 13, 2017
@author: mmullero
'''
import os
# configuration
class Config(object):
MONGODB_URI = "mongodb://localhost:27017/"
MONGODB_DB = "stjoern-scrapper"
basedir = os.path.abspath(os.path.dirname(__file__))
logpath = os.path.join(basedir, 'stjoern-scrapper.log')
chromedriver_log = os.path.join(basedir, 'chromedriver.log')
threading = True
chromedriver_path = r"c:\KB\installation_instruction\project\stjoern-scrapper\install\chromedriver.exe"
@staticmethod
def getLogPath(name):
return os.path.join(Config.basedir, '{}.log'.format(name))
|
from pgso.evaluate import error, evaluate, update_velocity, update_position
from multiprocessing import Manager, Process, Lock
from pgso.init_particles import create_n_particles
from sklearn.utils import shuffle
from tqdm import tqdm
from numba import jit
import numpy as np
import copy
def sample_data(X_train, y_train, batch_size, mini_batch_size):
X_train, y_train = shuffle(X_train, y_train)
for i in range(0, mini_batch_size-batch_size+1,batch_size):
yield X_train[i:i+batch_size], y_train[i:i+batch_size]
# @jit
def PSO_purana(classifier,bounds,maxiter,swarm_init=None, train_data=None):
num_dimensions=len(swarm_init[0])
err_best_g=-1 # best error for group
pos_best_g=[] # best position for group
num_particles = len(swarm_init)
# establish the swarm
swarm = create_n_particles(num_particles, num_dimensions, swarm_init)
# begin optimization loop
i=0
while i < maxiter:
#print i,err_best_g
# cycle through particles in swarm and evaluate fitness
for j in range(0,num_particles):
swarm[j]['pos_best_i'], swarm[j]['err_best_i'] = evaluate(classifier, swarm[j], train_data)
# determine if current particle is the best (globally)
if swarm[j]['err_i'] < err_best_g or err_best_g == -1:
pos_best_g=list(swarm[j]['position_i'])
err_best_g=float(swarm[j]['err_i'])
# cycle through swarm and update velocities and position
for j in range(0,num_particles):
swarm[j]['velocity_i'] = update_velocity(pos_best_g, swarm[j])
swarm[j]['position_i'] = update_position(bounds, swarm[j])
i+=1
# print final results
#print ('\n')
#print (pos_best_g,' , ', err_best_g)
return pos_best_g[0], err_best_g
# @jit
def PSO(classifier, bounds, maxiter, shared_list, return_list, l, num_particles=None, swarm_init=None, pso_train_data=None):
# create minibatches inside PSO
num_dimensions=len(swarm_init[0])
err_best_g=-1 # best error for group
pos_best_g=[] # best position for group
num_particles = len(swarm_init)
#print('adress of classifier object is: ', id(classifier))
# establish the swarm
# initialize swarm population
#print('len(swarm_init): ', len(swarm_init), 'shape of swarm_init[0]: ', swarm_init[0].shape, '\n')
swarm = create_n_particles(num_particles, num_dimensions, swarm_init)
# begin optimization loop
i=0
while i < maxiter:
#print i,err_best_g
# cycle through particles in swarm and evaluate fitness
for j in range(0,num_particles):
best_pos, swarm[j]['err_best_i'] = evaluate(classifier, swarm[j], pso_train_data)
swarm[j]['pos_best_i'] = best_pos
# determine if current particle is the best (globally)
if swarm[j]['err_i'] < err_best_g or err_best_g == -1:
pos_best_g=list(swarm[j]['position_i'])
err_best_g=float(swarm[j]['err_i'])
# update the global best in the manager list after k iterations
# we need to add some mutex lock here
if i == maxiter//2:
l.acquire()
best_galactic_pos = shared_list[0]
best_galactic_err = shared_list[1]
#print("best_galactic_err: " ,best_galactic_err)
#print("best_galactic_pos: ", best_galactic_pos)
if err_best_g < best_galactic_err:
shared_list[1] = err_best_g
#print(err_best_g)
shared_list[0] = pos_best_g
else:
#print("changing pos_best_g from", pos_best_g, " to ", best_galactic_pos)
#emp_list = []
err_best_g = float(best_galactic_err)
#emp_list.append(best_galactic_pos)
pos_best_g = [best_galactic_pos]
l.release()
# cycle through swarm and update velocities and position
for j in range(0,num_particles):
swarm[j]['velocity_i'] = update_velocity(pos_best_g, swarm[j])
swarm[j]['position_i'] = update_position(bounds, swarm[j])
i+=1
#print('shape of swarm[0][position_i] is: ', swarm[0]['position_i'].shape)
    return_list.append((pos_best_g[0], [particle['position_i'] for particle in swarm]))
def start(process_list):
for p in process_list:
p.start()
def stop(process_list):
for p in process_list:
p.join()
# @jit
def GSO(bounds, num_particles, max_iter, classifier, train_data, epochs, batch_size, mini_batch_size=None):
"""
Galactic Swarm Optimization:
----------------------------
    A meta-heuristic algorithm inspired by the interplay
    of stars, galaxies and superclusters under the influence
    of gravity.
    Input:
    ------
    num_particles: integer
        number of particles in each subswarm (one subswarm is spawned
        per mini-batch, i.e. mini_batch_size // batch_size of them)
bounds:
bounds of the search space across each dimension
[lower_bound, upper_bound] * dims
We specify only lower_bound and upper_bound
"""
subswarm_bests = []
dims = sum([np.prod(np.array(layer['weights']).shape) for layer in classifier.layers.values()])
print("total number of weights -", dims)
lb = bounds[0]
ub = bounds[1]
# lets set bounds across all dims
bounds = [[lb, ub]]*dims
manager = Manager()
l = Lock()
shared_list = manager.list()
return_list = manager.list()
shared_list = [np.random.uniform(lb, ub, dims), np.inf]
all_processes = []
#pso_batch_size = train_data[0].shape[0]//M
g_best_weights = None
g_best_error = float("inf")
    X_train, y_train = train_data
    if not mini_batch_size: mini_batch_size = X_train.shape[0]
    classifiers = [copy.deepcopy(classifier) for _ in range(mini_batch_size//batch_size)]
print('starting with gso_batch size - {}, mini_batch_size -{} '.format(batch_size, mini_batch_size))
# create N particles here
swarm_inits = []
for j in range(mini_batch_size//batch_size):
swarm_init = []
for _ in range(num_particles):
swarm_init.append(np.random.uniform(lb, ub, (1, dims)))
swarm_inits.append(swarm_init)
for i in tqdm(range(epochs)):
all_processes = []
sampler = sample_data(X_train, y_train, batch_size, mini_batch_size)
for j in range(mini_batch_size//batch_size):
pso_train_data = next(sampler)
#initial= np.random.uniform(-10,10, 2) # initial starting location [x1,x2...]
# swarm_init = []
# for _ in range(num_particles):
# swarm_init.append(np.random.uniform(lb, ub, dims))
#pso_train_data = (data[0][k*batch_size:(k+1)*pso_batch_size], data[1][k*batch_size:(k+1)*pso_batch_size])
# print('started batch :',i)
# print('train_data length :', len(pso_train_data))
#print('shape of swarm_inits[j][0]: ', swarm_inits[j][0].shape)
swarm_init = np.array([item.reshape(dims, 1) for item in swarm_inits[j]])
p = Process(target=PSO, args=(classifiers[j], bounds, max_iter, shared_list, return_list, l, None,swarm_init, pso_train_data))
all_processes.append(p)
start(all_processes)
stop(all_processes)
#print('elements of return list: ', return_list)
main_swarm_init = [item[0] for item in return_list]
#swarm_inits = [item[1] for item in return_list]
swarm_inits = [main_swarm_init for item in return_list]
best_weights, best_error = PSO_purana(classifier, bounds, max_iter, swarm_init=main_swarm_init, train_data=train_data)
if best_error < g_best_error:
g_best_error = best_error
g_best_weights = best_weights
print('completed epoch {} --------> loss_value: {}'.format(i, best_error))
prev_index = 0
for layer_id, layer in classifier.layers.items():
num_elements = np.prod(layer['weights'].shape) # we can cache this and pass it down or store it as layer.num_elements
new_weights = g_best_weights[prev_index:prev_index+num_elements]
layer['weights'] = new_weights.reshape(layer['weights'].shape) # changing value midway can cause some error
prev_index += num_elements
return classifier
# bounds are across each dimension
'''
Suppose you have 3 dims x,y,x
For each dimension you have to specify a range
x -> [-1, 1]
y -> [-10, 10]
z -> [-5, 5]
Your bounds array will look like -
bounds = [[-1, 1], [-10, 10], [-5, 5]]
dims = sum([np.prod(layer['weights'].shape) for layer in classifier.layers])
bounds[0][0]
'''
|
from classes.PathManager import PathManager
temp = PathManager(file_name="index.m3u8")
print(temp.__dict__)
|
#!/usr/bin/env python
import sys
import math
# The PYTHONPATH should contain the location of moose.py and _moose.so
# files. Putting ".." with the assumption that moose.py and _moose.so
# has been generated in ${MOOSE_SOURCE_DIRECTORY}/pymoose/ (as default
# pymoose build does) and this file is located in
# ${MOOSE_SOURCE_DIRECTORY}/pymoose/examples
# sys.path.append('..\..')
try:
import moose
except ImportError:
print "ERROR: Could not import moose. Please add the directory containing moose.py in your PYTHONPATH"
import sys
sys.exit(1)
from channelConstants import *
VKDR = -90.0e-3 # Volts
gransarea = 5e-9 # m^2 default surface area of soma from granule.tem
GKDR = 20*gransarea # Siemens
a0m=0.0035
vhalfm=-50e-3 # V
zetam=0.055e3 # /V
gmm=0.5 # dimensionless
q10=3.0
qt=q10**((CELSIUS-24.0)/10.0) # CELSIUS is a global constant
# (CELSIUS-24)/10 - integer division!!!! Ensure floating point division
# FOR INTEGERS: be careful ^ is bitwise xor in python - used here qt=2, but above qt=3!
def calc_KA_malp(v):
return math.exp(zetam*(v-vhalfm))
def calc_KA_mbet(v):
return math.exp(zetam*gmm*(v-vhalfm))
def calc_KA_mtau(v):
return calc_KA_mbet(v)/(qt*a0m*(1+calc_KA_malp(v))) * 1e-3 # convert to seconds
def calc_KA_minf(v):
return 1/(1 + math.exp(-(v-21e-3)/10e-3))
class KDRChannelMS(moose.HHChannel):
"""K Delayed Rectifier channel translated from Migliore and Shepherd 2007."""
def __init__(self, *args):
"""Setup the KDR channel with defaults"""
moose.HHChannel.__init__(self,*args)
self.Ek = VKDR
self.Gbar = GKDR
self.addField('ion')
self.setField('ion','K')
self.Xpower = 1 # This will create HHGate instance xGate inside the Na channel
#self.Ypower = 0 # This will create HHGate instance yGate inside the Na channel
## Below gates get created after Xpower or Ypower are set to nonzero values
## I don't anymore have to explicitly create these attributes in the class
#self.xGate = moose.HHGate(self.path + "/xGate")
#self.yGate = moose.HHGate(self.path + "/yGate")
self.xGate.A.xmin = VMIN
self.xGate.A.xmax = VMAX
self.xGate.A.xdivs = NDIVS
self.xGate.B.xmin = VMIN
self.xGate.B.xmax = VMAX
self.xGate.B.xdivs = NDIVS
v = VMIN
for i in range(NDIVS+1):
mtau = calc_KA_mtau(v)
self.xGate.A[i] = calc_KA_minf(v)/mtau
self.xGate.B[i] = 1.0/mtau
v = v + dv
|
from django.db.models.signals import post_save
from django.contrib.auth import get_user_model
from django.contrib.auth.signals import user_logged_in, user_logged_out, user_login_failed
from django.dispatch import receiver
from schools.models import AuditEntry
User = get_user_model()
# https://stackoverflow.com/a/37620866/2351696
@receiver(user_logged_in)
def user_logged_in_callback(sender, request, user, **kwargs):
ip = request.META.get('REMOTE_ADDR')
AuditEntry.objects.create(action='user_logged_in', ip=ip, username=user.username)
@receiver(user_logged_out)
def user_logged_out_callback(sender, request, user, **kwargs):
ip = request.META.get('REMOTE_ADDR')
AuditEntry.objects.create(action='user_logged_out', ip=ip, username=user.username)
@receiver(user_login_failed)
def user_login_failed_callback(sender, credentials, **kwargs):
AuditEntry.objects.create(action='user_login_failed', username=credentials.get('username', None))
|
from types import StringType, ListType
from oldowan.polymorphism import Polymorphism
def sites2str(sites):
"""Transform a list of Polymorphisms to a string.
"""
processing = []
for x in sites:
# unnested (unambiguous) sites get directly pushed to the string
if isinstance(x,Polymorphism):
processing.append(str(x))
# ambiguous sites are nested in lists
elif type(x) == ListType:
current = []
for y in x:
if type(y) == ListType:
if len(y) == 1:
current.append(str(y[0]))
elif len(y) > 1:
as_str = list(str(x) for x in y)
interior = ' '.join(as_str)
current.append('(' + interior + ')')
else:
raise Exception('format error in sites2str')
else:
raise Exception('format error in sites2str')
interior = ' or '.join(current)
processing.append('(' + interior + ')')
else:
raise Exception('format error in sites2str')
return ' '.join(processing)
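# Format example (a sketch derived from the branches above; p1..p4 stand for
# Polymorphism instances whose str() values are shown):
#   sites2str([p1, [[p2], [p3, p4]]])  ->  "p1 (p2 or (p3 p4))"
# An unambiguous site is emitted directly; an ambiguous site becomes a
# parenthesised list of alternatives joined by "or".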
|
from slovodel_bot.view import keyboard
from slovodel_bot.model import word_maker
def test_get_keyboard():
    kb_dict_reference = {
"keyboard": [["Существительное", "Прилагательное", "Глагол"]],
"resize_keyboard": True,
"one_time_keyboard": False,
"selective": False,
}
    assert keyboard.get_standard(word_maker.wordTypes).to_dict() == kb_dict_reference
|
#! /usr/bin/env python3
"""
Scan blockchain transactions, and find JM transactions.
This script prints JMTXs IDs, and optionally dumps them to a pickle file.
"""
import os
from argparse import ArgumentParser
from jm_unmixer.misc import pkl_append, map_with_progressbar
from jm_unmixer.btccommon import BLOCKCHAIN, Block, Tx, reconnect
from jm_unmixer.jmtx import to_joinmarket_tx, Unpairable
###############################################################################
# functions
ARG_TYPE_BLOCK_ID = 0
ARG_TYPE_BLOCK_HEIGHT = 1
ARG_TYPE_TX_ID = 2
ARG_TYPE_FILE = 3
def gen_txids_from_args(args, num_workers):
arg_types = set([ get_arg_type(arg) for arg in args ])
if len(arg_types) > 1:
raise ValueError('bad usage: ambiguous arg types (%s)' % arg_types)
arg_type, = arg_types
if arg_type == ARG_TYPE_BLOCK_ID:
for bid in args:
block = Block(BLOCKCHAIN.get_block_by_id(bid))
#print('# starting block %s' % block.height)
yield from block.txids
#print('# finished block %s' % block.height)
elif arg_type == ARG_TYPE_BLOCK_HEIGHT:
h1 = int(args[0])
if len(args) == 1:
hs = [ h1 ]
elif len(args) == 2:
h2 = int(args[1])
hs = range(h1, h2)
else:
            raise ValueError('Block heights should be passed as a start/end range')
txids = []
block_txids_gen = map_with_progressbar(get_block_txids, hs, num_workers = num_workers, preserve_order = False)
for block_txids in block_txids_gen:
txids.extend(block_txids)
yield from txids
elif arg_type == ARG_TYPE_TX_ID:
yield from args
elif arg_type == ARG_TYPE_FILE:
for fn in args:
with open(fn) as f:
for line in f:
txid = line.strip()
if txid:
yield txid
def get_block_txids(height):
block = Block(BLOCKCHAIN.get_block_by_height(height))
return block.txids
def get_arg_type(arg):
if os.path.exists(arg):
return ARG_TYPE_FILE
if len(arg) == 64:
if arg.startswith('0000000'):
return ARG_TYPE_BLOCK_ID
else:
return ARG_TYPE_TX_ID
try:
if int(arg) < 10**9:
return ARG_TYPE_BLOCK_HEIGHT
    except (TypeError, ValueError):
pass
raise ValueError('Arg not understood: %s' % arg)
###############################################################################
pid_of_connection = os.getpid()
def process_tx(txid):
global pid_of_connection
if os.getpid() != pid_of_connection:
reconnect()
pid_of_connection = os.getpid()
tx = Tx.from_id(txid)
try:
return to_joinmarket_tx(tx)
except Unpairable:
return None
###############################################################################
# MAIN
def main():
args = getopt()
print('collecting txids...')
txids = list(gen_txids_from_args(args.args, num_workers = args.num_workers))
print('%d txs found' % (len(txids)))
print('looking for jmtxs...')
jmtx_gen = map_with_progressbar(process_tx, txids, num_workers = args.num_workers, preserve_order = False)
jmtxs = list(jmtx_gen)
for jmtx in jmtxs:
if jmtx is None:
continue
print('JMTX %s' % ( jmtx.id, ))
if args.outfile:
pkl_append(args.outfile, jmtx)
###############################################################################
def getopt():
parser = ArgumentParser()
parser.add_argument('args', nargs = '+', help = 'either: tx IDs, block IDs, a range of block heights, or filename containing tx IDs')
parser.add_argument('-w', '--num-workers', type = int, default = 8)
parser.add_argument('-o', '--outfile')
return parser.parse_args()
###############################################################################
if __name__ == '__main__':
main()
|
import tensorflow as tf
from tensorflow.keras.layers import Layer
"""
The name of this isn't exactly right, but using `selection` as a placeholder for
now. This file will contain the function for "applying" a selection
score/weights to a sequence (typically the values in standard attention)
"""
class SelectionApply(Layer):
"""Base class for specified 'selection' functions"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.apply_fn = self._apply_fn
def _apply_fn(self, weights, sequence):
raise NotImplementedError(
"Must implement a `_apply_fn(weights, sequence)` in the subclass"
)
def call(self, weights, sequence):
out = self.apply_fn(weights=weights, sequence=sequence)
return out
class MatMul(SelectionApply):
"""Standard matmul"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.apply_fn = self.matmul
def matmul(self, weights, sequence):
"""apply weights to the sequence
weights is commonly the attention weights and sequence is commonly the
values
"""
out = tf.matmul(weights, sequence)
return out
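# Example usage (a minimal sketch, not part of the original module; the tensor
# shapes below are illustrative assumptions): attention weights of shape
# (batch, query_len, key_len) applied to values of shape (batch, key_len, depth)
# yield an output of shape (batch, query_len, depth).
if __name__ == "__main__":
    weights = tf.random.uniform((2, 3, 4))  # (batch, query_len, key_len)
    values = tf.random.uniform((2, 4, 5))   # (batch, key_len, depth)
    out = MatMul()(weights, values)
    print(out.shape)  # (2, 3, 5)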
|