blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c6d6c965204756070013e9e873147cede25d1b53 | 70fd9545d8f273db2126ac8bb0715c90838fe98b | /polls/serializers.py | 42ab6011a5fffa46a4744713a8cd70ab5d82ac49 | [] | no_license | Gohstreck/Backend | 6fd99e0c304054d6b60e44111b56b3453c78109b | 6052e6ffecf3c297d1256f51a695d55b89b3b883 | refs/heads/master | 2020-04-04T12:52:39.966936 | 2018-11-12T04:13:20 | 2018-11-12T04:13:20 | 155,940,576 | 0 | 0 | null | 2018-11-03T01:58:44 | 2018-11-03T01:58:44 | null | UTF-8 | Python | false | false | 2,459 | py | from rest_framework import serializers
from . import models
class PersonSerializer(serializers.HyperlinkedModelSerializer):
groups = serializers.HyperlinkedRelatedField(
many = True,
read_only = True,
view_name = 'group-detail'
)
articles = serializers.HyperlinkedRelatedField(
many = True,
read_only = True,
view_name = 'article-detail'
)
class Meta:
model = models.Person
fields = ('id_person', 'name', 'birthdate', 'mail', 'phone_number', 'articles', 'groups')
class InstitutionSerializer(serializers.HyperlinkedModelSerializer):
branches = serializers.HyperlinkedRelatedField(
many = True,
read_only = True,
view_name = 'branch-detail'
)
class Meta:
model = models.Institution
fields = ('id_institution', 'name', 'branches')
class GroupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Group
fields = ('id_group', 'name', 'members', 'leader')
class BranchSerializer(serializers.HyperlinkedModelSerializer):
departments = serializers.HyperlinkedRelatedField(
many = True,
read_only = True,
view_name = 'department-detail'
)
class Meta:
model = models.Branch
fields = ('id_branch', 'institution', 'name', 'departments')
class ArticleSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Article
fields = ('id_article', 'title', 'authors')
class DepartmentSerializer(serializers.HyperlinkedModelSerializer):
researchers = serializers.HyperlinkedRelatedField(
many = True,
read_only = True,
view_name = 'researcher-detail'
)
class Meta:
model = models.Department
fields = ('id_department', 'name', 'phone_number', 'adress', 'branch', 'researchers')
class ResearcherSerializer(serializers.HyperlinkedModelSerializer):
students = serializers.HyperlinkedRelatedField(
many = True,
read_only = True,
view_name = 'student-detail'
)
leader = serializers.HyperlinkedRelatedField(
many = True,
read_only = True,
view_name = 'researcher-detail'
)
class Meta:
model = models.Researcher
fields = ('id_researcher', 'person', 'department', 'students', 'leader')
class StudentSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Student
fields = ('id_student', 'person', 'supervisor')
| [
"equiroz@ciencias.unam.mx"
] | equiroz@ciencias.unam.mx |
3a79af3088336b15e677b0d3a821806575a98c86 | 2a466b71eb9f24c58ca6ba52a379870485d19140 | /src/cert_scanner/report/progress_graph_generator.py | ff14fd2a750106e8b5427dfc8ef64f1e2832767a | [
"MIT"
] | permissive | kgarwood/digital_certificate_scanner | 5df0fb47193f838353a0cadb696d49b8acbf8351 | ede1344144fa62349c0076d5618eccfc4131f98f | refs/heads/master | 2020-03-30T07:42:53.860968 | 2018-10-02T16:12:47 | 2018-10-02T16:12:47 | 150,961,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,102 | py | import cert_scanner.util.file_name_utility as file_name_utility
import cert_scanner.util.certificate_scanner_utility as \
certificate_scanner_utility
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import MultipleLocator
import os
def generate(original_df,
output_directory,
expiry_type,
expiry_period_field_name,
expiry_period_phrase,
start_date,
end_date,
xtick_rotation_angle,
show_every_ith_x_label):
title_date_phrase = \
"from {} to {}\n({} {} to Week {})".format(
start_date.strftime("%d %b %Y"),
end_date.strftime("%d %b %Y"),
expiry_period_phrase,
start_date.strftime("%W %Y"),
expiry_period_phrase,
end_date.strftime("%W %Y"))
# print("generate 11111111111111111111111")
# print(original_df.columns.values)
# print("generate 22222222222222222222222")
__generate_num_certs_graph(original_df,
output_directory,
expiry_type,
expiry_period_field_name,
expiry_period_phrase,
start_date,
end_date,
xtick_rotation_angle,
show_every_ith_x_label)
__generate_num_releases_graph(original_df,
output_directory,
expiry_type,
expiry_period_field_name,
expiry_period_phrase,
start_date,
end_date,
xtick_rotation_angle,
show_every_ith_x_label)
__generate_num_locations_graph(original_df,
output_directory,
expiry_type,
expiry_period_field_name,
expiry_period_phrase,
start_date,
end_date,
xtick_rotation_angle,
show_every_ith_x_label)
def __generate_num_locations_graph(original_df,
output_directory,
expiry_type,
expiry_period_field_name,
expiry_period_phrase,
start_date,
end_date,
xtick_rotation_angle,
show_every_ith_x_label):
title_date_phrase = \
__generate_title_phrase(start_date, end_date, expiry_period_phrase)
num_locations_title = \
"Number of Files Containing Expiring Certs\n{}".format(
title_date_phrase)
period_to_period_phrase = \
__generate_period_to_period_phrase(expiry_type,
start_date,
end_date,
expiry_period_phrase)
num_locations_title = \
"Expiring Cert Files {}".format(period_to_period_phrase)
date_to_date_phrase = \
certificate_scanner_utility.generate_date_range_phrase(start_date,
end_date)
ax = original_df.plot.bar(x=expiry_period_field_name,
y='total_locations', width=1.0, rot=0)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.tick_params(labelsize=6)
legend = ax.legend()
legend.remove()
plt.xlabel('xlabel', fontsize=10)
plt.ylabel('ylabel', fontsize=10)
plt.suptitle(num_locations_title, fontsize=14)
plt.title(date_to_date_phrase, fontsize=10)
ith_label = 0
for label in ax.xaxis.get_ticklabels():
label.set_visible(False)
for label in ax.xaxis.get_ticklabels()[::show_every_ith_x_label]:
label.set_visible(True)
plt.setp(ax.get_xticklabels(),
rotation=xtick_rotation_angle,
horizontalalignment='center')
plt.xlabel('Expiry {}'.format(expiry_period_phrase))
plt.ylabel('Total Files')
base_file_name = "total_{}_locations".format(expiry_type)
file_name = \
file_name_utility.get_time_range_file_name(base_file_name,
None,
start_date,
end_date,
"png")
output_file_path = os.path.join(output_directory, file_name)
plt.savefig(output_file_path)
def __generate_num_certs_graph(original_df,
output_directory,
expiry_type,
expiry_period_field_name,
expiry_period_phrase,
start_date,
end_date,
xtick_rotation_angle,
show_every_ith_x_label):
period_to_period_phrase = \
__generate_period_to_period_phrase(expiry_type,
start_date,
end_date,
expiry_period_phrase)
num_certs_title = \
"Expiring Cert Records {}".format(period_to_period_phrase)
date_to_date_phrase = \
certificate_scanner_utility.generate_date_range_phrase(start_date,
end_date)
ax = original_df.plot.bar(x=expiry_period_field_name,
y='total_expiring_certs', width=1.0, rot=0)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
legend = ax.legend()
legend.remove()
ax.tick_params(labelsize=6)
plt.xlabel('xlabel', fontsize=12)
plt.ylabel('ylabel', fontsize=12)
plt.suptitle(num_certs_title, fontsize=14)
plt.title(date_to_date_phrase, fontsize=10)
ith_label = 0
for label in ax.xaxis.get_ticklabels():
label.set_visible(False)
for label in ax.xaxis.get_ticklabels()[::show_every_ith_x_label]:
label.set_visible(True)
plt.setp(ax.get_xticklabels(),
rotation=xtick_rotation_angle,
horizontalalignment='center')
plt.xlabel('Expiry {}'.format(expiry_period_phrase))
plt.ylabel('Total Certificates')
base_file_name = "total_{}_certs".format(expiry_type)
file_name = \
file_name_utility.get_time_range_file_name(base_file_name,
None,
start_date,
end_date,
"png")
output_file_path = os.path.join(output_directory, file_name)
plt.savefig(output_file_path)
def __generate_num_releases_graph(original_df,
output_directory,
expiry_type,
expiry_period_field_name,
expiry_period_phrase,
start_date,
end_date,
xtick_rotation_angle,
show_every_ith_x_label):
period_to_period_phrase = \
__generate_period_to_period_phrase(expiry_type,
start_date,
end_date,
expiry_period_phrase)
num_releases_title = \
"Expiring Cert Releases {}".format(period_to_period_phrase)
date_to_date_phrase = \
certificate_scanner_utility.generate_date_range_phrase(start_date,
end_date)
ax = \
original_df.plot.bar(x=expiry_period_field_name,
y='total_releases',
width=1.0,
rot=0)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.tick_params(labelsize=6)
legend = ax.legend()
legend.remove()
plt.xlabel('xlabel', fontsize=12)
plt.ylabel('ylabel', fontsize=12)
plt.suptitle(num_releases_title, fontsize=14)
plt.title(date_to_date_phrase, fontsize=10)
ith_label = 0
for label in ax.xaxis.get_ticklabels():
label.set_visible(False)
for label in ax.xaxis.get_ticklabels()[::show_every_ith_x_label]:
label.set_visible(True)
plt.setp(ax.get_xticklabels(),
rotation=xtick_rotation_angle,
horizontalalignment='center')
plt.xlabel('Expiry {}'.format(expiry_period_phrase))
plt.ylabel('Total Releases')
base_file_name = "total_{}_releases".format(expiry_type)
file_name = \
file_name_utility.get_time_range_file_name(base_file_name,
None,
start_date,
end_date,
"png")
output_file_path = os.path.join(output_directory, file_name)
plt.savefig(output_file_path)
def __generate_title_phrase(start_date,
end_date,
expiry_period_phrase):
title_date_phrase = \
"from {} to {}\n({} {} to Week {})".format(
start_date.strftime("%d %b %Y"),
end_date.strftime("%d %b %Y"),
expiry_period_phrase,
start_date.strftime("%W %Y"),
expiry_period_phrase,
end_date.strftime("%W %Y"))
return title_date_phrase
def __generate_period_to_period_phrase(expiry_type,
start_date,
end_date,
expiry_period_phrase):
if expiry_type == 'monthly':
return "({} to {})".format(
start_date.strftime("%b %Y"),
end_date.strftime("%b %Y"))
else:
return "(Week {} to Week {})".format(
start_date.strftime("%W %Y"),
end_date.strftime("%W %Y"))
def __generate_date_range_phrase(period_phrase, start_date, end_date):
return "from {} to {}\n({} {} to {} {})".format(
start_date.strftime("%d %b %Y"),
end_date.strftime("%d %b %Y"),
period_phrase,
start_date.strftime("%W %Y"),
period_phrase,
end_date.strftime("%W %Y"))
| [
"kevin.garwood@digital.cabinet-office.gov.uk"
] | kevin.garwood@digital.cabinet-office.gov.uk |
2917ef2371964c06a2bedd3acf1b8cd301c7a4b7 | 216fe95ca1d92c6071155cf59c36789edcb27123 | /languageBot/messengerBot/urls.py | f6e9f82f5d5d8ff77f08b523aed294744dc7ad97 | [
"MIT"
] | permissive | singhvisha/LanguageBot | 16b5b153e55b432bfa108a7add5a087b38af7722 | 9cef316bceb2f6951863af2fa869398fb5242519 | refs/heads/master | 2023-05-31T11:26:41.773724 | 2020-07-12T17:49:15 | 2020-07-12T17:49:15 | 279,117,273 | 0 | 0 | MIT | 2021-06-10T23:09:31 | 2020-07-12T17:46:28 | Python | UTF-8 | Python | false | false | 192 | py | from django.conf.urls import include, url
from .views import messengerBotView
urlpatterns = [
url(r'^21975e0a3c7ab17aa37124158bbda569af363d15eacb576e06/?$', messengerBotView.as_view()),
] | [
"vishalsingh600700@gmail.com"
] | vishalsingh600700@gmail.com |
2ca4b0788bfc54be10e712eabe0884ac23c7bf0a | ba9d6e33133709eb8ef9c643e50646596f8ab98b | /homeworks/hole_detection.py | 79f5583093614b7b290b86aac89e248e3a1b6e74 | [] | no_license | otniel/computer-vision | 2eb5588d7662ada0999001083e5562e3c3e69fd1 | 82430fd60c21d3f6c6609b429b051b25526b0102 | refs/heads/master | 2021-01-25T07:07:51.592712 | 2015-05-18T17:29:10 | 2015-05-18T17:29:10 | 29,542,857 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,639 | py | import Image
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import argrelextrema
from utils.tools import normalize_rgb_image, normalize_grayscale_image
from utils.detect_peaks import detect_peaks
class HoleDetection:
def __init__(self, image):
self.image = normalize_rgb_image(image)
self.pixels = self.image.load()
self.width, self.height = self.image.size
def smooth_list(self, the_list, width):
smoothed_list = []
for index, value in enumerate(the_list):
window = the_list[max(0, index-1):min(index+width, len(the_list))]
new_value = int(sum(window) / len(window))
smoothed_list.append(new_value)
return smoothed_list
def detect_holes(self):
horizontal_histogram = self.get_horizontal_histogram()
vertical_histogram = self.get_vertical_histogram()
smoothed_horizontal = self.smooth_list(horizontal_histogram, 5)
smoothed_horizontal = self.smooth_list(smoothed_horizontal, 5)
smoothed_horizontal = self.smooth_list(smoothed_horizontal, 5)
smoothed_vertical = self.smooth_list(vertical_histogram, 5)
smoothed_vertical = self.smooth_list(smoothed_vertical, 5)
smoothed_vertical = self.smooth_list(smoothed_vertical, 5)
horizontal_candidates = (np.gradient(np.sign(np.gradient(np.array(smoothed_horizontal)))) > 0).nonzero()[0]
vertical_candidates = (np.gradient(np.sign(np.gradient(np.array(smoothed_vertical)))) > 0).nonzero()[0]
# Drawing candidates
for y in xrange(self.height):
for x in horizontal_candidates:
self.pixels[x, y] = (255, 0, 0)
for x in xrange(self.width):
for y in vertical_candidates:
self.pixels[x, y] = (0, 0, 255)
self.image.save('../test-images/holes_intersection.png')
def get_horizontal_histogram(self):
horizontal_histogram = []
for x in range(self.width):
total_row = 0
for y in range(self.height):
total_row += self.pixels[x, y][0]
horizontal_histogram.append(total_row / self.height)
return horizontal_histogram
def get_vertical_histogram(self):
vertical_histogram = []
for y in range(self.height):
total_column = 0
for x in range(self.width):
total_column += self.pixels[x, y][0]
vertical_histogram.append(total_column / self.width)
return vertical_histogram
image = Image.open('../test-images/holes.png')
hd = HoleDetection(image)
hd.detect_holes() | [
"otnieel.aguilar@gmail.com"
] | otnieel.aguilar@gmail.com |
019e554986b56d005e4f2668da030024fbc2998a | 3bfe2238f9337a03780961f756c3317320dbdf7d | /td3.py | 9e349dc284a3a807947fa1644f1cf5e39fd22087 | [] | no_license | herrbilbo/neural-ode-rl | 6cd30f0c9d360a3e131e9a52ff837fb9c1e3f3ab | f30bfa62f93bae527aae981179ef46c50fa64145 | refs/heads/master | 2022-11-08T07:46:16.271687 | 2020-06-24T22:46:15 | 2020-06-24T22:46:15 | 274,783,544 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,262 | py | import gym
import random
import numpy as np
from collections import deque
import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.nn as nn
import pybullet_envs
from torchdiffeq import odeint
from torch.utils.tensorboard import SummaryWriter
import multiprocessing.dummy as multiprocessing
class ConcatLinear(nn.Module):
def __init__(self, dim_in, dim_out):
super(ConcatLinear, self).__init__()
self._layer = nn.Linear(dim_in + 1, dim_out)
nn.init.xavier_normal_(self._layer.weight)
self._layer.bias.data.fill_(0.01)
def forward(self, t, x):
tt = torch.ones_like(x[:, :1]) * t
ttx = torch.cat([tt, x], 1)
return self._layer(ttx)
class MLP_ODE(nn.Module):
def __init__(self, layer_size, INTEGRATION_RIGHT_LIMIT):
super(MLP_ODE, self).__init__()
self.network = ConcatLinear(layer_size, layer_size)
self.integration_time = torch.tensor([0, INTEGRATION_RIGHT_LIMIT]).float()
def forward(self, state):
self.integration_time = self.integration_time.type_as(state)
out = odeint(self.network, state, self.integration_time, method='euler')
return out[1]
class actor(nn.Module):
def __init__(self, state_size, action_size, layer_size, INTEGRATION_RIGHT_LIMIT):
super(actor, self).__init__()
self.fc1 = nn.Linear(state_size, layer_size)
if INTEGRATION_RIGHT_LIMIT == -1.0:
self.fc2 = nn.Linear(layer_size, layer_size)
nn.init.xavier_normal_(self.fc2.weight)
self.fc2.bias.data.fill_(0.01)
else:
self.fc2 = MLP_ODE(layer_size, INTEGRATION_RIGHT_LIMIT)
self.fc3 = nn.Linear(layer_size, action_size)
nn.init.xavier_normal_(self.fc1.weight)
nn.init.xavier_normal_(self.fc3.weight)
self.fc1.bias.data.fill_(0.01)
self.fc3.bias.data.fill_(0.01)
def forward(self, state):
res = F.relu(self.fc1(state))
res = F.relu(self.fc2(res))
res = torch.tanh(self.fc3(res))
return res
class critic(nn.Module):
def __init__(self, state_size, action_size, layer_size, INTEGRATION_RIGHT_LIMIT):
super(critic, self).__init__()
self.fc1 = nn.Linear(state_size + action_size, layer_size)
if INTEGRATION_RIGHT_LIMIT == -1.0:
self.fc2 = nn.Linear(layer_size, layer_size)
nn.init.xavier_normal_(self.fc2.weight)
self.fc2.bias.data.fill_(0.01)
else:
self.fc2 = MLP_ODE(layer_size, INTEGRATION_RIGHT_LIMIT)
self.fc3 = nn.Linear(layer_size, 1)
self.fc4 = nn.Linear(state_size + action_size, layer_size)
if INTEGRATION_RIGHT_LIMIT == -1.0:
self.fc5 = nn.Linear(layer_size, layer_size)
nn.init.xavier_normal_(self.fc5.weight)
self.fc5.bias.data.fill_(0.01)
else:
self.fc5 = MLP_ODE(layer_size, INTEGRATION_RIGHT_LIMIT)
self.fc6 = nn.Linear(layer_size, 1)
nn.init.xavier_normal_(self.fc1.weight)
nn.init.xavier_normal_(self.fc3.weight)
nn.init.xavier_normal_(self.fc4.weight)
nn.init.xavier_normal_(self.fc6.weight)
self.fc1.bias.data.fill_(0.01)
self.fc3.bias.data.fill_(0.01)
self.fc4.bias.data.fill_(0.01)
self.fc6.bias.data.fill_(0.01)
def critic_1(self, state, action):
res = torch.cat((state, action), dim=1)
res = F.relu(self.fc1(res))
res = F.relu(self.fc2(res))
res = self.fc3(res)
return res
def critic_2(self, state, action):
res = torch.cat((state, action), dim=1)
res = F.relu(self.fc4(res))
res = F.relu(self.fc5(res))
res = self.fc6(res)
return res
def forward(self, state, action):
return (self.critic_1(state, action), self.critic_2(state, action))
class replay_buffer:
def __init__(self, max_size, batch_size):
self.max_size = max_size
self.batch_size = batch_size
self.buffer = deque(maxlen=max_size)
def push(self, transition):
self.buffer.append(transition)
def sample(self):
return list(zip(*random.sample(self.buffer, self.batch_size)))
def __len__(self):
return len(self.buffer)
class td3():
def __init__(self, environment_name, state_dim, action_dim, buffer_size, batch_size, gamma, tau, actor_lr, critic_lr, std, std_min, std_decay, c, update_every, sigma, layer_size, INTEGRATION_RIGHT_LIMIT, device):
self.environment_name = environment_name
self.device = device
self.gamma = gamma
self.tau = tau
self.actor_lr = actor_lr
self.critic_lr = critic_lr
self.std = std
self.std_min = std_min
self.std_decay = std_decay
self.c = c
self.update_every = update_every
self.sigma = sigma
self.cur_time = 0
self.actor = actor(state_dim, action_dim, layer_size=layer_size, INTEGRATION_RIGHT_LIMIT=INTEGRATION_RIGHT_LIMIT).to(self.device)
self.critic = critic(state_dim, action_dim, layer_size=layer_size, INTEGRATION_RIGHT_LIMIT=INTEGRATION_RIGHT_LIMIT).to(self.device)
self.actor_target = actor(state_dim, action_dim, layer_size=layer_size, INTEGRATION_RIGHT_LIMIT=INTEGRATION_RIGHT_LIMIT).to(self.device)
self.critic_target = critic(state_dim, action_dim, layer_size=layer_size, INTEGRATION_RIGHT_LIMIT=INTEGRATION_RIGHT_LIMIT).to(self.device)
self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=self.actor_lr)
self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=self.critic_lr)
self.hard_update()
self.replay_buffer = replay_buffer(buffer_size, batch_size)
def update(self, transition):
self.replay_buffer.push(transition)
if len(self.replay_buffer) >= self.replay_buffer.batch_size:
self.cur_time += 1
batch = self.replay_buffer.sample()
states, actions, rewards, next_states, dones = batch
states = torch.tensor(states).to(self.device).float()
next_states = torch.tensor(next_states).to(self.device).float()
rewards = torch.tensor(rewards).to(self.device).float()
actions = torch.tensor(actions).to(self.device).float()
dones = torch.tensor(dones).to(self.device).int()
with torch.no_grad():
next_actions = self.actor_target(next_states)
noise = ((torch.randn_like(actions) * self.sigma).clamp(-self.c, self.c)).to(self.device)
next_actions = (next_actions + noise).clamp(-1, 1).float()
Q_target1, Q_target2 = self.critic_target(next_states, next_actions)
Q_target = rewards.unsqueeze(1) + (self.gamma * torch.min(Q_target1, Q_target2) * ((1 - dones).unsqueeze(1)))
critic_1, critic_2 = self.critic(states, actions)
#critic_loss = (critic_1 - Q_target) ** 2 + (critic_2 - Q_target) ** 2
critic_loss = F.mse_loss(critic_1, Q_target) + F.mse_loss(critic_2, Q_target)
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
if self.cur_time % self.update_every == 0:
actor_loss = -self.critic.critic_1(states, self.actor(states)).mean()
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
self.soft_update()
def act(self, state, noise=False):
state = torch.from_numpy(state).float().unsqueeze(0).to(self.device)
with torch.no_grad():
action = self.actor(state).cpu().data.numpy()
if noise:
noise = np.random.normal(loc=0.0, scale=self.std, size=action.shape)
action = action + noise
action = np.clip(action, -1.0, 1.0)
self.std = max(self.std - self.std_decay, self.std_min)
return action[0]
def hard_update(self):
for target_param, param in zip(self.actor_target.parameters(), self.actor.parameters()):
target_param.data.copy_(param.data)
for target_param, param in zip(self.critic_target.parameters(), self.critic.parameters()):
target_param.data.copy_(param.data)
def soft_update(self):
for target_param, param in zip(self.actor_target.parameters(), self.actor.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
for target_param, param in zip(self.critic_target.parameters(), self.critic.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
def save(self, path='gg'):
torch.save(self.actor.state_dict(), path + '_actor.pkl')
torch.save(self.critic.state_dict(), path + '_critic.pkl')
def check_model(self, episodes=100):
history = []
local_env = gym.make(self.environment_name)
for _ in range(episodes):
state = local_env.reset()
done = False
total = 0
while not done:
action = self.act(state, noise=False)
next_state, reward, done, _ = local_env.step(action)
state = next_state
total += reward
history.append(total)
history = np.array(history)
return history
def train_loop(args):
id, time_const, seed, device_name = args
INTEGRATION_RIGHT_LIMIT = time_const
environment_name = 'Walker2DBulletEnv-v0'
env = gym.make(environment_name)
#seed = 228
env.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
episodes = 3000
layer_size = 128
state_dim = 22
action_dim = 6
buffer_size = 50000
batch_size = 128
gamma = 0.99
actor_lr = 1e-4
critic_lr = 1e-4
tau = 0.05
check_episodes = 100
threshold = 250
std = 0.3
std_min = 0.05
std_decay = (std - std_min) / 500.0
c = 0.5
update_every = 2
sigma = 0.2
device = torch.device(device_name)
agent = td3(environment_name,
state_dim,
action_dim,
buffer_size,
batch_size,
gamma,
tau,
actor_lr,
critic_lr,
std,
std_min,
std_decay,
c,
update_every,
sigma,
layer_size,
INTEGRATION_RIGHT_LIMIT,
device)
history = deque(maxlen=25)
for episode in range(episodes):
state = env.reset()
score = 0
done = False
while not done:
if episode < 25:
action = env.action_space.sample()
else:
action = agent.act(state, noise=True)
next_state, reward, done, _ = env.step(action)
transition = state, action, reward, next_state, done
agent.update(transition)
state = next_state
score += reward
history.append(score)
if episode % 25 == 0:
agent.save(path=f'id_{id}_agent_{episode}')
if episode % 25 == 0:
local_history = agent.check_model(episodes=check_episodes)
local_mean = np.mean(local_history)
local_var = np.sqrt(np.var(local_history))
writer.add_scalar(f'id_{id}_mean', local_mean, episode)
writer.add_scalar(f'id_{id}_var', local_var, episode)
writer.flush()
#if local_mean >= threshold:
# agent.save(path=f'id_{id}_agent_{episode}')
if __name__ == "__main__":
writer = SummaryWriter("output")
print('Begin!')
# 42, 131, 455, 16
int_time_list = [(1, 0.1, 42, 'cuda:0'),
(2, 0.3, 42, 'cuda:1'),
(3, 0.1, 131, 'cuda:0'),
(4, 0.3, 131, 'cuda:1'),
(5, 0.1, 455, 'cuda:0'),
(6, 0.3, 455, 'cuda:1'),
(7, 0.1, 16, 'cuda:0'),
(8, 0.3, 16, 'cuda:1')]
#p = multiprocessing.Pool()
p = multiprocessing.Pool(processes=22)
p.map(train_loop, int_time_list)
p.close()
p.join()
#train_loop((1, -1.0, 'cuda:0'))
writer.close()
print('Done!')
| [
"noreply@github.com"
] | noreply@github.com |
817f463578dec04dd4c4938523da72404795d281 | f5b31994dfbe4effa7868557f223952d1a2dc0f6 | /src/mic.py | e648fca6167cfaad255f044ac0b3bb511beb4322 | [] | no_license | jacobsny/hackathon_checkin | 96f39a4f4b55599e214617c439bbc63159a0fcf6 | ff588c8511f4cf496bdf53dee1e534977a6e5a8a | refs/heads/master | 2020-07-29T21:06:08.345970 | 2019-09-22T13:14:05 | 2019-09-22T13:14:05 | 209,959,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,423 | py | from __future__ import division
import re
import sys
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
import pyaudio
from six.moves import queue
# Audio recording parameters
RATE = 16000
CHUNK = int(RATE / 10) # 100ms
class MicrophoneStream(object):
"""Opens a recording stream as a generator yielding the audio chunks."""
def __init__(self, rate, chunk):
self._rate = rate
self._chunk = chunk
# Create a thread-safe buffer of audio data
self._buff = queue.Queue()
self.closed = True
def __enter__(self):
self._audio_interface = pyaudio.PyAudio()
self._audio_stream = self._audio_interface.open(
format=pyaudio.paInt16,
# The API currently only supports 1-channel (mono) audio
# https://goo.gl/z757pE
channels=1, rate=self._rate,
input=True, frames_per_buffer=self._chunk,
# Run the audio stream asynchronously to fill the buffer object.
# This is necessary so that the input device's buffer doesn't
# overflow while the calling thread makes network requests, etc.
stream_callback=self._fill_buffer,
)
self.closed = False
return self
def __exit__(self, type, value, traceback):
self._audio_stream.stop_stream()
self._audio_stream.close()
self.closed = True
# Signal the generator to terminate so that the client's
# streaming_recognize method will not block the process termination.
self._buff.put(None)
self._audio_interface.terminate()
def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
"""Continuously collect data from the audio stream, into the buffer."""
self._buff.put(in_data)
return None, pyaudio.paContinue
def generator(self):
while not self.closed:
# Use a blocking get() to ensure there's at least one chunk of
# data, and stop iteration if the chunk is None, indicating the
# end of the audio stream.
chunk = self._buff.get()
if chunk is None:
return
data = [chunk]
# Now consume whatever other data's still buffered.
while True:
try:
chunk = self._buff.get(block=False)
if chunk is None:
return
data.append(chunk)
except queue.Empty:
break
yield b''.join(data)
def listen_print_loop(responses):
"""Iterates through server responses and prints them.
The responses passed is a generator that will block until a response
is provided by the server.
Each response may contain multiple results, and each result may contain
multiple alternatives; for details, see https://goo.gl/tjCPAU. Here we
print only the transcription for the top alternative of the top result.
In this case, responses are provided for interim results as well. If the
response is an interim one, print a line feed at the end of it, to allow
the next result to overwrite it, until the response is a final one. For the
final one, print a newline to preserve the finalized transcription.
"""
num_chars_printed = 0
for response in responses:
if not response.results:
continue
# The `results` list is consecutive. For streaming, we only care about
# the first result being considered, since once it's `is_final`, it
# moves on to considering the next utterance.
result = response.results[0]
if not result.alternatives:
continue
# Display the transcription of the top alternative.
transcript = result.alternatives[0].transcript
# Display interim results, but with a carriage return at the end of the
# line, so subsequent lines will overwrite them.
#
# If the previous result was longer than this one, we need to print
# some extra spaces to overwrite the previous result
overwrite_chars = ' ' * (num_chars_printed - len(transcript))
if not result.is_final:
sys.stdout.write(transcript + overwrite_chars + '\r')
sys.stdout.flush()
num_chars_printed = len(transcript)
else:
return transcript + overwrite_chars
def main(lang):
# See http://g.co/cloud/speech/docs/languages
# for a list of supported languages.
language_code = lang # a BCP-47 language tag
client = speech.SpeechClient()
config = types.RecognitionConfig(
encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=RATE,
language_code=language_code)
streaming_config = types.StreamingRecognitionConfig(
config=config,
interim_results=True)
with MicrophoneStream(RATE, CHUNK) as stream:
audio_generator = stream.generator()
requests = (types.StreamingRecognizeRequest(audio_content=content)
for content in audio_generator)
responses = client.streaming_recognize(streaming_config, requests)
# Now, put the transcription responses to use.
return listen_print_loop(responses)
if __name__ == '__main__':
main('en-US') | [
"jacob.snyderman@gmail.com"
] | jacob.snyderman@gmail.com |
f024098d382063eb23c1aa5ac661a60dd1e56307 | 0556e11758ec9a632d4b1406be1597c6900aa93d | /tutorials/tutorial05/05_SPDE_on_fenics_solver.py | 99cad0868dac1a4b573f149230b8cc61853a6a92 | [
"MIT"
] | permissive | mtezzele/ATHENA | 85b5f1dd44180314ab9548a7cac054ca8a77b9df | d8f48680e035a4d51d51c1883932b46fd0bd8da8 | refs/heads/master | 2023-05-30T10:53:27.722329 | 2023-04-27T22:05:40 | 2023-04-27T22:05:40 | 226,117,142 | 0 | 0 | null | 2019-12-05T14:10:57 | 2019-12-05T14:10:56 | null | UTF-8 | Python | false | false | 5,930 | py | from dolfin import *
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
from pathlib import Path
import warnings
warnings.filterwarnings('ignore')
def compute_mesh_map(mesh, dim):
m_map = np.zeros((dim, 2))
for j, cell in enumerate(cells(mesh)):
m_map[j, :] = cell.midpoint().array()[:2]
# print(m_map.shape)
return m_map
def compute_cov(mesh, beta, dim, mesh_map):
print("start covariance assemble")
cov = np.zeros((dim, dim))
for i in range(dim):
for j in range(i, dim):
cov[j, i] = cov[i, j] = np.exp(
-(np.linalg.norm(mesh_map[i, :] - mesh_map[j, :], 1)) / (beta))
print("end covariance assemble")
evals, evecs = np.linalg.eig(cov)
E = (evals[:m] * evecs[:, :m]).T
return cov, E
def set_conductivity(sim_index, mesh, c):
# print("set conductivity")
D = FunctionSpace(mesh, "DG", 0)
kappa = Function(D)
dm = D.dofmap()
for i, cell in enumerate(cells(mesh)):
kappa.vector()[dm.cell_dofs(cell.index())] = np.exp(c[sim_index, i])
return kappa
def boundary(x):
return x[1] < DOLFIN_EPS or x[1] > 1.0 - DOLFIN_EPS
def boundary0(x):
return x[0] < DOLFIN_EPS
def compute_solution(sim_index, mesh, kappa, pl=False):
# print("compute solution")
V = FunctionSpace(mesh, "Lagrange", 1)
u0 = Expression("10*x[1]*(1-x[1])", degree=0)
bc = DirichletBC(V, Constant(0.0), boundary)
bc0 = DirichletBC(V, u0, boundary0)
u = TrialFunction(V)
v = TestFunction(V)
f = Constant(
1.0
) #Expression("exp( - 2*pow(x[0]-0.5, 2) - 2*pow(x[1]-0.5, 2) )", element=V.ufl_element())
a = kappa * inner(grad(u), grad(v)) * dx
L = f * v * dx
u = Function(V)
solve(a == L, u, [bc, bc0])
if pl:
u_pl = plot(u, title='u')
plt.colorbar(u_pl)
plt.show()
return u
def restrict(mesh, v):
# print("restrict on outflow right side")
Right = AutoSubDomain(lambda x, on_bnd: near(x[0], 1) and on_bnd)
V = FunctionSpace(mesh, 'CG', 1)
bc0 = DirichletBC(V, 1, Right)
u = Function(V)
bc0.apply(u.vector())
v_restriction = v.vector()[u.vector() == 1]
return v_restriction.mean()
def compute_gradients(component_index,
mesh,
kappa,
E,
boundary,
cache,
solution,
pl=False):
# print("compute gradient")
V = FunctionSpace(mesh, "Lagrange", 1)
bc = DirichletBC(V, Constant(0.0), boundary)
w = TrialFunction(V)
v = TestFunction(V)
a = kappa * inner(grad(w), grad(v)) * dx
D = FunctionSpace(mesh, "DG", 0)
dkappa = Function(D)
dm = D.dofmap()
for i, cell in enumerate(cells(mesh)):
dkappa.vector()[dm.cell_dofs(
cell.index())] = kappa.vector()[dm.cell_dofs(
cell.index())] * E[component_index, i]
rhs = dkappa * inner(grad(solution), grad(v)) * dx
w = Function(V)
solve(a == rhs, w, bc)
if pl:
w_pl = plot(w, title='w')
plt.colorbar(w_pl)
plt.show()
return w
def show_mode(mode, mesh):
c = MeshFunction("double", mesh, 2)
# value = mode.dot(E)
# Iterate over mesh and set values
for i, cell in enumerate(cells(mesh)):
c[cell] = mode[i] #np.exp(value[i])
plot(c)
plt.show()
# Read mesh from file and create function space
mesh = Mesh("data/mesh.xml")
#dim = 6668 #mesh_2
dim = 3194
m = 10
M = 500
d = 1668
cache = np.zeros((d, m))
cache_res = np.zeros(m)
#choose lengthscale
beta = 0.015 #beta=0.03
inputs = np.random.multivariate_normal(np.zeros(m), np.eye(m), M)
#samples
np.save("data/inputs", inputs)
#covariance modes assemble
m_map = compute_mesh_map(mesh, dim)
cov, E = compute_cov(mesh, beta, dim, m_map)
c = inputs.dot(E)
np.save("data/covariance", cov)
np.save("data/cov_modes", E)
print("Karhunen-Loève mode shape", E.shape)
n = 2
print("Mode number {} of Karhunen-Loève decomposition".format(n))
show_mode(E[n, :], mesh)
# cov = np.load("data/covariance.npy", allow_pickle=True)
# E = np.load("data/cov_modes.npy", allow_pickle=True)
V = FunctionSpace(mesh, "Lagrange", 1)
dofs = V.dofmap().dofs()
# Get coordinates as len(dofs) x gdim array
dim = V.dim()
N = mesh.geometry().dim()
dofs_x = V.tabulate_dof_coordinates()
n_dof = 300
print("Coordinates of degree of freedom number {0} are {1}".format(
n_dof, dofs_x[n_dof]))
mesh = Mesh("data/mesh.xml")
V = FunctionSpace(mesh, "Lagrange", 1)
u = Function(V)
print(np.array(u.vector()[:]).shape)
for j in range(16):
for i in range(1668):
if i == (j + 1) * 100:
u.vector()[i] = 1
else:
u.vector()[i] = 0
plot(u, title='dof {}'.format((1 + j) * 100))
plt.savefig('data/component_{}.png'.format((1 + j) * 100))
for it in range(M):
print("Solution number :", it)
#set conductivity
kappa = set_conductivity(it, mesh, c)
#plot(kappa)
#plt.show()
#compute solution
u = compute_solution(it, mesh, kappa, pl=False) #pl=True to plot
u_res = restrict(mesh, u)
#print("mean of the solution restricted on the outflow (right side)", u_res)
#compute gradients
for j in range(m):
#print("Evaluating gradient component number :", j)
du = compute_gradients(j, mesh, kappa, E, boundary, cache, u)
du_res = restrict(mesh, du)
cache[:, j] = du.vector()[:]
cache_res[j] = du_res
file = Path("data/outputs.npy")
with file.open('ab') as f:
np.save(f, u.vector()[:])
file = Path("data/outputs_res.npy")
with file.open('ab') as f:
np.save(f, u_res)
file = Path("data/gradients.npy")
with file.open('ab') as f:
np.save(f, cache)
file = Path("data/gradients_res.npy")
with file.open('ab') as f:
np.save(f, cache_res)
| [
"francesco.romor@gmail.com"
] | francesco.romor@gmail.com |
dac71081393db6a981bf3c583d5697793ad0de22 | 24ce8e56cd54c93c5a285089acb1825132e9e2eb | /495.TeemoAttacking/teemoattacking.py | ebdaf083b15431291215be7042244143c3ef7706 | [] | no_license | mayuripatil07/LeetCode | 6c7d2148e05fe2c086412d38a7cb71e6e74eee87 | 4617b11d9487385522ba665ca34b378659afe02c | refs/heads/master | 2023-01-13T11:03:49.805792 | 2020-11-01T21:41:59 | 2020-11-01T21:41:59 | 261,207,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | class Solution:
def findPoisonedDuration(self, timeSeries: List[int], duration: int) -> int:
if not timeSeries:
return 0
poison_time = timeSeries[0] + duration
poison_condition = duration
for i in range(1,len(timeSeries)):
if poison_time <= timeSeries[i]:
poison_condition += duration
poison_time = timeSeries[i] + duration
elif poison_time > timeSeries[i]:
diff = poison_time - timeSeries[i]
add_time = duration - diff
if diff <= duration:
poison_condition += add_time
poison_time = timeSeries[i] + duration
return poison_condition
| [
"mpatil7@binghamton.edu"
] | mpatil7@binghamton.edu |
8f4026d972f244c0391ff8f24625e881b3fc284a | 1bf7f5fdfc5e7dbbc5ba14698ff488aa76b34d58 | /virtual/bin/gunicorn_paster | 3977588403c354347f27e635608040cf5dca8a00 | [
"MIT"
] | permissive | Brian23-eng/News-Highlight | ca86fab23ebfc429b31624c36ac7c3520d46966d | 19c8816cbcf2980a381d01788ba604cc85c8ebaa | refs/heads/master | 2021-06-25T02:05:01.848783 | 2019-10-17T08:09:19 | 2019-10-17T08:09:19 | 214,452,153 | 0 | 0 | MIT | 2021-03-20T01:54:54 | 2019-10-11T14:06:20 | Python | UTF-8 | Python | false | false | 282 | #!/home/brian/Documents/Core/Python/Flask/News-Highlight/virtual/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.pasterapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"b.odhiambo.bo@gmail.com"
] | b.odhiambo.bo@gmail.com | |
2e9f65cb125310604066c976eeca51526b77e980 | 4b226dfa9c17da6bab4c2e1831de057b049a7919 | /monitoring/availability.py | 17860d236749fd6a9fab47a417a837db94250c16 | [] | no_license | eea/inspire.harvest.feasibility.tools | 616070cff9608c09bb3543be711e7d66152c17a5 | b4994db8bd02ccfc2c5d5bf9fe5c8efbfda55bc3 | refs/heads/master | 2023-08-01T06:27:52.888886 | 2018-11-15T16:09:17 | 2018-11-15T16:09:17 | 142,995,171 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,446 | py | import logging
import csv
from datetime import datetime
import pytz
import requests
from monitoring.common import HTTPCheckResult, Monitor, get_service_urls
logger = logging.getLogger("availability_check")
info, debug, error = logger.info, logger.debug, logger.error
def check_availability(url, url_id, output_path, csv_write_lock, timeout):
"""
Checks the availability of a URL, and appends the result to a CSV file.
URL's are verified using a streaming GET request - the connection is severed
once the headers are received, to avoid impacting services with a sizeable
response content size.
Parameters:
url(str) : The URL to check
url_id(int) : The is written to file instead of the URL, for correlation.
output_path(str): The path of the CSV file to append the results to.
csv_write_lock (threading.Lock): Lock for writing to CSV.
timeout(float): The timeout in seconds for the GET requests - if `None`,
defaults to `DEFAULT_CHECK_INTERVAL`.
"""
info(f"Checking {url}")
try:
with requests.get(url, timeout=timeout, stream=True) as r:
try:
content_length = int(r.headers["Content-Length"])
except (KeyError, ValueError):
content_length = None
result = HTTPCheckResult(
status_code=r.status_code,
content_length=content_length,
content_type=r.headers.get("Content-Type"),
duration=r.elapsed.total_seconds(),
last_modified=r.headers.get("Last-Modified"),
)
except requests.exceptions.Timeout:
result = HTTPCheckResult(timeout=True)
except requests.exceptions.ConnectionError:
result = HTTPCheckResult(connection_error=True)
with csv_write_lock:
with open(output_path, "a") as f:
w = csv.writer(f, delimiter="\t")
w.writerow(
[
datetime.now(pytz.UTC).isoformat(),
url_id,
result.status_code,
result.content_length,
result.content_type,
result.duration,
result.last_modified,
1 if result.timeout else 0,
1 if result.connection_error else 0,
]
)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Run the INSPIRE endpoints availability monitor"
)
parser.add_argument("--endpoints-csv", help="Path to CSV with endpoint URL's")
parser.add_argument("--output", help="Path to monitoring output file")
parser.add_argument(
"--urls-col-no", default=0, type=int, help="URL's column number in the CSV file"
)
parser.add_argument(
"--check-interval",
default=300,
type=int,
help="Interval to check every endpoint at, in seconds. Defaults to 5 min.",
)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO, format="%(message)s")
logging.getLogger("schedule").setLevel(logging.WARNING)
urls = get_service_urls(args.endpoints_csv, col_no=args.urls_col_no)
monitor = Monitor(
service_urls=urls,
check_func=check_availability,
output_path=args.output,
check_interval=args.check_interval,
)
monitor.run()
| [
"andrei@duhnea.net"
] | andrei@duhnea.net |
2d399e7009cc07f480db636304e56d03c3aa52d9 | 32d4cbfce0edf448e5c6518a3dc5fb7a27d74b88 | /build/lib/malaria/study_sites/SugungumAgeSeasonCalibSiteBabies.py | d572d77d05d0948e88dbcdb4bceecce749917500 | [] | no_license | bertozzivill/dtk-tools-malaria-old | 7458164f35452d3ede99d03d709197a7108366ba | 9aef758261b67c0b06520d8b74b7a9da537448fc | refs/heads/master | 2020-03-24T19:24:02.338717 | 2018-12-17T21:53:21 | 2018-12-17T21:53:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,717 | py | import logging
import os
import numpy as np
from calibtool.analyzers.Helpers import season_channel_age_density_csv_to_pandas
from calibtool.study_sites.site_setup_functions import \
config_setup_fn, summary_report_fn, add_treatment_fn, site_input_eir_fn
from calibtool.study_sites.DensityCalibSite import DensityCalibSite
logger = logging.getLogger(__name__)
class SugungumAgeSeasonCalibSiteBabies(DensityCalibSite):
metadata = {
'parasitemia_bins': [0.0, 16.0, 70.0, 409.0, np.inf], # (, 0] (0, 16] ... (409, inf]
'age_bins': [0, 1, 4, 8, 18, 28, 43, np.inf], # (, 1] (1, 4] ... (43, inf],
'seasons': ['DC2', 'DH2', 'W2'],
'seasons_by_month': {
'May': 'DH2',
'September': 'W2',
'January': 'DC2'
},
'village': 'Matsari'
}
def get_reference_data(self, reference_type):
super(SugungumAgeSeasonCalibSiteBabies, self).get_reference_data(reference_type)
# Load the Parasitology CSV
dir_path = os.path.dirname(os.path.realpath(__file__))
reference_csv = os.path.join(dir_path, 'inputs', 'GarkiDB_data', 'GarkiDBparasitology.csv')
reference_data = season_channel_age_density_csv_to_pandas(reference_csv, self.metadata).reset_index()
reference_data = (reference_data[reference_data['Age Bin'] == 1.0]).set_index(
['Channel', 'Season', 'Age Bin', 'PfPR Bin'])
return reference_data
def get_setup_functions(self):
setup_fns = super(SugungumAgeSeasonCalibSiteBabies, self).get_setup_functions()
setup_fns.append(config_setup_fn(duration=365 * 2)) # 60 years (with leap years)
setup_fns.append(summary_report_fn(start=365, interval=365.0 / 12, description='Monthly_Report',
parasitemia_bins=[0.0, 16.0, 70.0, 409.0, 4000000.0],
age_bins=[1.0, 4.0, 8.0, 18.0, 28.0, 43.0, 400000.0]))
setup_fns.append(site_input_eir_fn(self.name, birth_cohort=True))
setup_fns.append(lambda cb: cb.update_params(
{'Demographics_Filenames': ['Calibration\\birth_cohort_demographics_babies.json'],
'Age_Initialization_Distribution_Type': 'DISTRIBUTION_SIMPLE',
'Base_Population_Scale_Factor': 10,
'Birth_Rate_Dependence': 'FIXED_BIRTH_RATE',
"Death_Rate_Dependence": "NONDISEASE_MORTALITY_OFF",
'Enable_Birth': 1,
'Enable_Vital_Dynamics': 1,
'Maternal_Antibodies_Type': 'SIMPLE_WANING',
}))
return setup_fns
def __init__(self):
super(SugungumAgeSeasonCalibSiteBabies, self).__init__('Sugungum_babies')
| [
"jsuresh@idmod.org"
] | jsuresh@idmod.org |
7a6ad161974c26b3fb9c026dab412103523de24d | 6492db43d623d3ef5d47bfe9b22486d858b9a243 | /assignments/day2-homework/fasta_reader.py | bbbb51724a88ffbd816b5edebaaaccd892802607 | [] | no_license | rgenner/qbb2021 | 76cfed73916e51e7985f95d31e3b1587e49ebd72 | bb6b1d069f2ff438205ce5e44ddaa6ec42ddb3be | refs/heads/main | 2023-08-26T02:52:18.649496 | 2021-10-22T05:24:42 | 2021-10-22T05:24:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | py | #!/usr/bin/env python3
def FASTAReader(file):
# Get the first line, which should contain the sequence name
line = file.readline()
# Let's make sure the file looks like a FASTA file
assert line.startswith('>'), "Not a FASTA file"
# Get the sequence name
seq_id = line[1:].rstrip('\r\n')
# create a list to contain the
sequence = []
# Get the next line
line = file.readline()
# Add a list to hold all of the sequences in
sequences = []
# Keep reading lines until we run out
while line:
# Check if we've reached a new sequence (in a multi-sequence file)
if line.startswith('>'):
# Add previous sequence to list
sequences.append((seq_id, ''.join(sequence)))
# Record new sequence name and reset sequence
seq_id = line[1:].rstrip('\r\n')
sequence = []
else:
# Add next chunk of sequence
sequence.append(line.strip())
# Get the next line
line = file.readline()
# Add the last sequence to sequences
sequences.append((seq_id, ''.join(sequence)))
return sequences | [
"kweave23@jhu.edu"
] | kweave23@jhu.edu |
5e1e5a1b37d9ffaf4acf035129a76aebec8edb4b | 242453b215468acdd2c13109757a3076aa7d04aa | /lessons/lesson11-prallel-tasks/Thread-web-examples.py | 54a5d5626bf4d1f16a0e940738d74651ece09e56 | [] | no_license | maksrom/data-science-less | de5644ce3fd27b99271c4c9b190243c945327ec3 | 8eb494cc907a48218bdcbcc0b9e63403b85e3941 | refs/heads/master | 2020-03-21T08:06:39.474963 | 2018-06-22T16:02:16 | 2018-06-22T16:02:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | import urllib.request
import threading
import queue
q = queue.Queue()
def request():
while True:
url = q.get()
if url is None:
break
r = urllib.request.urlopen(url)
print(len(r.read()))
q.task_done()
for i in range(5):
t = threading.Thread(target=request)
t.start()
for i in range(50):
q.put('http://maksr51314.zz.mu/')
q.join()
for i in range(5):
q.put(None)
| [
"Maxim.Romaniv@netent.com"
] | Maxim.Romaniv@netent.com |
40115813710fb922b4615d58c11ab7d51905be62 | 9de9beaf657bf3d5967997b301753c3d1cd03d51 | /2. SLAE/errors/errors.py | 925669e7fe86341252a64bbc1bc78c60a59c5b8c | [] | no_license | karmapolice-0/Numerical-things | e87116b86c52b63424137f72ae079c3a48a7154b | 41852fb84fed71a0a5673eaa977a476b95733e59 | refs/heads/master | 2021-05-18T19:35:04.576327 | 2020-04-04T19:59:14 | 2020-04-04T19:59:14 | 251,380,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,462 | py | class MatrixError(Exception):
def __init__(self, msg=""):
self.message = msg
def __str__(self):
return self.message
class DimensionError(MatrixError):
def __init__(self, err, *args):
self.message = err
class NotListOrTuple(MatrixError):
def __init__(self, err, *args):
self.message = f"Given value should be a list or a tuple, not '{type(err).__name__}'"+". ".join(args)
class EmptyMatrix(MatrixError):
def __init__(self, err, *args):
self.message = str(err).join(args)
class InvalidIndex(MatrixError):
def __init__(self, err, *args):
self.message = f"'{type(err).__name__}' type index '{err}' can't be used as a row index. "+". ".join(args)
class InvalidColumn(MatrixError):
def __init__(self, err, *args):
self.message = f"'{type(err).__name__}' type index '{err}' can't be used as a column index. "+". ".join(args)
class FillError(MatrixError):
def __init__(self, err, *args):
self.message = f"'{type(err).__name__}' type '{err}' can't be used to fill matrices. "+". ".join(args)
class OutOfRangeList(MatrixError):
def __init__(self, lis, r, *args):
self.message = f"Given {lis} should have values in range {r} \n"+". ".join(args)
class ParameterError(MatrixError):
def __init__(self, err, params, *args):
self.message = f"'{err}' isn't a valid parameter name. \nAvailable parameter names:\n\t{params}. "+". ".join(args)
| [
"akselivj@gmail.com"
] | akselivj@gmail.com |
4acf925d2f474e88d0b195933e8e7df31a2aa765 | 9446feb2a94486ac16c585f712dbcbea7d112a9d | /src/taskmaster/cli/master.py | b78926059cf4a36ee7d184b223ba2326de9179e4 | [
"Apache-2.0"
] | permissive | jdunck/taskmaster | c16c879a546dd2ac383f804788e2d8ae2606abd1 | 04a03bf0853facf318ce98192db6389cdaaefe3c | refs/heads/master | 2023-08-23T19:29:22.605052 | 2012-05-16T00:52:24 | 2012-05-16T00:52:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 958 | py | """
taskmaster.cli.master
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
def run(target, reset=False, size=10000, address='tcp://0.0.0.0:3050'):
from taskmaster.server import Server, Controller
server = Server(address, size=size)
controller = Controller(server, target)
if reset:
controller.reset()
controller.start()
def main():
import optparse
import sys
parser = optparse.OptionParser()
parser.add_option("--address", dest="address", default='tcp://127.0.0.1:3050')
parser.add_option("--size", dest="size", default='10000', type=int)
parser.add_option("--reset", dest="reset", default=False, action='store_true')
(options, args) = parser.parse_args()
if len(args) != 1:
print 'Usage: tm-master <callback>'
sys.exit(1)
sys.exit(run(args[0], **options.__dict__))
if __name__ == '__main__':
main()
| [
"dcramer@gmail.com"
] | dcramer@gmail.com |
629f2ff4feeb1c2a14b762b23be4363df4961583 | bee1bf4e458a7ea4be0cd70be00f2d7d7b2d1d3f | /lu_factorization.py | 7250058d8c2ecadc8a5e05d55857cdff0822430f | [] | no_license | mateusoliveira43/estudos-Algebra-Linear-Computacional | 3b3abd4f4b3ee7b4e2c75b8863753d5299324334 | aa1cbe2d3a928c9de689512aa8da7add3d43ebb6 | refs/heads/master | 2023-02-16T04:51:31.749993 | 2021-01-16T14:13:22 | 2021-01-16T14:13:22 | 295,873,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,395 | py | import numpy as np
from pprint import pprint
import timeit
def lu_factorization(matrix):
# fazer type hinting depois
dimensions = matrix.shape
n = dimensions[0]
lower_matrix = np.eye(n)
for i in range(n-1):
for k in range(i+1, n):
lower_matrix[k][i] = matrix[k][i]/matrix[i][i]
for j in range(i+1, n):
matrix[k][j] = matrix[k][j] - lower_matrix[k][i]*matrix[i][j]
upper_matrix = np.triu(matrix)
result = []
result.append(lower_matrix)
result.append(upper_matrix)
return result
def exibe_testes(matrix, name_matrix):
inicio = timeit.default_timer()
lu_factorization_matrix = lu_factorization(matrix)
fim = timeit.default_timer()
print(f'matriz {name_matrix}:')
pprint(matrix)
print(f'matriz triangular inferior da fatoração LU de {name_matrix}:')
pprint(lu_factorization_matrix[0])
print(f'matriz triangular superior da fatoração LU de {name_matrix}:')
pprint(lu_factorization_matrix[1])
print(f'tempo decorrido: {fim-inicio}')
print()
if __name__ == "__main__":
# fazer mais testes e melhores (e automatizados)
identity = np.eye(3)
A = np.array([[2, 1, 1, 0], [4, 3, 3, 1], [8, 7, 9, 5], [6, 7, 9, 8]])
B = np.array([[2, 2, 2], [4, 7, 7], [6, 18, 22]])
exibe_testes(A, 'A')
exibe_testes(B, 'B')
exibe_testes(identity, 'I')
| [
"matews1943@gmail.com"
] | matews1943@gmail.com |
fdf8d0c74a52e8f39d8f597575a9abaa39184b7d | 31996e49289655f60b71ed176cc94e32648ffe40 | /criterion.py | 3354fd91fc4305af5fdb94aa158db4645a80560f | [] | no_license | Will3577/MultitaskOCTA | 064fe7d437fe4a234653f4e5ba50faccc6f4bfb6 | b6719e10318421bc841daf66468c52066bec0f7a | refs/heads/master | 2023-06-24T06:59:05.763417 | 2021-07-13T09:05:55 | 2021-07-13T09:05:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,651 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 14 10:40:54 2019
@author: wujon
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import surface_distance
# import nibabel as ni
import scipy.io
import scipy.spatial
import xlwt
import os
import cv2
from skimage import morphology
from skimage.morphology import thin
from sklearn.metrics import confusion_matrix, jaccard_score, f1_score
# Work relative to the script's launch directory (no-op chdir kept as-is).
os.chdir('./')
# Name of the prediction run being evaluated; predicted masks are expected
# under ./smp/<predictName>/.
predictName = 'cotrain_192_pad'
predictPath = './smp/' + predictName + '/'
# Directory holding the ground-truth (manual annotation) masks.
labelPath = "./smp/mask_ori_f1/"
# Experiment output directory (used for plots/reports when enabled).
name_experiment = 'exp_test'
path_experiment = './' + name_experiment + '/'
# Alternative input/output paths kept for reference:
# labelPath = "./gt/"
# outpredictPath = "./gt_poor_o_thin/"
# outlabelPath = "./gt_o_thin/"
def getDSC(testImage, resultImage):
    """Return the Dice Similarity Coefficient between two binary masks."""
    gt = testImage.flatten()
    pred = resultImage.flatten()
    # scipy returns the Dice *dissimilarity*; similarity = 1 - dissimilarity.
    return 1.0 - scipy.spatial.distance.dice(gt, pred)
def getJaccard(testImage, resultImage):
    """Compute the Jaccard index (intersection over union) of two binary masks.

    The original docstring incorrectly said "Dice Similarity Coefficient";
    the computation is and always was the Jaccard similarity.
    """
    testArray = testImage.flatten()
    resultArray = resultImage.flatten()
    # scipy returns the Jaccard *dissimilarity*; similarity = 1 - dissimilarity.
    return 1.0 - scipy.spatial.distance.jaccard(testArray, resultArray)
def getPrecisionAndRecall(testImage, resultImage):
    """Compute precision and recall of a binary prediction.

    Args:
        testImage: ground-truth binary mask (values in {0, 1}).
        resultImage: predicted binary mask (values in {0, 1}).

    Returns:
        ``(precision, recall)`` as floats. Each value is 0.0 when its
        denominator is zero (no predicted / no ground-truth positives),
        instead of the NaN that the unguarded 0/0 division would produce.
    """
    testArray = testImage.flatten()
    resultArray = resultImage.flatten()
    TP = np.sum(testArray * resultArray)         # predicted 1, truth 1
    FP = np.sum((1 - testArray) * resultArray)   # predicted 1, truth 0
    FN = np.sum(testArray * (1 - resultArray))   # predicted 0, truth 1
    precision = TP / (TP + FP) if (TP + FP) > 0 else 0.0
    recall = TP / (TP + FN) if (TP + FN) > 0 else 0.0
    return precision, recall
def intersection(testImage, resultImage):
    """Skeleton-based overlap score (clDice-style harmonic mean).

    Skeletonizes both masks and combines a skeleton-precision and a
    skeleton-recall term into their harmonic mean (an F1 over the
    skeleton overlaps).

    NOTE(review): both terms are normalised by the ground-truth skeleton
    length ``np.sum(testSkel)``; the clDice metric normally normalises the
    precision term by the *prediction* skeleton length instead -- confirm
    this denominator is intended.

    Args:
        testImage: ground-truth binary mask.
        resultImage: predicted binary mask.

    Returns:
        Harmonic mean of the two skeleton-overlap terms, or 0.0 when the
        ground-truth skeleton is empty or both terms are zero (avoids the
        0/0 NaN of the unguarded division).
    """
    testSkel = morphology.skeletonize(testImage).astype(int).flatten()
    resultSkel = morphology.skeletonize(resultImage).astype(int).flatten()
    testArray = testImage.flatten()
    resultArray = resultImage.flatten()
    skel_len = np.sum(testSkel)
    if skel_len == 0:
        return 0.0  # no ground-truth skeleton: score is undefined, report 0
    recall = np.sum(resultSkel * testArray) / skel_len
    precision = np.sum(resultArray * testSkel) / skel_len
    if precision + recall == 0:
        return 0.0  # both overlaps empty; harmonic mean would be 0/0
    # Renamed away from the original local `intersection`, which shadowed
    # this function's own name.
    return 2 * precision * recall / (precision + recall)
if __name__ == "__main__":
labelList = os.listdir(labelPath)
# labelList.sort(key = lambda x: int(x[:-4]))
img_nums = len(labelList)
Q1 = []
Q2 = []
Q3 = []
Q4 = []
Q5 = []
Q6 = []
Q7 = []
Q8 = []
Q9 = []
Q10 = []
Q11 = []
Q12 = []
Q13 = []
Q14 = []
Q15 = []
Q16 = []
Q17 = []
book = xlwt.Workbook(encoding='utf-8', style_compression=0)
sheet = book.add_sheet('mysheet', cell_overwrite_ok=True)
row_num = 0
sheet.write(row_num, 0, 'CaseName')
sheet.write(row_num, 1, 'DSC')
sheet.write(row_num, 2, 'Pre')
sheet.write(row_num, 3, 'Recall')
sheet.write(row_num, 4, 'HD')
sheet.write(row_num, 5, 'ASSD')
sheet.write(row_num, 6, 'surface_dice_0')
sheet.write(row_num, 7, 'rel_overlap_gt')
sheet.write(row_num, 8, 'rel_overlap_pred')
sheet.write(row_num, 9, 'intersec')
sheet.write(row_num, 10, 'HD_thin')
sheet.write(row_num, 11, 'ASSD_thin')
sheet.write(row_num, 12, 'surface_dice_1')
sheet.write(row_num, 13, 'surface_dice_2')
sheet.write(row_num, 14, 'Jaccard')
sheet.write(row_num, 15, 'acc')
sheet.write(row_num, 16, 'spe')
sheet.write(row_num, 17, 'sen')
for idx, filename in enumerate(labelList):
label = cv2.imread(labelPath + filename, 0)
# print (label.dtype)
# label = cv2.imread(labelPath + filename)
label[label < 50] = 0
label[label >= 50] = 1
thinned_label = thin(label)
# cv2.imwrite(outlabelPath+filename,(thinned_label*255).astype(np.uint8))
# ret,label = cv2.threshold(label,127,255,cv2.THRESH_BINARY)
predict = cv2.imread(predictPath + filename.replace('_manual.png', '_expert.png'), 0)
# print(predictPath + filename)
# print (predict.dtype)
# ret,predict = cv2.threshold(predict,127,255,cv2.THRESH_BINARY)
# predict = cv2.imread(predictPath + filename)
# predict = predict / 255
predict[predict < 127] = 0
predict[predict >= 127] = 1
# ==============================================================================================================================================================================
y_scores = cv2.imread(predictPath + filename.replace('_manual.png', '_expert.png'), 0) # #####################################################################
y_scores = np.asarray(y_scores.flatten())/255.
y_scores = y_scores[:, np.newaxis]
# print(y_scores.shape)
y_true = cv2.imread(labelPath + filename, 0)
y_true = np.asarray(y_true.flatten())/255.
# fpr, tpr, thresholds = roc_curve((y_true), y_scores)
# AUC_ROC = roc_auc_score(y_true, y_scores)
# # test_integral = np.trapz(tpr,fpr) #trapz is numpy integration
# print ("\nArea under the ROC curve: " +str(AUC_ROC))
# roc_curve =plt.figure()
# plt.plot(fpr,tpr,'-',label='Area Under the Curve (AUC = %0.4f)' % AUC_ROC)
# plt.title('ROC curve')
# plt.xlabel("FPR (False Positive Rate)")
# plt.ylabel("TPR (True Positive Rate)")
# plt.legend(loc="lower right")
# plt.savefig(path_experiment+"ROC.png")
# precision, recall, thresholds = precision_recall_curve(y_true, y_scores)
# precision = np.fliplr([precision])[0] #so the array is increasing (you won't get negative AUC)
# recall = np.fliplr([recall])[0] #so the array is increasing (you won't get negative AUC)
# AUC_prec_rec = np.trapz(precision,recall)
# print ("\nArea under Precision-Recall curve: " +str(AUC_prec_rec))
# prec_rec_curve = plt.figure()
# plt.plot(recall,precision,'-',label='Area Under the Curve (AUC = %0.4f)' % AUC_prec_rec)
# plt.title('Precision - Recall curve')
# plt.xlabel("Recall")
# plt.ylabel("Precision")
# plt.legend(loc="lower right")
# plt.savefig(path_experiment+"Precision_recall.png")
# def best_f1_threshold(precision, recall, thresholds):
# best_f1=-1
# for index in range(len(precision)):
# curr_f1=2.*precision[index]*recall[index]/(precision[index]+recall[index])
# if best_f1<curr_f1:
# best_f1=curr_f1
# best_threshold=thresholds[index]
# return best_f1, best_threshold
# best_f1, best_threshold = best_f1_threshold(precision, recall, thresholds)
# print("\nthresholds: " + str(thresholds))
# print("\nbest_f1: " + str(best_f1))
# print("\nbest_threshold: " + str(best_threshold))
# Confusion matrix
threshold_confusion = 0.5
# print ("\nConfusion matrix: Custom threshold (for positive) of " +str(threshold_confusion))
y_pred = np.empty((y_scores.shape[0]))
# print(y_scores.shape[0])
# print(np.unique(y_pred))
for i in range(y_scores.shape[0]):
if y_scores[i] >= threshold_confusion:
y_pred[i] = 1
else:
y_pred[i] = 0
# print(np.unique(y_pred))
# print(np.unique(y_true))
confusion = confusion_matrix(y_true, y_pred)
# print (confusion)
accuracy = 0
if float(np.sum(confusion)) != 0:
accuracy = float(confusion[0, 0]+confusion[1, 1])/float(np.sum(confusion))
# print ("Global Accuracy: " +str(accuracy))
specificity = 0
if float(confusion[0, 0]+confusion[0, 1]) != 0: # 00 tn 11 tp 10 fn 01 fp
specificity = float(confusion[0, 0])/float(confusion[0, 0]+confusion[0, 1])
# print ("Specificity: " +str(specificity))
sensitivity = 0
if float(confusion[1, 1]+confusion[1, 0]) != 0:
sensitivity = float(confusion[1, 1])/float(confusion[1, 1]+confusion[1, 0])
# print ("Sensitivity: " +str(sensitivity))
precision = 0
if float(confusion[1, 1]+confusion[0, 1]) != 0:
precision = float(confusion[1, 1])/float(confusion[1, 1]+confusion[0, 1])
# print ("Precision: " +str(precision))
if float(confusion[1, 1]+confusion[0, 1]) != 0:
PPV = float(confusion[1, 1])/float(confusion[1, 1]+confusion[0, 1])
# print ("PPV: " +str(PPV))
# Jaccard similarity index
jaccard_index = jaccard_score(y_true, y_pred)
print("\nJaccard similarity score: " + str(jaccard_index))
# F1 score
F1_score = f1_score(y_true, y_pred, labels=None, average='binary', sample_weight=None)
# print ("\nF1 score (F-measure): " +str(F1_score))
# Save the results
# file_perf = open(path_experiment+'performances.txt', 'w')
# # file_perf.write("Area under the ROC curve: "+str(AUC_ROC)
# # + "\nArea under Precision-Recall curve: " +str(AUC_prec_rec)
# # + "\nJaccard similarity score: " +str(jaccard_index)
# # + "\nF1 score (F-measure): " +str(F1_score)
# # +"\n\nConfusion matrix:"
# # +str(confusion)
# # +"\nACCURACY: " +str(accuracy)
# # +"\nSENSITIVITY: " +str(sensitivity)
# # +"\nSPECIFICITY: " +str(specificity)
# # +"\nPRECISION: " +str(precision)
# # +"\nRECALL: " +str(sensitivity)
# # +"\nPPV: " +str(PPV)
# # +"\nbest_th: " +str(best_threshold)
# # +"\nbest_f1: " +str(best_f1)
# # )
# file_perf.write(
# "\nJaccard similarity score: " +str(jaccard_index)
# + "\nF1 score (F-measure): " +str(F1_score)
# +"\n\nConfusion matrix:"
# +str(confusion)
# +"\nACCURACY: " +str(accuracy)
# +"\nSENSITIVITY: " +str(sensitivity)
# +"\nSPECIFICITY: " +str(specificity)
# +"\nPRECISION: " +str(precision)
# +"\nRECALL: " +str(sensitivity)
# +"\nPPV: " +str(PPV)
# )
# file_perf.close()
# #==============================================================================================================================================================================
thinned_predict = thin(predict)
# cv2.imwrite(outpredictPath+filename,(thinned_predict*255).astype(np.uint8))
# predict[predict>=1] = 1
# dice = getDSC(predict, label)
# print("filename:" , filename , "dice:" , dice)
# dice_res = "the " + filename[:-4] + " image's DSC : " + str(round(dice,4)) + "\n"
DSC = getDSC(label, predict)
# surface_distances = surface_distance.compute_surface_distances(label, predict, spacing_mm=(1, 1, 1))
# HD = surface_distance.compute_robust_hausdorff(surface_distances, 95)
# distances_gt_to_pred = surface_distances["distances_gt_to_pred"]
# distances_pred_to_gt = surface_distances["distances_pred_to_gt"]
# surfel_areas_gt = surface_distances["surfel_areas_gt"]
# surfel_areas_pred = surface_distances["surfel_areas_pred"]
# ASSD = (np.sum(distances_pred_to_gt * surfel_areas_pred) +np.sum(distances_gt_to_pred * surfel_areas_gt))/(np.sum(surfel_areas_gt)+np.sum(surfel_areas_pred))
Jaccard = getJaccard(label, predict)
precision, recall = getPrecisionAndRecall(label, predict)
intersec = intersection(label, predict)
label = np.array(label, dtype=bool)
predict = np.array(predict, dtype=bool)
surface_distances = surface_distance.compute_surface_distances(label, predict, spacing_mm=(1, 1))
surface_distances_thin = surface_distance.compute_surface_distances(thinned_label, thinned_predict, spacing_mm=(1, 1))
HD = surface_distance.compute_robust_hausdorff(surface_distances, 95)
HD_thin = surface_distance.compute_robust_hausdorff(surface_distances_thin, 95)
surface_dice_2 = surface_distance.compute_surface_dice_at_tolerance(surface_distances, 2)
rel_overlap_gt, rel_overlap_pred = surface_distance.compute_surface_overlap_at_tolerance(surface_distances, 2)
surface_dice_1 = surface_distance.compute_surface_dice_at_tolerance(surface_distances, 1)
surface_dice_0 = surface_distance.compute_surface_dice_at_tolerance(surface_distances, 0)
surface_dice_3 = surface_distance.compute_surface_dice_at_tolerance(surface_distances, 3)
distances_gt_to_pred = surface_distances["distances_gt_to_pred"]
distances_pred_to_gt = surface_distances["distances_pred_to_gt"]
surfel_areas_gt = surface_distances["surfel_areas_gt"]
surfel_areas_pred = surface_distances["surfel_areas_pred"]
ASSD = (np.sum(distances_pred_to_gt * surfel_areas_pred) + np.sum(distances_gt_to_pred * surfel_areas_gt))/(np.sum(surfel_areas_gt)+np.sum(surfel_areas_pred))
distances_gt_to_pred_t = surface_distances_thin["distances_gt_to_pred"]
distances_pred_to_gt_t = surface_distances_thin["distances_pred_to_gt"]
surfel_areas_gt_t = surface_distances_thin["surfel_areas_gt"]
surfel_areas_pred_t = surface_distances_thin["surfel_areas_pred"]
ASSD_thin = (np.sum(distances_pred_to_gt_t * surfel_areas_pred_t) + np.sum(distances_gt_to_pred_t * surfel_areas_gt_t))/(np.sum(surfel_areas_gt_t)+np.sum(surfel_areas_pred_t))
# print(surface_overlap)
row_num += 1
sheet.write(row_num, 0, filename)
sheet.write(row_num, 1, DSC)
sheet.write(row_num, 2, precision)
sheet.write(row_num, 3, recall)
sheet.write(row_num, 4, HD)
sheet.write(row_num, 5, ASSD)
sheet.write(row_num, 6, surface_dice_0)
sheet.write(row_num, 7, rel_overlap_gt)
sheet.write(row_num, 8, rel_overlap_pred)
sheet.write(row_num, 9, intersec)
sheet.write(row_num, 10, HD_thin)
sheet.write(row_num, 11, ASSD_thin)
sheet.write(row_num, 12, surface_dice_1)
sheet.write(row_num, 13, surface_dice_2)
# sheet.write(row_num, 14, surface_dice_3)
sheet.write(row_num, 14, Jaccard)
sheet.write(row_num, 15, accuracy)
sheet.write(row_num, 16, specificity)
sheet.write(row_num, 17, sensitivity)
Q1.append(DSC)
Q2.append(precision)
Q3.append(recall)
Q4.append(HD)
Q5.append(ASSD)
Q6.append(surface_dice_0)
Q7.append(rel_overlap_gt)
Q8.append(rel_overlap_pred)
Q9.append(intersec)
Q10.append(HD_thin)
Q11.append(ASSD_thin)
Q12.append(surface_dice_1)
Q13.append(surface_dice_2)
# Q14.append(surface_dice_3)
Q14.append(Jaccard)
Q15.append(accuracy)
Q16.append(specificity)
Q17.append(sensitivity)
Q1 = np.array(Q1)
Q2 = np.array(Q2)
Q3 = np.array(Q3)
Q4 = np.array(Q4)
Q5 = np.array(Q5)
Q6 = np.array(Q6)
Q7 = np.array(Q7)
Q8 = np.array(Q8)
Q9 = np.array(Q9)
Q10 = np.array(Q10)
Q11 = np.array(Q11)
Q12 = np.array(Q12)
Q13 = np.array(Q13)
Q14 = np.array(Q14)
Q15 = np.array(Q15)
Q16 = np.array(Q16)
Q17 = np.array(Q17)
row_num += 2
sheet.write(row_num, 0, 'CaseName')
sheet.write(row_num, 1, 'DSC')
sheet.write(row_num, 2, 'Pre')
sheet.write(row_num, 3, 'Recall')
sheet.write(row_num, 4, 'HD')
sheet.write(row_num, 5, 'ASSD')
sheet.write(row_num, 6, 'surface_dice_0')
sheet.write(row_num, 7, 'rel_overlap_gt')
sheet.write(row_num, 8, 'rel_overlap_pred')
sheet.write(row_num, 9, 'intersec')
sheet.write(row_num, 10, 'HD_thin')
sheet.write(row_num, 11, 'ASSD_thin')
sheet.write(row_num, 12, 'surface_dice_1')
sheet.write(row_num, 13, 'surface_dice_2')
sheet.write(row_num, 14, 'Jaccard')
sheet.write(row_num, 15, 'accuracy')
sheet.write(row_num, 16, 'specificity')
sheet.write(row_num, 17, 'sensitivity')
row_num += 1
sheet.write(row_num, 0, predictName)
sheet.write(row_num, 1, Q1.mean())
sheet.write(row_num, 2, Q2.mean())
sheet.write(row_num, 3, Q3.mean())
sheet.write(row_num, 4, Q4.mean())
sheet.write(row_num, 5, Q5.mean())
sheet.write(row_num, 6, Q6.mean())
sheet.write(row_num, 7, Q7.mean())
sheet.write(row_num, 8, Q8.mean())
sheet.write(row_num, 9, Q9.mean())
sheet.write(row_num, 10, Q10.mean())
sheet.write(row_num, 11, Q11.mean())
sheet.write(row_num, 12, Q12.mean())
sheet.write(row_num, 13, Q13.mean())
sheet.write(row_num, 14, Q14.mean())
sheet.write(row_num, 15, Q15.mean())
sheet.write(row_num, 16, Q16.mean())
sheet.write(row_num, 17, Q17.mean())
book.save('./smp/' + predictName + '.xls')
| [
"11712616@mail.sustech.edu.cn"
] | 11712616@mail.sustech.edu.cn |
9090a6049a51ef8672151f75d28c7f01c75a1436 | fc3deae46d7104924d9b982638f38eb42eadbb9f | /yrnetwork/setting.py | 0fd31990a155271ff3f6b9b4c0a28ee4958148c9 | [] | no_license | THRILLERLEMON/YR_Greening_Network | 6fc578d4a94bf240b954181d9ea8d1cf18fb172d | 8745679d58c3e459d88524e0afae18e072b18e0a | refs/heads/main | 2023-03-19T00:08:28.187235 | 2021-03-05T13:21:53 | 2021-03-05T13:21:53 | 303,892,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | class BaseConfig(object):
    # Root for generic results (empty placeholder — presumably set per run).
    RESULT_PATH = ''
    # Land-use-change (LUC) network inputs: files named
    # <PATH><HEAD><suffix><TAIL> — TODO confirm the suffix scheme against callers.
    LUC_NET_DATA_PATH = 'D://OneDrive//JustDo//The_Greening_of_YR_from_the_Perspective_of_Network//Data//LUCNetData//'
    LUC_NET_DATA_HEAD = 'InfoForLUCNet'
    LUC_NET_DATA_TAIL = '.csv'
    # GeoAgent geometry inputs prepared for cartopy plotting.
    GEO_AGENT_PATH = 'E://MY PROGRAM//YR_Greening_Network//data//GeoAgent//ForCartopy//'
    # Coupled-network (LAI causal analysis) inputs, same <HEAD><suffix><TAIL> pattern.
    COUPLED_NET_DATA_PATH = 'E://MY PROGRAM//YR_Greening_Network//data//Data_for_LAI_Causal//'
    COUPLED_NET_DATA_HEAD = 'YR_GA_Info_'
    COUPLED_NET_DATA_TAIL = '.csv'
    # Directory where causal-analysis outputs are written.
    OUT_PATH = 'D://OneDrive//JustDo//The_Greening_of_YR_from_the_Perspective_of_Network//OutPutforLAICausal//'
    # Sentinel marking missing / no-data values.
    BACKGROUND_VALUE = -999
"thrillerlemon@outlook.com"
] | thrillerlemon@outlook.com |
58a25e67f1a25a87bcf69394078f2dff07c063d3 | 274a72fffdeea616d65e1ca6c343a948325b12c1 | /bot.py | 6e3b160bb2bea21e1f41575a9d32fbfa7f4cf744 | [] | no_license | ideasincrypto/moderatorBot | 9091b224039f560b9bea94d1abec736a6b1b2720 | 9e8d12cfa3b7cfb05a64aa6a80eb5ef416167319 | refs/heads/main | 2023-05-15T10:23:59.363963 | 2021-06-17T07:21:35 | 2021-06-17T07:21:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,616 | py | import time
import config
import logging
import bot_helpers
from aiogram import Bot, Dispatcher, executor, types
from filters import IsAdminFilter
from sqlite import SQLight
# log level
logging.basicConfig(level=logging.INFO)
# bot init
bot = Bot(token=config.TOKEN)
dp = Dispatcher(bot)
db = SQLight("db.db")
# activate filters
dp.filters_factory.bind(IsAdminFilter)
# replace a default message when user joined and add him in the DB
@dp.message_handler(content_types=["new_chat_members"])
async def on_user_joined(message: types.Message):
    """Greet every newly joined member and register them in the DB.

    BUGFIX: the original greeted/registered ``message.from_user``, which is
    the *adder* (e.g. an admin) when a member is added by someone else, and
    handled only one user per service message. Use the actual new members.
    """
    for member in message.new_chat_members:
        await message.bot.send_message(
            config.GROUP_ID,
            f"👋😊 Welcome *{member.first_name}* to our group",
            "MarkdownV2",
        )
        # New members start with karma 0.
        if not db.user_exists(member.id):
            db.add_user(member.id, 0)
# remove an user from the DB
@dp.message_handler(content_types=["left_chat_member"])
async def on_user_left(message: types.Message):
    """Say goodbye to the departed member and remove them from the DB.

    BUGFIX: the original read ``message.from_user``, which is the *remover*
    when a member is kicked by someone else; use ``left_chat_member``.
    """
    member = message.left_chat_member
    db.delete_user(member.id)
    await message.bot.send_message(
        config.GROUP_ID,
        f"🥺 Goodbuy *{member.first_name}*",
        "MarkdownV2",
    )
# ban command (admins only)
@dp.message_handler(is_admin=True, commands=["kick"], commands_prefix="!/")
async def user_ban(message: types.Message):
    """Admin-only /kick: ban the author of the replied-to message.

    Must be used as a reply; the command message itself is deleted.
    Note: ``kick_chat_member`` performs a ban, so /reborn is needed to let
    the user rejoin.
    """
    # Guard: the command only makes sense as a reply to the offender.
    if not message.reply_to_message:
        await message.reply("⚠️ This command must be the answer on some message")
        return
    # Remove the admin's command message from the chat.
    await message.bot.delete_message(chat_id=config.GROUP_ID, message_id=message.message_id)
    await message.bot.kick_chat_member(
        chat_id=config.GROUP_ID,
        user_id=message.reply_to_message.from_user.id,
    )
    await message.reply_to_message.reply("😈 User is kicked")
# unban user (admins only)
@dp.message_handler(is_admin=True, commands=["reborn"], commands_prefix="!/")
async def user_unban(message: types.Message):
    """Admin-only /reborn: unban the author of the replied-to message.

    BUGFIX: the original passed a misspelled ``chait_id`` kwarg and the whole
    ``Message`` object as ``user_id``, so the API call could never succeed;
    it also crashed when the command was not a reply. Mirror the guard used
    by /kick and unban the replied-to user.
    """
    if not message.reply_to_message:
        await message.reply("⚠️ This command must be the answer on some message")
        return
    await message.bot.unban_chat_member(
        chat_id=config.GROUP_ID,
        user_id=message.reply_to_message.from_user.id,
        only_if_banned=True
    )
    await message.reply_to_message.reply("😇 User is reborned")
# show user's status
@dp.message_handler(is_admin=True, commands=["status"], commands_prefix="!/")
async def show_user_karma(message: types.Message):
    """Admin-only /status: post the caller's karma-based status.

    When karma hits exactly -42, a group poll is started to decide whether
    the user should be kicked.
    """
    user_karma = db.get_user_karma(message.from_user.id)
    user_status = bot_helpers.get_status_by_karma(user_karma)
    await message.bot.send_message(
        config.GROUP_ID,
        f"📜 Your status: {str(user_status)}"
    )
    # Magic threshold: -42 karma triggers the kicking poll.
    if int(user_karma) == -42:
        await send_kicking_poll(
            config.GROUP_ID,
            message.from_user.first_name,
            message.from_user.id,
        )
# send a poll about user kicking
async def send_kicking_poll(*args):
    """Run a timed group poll on kicking a user and act on the result.

    args: ``(chat_id, user_name, user_id)``.

    BUGFIXES:
    - ``time.sleep`` inside a coroutine blocked the whole event loop for the
      poll duration; replaced with ``await asyncio.sleep``.
    - the three result messages called ``bot.send_message`` without the
      mandatory ``chat_id`` argument, so they raised instead of sending.
    """
    import asyncio  # local import keeps this fix self-contained
    [chat_id, user_name, user_id] = args
    delay = 10
    response = await bot.send_poll(
        chat_id=chat_id,
        question=f"Delete {user_name} from the group ❓",
        options=["😒 Yes, we don't need them", "😇 No, he's one of us"],
        is_anonymous=True,
        type="regular",
        open_period=delay,
    )
    # Wait (without blocking the loop) until just before the poll closes.
    await asyncio.sleep(delay - 1)
    poll_results = await bot.stop_poll(
        chat_id=chat_id,
        message_id=response.message_id,
    )
    [agree, disagree] = poll_results.options
    if agree.voter_count > disagree.voter_count:
        try:
            await bot.kick_chat_member(
                chat_id=config.GROUP_ID,
                user_id=user_id,
            )
            await bot.send_message(chat_id, "😈 User is kicked")
        except Exception:
            # Kicking fails e.g. when the target is an admin.
            await bot.send_message(chat_id, "The deletion failed. Isn't this the admin?")
    else:
        await bot.send_message(chat_id, "🤞 This time he was lucky")
# secret method (admins only)
@dp.message_handler(is_admin=True, commands=["secret"], commands_prefix="!/")
async def secret_method(message: types.Message):
    """Admin-only /secret <value>: compare against config.SECRET.

    The command message is always deleted so the secret never stays visible.
    On mismatch, the attempt is echoed masked with asterisks.
    """
    user_secret = bot_helpers.message_without_command('/secret', message.text)
    # don't show secret in the chat
    await message.delete()
    if user_secret != config.SECRET:
        # Mask the wrong attempt (reveals only its length).
        fake_secret = "*" * len(user_secret)
        await message.bot.send_message(
            config.GROUP_ID,
            f"❌ Wrong secret: {fake_secret}",
        )
        return
    # NOTE(review): ``message.chat.first_name`` is the chat's name, not the
    # sender's — confirm whether ``message.from_user.first_name`` was meant.
    await message.bot.send_message(
        config.GROUP_ID,
        f"✨ Secret mode is activated for {message.chat.first_name}",
    )
# delete messages with forbidden words and decrease user's karma
@dp.message_handler()
async def filter_messages(message: types.Message):
    """Catch-all handler: delete messages containing forbidden words and
    lower the author's karma."""
    if bot_helpers.has_forbidden_word(message.text):
        first_name = message.from_user.first_name
        db.decrease_user_karma(message.from_user.id)
        await message.delete()
        await message.bot.send_message(
            config.GROUP_ID,
            f"👎 Inappropriate language\. Karma for *{first_name}* lowered",
            "MarkdownV2",
        )
# run long-polling
if __name__ == '__main__':
    # Start long-polling; skip_updates drops the backlog accumulated offline.
    executor.start_polling(dp, skip_updates=True)
"cent1pede@protonmail.ch"
] | cent1pede@protonmail.ch |
57209408cf256d9887b2a70f4019707fbfca8030 | bf78c33be28fcd1b33cf5e3eab1e6923607bca8d | /fullversion/pressure_system/pressure_system_sensor/CurveDetection.py | 97fd783b5d1f3e37f78854af70f9431191838ec5 | [] | no_license | marcosase/E-MA-docker | 4531c973bb96ab588a039198cf5a55c5b50f6da1 | 5ce56f7ac79df0b0fcfd957b2750840b7261f973 | refs/heads/master | 2020-05-27T02:46:55.258051 | 2019-05-24T00:23:41 | 2019-05-24T00:23:41 | 188,456,200 | 1 | 0 | null | 2019-05-24T16:39:10 | 2019-05-24T16:39:10 | null | UTF-8 | Python | false | false | 15,343 | py | '''
Created on Jul 2, 2018
@author: rodrigo.guercio
'''
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''Peak detection algorithms.'''
import numpy as np
from scipy import optimize
import math
from lmfit.models import LorentzianModel
import matplotlib.pyplot as plt
import pylab
def indexes(input_array, axis_x, thres = 0.01, error_fit = 0.5, deltaMin_nm = 1.0):
    '''Peak detection routine for ruby fluorescence spectra.

    Finds candidate peaks via the first-order difference, refines each with
    a Lorentzian fit, and — when two peaks are resolved — estimates the
    temperature from their intensity ratio.

    Parameters
    ----------
    input_array : ndarray
        1D amplitude data to search for peaks.
    axis_x : ndarray
        Wavelength axis (nm) matching ``input_array``.
    thres : float between [0., 1.]
        Normalized threshold; only peaks above it are considered.
    error_fit : float
        Maximum summed standard error accepted from the Lorentzian fit.
    deltaMin_nm : float
        Minimum separation (nm) for two peaks to count as distinct.

    Returns
    -------
    ndarray or None
        ``[right_peak_nm, left_peak_nm, temperature]`` with -1 for missing
        entries, or None when nothing usable is found or an error occurs.
    '''
    try:
        # Restrict to the ruby R-line window (~0-80 GPa per the original comment).
        window = np.where((axis_x > 689) & (axis_x < 720))
        ''' Normalizing input array '''
        y = normalizeY(input_array[window])
        x = axis_x[window]
        # First-order difference: a +/- sign change marks a local maximum.
        dy = np.diff(y)
        p = 0.00
        peaks = np.where((np.hstack([dy, p]) < p)
                 & (np.hstack([p, dy]) > p)
                 & (y > thres))[0]
        # Minimum distance (in samples) required between distinct curves.
        min_dist = set_min_dist(x, delta_nm = deltaMin_nm)
        if peaks.size > 1 and min_dist > 1:
            ''' Filter peaks according to minimum distance between them '''
            majors = filterPeaks(y, peaks, min_dist)
            if majors.size > 1:
                # np.round_ was removed in NumPy 2.0; np.round is equivalent.
                x_for_max_y = np.round(x[majors[0]], decimals = 2)  # nm
                [center_right, I2] = lorentzian_fit(x, y, majors[0], error = error_fit, range_sample = min_dist)
                [center_left, I1] = lorentzian_fit(x, y, majors[1], error = error_fit, range_sample = min_dist)
                if center_left is not None and center_right is not None:
                    center_left = np.round(center_left, decimals = 2)   # nm
                    center_right = np.round(center_right, decimals = 2) # nm
                    # Rightmost peak must be to the right and close enough.
                    if ((center_right > center_left) and (center_right - center_left) < 5 * deltaMin_nm):
                        temp = temperatureCalculate(I1, I2)
                        print('Int. peaks:', I1, I2, temp)
                        return np.array([center_right, center_left, temp])
                    else:
                        return np.array([x_for_max_y, -1, -1])
                else:
                    return np.array([x_for_max_y, -1, -1])
            elif majors.size == 1:
                # BUGFIX: the original assigned the (center, intensity) tuple
                # returned by lorentzian_fit to a single name and referenced
                # x_for_max_y, which was never bound in this branch — both
                # errors were silently masked by the bare except below.
                x_for_max_y = np.round(x[majors[0]], decimals = 2)  # nm
                [center_right, _intensity] = lorentzian_fit(x, y, majors[0], error = error_fit, range_sample = min_dist)
                if center_right is not None:
                    center_right = np.round(center_right, decimals = 2) # nm
                    return np.array([center_right, -1, -1])
                else:
                    return np.array([x_for_max_y, -1, -1])
            else:
                'If there no peak'
                return None
        else:
            'There is no peak'
            return None
    except Exception:
        # Keep the original best-effort contract: any failure -> no peak.
        return None
def temperatureCalculate(intensity_peak1, intensity_peak2):
    '''Estimate temperature (K) from the ruby line intensity ratio.

    Computes T = (-3.55 / k_B[meV/K]) / ln(I1 / (0.65 * I2)) rounded to one
    decimal. Returns 300 (room temperature fallback) when the ratio is
    invalid — zero/negative intensities, a unit ratio, or non-numeric input.
    '''
    try:
        intensity_peak1 = float(intensity_peak1)
        intensity_peak2 = float(intensity_peak2)
        temp = ((-3.55/0.08617343))/(math.log(intensity_peak1/(0.65*intensity_peak2)))
        temp = round(temp,1)
        return temp
    except (ValueError, TypeError, ZeroDivisionError, OverflowError):
        # BUGFIX: was a bare ``except`` that also swallowed KeyboardInterrupt
        # and SystemExit; only numeric/conversion failures mean "no reading".
        return 300
def filterPeaks(y,peaks,min_dist):
    ''' Filter peaks according to minimum distance between them.

    Keeps, for each cluster of candidate indexes closer than *min_dist*
    samples, only the one with the highest amplitude; then reorders the
    survivors by descending amplitude and forces the second entry to be the
    leftmost survivor left of the strongest peak (so majors[0] is the
    strongest and majors[1] its left-hand partner when one exists).
    '''
    # Candidates sorted by descending amplitude.
    highest = peaks[np.argsort(y[peaks])][::-1]
    # rem[i] == True means index i is removed; start with only peaks kept.
    rem = np.ones(y.size, dtype=bool)
    rem[peaks] = False
    for peak in highest:
        if not rem[peak]:
            # Suppress every neighbor within min_dist, then re-keep the peak itself.
            sl = slice(max(0, peak - min_dist), peak + min_dist + 1)
            rem[sl] = True
            rem[peak] = False
    peaks = np.arange(y.size)[~rem]
    majors = peaks[np.argsort(y[peaks])][::-1]
    if majors.size > 1:
        minors = 0
        # Survivors located left of the strongest peak.
        minors = np.where(majors < majors[0])[0]
        #inors = np.where(majors < 1)[0]
        if minors.size > 0:
            majors[1] = majors[minors[0]]
    return majors
def getTwoMajorPeaks(majors):
    ''' Return the two strongest peaks from *majors*.

    Gives the first two entries, the single entry when only one peak
    exists, or None for an empty array.
    '''
    count = majors.size
    if count == 0:
        return None
    return majors[0] if count == 1 else majors[0:2]
def transformIndtoNM(peak, x):
    '''Convert sample index *peak* to its wavelength in nm, rounded to
    two decimals.

    Uses ``np.round``: the ``np.round_`` alias used originally was
    deprecated and removed in NumPy 2.0.
    '''
    peak_nm = np.round(x[peak], decimals=2)
    return peak_nm
def normalizeY(y):
    '''Min-max normalize *y* to the [0, 1] range.

    Vectorized replacement for the original per-element Python loop;
    results are identical (including the divide-by-zero semantics for a
    constant input) but it runs as a single NumPy operation.
    '''
    y = np.asarray(y, dtype=float)
    y_min = np.min(y)
    deltaY = np.max(y) - y_min
    return (y - y_min) / deltaY
def desNormalizePointY(y, z):
    ''' Map a normalized value *z* back onto the original scale of *y*. '''
    lower = np.min(y)
    span = np.max(y) - lower
    return z * span + lower
def set_min_dist(x, delta_nm = 10):
    ''' Minimum distance, in samples, needed to tell two Lorentzian curves
    apart: the sampling density of *x* (samples per nm) times the required
    separation *delta_nm*, truncated to an int. '''
    n_samples = np.size(x)
    span_nm = np.max(x) - np.min(x)
    samples_per_nm = n_samples / span_nm
    return int(samples_per_nm * delta_nm)
def gaussian(x, ampl, center, dev):
    '''Evaluate a Gaussian at *x*.

    Parameters
    ----------
    x : float
        Point to evaluate at.
    ampl : float
        Amplitude (value at the center).
    center : float
        Mean / peak position.
    dev : float
        Standard deviation (width).

    Returns
    -------
    float
        Value of the Gaussian at *x*.
    '''
    exponent = -((x - center) ** 2) / (2 * dev ** 2)
    return ampl * np.exp(exponent)
def lorentzian(x, ampl, center, w):
    '''Lorentzian line shape: amplitude *ampl*, peak position *center*,
    width parameter *w*, evaluated at *x*.'''
    numerator = w / (2.0 * np.pi)
    denominator = (x - center) ** 2 + w ** 2 / 4.0
    return ampl * numerator / denominator
def gaussian_fit(x, y):
    '''Fit a Gaussian to the (x, y) data and return the fitted center.

    The initial guess is: amplitude = max(y), center = first x value,
    width = five sample spacings.

    Parameters
    ----------
    x : ndarray
        Data on the x axis.
    y : ndarray
        Data on the y axis.

    Returns
    -------
    float
        Fitted center of the Gaussian.
    '''
    guess = [np.max(y), x[0], (x[1] - x[0]) * 5]
    popt, _pcov = optimize.curve_fit(gaussian, x, y, guess)
    return popt[1]
def lorentzian_fit(x, y,peak, error = 0.10,range_sample = 10):
    '''Fit a Lorentzian around the candidate *peak* index.

    Fits only a window of range_sample samples centered on the peak and
    accepts the fit when the summed parameter standard errors stay below
    *error*. Returns ``(center_nm, intensity)`` where intensity is
    0.637 * amplitude / width (0.637 ~ 2/pi — presumably the peak-height
    factor; TODO confirm), or ``(None, None)`` on rejection or failure.
    '''
    try:
        ''' Range of signal to be fitted '''
        # Half-window in samples on each side of the peak.
        range = round(range_sample/2)
        ''' Selecting a small range of signal '''
        #initial = [np.max(y), x[peak], (x[peak+1] - x[peak]) * 5]
        # Initial guess: amplitude at the peak, its x position, 5 spacings wide.
        initial = [y[peak], x[peak], (x[peak+1] - x[peak]) * 5]
        x_compact = x[peak - range:peak + range]
        y_compact = y[peak - range:peak + range]
        params, pcov = optimize.curve_fit(lorentzian, x_compact, y_compact,initial)
        if (params is not None) and (pcov is not None):
            # Standard errors of the fitted parameters.
            perr = np.sqrt(np.diag(pcov))
            #print('Error:', np.sum(perr))
            #print('Params :',params)
            if np.sum(perr) < error:
                return float(params[1]),float(0.637*params[0]/params[2])
            else:
                return None,None
    except:
        return None,None
    #return x[peak],y[peak]
#return x[peak],y[peak]
def lorentzian_fit2(x, y,peak):
    '''Debug/visualization variant of lorentzian_fit: fits a fixed 10-sample
    window around *peak*, plots data, window, fit and center markers, prints
    the fit diagnostics, and returns the fitted center (nm). Blocks on
    pylab.show().'''
    #initial = [np.max(y), x[i], (x[i+1] - x[i]) * 5]
    initial = [np.max(y), x[peak], (x[peak+1] - x[peak]) * 5]
    plt.plot(x,y)
    wl = x[peak]
    # Fixed +-5 sample window (unlike lorentzian_fit's range_sample parameter).
    x_compact = x[peak - 5:peak + 5]
    y_compact = y[peak - 5:peak + 5]
    plt.plot(x_compact,y_compact)
    params, pcov = optimize.curve_fit(lorentzian, x_compact, y_compact,initial)
    # Vertical markers: raw peak position vs fitted center.
    plt.axvline(x = wl)
    plt.axvline(x = params[1])
    am = desNormalizePointY(y, np.pi/2.0)
    fit = lorentzian(x = x_compact, ampl = params[0], center = params[1], w = params[2])
    #inter = interpolate(x,y,ind = peak)
    #print(inter)
    #fit = normalizeY(fit)
    print('max value of y', np.max(y_compact))
    plt.plot(x_compact, fit,'r--' )
    print("params: ", params)
    print("pcov", pcov)
    perr = np.sqrt(np.diag(pcov))
    print('standard deviation errors', perr)
    print('Interpolate: ',params[1])
    pylab.show()
    print('Real',wl )
    return float(params[1])
def interpolate(x, y, ind=None, width=20, func=lorentzian_fit):
    '''Tries to enhance the resolution of the peak detection by using
    Gaussian fitting, centroid computation or an arbitrary function on the
    neighborhood of each previously detected peak index.

    NOTE(review): the default ``func=lorentzian_fit`` takes (x, y, peak)
    but is called below with only (x, y), so with the default every slice
    raises and only the error branch runs — confirm whether gaussian_fit
    (which takes (x, y)) was intended as the default.

    Parameters
    ----------
    x : ndarray
        Data on the x dimension.
    y : ndarray
        Data on the y dimension.
    ind : ndarray
        Indexes of the previously detected peaks. If None, indexes() will be
        called with the default parameters.
    width : int ==> Window
        Number of points (before and after) each peak index to pass to *func*
        in order to encrease the resolution in *x*.
    func : function(x,y)
        Function that will be called to detect an unique peak in the x,y data.

    Returns
    -------
    ndarray :
        Array with the adjusted peak positions (in *x*)
    '''
    print("Entrou! 1")
    if ind is None:
        ind = indexes(y)
    print("Entrou! 2")
    out = []
    print("Entrou! 3")
    # One +-width slice around each detected peak index.
    for slice_ in (slice(i - width, i + width) for i in ind):
        print("Entrou! 4")
        try:
            fit = func(x[slice_], y[slice_])
            print(fit)
            out.append(fit)
        except Exception:
            #pass
            print("ERROR ON SLICE FIT LAUT")
    print('SAiu do laco')
    print(np.array(out))
    return np.array(out)
def get_index_from_values(vector, values):
    """ returns the index of values in the vector (nearest match for each
    value, by absolute difference) """
    nearest = [int(np.argmin(np.abs(v - vector))) for v in values]
    return np.array(nearest)
def lorentzianFunctionGenerator(x1, r1, x2, r2, x3, r3, n):
    '''Generate a noisy synthetic spectrum over 650-750 nm.

    The spectrum is the sum of three Lorentzians centered at x1/x2/x3 with
    widths r1/r2/r3 (amplitudes 200, 200 and 300) plus Gaussian noise with
    a fixed seed (1729), sampled at *n* points. Returns ``[x, y]``.

    Vectorized: the original filled each denominator element-by-element in
    a Python loop; the NumPy expressions below compute the same values.
    '''
    x = np.linspace(start=650, stop=750, num=n)
    np.random.seed(1729)
    y_noise = 0.2 * np.random.normal(size=x.size)
    y1 = (200 * (1 / 3.14) * (0.5 * r1)) / ((x - x1) ** 2 + (0.5 * r1) ** 2)
    y2 = (200 * (1 / 3.14) * (0.5 * r2)) / ((x - x2) ** 2 + (0.5 * r2) ** 2)
    y3 = (300 * (1 / 3.14) * (0.5 * r3)) / ((x - x3) ** 2 + (0.5 * r3) ** 2)
    y = y1 + y2 + y3 + y_noise
    return [x, y]
def params_Lorentzian(x,y):
    '''Fit a single lmfit LorentzianModel to (x, y), print the fit report,
    and plot data, initial guess and best fit on figure 2 (side effects
    only; nothing is returned).'''
    mod = LorentzianModel()
    # Automatic initial parameter guess from the data.
    params = mod.guess(y,x)
    print (params)
    out = mod.fit(y, params, x=x)
    print(out.fit_report(min_correl=0.3))
    init = mod.eval(params, x=x)
    plt.figure(2)
    plt.plot(x, y, 'b')
    plt.plot(x, init, 'k--')
    plt.plot(x, out.best_fit, 'r-')
def mult_params_peaks_Lorentzian(x,y):
    '''Fit the sum of three lmfit LorentzianModels (prefixes l1_/l2_/l3_)
    to (x, y), print the report and plot data/initial/best fit (side
    effects only; nothing is returned).'''
    #http://cars9.uchicago.edu/software/python/lmfit/builtin_models.html
    loren_mod1 = LorentzianModel(prefix='l1_')
    # Only the first component is guessed from the data; the others start
    # from default parameters.
    pars = loren_mod1.guess(y,x)
    loren_mod2 = LorentzianModel(prefix='l2_')
    pars.update(loren_mod2.make_params())
    loren_mod3 = LorentzianModel(prefix='l3_')
    pars.update(loren_mod3.make_params())
    mod = loren_mod1 + loren_mod2 + loren_mod3
    init = mod.eval(pars, x=x)
    out = mod.fit(y, pars, x=x)
    print(out.fit_report(min_correl=0.5))
    # Component plotting is disabled by default.
    plot_components = False
    plt.plot(x, y, 'b')
    plt.plot(x, init, 'k--')
    plt.plot(x, out.best_fit, 'r-')
    if plot_components:
        comps = out.eval_components(x=x)
        plt.plot(x, comps['l1_'], 'b--')
        plt.plot(x, comps['l2_'], 'b--')
        plt.plot(x, comps['l3_'], 'b--')
if __name__ == '__main__':
pass
#PATH = '/home/ABTLUS/rodrigo.guercio/Pictures/3test/GearBox/goldenPressure/subidarubi/'
#PATH ='/home/ABTLUS/rodrigo.guercio/Downloads/'
PATH = '/home/ABTLUS/rodrigo.guercio/Pictures/barbara/'
#PATH = '/home/ABTLUS/rodrigo.guercio/Pictures/Ruby_kousik/'
#name = 'au_002_21p85_GPa_d_n016.txt'
#name = 'au_002_6p36_GPa_d_n000.txt'
#name = 'lab6_17p5GPa_6p67K_n000.txt'
#name = 'GdPtBi_0p95GPa_300K_n000.txt'
name = 'f_10_r_11_n001.txt'
#ame = 'f_80_r_80_n002.txt'
#name = 'Ruby_8p69GPa_14K_n005.txt'
#name = 'Ruby_29p82GPa_14K_n016.txt'
#name = 'Ruby_52p30GPa_14K_n020.txt'
#name = 'Ruby_17K_n001.txt'
#name = 'Ruby_55p30GPa_14K_Problem_n021.txt'
name = 'au_002_25p63_GPa_d_n020.txt'
[x,y] = np.loadtxt(fname = PATH+name, delimiter = '\t', skiprows = 0, unpack = True, ndmin = 0)
plt.figure(1)
y = normalizeY(y)
plt.plot(x,y,'r--')
wavelength = indexes(y,x)
if wavelength is not None:
print(wavelength)
#print(wavelength[2])
else:
print(wavelength)
#print(temperatureCalculate(0.01,1))
#print(temperatureCalculate(0.03,1))
#print(temperatureCalculate(0.2,1))
#print(temperatureCalculate(0.3,1))
#print(temperatureCalculate(0.44,1))
y[y<0.01*np.max(y)] = 0
plt.plot(x,y,'*b')
pylab.show()
'''
import random
for i in range(1,2):
a = random.uniform(1,20)
b = random.uniform(-10,10)
c = random.uniform(-5,10)GdPtBi_0p95GPa_300K_n000
x_1 = 680 + a
r_1 = 10 + a
x_2 = 700 + b
r_2 = 10 + b
x_3 = 720 + c
r_3 = 20 + c
samples = int(2048 + random.uniform(-500,500))
[x,y] = lorentzianFunctionGenerator(x1 = x_1, r1 = r_1 , x2 = x_2, r2 = r_2, x3 = x_3 , r3 = r_3,n = samples)
plt.figure(i)
plt.plot(x,y,'r--')
z = normalizeY(y)
plt.plot(x,z,'b--')
peaks = indexes(y, x, thres=0.3)
print(peaks)
print(x[peaks])
for p in x[peaks]:
plt.axvline(x = p)
print('Peaks %d',i)
print('Real: %d %d %d',x_1,x_2,x_3)
print('Samples %d',samples)
print(x[peaks])
print(y[peaks])
print('------')
pylab.show()
''' | [
"root@LNLS55-linux.abtlus.org.br"
] | root@LNLS55-linux.abtlus.org.br |
601c6109a398a6044f0de960eb68847c99317d8b | b65cd9500e73e51459ce426dd7702d82cee405ba | /ANN/artificial_neural_network.py | a5e7c148dd445479dfd1f243bef73358ebc95213 | [] | no_license | ashish-atkar/ML_Algorithms | b335f9fbb04bbb093c76222c9d18fb1340ec9835 | 647cc7bb17ae201ce3cdaeb1c644b6b5a277a106 | refs/heads/master | 2022-10-08T10:11:08.101779 | 2020-06-09T19:15:27 | 2020-06-09T19:15:27 | 270,686,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py |
#Part 1: Data importing and preprocessing
import pandas as pd
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:,3:13].values
Y = dataset.iloc[:,13].values
#Encoding Categorical data
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
labelencoder_X_1 = LabelEncoder()
X[:,1] = labelencoder_X_1.fit_transform(X[:,1])
labelencoder_X_2 = LabelEncoder()
X[:,2] = labelencoder_X_2.fit_transform(X[:,2])
onehotencoder = OneHotEncoder(categorical_features = [1])
X = onehotencoder.fit_transform(X).toarray()
#To avoid dummy variable trap
X = X[: , 1:]
#splitting the dataset into training set and test set
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X,Y,test_size=0.2,random_state=0)
#Featue Scalling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.fit_transform(X_test)
#Part 2: Now lets make the ANN
import keras
from keras.models import Sequential
from keras.layers import Dense
#Initializing the ANN
classifier = Sequential()
#Adding the input layer and first hidden layer
classifier.add(Dense(activation="relu", input_dim=11, units=6, kernel_initializer="uniform"))
#adding the second hidden layer
classifier.add(Dense(activation = 'relu', units=6, kernel_initializer="uniform"))
#adding the output layer
classifier.add(Dense(activation = 'sigmoid', units=1, kernel_initializer="uniform" ))
#compiling the ANN
classifier.compile(optimizer='adam',loss='binary_crossentropy', metrics= ['accuracy'])
#Fitting the ANN to training set
classifier.fit(X_train, Y_train, batch_size=10 ,epochs=100)
#Part 3: Making the prediction and evaluating the model
Y_pred = classifier.predict(X_test)
Y_pred = (Y_pred>0.5)
#Making the confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(Y_test,Y_pred)
print(cm)
#finding accuracy
accuracy= ((cm[0][0]+cm[1][1])/2000)*100
print(accuracy)
| [
"ashish.atkar12@gmail.com"
] | ashish.atkar12@gmail.com |
287b2dea5d50e568064505e8ecdad813d1967f06 | e966e08e69df8f6669034c1d8a2ed57293a48ef7 | /www/main.py | a8c620ef841d4f5469289bfa7a8cbc2b5c224f3a | [] | no_license | adrianPerez/notify-io | c9d06f5fb2a40d25a9399bb72319225e60ffa142 | 20eeafa5edfe2455d4b154733283aa8ce2969dbb | refs/heads/master | 2021-01-18T12:14:50.622242 | 2009-11-12T06:13:36 | 2009-11-12T06:13:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,257 | py | import wsgiref.handlers
import hashlib, time, os
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.api import urlfetch
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import login_required
from django.utils import simplejson
try:
is_dev = os.environ['SERVER_SOFTWARE'].startswith('Dev')
except:
is_dev = False
API_VERSION = 'v1'
if is_dev:
API_HOST = 'localhost:8191'
WWW_HOST = 'localhost:8091'
else:
API_HOST = 'api.notify.io'
WWW_HOST = 'www.notify.io'
def baseN(num,b,numerals="0123456789abcdefghijklmnopqrstuvwxyz"):
    """Render non-negative integer *num* in base *b* (2..36) using the
    given digit alphabet."""
    if num == 0:
        return "0"
    digits = []
    while num:
        num, remainder = divmod(num, b)
        digits.append(numerals[remainder])
    return "".join(reversed(digits))
class Account(db.Model):
    """Datastore entity for a user account (Python 2 / GAE webapp).

    ``hash`` identifies the account as a notification *target*; ``api_key``
    authenticates it as a notification *source*.
    """
    user = db.UserProperty(auto_current_user_add=True)  # owning Google account
    hash = db.StringProperty()       # public target id (md5 of the email)
    api_key = db.StringProperty()    # secret key presented when sending
    source_enabled = db.BooleanProperty()
    source_name = db.StringProperty()   # display name shown to targets
    source_icon = db.StringProperty()   # icon URL attached to notices
    created = db.DateTimeProperty(auto_now_add=True)
    updated = db.DateTimeProperty(auto_now=True)
    @classmethod
    def get_by_user(cls, user):
        """Return the account owned by *user*, or None."""
        return cls.all().filter('user =', user).get()
    @classmethod
    def get_by_hash(cls, hash):
        """Return the account whose target hash is *hash*, or None."""
        return cls.all().filter('hash = ', hash).get()
    def set_hash_and_key(self):
        """Derive ``hash`` from the user's email and mint a new ``api_key``.

        NOTE(review): the key mixes ``hash(time.time())`` values — not
        cryptographically strong; confirm whether that matters here.
        """
        self.hash = hashlib.md5(self.user.email()).hexdigest()
        self.api_key = ''.join([baseN(abs(hash(time.time())), 36), baseN(abs(hash(self.hash)), 36)])
class Channel(db.Model):
    """A source->target notification channel with an approval workflow.

    ``status`` is one of 'pending' (awaiting target approval), 'enabled'
    or 'disabled'.
    """
    target = db.ReferenceProperty(Account, required=True, collection_name='channels_as_target')
    source = db.ReferenceProperty(Account, required=True, collection_name='channels_as_source')
    status = db.StringProperty(required=True, default='pending')
    created = db.DateTimeProperty(auto_now_add=True)
    updated = db.DateTimeProperty(auto_now=True)
    @classmethod
    def get_all_by_target(cls, account):
        """Query of all channels delivering to *account*."""
        return cls.all().filter('target =', account)
    @classmethod
    def get_all_by_source(cls, account):
        """Query of all channels originating from *account*."""
        return cls.all().filter('source =', account)
    @classmethod
    def get_by_source_and_target(cls, source, target):
        """Return the unique source->target channel, or None."""
        return cls.all().filter('source =', source).filter('target =', target).get()
    def delete(self):
        """Delete the channel, first detaching its notifications so they
        are not left pointing at a dead entity."""
        notices = Notification.all().filter('channel =', self)
        for n in notices:
            n.channel = None
            n.put()
        super(Channel, self).delete()
    def get_approval_notice(self):
        """Build (without saving) the notification asking the target to
        approve or deny this source."""
        notice = Notification(channel=self, target=self.target, text="%s wants to send you notifications. Click here to approve/deny this request." % self.source.source_name)
        notice.title = "New Notification Source"
        notice.link = "http://%s/dashboard/sources" % WWW_HOST
        notice.icon = self.source.source_icon
        notice.sticky = 'true'
        return notice
class Notification(db.Model):
    """A single notification delivered over a Channel."""
    channel = db.ReferenceProperty(Channel)
    target = db.ReferenceProperty(Account, collection_name='target_notifications')
    source = db.ReferenceProperty(Account, collection_name='source_notifications')
    title = db.StringProperty()
    text = db.TextProperty(required=True)
    link = db.StringProperty()
    icon = db.StringProperty()
    sticky = db.StringProperty()
    created = db.DateTimeProperty(auto_now_add=True)
    updated = db.DateTimeProperty(auto_now=True)
    def __init__(self, *args, **kwargs):
        """When constructed with a ``channel`` kwarg, derive ``source`` and
        ``target`` from it automatically."""
        channel = kwargs.get('channel')
        if channel and isinstance(channel, Channel):
            kwargs['source'] = channel.source
            kwargs['target'] = channel.target
        super(Notification, self).__init__(*args, **kwargs)
    def to_json(self):
        """Serialize the notice for the wire: ``text`` plus any optional
        fields that are set, with the source's display name attached."""
        o = {'text': self.text}
        for arg in ['title', 'link', 'icon', 'sticky']:
            value = getattr(self, arg)
            if value:
                o[arg] = value
        o['source'] = self.source.source_name
        return simplejson.dumps(o)
class MainHandler(webapp.RequestHandler):
    """Front page: logged-in users go to the dashboard, anonymous users get
    the landing page with a login link (exposed to the template via
    locals())."""
    def get(self):
        user = users.get_current_user()
        if user:
            self.redirect('/dashboard')
            return
        else:
            login_url = users.create_login_url('/')
            self.response.out.write(template.render('templates/main.html', locals()))#file('templates/main.html').read())#
class NotificationHandler(webapp.RequestHandler):
    """API endpoint: accept a notification from a source (``notify``) or
    replay a stored one (``replay``)."""
    def post(self):
        # Look up target by its public hash and source by its secret key.
        target = Account.all().filter('hash =', self.request.get('hash')).get()
        source = Account.all().filter('api_key =', self.request.get('api_key')).get()
        replay = self.request.get('replay', None)
        if replay:
            self.replay(replay, target, source)
        else:
            self.notify(target, source)
    def replay(self, replay, target, source):
        """Re-emit stored notification *replay* (a Notification id) as JSON.

        NOTE(review): ``notice.channel`` is dereferenced before the
        ``if notice`` check, so an unknown id raises instead of 404ing —
        confirm intended.
        """
        notice = Notification.get_by_id(int(replay))
        channel = notice.channel
        # Can only replay if hash == notification target AND (api_key == notification source OR notification target)
        authz = channel.target.key() == target.key() and (channel.source.key() == source.key() or source.key() == channel.target.key())
        if notice and channel.status == 'enabled' and authz:
            self.response.out.write(notice.to_json())
        else:
            self.error(404)
    def notify(self, target, source):
        """Store a new notification; auto-create a pending channel on first
        contact and answer 200/202/404 depending on channel status."""
        channel = Channel.all().filter('target =', target).filter('source =', source).get()
        approval_notice = None
        # First contact: create the channel in 'pending' state and prepare
        # the approval request for the target.
        if not channel and source and target:
            channel = Channel(target=target, source=source)
            channel.put()
            approval_notice = channel.get_approval_notice()
        if channel:
            notice = Notification(channel=channel, text=self.request.get('text'), icon=source.source_icon)
            # Optional fields override the source-icon default when present.
            for arg in ['title', 'link', 'icon', 'sticky']:
                value = self.request.get(arg, None)
                if value:
                    setattr(notice, arg, value)
            notice.put()
            if channel.status == 'enabled':
                self.response.out.write(notice.to_json())
            elif channel.status == 'pending':
                self.response.set_status(202)
                if approval_notice:
                    self.response.out.write(approval_notice.to_json())
                else:
                    self.response.out.write("202 Pending approval")
            elif channel.status == 'disabled':
                self.response.set_status(202)
                self.response.out.write("202 Accepted but disabled")
        else:
            self.error(404)
            self.response.out.write("404 Target or source not found")
class DownloadHandler(webapp.RequestHandler):
    """Serve a personalized listener client script with the user's hash and
    api_key templated in (variables exposed via locals())."""
    @login_required
    def get(self):
        user = users.get_current_user()
        account = Account.all().filter('user =', user).get()
        host = API_HOST
        hash = account.hash
        api_key = account.api_key
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.out.write(template.render('templates/client.py', locals()))
class ListenAuthHandler(webapp.RequestHandler):
    """Credential check used by the listen API: the hash/api_key pair must
    belong to one account; answers plain 'ok' or 403."""
    def get(self):
        api_key = self.request.get('api_key')
        userhash = self.request.get('hash')
        account = Account.all().filter('hash =', userhash).filter('api_key =', api_key).get()
        if account:
            self.response.out.write("ok")
        else:
            self.error(403)
class IntroHandler(webapp.RequestHandler):
    """Getting-started page for anonymous visitors.

    NOTE(review): the template is rendered only when no user is logged in,
    so logged-in users receive an empty 200 response — confirm intended.
    """
    def get(self):
        user = users.get_current_user()
        if not user:
            login_url = users.create_login_url('/')
            self.response.out.write(template.render('templates/getstarted.html', locals()))
self.response.out.write(template.render('templates/getstarted.html', locals()))
def main():
    """Wire up the URL routes and run the app as a CGI handler (classic
    Python 2 App Engine entry point)."""
    application = webapp.WSGIApplication([
        ('/', MainHandler),
        ('/notification', NotificationHandler),
        ('/download/notifyio-client.py', DownloadHandler),
        ('/auth', ListenAuthHandler),
        ('/getstarted', IntroHandler),
    ], debug=True)
    wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
    main()
| [
"progrium@gmail.com"
] | progrium@gmail.com |
001acef57576b87eb38040f53889537d452e2f72 | 552865ae5daa143bc6a7dec46f7febe49f0a7226 | /src/mr/cabot/kml.py | 96d3de4531e1a03cd61c963cb5568f2f5a0be081 | [] | no_license | collective/mr.cabot | 231a4a96c38e793356c4d06438d236d447e97bc8 | 3e905d80ed5eac52a258b74d19abf5ab182d49e2 | refs/heads/master | 2023-03-22T15:30:19.171188 | 2013-01-27T17:54:22 | 2013-01-27T18:32:03 | 6,816,996 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | import datetime
import urllib
import os
import simplekml
from mr.cabot.interfaces import IListing, IGeolocation
import sebastian
colors = {"commit": "ff00ff00", "mailing-list": "ffff0000", "answer": "ff00ffff"}
def join(objs):
kml = simplekml.Kml()
unique_locations = set()
for obj in objs:
loc = IGeolocation(obj).coords
if loc not in unique_locations:
unique_locations.add(loc)
add_point(kml, obj)
return kml.kml()
def add_point(kml, obj):
loc = IGeolocation(obj).coords
if not loc:
return ''
else:
lat, lon = loc
listing = IListing(obj)
listing_type = listing.__name__
summary = listing.summary
if isinstance(summary, str):
summary = listing.summary.decode("utf-8", "ignore")
summary = summary.encode("ascii","xmlcharrefreplace")
point = kml.newpoint(name=listing.__name__, description=summary, coords=[(lon, lat)])
point.style.iconstyle.color = colors[listing_type]
point.style.iconstyle.scale = 1
| [
"git@matthewwilkes.name"
] | git@matthewwilkes.name |
def spam(divideBy):
    """Return 42 / divideBy; print an error and return None when divideBy is zero."""
    try:
        result = 42 / divideBy
    except ZeroDivisionError:
        print('Error: Invalid argument')
        return None
    return result
# Demo calls; spam(0) exercises the ZeroDivisionError handler and prints None.
print(spam(2))
print(spam(12))
print(spam(0))
print(spam(1)) | [
"brad.goetsch@gmail.com"
] | brad.goetsch@gmail.com |
7cb4c2732a9e0437ad2c3c1be8df7a72b03dab80 | b8062e01860960131b37e27298b6b755b4191f5f | /python/level1_single_api/9_amct/amct_pytorch/resnet-101/src/resnet-101_calibration.py | 1fb64a80ea43a7e08efa9490757866a88b3a89a4 | [
"Apache-2.0"
] | permissive | RomanGaraev/samples | 4071fcbe6bf95cf274576665eb72588568d8bcf2 | 757aac75a0f3921c6d1b4d98599bd7d4ffda936b | refs/heads/master | 2023-07-16T02:17:36.640036 | 2021-08-30T15:14:05 | 2021-08-30T15:14:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,051 | py | """
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import os
import argparse
import torch # pylint: disable=E0401
from PIL import Image # pylint: disable=E0401
from torchvision import transforms # pylint: disable=E0401
import onnxruntime as ort # pylint: disable=E0401
import amct_pytorch as amct # pylint: disable=E0401
from resnet import resnet101 # pylint: disable=E0401, C0415
# Calibration inputs: images and the "<name> <label>" index file beside them.
PATH = os.path.realpath('./')
IMG_DIR = os.path.join(PATH, 'data/images')
LABEL_FILE = os.path.join(IMG_DIR, 'image_label.txt')
# --nuq selects non-uniform quantization; outputs go to a separate directory.
PARSER = argparse.ArgumentParser(description='whether use nuq')
PARSER.add_argument('--nuq', dest='nuq', action='store_true', help='whether use nuq')
ARGS = PARSER.parse_args()
if ARGS.nuq:
    OUTPUTS = os.path.join(PATH, 'outputs/nuq')
else:
    OUTPUTS = os.path.join(PATH, 'outputs/calibration')
TMP = os.path.join(OUTPUTS, 'tmp')
def get_labels_from_txt(label_file):
    """Read "<image> <label>" pairs from *label_file*.

    Returns two parallel lists: image file names and integer labels.
    Blank (or whitespace-only) lines are skipped; the original version
    raised IndexError on a trailing empty line and split each line twice.
    """
    images = []
    labels = []
    with open(label_file, 'r') as f:
        for line in f:
            parts = line.split()
            if not parts:
                continue  # tolerate blank/trailing lines
            images.append(parts[0])
            labels.append(int(parts[1]))
    return images, labels
def prepare_image_input(images):
    """Load *images* (file paths) into one normalized NCHW float tensor.

    Each image is resized to 256, center-cropped to 224x224 and normalized
    with the standard ImageNet mean/std.
    """
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    batch = torch.zeros(len(images), 3, 224, 224)  # pylint: disable=E1101
    for idx, path in enumerate(images):
        rgb = Image.open(path).convert('RGB')
        batch[idx, ...] = preprocess(rgb)
    return batch
def img_postprocess(probs, labels):
    """Return (top-1 accuracy, top-5 accuracy) for a batch of class scores.

    `probs` is a (batch, num_classes) score array; `labels` holds the true
    class index per sample.  argsort is ascending, so the last five indices
    are the five highest-scoring classes and the last one is the arg-max.
    """
    num_classes = probs.shape[1]
    top1_hits = 0
    top5_hits = 0
    for idx, label in enumerate(labels):
        best5 = (probs[idx, :].argsort())[num_classes - 5: num_classes]
        if label == best5[-1]:
            top1_hits += 1
            top5_hits += 1
        elif label in best5:
            top5_hits += 1
    return float(top1_hits) / len(labels), float(top5_hits) / len(labels)
def model_forward(model, batch_size, iterations):
    """Evaluate a PyTorch *model* over `iterations` batches and report accuracy."""
    images, labels = get_labels_from_txt(LABEL_FILE)
    images = [os.path.join(IMG_DIR, name) for name in images]
    top1_sum = 0
    top5_sum = 0
    for step in range(iterations):
        lo = step * batch_size
        hi = lo + batch_size
        batch = prepare_image_input(images[lo:hi])
        # Move the input and model to GPU for speed if available.
        if torch.cuda.is_available():
            batch = batch.to('cuda')
            model.to('cuda')
        with torch.no_grad():
            output = model(batch)
        top1, top5 = img_postprocess(output, labels[lo:hi])
        top1_sum += top1
        top5_sum += top5
        print('****************iteration:{}*****************'.format(step))
        print('top1_acc:{}'.format(top1))
        print('top5_acc:{}'.format(top5))
    print('******final top1:{}'.format(top1_sum / iterations))
    print('******final top5:{}'.format(top5_sum / iterations))
    return top1_sum / iterations, top5_sum / iterations
def onnx_forward(onnx_model, batch_size, iterations):
    """Evaluate an ONNX model file over `iterations` batches and report accuracy."""
    session = ort.InferenceSession(onnx_model)
    images, labels = get_labels_from_txt(LABEL_FILE)
    images = [os.path.join(IMG_DIR, name) for name in images]
    top1_sum = 0
    top5_sum = 0
    for step in range(iterations):
        lo = step * batch_size
        hi = lo + batch_size
        batch = prepare_image_input(images[lo:hi])
        # ONNX Runtime consumes numpy input; outputs come back as a list.
        output = session.run(None, {'input': batch.numpy()})
        top1, top5 = img_postprocess(output[0], labels[lo:hi])
        top1_sum += top1
        top5_sum += top5
        print('****************iteration:{}*****************'.format(step))
        print('top1_acc:{}'.format(top1))
        print('top5_acc:{}'.format(top5))
    print('******final top1:{}'.format(top1_sum / iterations))
    print('******final top5:{}'.format(top5_sum / iterations))
    return top1_sum / iterations, top5_sum / iterations
def main():
    """Run FP32 evaluation, AMCT calibration and fake-quant evaluation end to end."""
    model = resnet101(pretrained=True)
    model.eval()
    # Baseline accuracy of the unquantized model.
    ori_top1, ori_top5 = model_forward(model, batch_size=32, iterations=5)
    # Quantize configurations
    args_shape = [(1, 3, 224, 224)]
    input_data = tuple([torch.randn(arg_shape) for arg_shape in args_shape]) # pylint: disable=E1101
    if torch.cuda.is_available():
        input_data = tuple([data.to('cuda') for data in input_data])
        model.to('cuda')
    config_json_file = os.path.join(TMP, 'config.json')
    skip_layers = []
    batch_num = 2
    if ARGS.nuq:
        config_defination = os.path.join(PATH, 'src/nuq_conf/nuq_quant.cfg')
        amct.create_quant_config(
            config_json_file, model, input_data, skip_layers, batch_num, config_defination=config_defination)
    else:
        amct.create_quant_config(config_json_file, model, input_data, skip_layers, batch_num)
    # Phase1: do conv+bn fusion, weights calibration and generate
    # calibration model
    record_file = os.path.join(TMP, 'record.txt')
    modified_model = os.path.join(TMP, 'modified_model.onnx')
    calibration_model = amct.quantize_model(
        config_json_file, modified_model, record_file, model, input_data, input_names=['input'],
        output_names=['output'], dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}})
    # Phase2: do calibration (forward passes collect activation statistics)
    model_forward(calibration_model, batch_size=32, iterations=batch_num)
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    # Phase3: save final model, one for onnx do fake quant test, one
    # deploy model for ATC
    result_path = os.path.join(OUTPUTS, 'resnet-101')
    amct.save_model(modified_model, record_file, result_path)
    # Phase4: run fake_quant model test and compare against the baseline
    quant_top1, quant_top5 = onnx_forward(
        '%s_%s' % (result_path, 'fake_quant_model.onnx'), batch_size=32, iterations=5)
    print('[INFO] ResNet101 before quantize top1:{:>10} top5:{:>10}'.format(ori_top1, ori_top5))
    print('[INFO] ResNet101 after quantize top1:{:>10} top5:{:>10}'.format(quant_top1, quant_top5))
if __name__ == '__main__':
    main()
| [
"derek.qian.wang@huawei.com"
] | derek.qian.wang@huawei.com |
7c9b22a38ca78b7384d98f036477bb1f96f5c71f | 08637a7a73653c1c3c0d5c5316589e3f290fbbfd | /ProjectEulerProblem45.py | a44a26a4d4f9f456bee09040e9a5899b5c902ea7 | [] | no_license | jdbr827/ProjectEuler | 91253bf36c86e21d95deed2bac2a43a33289da66 | ddc728a75303d7ad949c723c0aa0f36900cc9aae | refs/heads/master | 2022-07-24T12:42:50.038387 | 2019-11-12T23:39:35 | 2019-11-12T23:39:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py |
def solve3():
    """Find the first two numbers after 1 that are simultaneously triangular,
    pentagonal and hexagonal (Project Euler 45 style search).

    Advances whichever sequence currently has the smallest latest term so the
    three stay in lockstep; a number is recorded whenever all three coincide.

    Fixes two defects in the original: `M == min(T, P, H):` was a syntax error
    (comparison with a trailing colon instead of an assignment), and the third
    branch tested `H == P` instead of `M == H`, so the hexagonal sequence was
    advanced on the wrong condition.
    """
    T = 1
    Tn = 1
    P = 1
    Pn = 1
    H = 1
    Hn = 1
    found = set([])
    while len(found) < 2:
        # Advance the sequence whose most recent element is smallest.
        M = min(T, P, H)
        if M == T:
            Tn += 1
            T = Tn * (Tn + 1) / 2
        elif M == P:
            Pn += 1
            P = Pn * ((3 * Pn) - 1) / 2
        elif M == H:
            Hn += 1
            H = Hn * ((2 * Hn) - 1)
        # Record a number present in all three sequences.
        if T == P and T == H:
            found.add(T)
    return found, Tn, Pn, Hn
def solve2():
    """Scalar variant of the triangular/pentagonal/hexagonal search.

    Each branch advances the sequence whose latest term is (one of the)
    smallest; a value appearing in all three sequences is collected until
    two such values are found.
    """
    tri, tri_n = 1, 1
    pen, pen_n = 1, 1
    hexa, hexa_n = 1, 1
    found = set()
    while len(found) < 2:
        if tri <= pen and tri <= hexa:
            tri_n += 1
            tri = tri_n * (tri_n + 1) / 2
        elif pen <= tri and pen <= hexa:
            pen_n += 1
            pen = pen_n * ((3 * pen_n) - 1) / 2
        elif hexa <= tri and hexa <= pen:
            hexa_n += 1
            hexa = hexa_n * ((2 * hexa_n) - 1)
        if tri == pen and tri == hexa:
            found.add(tri)
    return found, tri_n, pen_n, hexa_n
def solve():
    """List-based variant: grows the full T/P/H sequences instead of scalars.

    Returns the set of the first two triple-figurate numbers together with
    the last element that was appended (the second match).
    """
    tri = [1]
    pen = [1]
    hexa = [1]
    found = set()
    while len(found) < 2:
        if tri[-1] <= pen[-1] and tri[-1] <= hexa[-1]:
            k = len(tri) + 1
            nxt = k * (k + 1) / 2
            tri.append(nxt)
        elif pen[-1] <= tri[-1] and pen[-1] <= hexa[-1]:
            k = len(pen) + 1
            nxt = k * ((3 * k) - 1) / 2
            pen.append(nxt)
        elif hexa[-1] <= tri[-1] and hexa[-1] <= pen[-1]:
            k = len(hexa) + 1
            nxt = k * ((2 * k) - 1)
            hexa.append(nxt)
        if tri[-1] == pen[-1] and tri[-1] == hexa[-1]:
            found.add(nxt)
    return found, nxt
# Python 2 entry point: prints (found_set, Tn, Pn, Hn) from the scalar search.
print solve2()
| [
"noreply@github.com"
] | noreply@github.com |
cff049faf2a96253a3c9ab4ba304667f2b5d2244 | 00edbbf472066b51d1dafa361303aa1836f3b177 | /stock_incoming_shippment_container/stock.py | 9e3d3fa5ea458d2b80b197b33294f5bb1ae8ea0d | [] | no_license | kevin808/ellico_extra_addons | 22513a927e23c00d8380c9c75ac2a5df68adf8c8 | 9953fe3ad9a092aa1dbb1631b0cf30bd96403166 | refs/heads/master | 2020-05-17T02:55:03.091842 | 2014-06-17T02:37:43 | 2014-06-17T02:37:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010-2014 Elico Corp. All Rights Reserved.
# Alex Duan <alex.duan@elico-corp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class stock_picking(orm.Model):
    """Extension of stock.picking: adds the incoming shipment's container number."""
    _inherit = 'stock.picking'
    _columns = {
        # Free-form container reference for the shipment (max 64 chars).
        'container_num': fields.char('Container Number', size=64)
    }
class stock_picking_in(orm.Model):
    """Mirror of the stock.picking extension on the incoming-picking model."""
    _inherit = 'stock.picking.in'
    _columns = {
        # Must stay in sync with the column declared on stock.picking above.
        'container_num': fields.char('Container Number', size=64)
    }
| [
"lin.yu@elico-corp.com"
] | lin.yu@elico-corp.com |
9c1c1496d9e87ef0b64186d9951572487e4eec52 | 2d5648035b8bd32b4a6ded311e48975e5ea100d4 | /runs/bro/100KB/src2-tgt1/ssl-par-ssl-iter00100.cfg.py | 0df43d2c6562ed9dcdd7e02e3967e1cde40ca70a | [
"MIT"
] | permissive | Largio/broeval | 3975e54a1eaead6686c53e5e99250a00becbe1e0 | 89e831d07f066100afdd1a5b220f9f08f1c10b3d | refs/heads/master | 2021-05-08T08:54:06.498264 | 2017-11-10T17:09:02 | 2017-11-10T17:09:02 | 92,508,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py |
# Write results to this file
OUTFILE = 'runs/bro/100KB/src2-tgt1/ssl-par-ssl-iter00100.result.csv'
# Source computers for the request
SOURCE = ['10.0.0.1', '10.0.0.3']
# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']
# IDS Mode. (ATM: noids, min, max, http, ssl, ftp, icmp, mysql)
IDSMODE = 'ssl'
# Connection mode (par = parallel, seq = sequential)
MODE = 'par'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repetition
ITER = 100
# Size of the file to be downloaded from target (in Bytes * 10^SIZE,
# i.e. 5 -> 10^5 bytes = 100KB, matching this run's directory name)
SIZE = 5
# Protocol to be used e.g. HTTP, SSL, FTP, MYSQL
PROTOCOL = 'ssl' | [
"larswiete@googlemail.com"
] | larswiete@googlemail.com |
class Node:
    """Single node of a binary search tree: a value plus two child links."""
    def __init__(self, value):
        self.value = value
        self.left = None   # subtree of values < self.value
        self.right = None  # subtree of values >= self.value
class Tree:
    """Binary search tree with buffered traversal output.

    Traversal methods append " <value>" tokens to ``self.buffer``;
    ``print_buffer`` prints the accumulated text and clears it.
    """

    def __init__(self):
        self.root = None
        self.buffer = ""

    def insert(self, value):
        """Insert *value*, creating the root when the tree is empty."""
        if self.root is None:
            self.root = Node(value)
            return
        self.add(self.root, value)

    def add(self, node, value):
        """Walk down from *node* and attach *value* at the first free slot."""
        if value < node.value:
            if node.left is None:
                node.left = Node(value)
            else:
                self.add(node.left, value)
        else:
            if node.right is None:
                node.right = Node(value)
            else:
                self.add(node.right, value)

    def print_buffer(self):
        """Print the buffered traversal and reset the buffer."""
        print(self.buffer)
        self.buffer = ''

    def in_order(self, node):
        """Left subtree, node, right subtree."""
        if node is None:
            return
        self.in_order(node.left)
        self.buffer += " %s" % str(node.value)
        self.in_order(node.right)

    def pre_order(self, node):
        """Node, left subtree, right subtree."""
        if node is None:
            return
        self.buffer += " %s" % str(node.value)
        self.pre_order(node.left)
        self.pre_order(node.right)

    def post_order(self, node):
        """Left subtree, right subtree, node."""
        if node is None:
            return
        self.post_order(node.left)
        self.post_order(node.right)
        self.buffer += " %s" % str(node.value)
# For each test case: read the element count, then one line of space-separated
# integers; build a BST and print its pre/in/post-order traversals.
cases = int(input())
for i in range(cases):
    ammount = int(input())  # element count line; split() below reads them all
    elements = [int(x) for x in input().split()]
    tree = Tree()
    for element in elements:
        tree.insert(element)
    print("Case %d:" % (i + 1))
    print("Pre.:", end="")
    tree.pre_order(tree.root)
    tree.print_buffer()
    print("In..:", end="")
    tree.in_order(tree.root)
    tree.print_buffer()
    print("Post:", end="")
    tree.post_order(tree.root)
    tree.print_buffer()
    print()
| [
"zacchilucasm@gmail.com"
] | zacchilucasm@gmail.com |
46f9074e93f7bef5beaa27844351f2b1ba6935da | 3307766701d680af6d12a726a2d98df2cb1830e5 | /jams/gcj/2013/1C/C/C.py | 0660c807359fca4cfb396ebfa66c729d1b5b2f9e | [] | no_license | dpaneda/code | c1a54037a275fa7044eb5c2d6079f052dd968615 | 7da1ede33a6a7cd19cbd0db517d91e7cccfbbfff | refs/heads/master | 2023-01-07T18:41:00.816363 | 2022-12-30T09:24:22 | 2022-12-30T09:24:22 | 1,583,913 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,495 | py | #!/usr/bin/python2
import sys
import bisect
def calculate_atacks(tribes):
    """Expand every tribe's attack schedule into a {day: [(w, e, s), ...]} map.

    Each tribe list is [day, count, west, east, strength, d_delta, pos_delta,
    s_delta]; the tribe entries are mutated in place as the schedule unrolls.
    """
    attacks = {}
    for tribe in tribes:
        for _ in range(tribe[1]):
            day = tribe[0]
            attacks.setdefault(day, []).append((tribe[2], tribe[3], tribe[4]))
            # Shift the tribe's state toward its next attack.
            tribe[0] += tribe[5]
            tribe[2] += tribe[6]
            tribe[3] += tribe[6]
            tribe[4] += tribe[7]
    return attacks
def raise_wall(wall, wallh, w, e, s):
    """Raise the wall to height *s* over the span [w, e].

    `wall` holds sorted segment start positions and `wallh` the height of the
    segment starting at each position.  Bookkeeping entries are inserted at
    *w* and *e* as needed, then covered segment starts lower than *s* are
    removed.

    Fixes two defects in the original: a stray Python-2 debug statement
    (`print a, b`) that corrupted the contest output, and element deletion
    inside `for i in xrange(a + 2, b)` — deleting while iterating by index
    skips the element after every deletion and can run past the shrunken
    list.  The cleanup now walks with an explicit index.
    """
    a = bisect.bisect_right(wall, w)
    if a > 0:
        a -= 1
    b = bisect.bisect_right(wall, e)
    inserted = False
    if wall[a] < w and wallh[a] < s:
        wall.insert(a + 1, w)
        wallh.insert(a + 1, s)
        b += 1
        inserted = True
    elif wall[a] == w and wallh[a] < s:
        wallh[a] = s
        inserted = True
    if inserted:
        if b >= len(wall):
            wall.insert(a + 2, e)
            wallh.insert(a + 2, 0)
        elif wall[b] > e:
            # NOTE(review): wall[b] is a coordinate, yet it is stored as a
            # height here — looks like it should be wallh[b]; preserved as-is.
            wall.insert(a + 2, e)
            wallh.insert(a + 2, wall[b])
        # Remove segment starts between the new endpoints whose height is
        # below s, visiting each original element exactly once.
        i = a + 2
        end = b
        while i < end:
            if wallh[i] < s:
                del wall[i]
                del wallh[i]
                end -= 1
            else:
                i += 1
def wall_minimum_height(wall, wallh, w, e):
    """Return the minimum wall height over the span [w, e] (0 if span is empty)."""
    lo = max(bisect.bisect_right(wall, w) - 1, 0)
    hi = bisect.bisect_right(wall, e)
    if lo == hi:
        return 0
    return min(wallh[lo:hi])
def succeed(wall, wallh, w, e, s):
    """True when an attack of strength *s* clears the wall somewhere in [w, e]."""
    return wall_minimum_height(wall, wallh, w, e) < s
def simulate_attacks(attacks):
    """Replay attacks day by day and count how many succeed.

    All attacks of a given day are judged against the morning wall first,
    and only afterwards is the wall raised for each of them.
    """
    wall = [0]
    wallh = [0]
    wins = 0
    for day in sorted(attacks):
        day_attacks = attacks[day]
        for west, east, strength in day_attacks:
            if succeed(wall, wallh, west, east, strength):
                wins += 1
        for west, east, strength in day_attacks:
            raise_wall(wall, wallh, west, east, strength)
    return wins
ntribes = int(sys.stdin.readline().strip())
tribes = []
for i in xrange(0, ntribes):
d, n, w, e, s, di, pi, si = map(int, sys.stdin.readline().strip().split())
tribes.append([d, n, w, e, s, di, pi, si])
attacks = calculate_atacks(tribes)
return simulate_attacks(attacks)
# Python 2 contest driver: one "Case #i: answer" line per test case.
num = int(sys.stdin.readline())
for case in range(1, num + 1):
    print "Case #%d: %s " % (case, Solve())
| [
"dpaneda@gmail.com"
] | dpaneda@gmail.com |
c38d01418bc5557e97442e26d3317de141654596 | 5858cc331d73f04d38f2572d68bd9c2eb3747165 | /subscriptions/tests/test_views_detail.py | 27515f097684c7bcca36babb70f8b52b11b5e55c | [] | no_license | rougeth/wttd | 6f9817e3f34df8adebda5a12bde3aa66a4f5d966 | b8d63ccce01c0f7403fc53547c0a421fbe5e6c8b | refs/heads/master | 2021-01-17T05:27:04.678657 | 2014-06-01T03:02:30 | 2014-06-01T03:02:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | # coding: utf-8
from django.test import TestCase
from subscriptions.models import Subscription
class DetailTest(TestCase):
    """Tests for the subscription detail view (GET /inscricao/<pk>/)."""
    def setUp(self):
        # One persisted subscription; every test inspects the same response.
        s = Subscription.objects.create(
            name='Marco Rougeth',
            cpf='12345678901',
            email='marco@rougeth.com',
            phone='61-123456789'
        )
        self.response = self.client.get('/inscricao/{}/'.format(s.pk))
    def test_get(self):
        """Detail page responds with HTTP 200."""
        self.assertEqual(200, self.response.status_code)
    def test_template(self):
        """Detail page renders the expected template."""
        self.assertTemplateUsed(
            self.response,
            'subscriptions/subscription_detail.html'
        )
    def test_context(self):
        """The view exposes a Subscription instance in the context."""
        subscription = self.response.context['subscription']
        self.assertIsInstance(subscription, Subscription)
    def test_html(self):
        """The rendered page contains the subscriber's name."""
        self.assertContains(self.response, 'Marco Rougeth')
| [
"marco@rougeth.com"
] | marco@rougeth.com |
3e9b63e9492405f039f1e350d73adff14fddf664 | 39ab815dfdbab9628ede8ec3b4aedb5da3fd456a | /aql/aql/options/aql_option_types.py | e93027919b544a7de53973e716f7a8f385c8a943 | [
"MIT"
] | permissive | menify/sandbox | c03b1bf24c1527b47eb473f1acc433f17bfb1d4f | 32166c71044f0d5b414335b2b6559adc571f568c | refs/heads/master | 2016-09-05T21:46:53.369065 | 2015-04-20T06:35:27 | 2015-04-20T06:35:27 | 25,891,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,600 | py | #
# Copyright (c) 2011,2012 The developers of Aqualid project - http://aqualid.googlecode.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
# AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__all__ = (
'OptionType', 'StrOptionType', 'VersionOptionType', 'PathOptionType', 'BoolOptionType',
'EnumOptionType', 'RangeOptionType', 'ListOptionType', 'DictOptionType',
'autoOptionType',
'ErrorOptionTypeEnumAliasIsAlreadySet', 'ErrorOptionTypeEnumValueIsAlreadySet',
'ErrorOptionTypeUnableConvertValue', 'ErrorOptionTypeNoEnumValues',
)
from aql.util_types import String, AqlException, toString, toSequence, IgnoreCaseString, Version, FilePath, UniqueList, List, \
SplitListType, ValueListType, Dict, SplitDictType, ValueDictType
#//===========================================================================//
class ErrorOptionTypeEnumAliasIsAlreadySet( AqlException ):
  """Raised when an enum alias is re-mapped to a different value."""
  def __init__( self, option, value, current_value, new_value ):
    msg = "Alias '%s' of Enum Option '%s' can't be changed to '%s' from '%s'" % (value, option, new_value, current_value )
    # Name the class explicitly: super(type(self), self) recurses forever in
    # subclasses, because type(self) is always the most-derived class.
    super(ErrorOptionTypeEnumAliasIsAlreadySet, self).__init__( msg )
#//===========================================================================//
class ErrorOptionTypeEnumValueIsAlreadySet( AqlException ):
  """Raised when an existing enum value would be turned into an alias."""
  def __init__( self, option, value, new_value ):
    msg = "Value '%s' of Enum Option '%s' can't be changed to alias to '%s'" % (value, option, new_value )
    # Explicit class in super(): super(type(self), ...) breaks subclassing.
    super(ErrorOptionTypeEnumValueIsAlreadySet, self).__init__( msg )
#//===========================================================================//
class ErrorOptionTypeUnableConvertValue( TypeError ):
  """Raised when a value cannot be converted to an option's value type."""
  def __init__( self, option_type, invalid_value ):
    self.option_type = option_type      # the OptionType that rejected the value
    self.invalid_value = invalid_value  # the offending value
    msg = "Unable to convert option value '%s (%s)' to '%s'" % (invalid_value, type(invalid_value), option_type.rangeHelp())
    # Explicit class in super(): super(type(self), ...) recurses in subclasses.
    super(ErrorOptionTypeUnableConvertValue, self).__init__( msg )
#//===========================================================================//
class ErrorOptionTypeNoEnumValues( TypeError ):
  """Raised when an enum option type has no registered values."""
  def __init__( self, option_type ):
    msg = "Enum option type '%s' doesn't have any values." % (option_type,)
    # Explicit class in super(): super(type(self), ...) recurses in subclasses.
    super(ErrorOptionTypeNoEnumValues, self).__init__( msg )
#//===========================================================================//
def autoOptionType( value ):
  """Deduce an option type from a sample value's runtime type.

  Sequences become list options typed after their first element, dicts
  become dict options, bools become bool options; anything else becomes a
  generic OptionType flagged as auto-detected.
  """
  if isinstance( value, (UniqueList, list, tuple) ):
    item_type = str
    try:
      if value:
        item_type = type(value[0])
    except IndexError:
      # A container that is truthy yet not indexable at 0 falls back to str.
      pass
    return ListOptionType( value_type = item_type )
  elif isinstance( value, dict ):
    return DictOptionType()
  elif isinstance( value, bool ):
    return BoolOptionType()
  return OptionType( value_type = type(value), is_auto = True )
#//===========================================================================//
class OptionType (object):
  """Base class for all option value types.

  Wraps a callable `value_type` that converts raw inputs into the option's
  canonical value.  `default is NotImplemented` means "no default": calling
  the option type without a value then falls back to value_type().
  """
  __slots__ = (
    'value_type',
    'default',
    'description',
    'group',
    'range_help',
    'is_auto',
    'is_tool_key',
  )
  #//-------------------------------------------------------//
  def __init__( self, value_type = str, description = None, group = None, range_help = None, default = NotImplemented,
                is_auto = False, is_tool_key = False ):
    # A value_type given as an OptionType subclass is instantiated here.
    if issubclass( value_type, OptionType ):
      value_type = value_type()
    self.value_type = value_type
    self.is_auto = is_auto
    self.is_tool_key = is_tool_key
    self.description = description
    self.group = group
    self.range_help = range_help
    # NotImplemented (not None) is the "no default" sentinel, so that None
    # itself can be a legitimate default value.
    if default is NotImplemented:
      self.default = NotImplemented
    else:
      self.default = value_type( default )
  #//-------------------------------------------------------//
  def __call__( self, value = NotImplemented ):
    """
    Converts a value to options' value.

    Without an argument, returns the default when one was set, otherwise
    value_type().  Conversion failures are reported uniformly as
    ErrorOptionTypeUnableConvertValue.
    """
    try:
      if value is NotImplemented:
        if self.default is NotImplemented:
          return self.value_type()
        return self.default
      return self.value_type( value )
    except (TypeError, ValueError):
      raise ErrorOptionTypeUnableConvertValue( self, value )
  def toStr( self, value ):
    """
    Converts a value to options' value string.
    """
    return toString( value )
  #//-------------------------------------------------------//
  def rangeHelp( self ):
    """
    Returns a description (list of strings) about range of allowed values.
    """
    if self.range_help:
      return list(toSequence( self.range_help ))
    return ["Value of type '%s'" % self.value_type.__name__]
#//===========================================================================//
#//===========================================================================//
class StrOptionType (OptionType):
  """String option; comparisons are case-insensitive when requested."""
  def __init__( self, ignore_case = False, description = None, group = None, range_help = None, is_tool_key = False ):
    if ignore_case:
      value_type = IgnoreCaseString
    else:
      value_type = String
    super(StrOptionType, self).__init__( value_type, description, group, range_help, is_tool_key = is_tool_key )
#//===========================================================================//
#//===========================================================================//
class VersionOptionType (OptionType):
  """Option holding a Version value."""
  def __init__( self, description = None, group = None, range_help = None, is_tool_key = False ):
    super(VersionOptionType, self).__init__( Version, description, group,
                                             range_help, is_tool_key = is_tool_key )
#//===========================================================================//
#//===========================================================================//
class PathOptionType (OptionType):
  """Option holding a FilePath value."""
  def __init__( self, description = None, group = None, range_help = None, is_tool_key = False ):
    super(PathOptionType, self).__init__( FilePath, description, group,
                                          range_help, is_tool_key = is_tool_key )
#//===========================================================================//
#//===========================================================================//
class BoolOptionType (OptionType):
  """Boolean option with configurable spellings and display style.

  `true_values`/`false_values` hold every accepted (case-insensitive)
  spelling; `true_value`/`false_value` are the two strings used when a
  value is rendered back to text.
  """
  __slots__ = (
    'true_value',
    'false_value',
    'true_values',
    'false_values',
    'aliases',   # NOTE(review): declared but never assigned anywhere below
  )
  #//-------------------------------------------------------//
  # Built-in spellings recognized out of the box.
  __true_values = ('yes', 'true', 'on', 'enabled', 'y', '1', 't' )
  __false_values = ('no', 'false', 'off', 'disabled', 'n', '0', 'f' )
  #//-------------------------------------------------------//
  def __init__( self, description = None, group = None, style = None, true_values = None, false_values = None, default = False, is_tool_key = False ):
    #noinspection PyTypeChecker
    super(BoolOptionType,self).__init__( bool, description, group, default = default, is_tool_key = is_tool_key )
    # `style` is the pair of strings used for rendering, e.g. ('On', 'Off').
    if style is None:
      style = ('True', 'False')
    else:
      style = map(IgnoreCaseString, style)
    if true_values is None:
      true_values = self.__true_values
    else:
      true_values = toSequence( true_values )
    if false_values is None:
      false_values = self.__false_values
    else:
      false_values = toSequence( false_values )
    self.true_value, self.false_value = style
    self.true_values = set()
    self.false_values = set()
    # The style strings themselves are also accepted as input spellings.
    self.addValues( true_values, false_values )
    self.addValues( self.true_value, self.false_value )
  #//-------------------------------------------------------//
  def __call__( self, value = NotImplemented ):
    # Convert any input to a bool: real bools pass through, known spellings
    # map to their meaning, anything else falls back to Python truthiness.
    if type(value) is bool:
      return value
    if value is NotImplemented:
      value = self.default
    value_str = IgnoreCaseString(value)
    if value_str in self.true_values:
      return True
    if value_str in self.false_values:
      return False
    return True if value else False
  #//-------------------------------------------------------//
  def toStr( self, value ):
    # Render using the configured display style.
    return self.true_value if value else self.false_value
  #//-------------------------------------------------------//
  def addValues( self, true_values, false_values ):
    """Registers additional accepted spellings for True and False."""
    true_values = toSequence( true_values )
    false_values = toSequence( false_values )
    self.true_values.update( map( lambda v: IgnoreCaseString(v), true_values ) )
    self.false_values.update( map( lambda v: IgnoreCaseString(v), false_values ) )
  #//-------------------------------------------------------//
  def rangeHelp( self ):
    # Two lines: all accepted true spellings, then all false spellings.
    return [ ', '.join( sorted( self.true_values ) ),
             ', '.join( sorted( self.false_values ) ) ]
#//===========================================================================//
#//===========================================================================//
class EnumOptionType (OptionType):
  """Option restricted to a closed set of values, each with optional aliases.

  Aliases and canonical values live in one mapping; dict.setdefault()
  guarantees an alias maps to the very same object as its canonical value,
  which rangeHelp() and range() rely on via the identity test
  `alias is value`.
  """
  __slots__ = (
    '__values',
  )
  def __init__( self, values, description = None, group = None, value_type = IgnoreCaseString, default = NotImplemented, is_tool_key = False ):
    super(EnumOptionType,self).__init__( value_type, description, group, default = default, is_tool_key = is_tool_key )
    self.__values = {}
    # Ensure the declared default is itself a registered enum value.
    if default is not NotImplemented:
      self.addValues( default )
    self.addValues( values )
  #//-------------------------------------------------------//
  def addValues( self, values ):
    """Registers values and aliases; raises when an alias conflicts."""
    try:
      values = tuple( values.items() ) # convert dictionary to a sequence
    except AttributeError:
      pass
    set_default_value = self.__values.setdefault
    value_type = self.value_type
    for value in toSequence(values):
      # Each entry is a value followed by any number of aliases.
      it = iter( toSequence( value ) )
      value = value_type( next( it ) )
      value = set_default_value( value, value )
      for alias in it:
        alias = value_type(alias)
        v = set_default_value( alias, value )
        if v != value:
          # The alias name is already taken, either as a value or as an
          # alias of a different value.
          if alias == v:
            raise ErrorOptionTypeEnumValueIsAlreadySet( self, alias, value )
          else:
            raise ErrorOptionTypeEnumAliasIsAlreadySet( self, alias, v, value )
  #//-------------------------------------------------------//
  def __call__( self, value = NotImplemented ):
    # Without a value: use the default, else the first registered value.
    try:
      if value is NotImplemented:
        value = self.default
        if value is not NotImplemented:
          return value
        try:
          value = next(iter(self.__values.values()))
          return value
        except StopIteration:
          # NOTE(review): ErrorOptionTypeNoEnumValues subclasses TypeError,
          # so the enclosing `except (KeyError, TypeError)` swallows it and
          # re-raises ErrorOptionTypeUnableConvertValue instead — confirm
          # whether that masking is intended.
          raise ErrorOptionTypeNoEnumValues( self )
      value = self.__values[ self.value_type( value ) ]
      return value
    except (KeyError, TypeError):
      raise ErrorOptionTypeUnableConvertValue( self, value )
  #//-------------------------------------------------------//
  def rangeHelp(self):
    # Group aliases under their canonical value for display.
    values = {}
    for alias, value in self.__values.items():
      if alias is value:
        values.setdefault( alias, [] )
      else:
        values.setdefault( value, [] ).append( alias )
    help_str = []
    for value, aliases in values.items():
      s = toString(value)
      if aliases:
        s += ' (or ' + ', '.join( map( toString, aliases ) ) + ')'
      help_str.append( s )
    return help_str
  #//-------------------------------------------------------//
  def range( self ):
    """Returns only the canonical values (aliases excluded)."""
    values = []
    for alias, value in self.__values.items():
      if alias is value:
        values.append( alias )
    return values
#//===========================================================================//
#//===========================================================================//
#noinspection PyAttributeOutsideInit
class RangeOptionType (OptionType):
  """Numeric option constrained to [min_value, max_value].

  With auto_correct enabled (the default) out-of-range values are clamped
  to the nearest bound; otherwise they raise
  ErrorOptionTypeUnableConvertValue.
  """
  __slots__ = (
    'min_value',
    'max_value',
    'auto_correct',
  )
  def __init__( self, min_value, max_value, description = None, group = None, value_type = int, auto_correct = True, default = NotImplemented, is_tool_key = False ):
    #noinspection PyTypeChecker
    super(RangeOptionType,self).__init__( value_type, description, group, default = default, is_tool_key = is_tool_key )
    self.setRange( min_value, max_value, auto_correct )
    # Re-apply the default through __call__ so it is range-checked too.
    if default is not NotImplemented:
      self.default = self( default )
  #//-------------------------------------------------------//
  def setRange( self, min_value, max_value, auto_correct = True ):
    """Sets the inclusive bounds; None means value_type() (e.g. 0 for int)."""
    if min_value is not None:
      try:
        min_value = self.value_type( min_value )
      except (TypeError, ValueError):
        raise ErrorOptionTypeUnableConvertValue( self, min_value )
    else:
      min_value = self.value_type()
    if max_value is not None:
      try:
        max_value = self.value_type( max_value )
      except (TypeError, ValueError):
        raise ErrorOptionTypeUnableConvertValue( self, max_value )
    else:
      max_value = self.value_type()
    self.min_value = min_value
    self.max_value = max_value
    # auto_correct=None leaves the current clamping mode untouched.
    if auto_correct is not None:
      self.auto_correct = auto_correct
  #//-------------------------------------------------------//
  def __call__( self, value = NotImplemented):
    # An internal TypeError is used as "out of range" control flow below and
    # is translated into ErrorOptionTypeUnableConvertValue by the handler.
    try:
      min_value = self.min_value
      if value is NotImplemented:
        if self.default is NotImplemented:
          return min_value
        value = self.default
      value = self.value_type( value )
      if value < min_value:
        if self.auto_correct:
          value = min_value
        else:
          raise TypeError()
      max_value = self.max_value
      if value > max_value:
        if self.auto_correct:
          value = max_value
        else:
          raise TypeError()
      return value
    except TypeError:
      raise ErrorOptionTypeUnableConvertValue( self, value )
  #//-------------------------------------------------------//
  def rangeHelp(self):
    return ["%s ... %s" % (self.min_value, self.max_value) ]
  #//-------------------------------------------------------//
  def range( self ):
    """Returns [min, max] as a two-element list."""
    return [self.min_value, self.max_value]
#//===========================================================================//
#//===========================================================================//
class ListOptionType (OptionType):
    """Option type holding a list of items of a given item type.

    When *value_type* is itself an OptionType, its description, group and
    range help are inherited (the description gets a "List of: " prefix).
    """

    __slots__ = ('item_type',)

    # //=======================================================//

    def __init__(self, value_type=str, unique=False, separators=', ',
                 description=None, group=None, range_help=None,
                 is_tool_key=False):
        if isinstance(value_type, OptionType):
            # Derive presentation attributes from the wrapped option type.
            if description is None:
                description = value_type.description
                if description:
                    description = "List of: " + description
            if group is None:
                group = value_type.group
            if range_help is None:
                range_help = value_type.range_help

        # Compose the container: unique or plain list of converted values,
        # optionally parsed from a separator-delimited string.
        container = UniqueList if unique else List
        composed_type = ValueListType(container, value_type)
        if separators:
            composed_type = SplitListType(composed_type, separators)

        super(ListOptionType, self).__init__(composed_type, description,
                                             group, range_help,
                                             is_tool_key=is_tool_key)
        self.item_type = value_type

    # //-------------------------------------------------------//

    def __call__(self, values=None):
        """Convert *values* into the option's list type."""
        try:
            if values is NotImplemented:
                values = []
            return self.value_type(values)
        except (TypeError, ValueError):
            raise ErrorOptionTypeUnableConvertValue(self, values)

    # //-------------------------------------------------------//

    def rangeHelp(self):
        """Describe the acceptable values of the list items."""
        if self.range_help:
            return list(toSequence(self.range_help))
        if isinstance(self.item_type, OptionType):
            return self.item_type.rangeHelp()
        return ["List of type '%s'" % self.item_type.__name__]
#//===========================================================================//
class DictOptionType (OptionType):
    """Option type holding a key -> value mapping, optionally parsed from a
    separator-delimited string."""

    # //=======================================================//

    def __init__(self, key_type=str, value_type=None, separators=', ',
                 description=None, group=None, range_help=None,
                 is_tool_key=False):
        if isinstance(value_type, OptionType):
            # Derive presentation attributes from the wrapped option type.
            if description is None:
                description = value_type.description
                if description:
                    # BUGFIX: previously said "List of: " — a copy-paste
                    # from ListOptionType; this option is a dictionary.
                    description = "Dictionary of: " + description
            if group is None:
                group = value_type.group
            if range_help is None:
                range_help = value_type.range_help

        dict_type = ValueDictType(Dict, key_type, value_type)
        if separators:
            dict_type = SplitDictType(dict_type, separators)

        super(DictOptionType, self).__init__(dict_type, description, group,
                                             range_help,
                                             is_tool_key=is_tool_key)

    # //-------------------------------------------------------//

    def setValueType(self, key, value_type):
        """Constrain the value type of a specific key; an OptionType is
        unwrapped to its underlying converter."""
        if isinstance(value_type, OptionType):
            value_type = value_type.value_type
        self.value_type.setValueType(key, value_type)

    # //-------------------------------------------------------//

    def __call__(self, values=None):
        """Convert *values* into the option's dict type."""
        try:
            if values is NotImplemented:
                values = None
            return self.value_type(values)
        except (TypeError, ValueError):
            raise ErrorOptionTypeUnableConvertValue(self, values)

    # //-------------------------------------------------------//

    def rangeHelp(self):
        """Describe the acceptable values of the dictionary."""
        if self.range_help:
            return list(toSequence(self.range_help))
        return ["Dictionary of values"]
| [
"menify@a28edc5c-ec3e-0410-a3da-1b30b3a8704b"
] | menify@a28edc5c-ec3e-0410-a3da-1b30b3a8704b |
f85d432e037030d3e230472ed90ab71633bfd965 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/benchmarks/sieve-6.py | 50bed63741a7b659fb9658ec148349d295ad58a5 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,280 | py | # A resizable list of integers
class Vector(object):
$ClassBody
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
    # Capacity threshold beyond which growth reverts to +1 increments.
    doubling_limit:int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector") -> int:
        # Double the backing list while capacity is at or below half the
        # limit; appending the list to itself doubles its length.
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
    # Build a DoublingVector and append i, i+1, ..., j-1 in order.
    # Returns an empty vector when i >= j.
    v:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
    # Mutates v in place: for each surviving element k (left to right),
    # removes every later element divisible by k. Because remove_at shifts
    # elements left, j only advances when the current element is kept.
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Input parameter
n:int = 50

# Data
v:Vector = None
i:int = 0

# Crunch: build the candidates [2, n) and strip out multiples.
v = vrange(2, n)
sieve(v)

# Print the surviving elements (the primes below n), one per line.
while i < v.length():
    print(v.get(i))
    i = i + 1
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
963def63ffb064f3a9cabda28fe6da460f3f0ce1 | 4384fea5021c660abd188f0d7d5072a42b0b37a3 | /analysis/irb_13384/scripts/coh_04_analyze_segmentations.py | 3135adde1749d7792bb7af0868383fd7f86ec669 | [] | no_license | cohmathonc/car-t-image-analysis | ae39b6c8cb22d55f606fd478f10f56fbd6b65181 | 3719ddbd3c0da74f26da04aebdb42cad3956a184 | refs/heads/master | 2023-07-14T22:27:15.013239 | 2019-06-05T18:31:08 | 2020-06-10T08:14:39 | 190,288,277 | 0 | 1 | null | 2023-06-29T20:33:00 | 2019-06-04T22:31:16 | Python | UTF-8 | Python | false | false | 1,646 | py | import analysis.irb_13384.coh_config as config
from tools import data_io as dio
import analysis.irb_13384.coh_helpers as ch
import tools.general_tools as gt
import os
gt.ensure_dir_exists(config.coh_dir_analysis_segmentation)
data_io= dio.DataIO(config.coh_dir_bids, config.path_to_coh_bids_config)
# This function looks for existing segmentation files and analyzes them
# It gives preferences to files ending in '_p.mha'
df = ch.analyze_segmentations(data_io, subjects=None)
#-- compute total volume
all_segmentation_labels = [col for col in df.columns if col.startswith('bratumia')]
df["bratumia_total_segmented_volume"] = df[all_segmentation_labels].sum(axis=1)
all_tumor_labels = ['bratumia_EnhancingTumor', 'bratumia_Necrosis', 'bratumia_NonEnhancingTumor']
df["bratumia_TotalTumor"] = df[all_tumor_labels].sum(axis=1)
other_tumor_labels = ['bratumia_Necrosis', 'bratumia_NonEnhancingTumor']
df["bratumia_OtherTumor"] = df[other_tumor_labels].sum(axis=1)
#-- save
df.to_excel(os.path.join(config.coh_dir_analysis_segmentation, 'segmentation_stats_single_index.xls'))
df = df.set_index(['subject_id', 'session']).sort_index()
df.to_excel(config.coh_dir_output_labelstats_xls)
df.to_excel(os.path.join(config.coh_dir_analysis_segmentation, 'segmentation_stats.xls'))
# plot segmentation volumes
plot_selection = ['Edema', 'EnhancingTumor', 'NonEnhancingTumor', "Necrosis"]
ch.plot_segmentation_volumes(df, subject_ids=None, # plots all
plot_selection = plot_selection,
out_dir=os.path.join(config.coh_dir_analysis_segmentation, 'PLOTS'),
show=False) | [
"djs.abler@gmail.com"
] | djs.abler@gmail.com |
3a16a90b838831a9cd17e75fbc0cf1128433630b | 509b42db1734d1fd0577ac9d471a5dc597f81a3b | /Web/polls/migrations/0001_initial.py | 2534117e0a2c66cc416eafe0ff03efba89f6917e | [] | no_license | kote2ster/ChaosStack2019 | 19063c0fbcaf47fe6732dc78a5535890f42904a1 | 61dbb37a67c9ab15fc44f10bd98a3b955624b4b6 | refs/heads/master | 2022-12-10T09:18:15.104635 | 2019-09-28T17:00:01 | 2019-09-28T17:01:24 | 202,116,975 | 0 | 0 | null | 2022-12-08T06:17:12 | 2019-08-13T10:05:32 | Python | UTF-8 | Python | false | false | 1,075 | py | # Generated by Django 2.2.5 on 2019-09-27 14:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the polls Question and
    # Choice tables. NOTE: applied migrations should not be edited by hand.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('votes', models.IntegerField(default=0)),
                # Deleting a Question cascades to its Choices.
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question')),
            ],
        ),
    ]
| [
"kote2ster@gmail.com"
] | kote2ster@gmail.com |
4af4f611f29d8399e7635e13af155fc04e99e0b9 | 9e1dcb4f71b7eda84bbf0855d574eb38719d21a9 | /nested_loops_prime_number.py | 09ead76ff7a45ba184bcf3f6b8ff47bf66b017c6 | [] | no_license | ayoubabounakif/edX-Python | 689c2730458513151fc3b7a69f6a3e8b25462028 | 2449616fd6d9d8d8d74819cff24f3a54bff9dd4b | refs/heads/master | 2020-12-30T03:46:10.271688 | 2020-02-07T05:28:09 | 2020-02-07T05:28:09 | 238,849,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,763 | py | #ALGORITHM
'''
1. Select a number
2. Select a divisor and set it equal to 2.
3. Assume number is prime
4. If divisor is less then the number go to step 5 else go to step 8
5. If remainder of (number/divisor) is 0 then number is not prime(exit/stop)
6. Add one to the divisor
7. Go to step 4
8. Number is prime
'''
# A program that prints the prime numbers
#between x (start_number) and y (end_number)
#CODE (using while loop)
# Read the inclusive range [x, y] to scan for primes.
ask_user = int(input("Enter a value for x: "))
ask_user_2 = int(input("Enter a value for y: "))
x = ask_user
y = ask_user_2

current_number = x
while current_number <= y:
    current_divisor = 2
    # BUGFIX: 0, 1 and negative numbers are not prime; the original
    # assumed every number prime, so it printed them whenever x <= 1.
    current_number_prime = current_number >= 2
    # Trial division: any divisor in [2, current_number) disproves primality.
    while current_divisor < current_number:
        if current_number % current_divisor == 0:
            current_number_prime = False
            break
        current_divisor = current_divisor + 1
    if current_number_prime:
        print(current_number, "is prime")
    current_number = current_number + 1
print("DONE! These are all the prime numbers between your values!")
#CODE (using for loop)
# Same algorithm with for-loops. Reads the inclusive range [x, y].
ask_user = int(input("Enter a value for x: "))
ask_user_2 = int(input("Enter a value for y: "))
x = ask_user
y = ask_user_2

for current_number in range(x, y + 1):
    # BUGFIX: 0, 1 and negative numbers must not be reported as prime.
    current_number_prime = current_number >= 2
    # Trial division over all candidate divisors in [2, current_number).
    for current_divisor in range(2, current_number):
        if current_number % current_divisor == 0:
            current_number_prime = False
            break
    if current_number_prime:
        print(current_number, "is prime")
print("DONE! These are all the prime numbers between your values!")
| [
"noreply@github.com"
] | noreply@github.com |
d6db180179ec7d18235d889f23c35b8b394da45a | 154620312f4e4b53382a2637e9a130e458a0d4cd | /alcoholExample.py | 4897a663f0158fe1464a8843754cb4b2e2badc1a | [] | no_license | heltongo/VAA-Tool | 0cebae267f536a449eccf6f64f0559093d17b7cb | ebe93ed0d80d4729d71bf353ae11ed3172de5988 | refs/heads/main | 2023-08-02T10:59:24.269362 | 2021-10-10T10:41:59 | 2021-10-10T10:41:59 | 406,747,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,025 | py | import wx
from numpy import arange, sin, pi
import matplotlib
import os.path
#matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
# from data.db_utils import *
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
db_path = os.path.join(BASE_DIR, "data\crashdb.db")
#with sqlite3.connect(db_path) as db:
"""
class MyApp(wx.App):
def OnInit(self):
self.frame = MyFrame()
self.SetTopWindow(self.frame)
return True
"""
class Panel_root(wx.Panel):
    """Home panel: application title, logo and navigation button.

    Cleanup: removed two large blocks of dead commented-out layout code
    (sizer experiments) that were kept as discarded string literals.
    """

    def __init__(self, parent):
        wx.Panel.__init__(self, parent)

        # Application title in a larger bold font.
        vaaText = wx.StaticText(self, pos=(150, 10), label="Victoria Accident Analysis Tool")
        font = vaaText.GetFont()
        font.PointSize += 16
        font = font.Bold()
        vaaText.SetFont(font)

        # Section heading.
        joinText = wx.StaticText(self, pos=(115, 70), label="Home")
        font = joinText.GetFont()
        font.PointSize += 8
        joinText.SetFont(font)

        # Logo bitmap, loaded from the working directory.
        png = wx.Image('logo.png', wx.BITMAP_TYPE_ANY).ConvertToBitmap()
        wx.StaticBitmap(self, -1, png, (10, 5), (png.GetWidth(), png.GetHeight()))

        # Navigation button: opens the period-analysis panel.
        period_Button = wx.Button(self, pos=(10, 110), label="Period Analysis")
        period_Button.Bind(wx.EVT_BUTTON, self.onclic)

    def onclic(self, e):
        """Button handler: create the analysis panel as a child of this one."""
        self.panel_3 = Panel_3(self)
class Panel_2(wx.Panel):
    # Yellow placeholder panel; only referenced from commented-out code in
    # this file, so it is presumably a work-in-progress — TODO confirm.
    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        self.SetSize((400, 400))
        self.Centre()
        # self.SetTitle('wx.Button')
        self.SetBackgroundColour(wx.YELLOW)
        # self.text = wx.TextCtrl(self, pos=(10,10))
class Panel_3(wx.Panel):
    """Results panel: draws a pie chart of accident severity counts.

    Currently uses hard-coded sample data; the database query is still
    commented out.
    """

    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        self.SetBackgroundColour(wx.GREEN)
        self.SetSize((400, 400))
        self.Centre()
        self.figure = Figure()  # NOTE(review): created but never used — confirm intent.

        # BUGFIX: `plt` was referenced below but matplotlib.pyplot was never
        # imported (only the wxagg backend classes are imported at the top
        # of the file), so constructing this panel raised NameError.
        import matplotlib.pyplot as plt

        #main2 = alcoholAnalysis()
        #new_list = [i["SEVERITY"] for i in main2]

        # Tally occurrences of each severity label (sample data for now).
        counts = {}
        new_list = ['Other injury accident', 'Serious injury accident', 'Fatal accident', 'Other injury accident',
                    'Serious injury accident',
                    'Fatal accident', 'Other injury accident', 'Serious injury accident', 'Fatal accident']
        for i in new_list:
            counts[i] = (counts[i] + 1) if (i in counts) else 1

        # Data to plot
        labels = []
        sizes = []
        for x, y in counts.items():
            labels.append(x)
            sizes.append(y)

        # Plot: pie chart with the second slice pulled out for emphasis.
        fig, ax1 = plt.subplots()
        explode = (0, 0.1, 0)  # only "explode" the 2nd slice
        ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
                shadow=True, startangle=90)
        ax1.axis('equal')
        plt.show()
class MainFrame(wx.Frame):
    """Top-level application frame hosting the home panel and menus.

    Cleanup: removed a large block of dead commented-out code (an earlier
    `initialise` draft kept as a discarded string literal).
    """

    def __init__(self, *a, **k):
        # ensure the parent's __init__ is called
        wx.Frame.__init__(self, *a, **k)
        Panel_root(self)
        self.Centre()
        self.SetSize((800, 600))
        # NOTE(review): makeMenuBar() is never invoked, so the menus below
        # are never shown; call it here if menus are wanted — TODO confirm.

    def makeMenuBar(self):
        """
        A menu bar is composed of menus, which are composed of menu items.
        This method builds a set of menus and binds handlers to be called
        when the menu item is selected.
        """
        # Make a file menu with Hello and Exit items
        fileMenu = wx.Menu()
        # The "\t..." syntax defines an accelerator key that also triggers
        # the same event
        helloItem = fileMenu.Append(-1, "&Hello...\tCtrl-H",
                "Help string shown in status bar for this menu item")
        fileMenu.AppendSeparator()
        # When using a stock ID we don't need to specify the menu item's
        # label
        exitItem = fileMenu.Append(wx.ID_EXIT)

        # Now a help menu for the about item
        helpMenu = wx.Menu()
        aboutItem = helpMenu.Append(wx.ID_ABOUT)

        # Make the menu bar and add the two menus to it. The '&' defines
        # that the next letter is the "mnemonic" for the menu item. On the
        # platforms that support it those letters are underlined and can be
        # triggered from the keyboard.
        menuBar = wx.MenuBar()
        menuBar.Append(fileMenu, "&File")
        menuBar.Append(helpMenu, "&Help")

        # Give the menu bar to the frame
        self.SetMenuBar(menuBar)

        # Finally, associate a handler function with the EVT_MENU event for
        # each of the menu items. That means that when that menu item is
        # activated then the associated handler function will be called.
        self.Bind(wx.EVT_MENU, self.OnHome, helloItem)
        self.Bind(wx.EVT_MENU, self.OnExit, exitItem)
        self.Bind(wx.EVT_MENU, self.OnAbout, aboutItem)

    def OnExit(self, event):
        """Close the frame, terminating the application."""
        self.Close(True)

    def OnHome(self, event):
        """Say hello to the user."""
        wx.MessageBox("Hello again from wxPython")

    def OnAbout(self, event):
        """Display an About Dialog"""
        wx.MessageBox("This is a wxPython Hello World sample",
                      "About Hello World 2",
                      wx.OK | wx.ICON_INFORMATION)
if __name__ == '__main__':
    # When this module is run (not imported) then create the app, the
    # frame, show it, and start the event loop.
    # MainLoop() blocks until the last top-level window is closed.
    app = wx.App()
    frm = MainFrame(None, title='Hello World 2')
    frm.Show()
    app.MainLoop()
| [
"80892899+debbiemaborekhe@users.noreply.github.com"
] | 80892899+debbiemaborekhe@users.noreply.github.com |
556a064c6aaa406e6208c1055530438590c6f151 | 9b2f4810b093639209b65bbcb5fa07125e17266f | /src/radical/pilot/umgr/staging_input/__init__.py | 66be18b437eb382c61a394d1bf9f1abbbf8f82d4 | [
"MIT"
] | permissive | karahbit/radical.pilot | 887d25d370d08e3455f19cd240677b62278ef67f | c611e1df781749deef899dcf5815728e1d8a962e | refs/heads/devel | 2020-12-21T09:54:10.622036 | 2020-08-20T18:18:12 | 2020-08-20T18:18:12 | 254,967,331 | 0 | 0 | NOASSERTION | 2020-05-01T00:47:51 | 2020-04-11T22:37:20 | null | UTF-8 | Python | false | false | 141 | py |
__copyright__ = "Copyright 2016, http://radical.rutgers.edu"
__license__ = "MIT"
from .base import UMGRStagingInputComponent as Input
| [
"andre@merzky.net"
] | andre@merzky.net |
92467aabe2d3e0851ea17a982715577fa57c6fde | 4aa6b7c3a5ae3817007e09ad1289c1e9f7a355c0 | /dynamic_programming/best-time-to-buy-and-sell-stock-iv.py | 57dc30b64f80d1c90423152d8d4b8f9a47789989 | [] | no_license | liuhuipy/Algorithm-python | 8f5143e06cf5fa2de2c178e3ba9e5fd12b9bcdf7 | 4e92a0b874f956d1df84d1493f870a5d1f06cde2 | refs/heads/master | 2021-06-03T04:19:01.946149 | 2021-01-08T07:44:40 | 2021-01-08T07:44:40 | 99,838,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,937 | py | """
买卖股票的最佳时机IV:
给定一个数组,它的第 i 个元素是一支给定的股票在第 i 天的价格。
设计一个算法来计算你所能获取的最大利润。你最多可以完成 k 笔交易。
注意: 你不能同时参与多笔交易(你必须在再次购买前出售掉之前的股票)。
示例 1:
输入: [2,4,1], k = 2
输出: 2
解释: 在第 1 天 (股票价格 = 2) 的时候买入,在第 2 天 (股票价格 = 4) 的时候卖出,这笔交易所能获得利润 = 4-2 = 2 。
示例 2:
输入: [3,2,6,5,0,3], k = 2
输出: 7
解释: 在第 2 天 (股票价格 = 2) 的时候买入,在第 3 天 (股票价格 = 6) 的时候卖出, 这笔交易所能获得利润 = 6-2 = 4 。
随后,在第 5 天 (股票价格 = 0) 的时候买入,在第 6 天 (股票价格 = 3) 的时候卖出, 这笔交易所能获得利润 = 3-0 = 3 。
"""
from typing import List
class Solution:
    def maxProfit(self, k: int, prices: List[int]) -> int:
        """Maximum profit from at most k buy/sell transactions.

        State definitions (balance = accumulated profit):
          dp[i][0][j] — best balance on day i while HOLDING a share,
                        having completed j sells before the current buy;
          dp[i][1][j] — best balance on day i with NO share and j sells done.

        BUGFIX: removed a leftover debug ``print(dp)`` that dumped the whole
        DP table on every call.
        """
        if not prices:
            return 0
        len_prices = len(prices)
        # With k >= n/2 the transaction cap never binds: greedily take
        # every price rise (classic unlimited-transactions shortcut).
        if k >= len_prices / 2:
            res = 0
            for i in range(1, len_prices):
                if prices[i] > prices[i - 1]:
                    res += prices[i] - prices[i - 1]
            return res

        dp = [[[0 for _ in range(k + 1)], [0 for _ in range(k + 1)]] for _ in range(len_prices)]
        # Day 0: the only "holding" state is having just bought at prices[0].
        for i in range(k + 1):
            dp[0][0][i] = -prices[0]
        for i in range(1, len_prices):
            dp[i][0][0] = max(-prices[i], dp[i - 1][0][0])
            for j in range(1, k + 1):
                # Buy today (after j completed sells) or keep holding.
                dp[i][0][j] = max(dp[i - 1][1][j] - prices[i], dp[i - 1][0][j])
                # Sell today (completing the j-th sale) or stay out.
                dp[i][1][j] = max(dp[i - 1][0][j - 1] + prices[i], dp[i - 1][1][j])
        return max(dp[len_prices - 1][1])
if __name__ == '__main__':
    # Smoke-run with a sample price series and k=2.
    print(Solution().maxProfit(2, [2,1,4,5,2,9,7]))
| [
"liuhui_py@163.com"
] | liuhui_py@163.com |
4117bf0fede268a09fcfef5704f7c884814e3c6f | d2e822f47779dff3cec81e1c9e71f381a6ceb0f0 | /COURS/python/TP7/TP7 EX1.py | ff3531bce28bd7438b72b0db37e93aad0b6b48e6 | [] | no_license | BrandidiCH4O4/Code-des-cours | fe019ba211785ade42d54fc1238d4944bb178373 | ac9eb6bf9042f2ceaca5e8e5a6de49848395bffb | refs/heads/main | 2023-01-21T02:03:51.412532 | 2020-12-02T13:41:19 | 2020-12-02T13:41:19 | 309,106,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,185 | py | """Ex 1"""
#1:
print("1:")
def saisieON():
    """Prompt repeatedly until the user types exactly OUI or NON; return it."""
    rep = input("Entrer OUI ou NON ")
    while rep not in ("OUI", "NON"):
        print(rep, "n'est pas une réponse acceptable.")
        print("")
        rep = input("Entrer OUI ou NON ")
    return rep
print("")
print(saisieON(), "est une réponse accépté.")
############################################################
#2:
print("")
print("2:")
reponse=("oui","ui","o","y","Oui","OUI","Ui","UI","O","Y","yes","Yes","YES","nan","non","no","n","Nan","Non","No","NAN","NON","NO","N","nn","Nn","NN")
def begin():
    # NOTE(review): `is` tests object IDENTITY, not equality. Whether two
    # equal string literals are the same object is a CPython interning
    # detail, so these comparisons are unreliable — they should use `==`
    # (or membership tests). Also, `lettre` iterates over whole words from
    # `reponse`, so comparing it to single characters looks unintended.
    for lettre in reponse:
        if (lettre is "n") or (lettre is "N"):
            return 0
        elif (lettre is "y") or (lettre is "Y") or (lettre is "u") or (lettre is "U") or (lettre is "o") or (lettre is "O"):
            return 1
    # NOTE(review): reached only if no element matched; returns the last
    # word examined (raises NameError if `reponse` is empty) — confirm intent.
    return lettre
print(begin)
def saisieON():
    # Redefines the saisieON above: accepts any spelling listed in the
    # module-level `reponse` tuple.
    rep = input("Entrer OUI ou NON ")
    while rep not in reponse:
        # NOTE(review): begin() inspects the `reponse` tuple, not the user's
        # input, so comparing `rep` to its result is almost certainly not
        # what was intended — confirm the expected logic.
        if rep != begin():
            print(rep, "n'est pas une réponse acceptable.")
            print("")
            rep = input("Entrer OUI ou NON ")
    return rep
print("")
print(saisieON(), "est une réponse accépté.")
| [
"noreply@github.com"
] | noreply@github.com |
120fb4ec281dcd2b86e5467ee1a6d5de6064f9a9 | 08405d519907288da5e780c4c7edd86c5e898f67 | /sql_and_python_asignment_14.py | d191384a0354d544f26177e48a1941db3affeee0 | [] | no_license | brambabu/Python_oops | a2a8aeb38fb44084367e21d7119ab17edc6dc7d5 | fca97ba7be04902dc88b24a67a2d36a782b16ffb | refs/heads/master | 2022-08-26T10:22:31.024124 | 2020-05-27T04:59:35 | 2020-05-27T04:59:35 | 257,840,293 | 0 | 5 | null | 2020-04-22T08:32:12 | 2020-04-22T08:32:12 | null | UTF-8 | Python | false | false | 2,923 | py | def write_data(sql_query):
import sqlite3
connection = sqlite3.connect("students.sqlite3")
c = connection.cursor()
c.execute("PRAGMA foreign_keys=on;")
c.execute(sql_query)
connection.commit()
connection.close()
def read_data(sql_query):
    """Execute a read query against students.sqlite3 and return all rows.

    Returns the list of tuples produced by fetchall().

    BUGFIX: the connection is now closed even when execute() raises,
    instead of being leaked.
    """
    import sqlite3
    connection = sqlite3.connect("students.sqlite3")
    try:
        c = connection.cursor()
        c.execute(sql_query)
        ans = c.fetchall()
    finally:
        connection.close()
    return ans
class DoesNotExist(Exception):
    """Lookup matched no record (Django-style ORM exception)."""
    pass
class MultipleObjectsReturned(Exception):
    """Lookup expecting one record matched several (Django-style)."""
    pass
class InvalidField(Exception):
    """A filter or aggregation referenced a field not on the Student table."""
    pass
class Student:
    # Minimal ActiveRecord-style wrapper over the `Student` table in
    # students.sqlite3, exposing SQL aggregations with Django-ORM-like
    # keyword filters.

    def __init__(self, student_id = None ,name = None, age = None , score = None):
        self.student_id = student_id
        self.name = name
        self.age = age
        self.score = score

    @staticmethod
    def aggregations(agg = None,field = "", **kwargs):
        # Build "SELECT agg(field) FROM Student [WHERE ...]" from keyword
        # filters of the form field, field__gt/lt/lte/gte/neq/eq,
        # field__in (iterable) and field__contains (substring LIKE).
        # NOTE(review): filter VALUES are interpolated straight into the SQL
        # string — this is vulnerable to SQL injection; parameterized
        # queries (? placeholders) should be used instead.
        list = ['student_id','name','age','score','']  # NOTE(review): shadows builtin `list`
        multiple_values = []
        if field not in list:
            raise InvalidField
        for i,j in kwargs.items():
            a = i.split('__')
            if a[0] not in list:
                raise InvalidField
            oper = {'gt':'>', 'lt':'<', 'lte':'<=', 'gte':'>=', 'neq':'<>', 'eq' : '='}
            if len(a) == 1:
                # Bare field name means equality.
                val = "{} {} '{}' ".format(a[0],oper['eq'],j)
            elif a[1] == 'in':
                # NOTE(review): str(tuple) renders a single element as
                # "(x,)", which is not valid SQL — TODO confirm/fix.
                j = tuple(j)
                val = "{} {} {}".format(a[0],'IN',j)
            elif a[1] == 'contains':
                val = "{} {} '%{}%' ".format(a[0],'LIKE',j)
            else:
                val = "{} {} '{}' ".format(a[0],oper[a[1]],j)
            multiple_values.append(val)
        x = ' AND '.join(multiple_values)
        if x == "":
            data = read_data("SELECT {}({}) FROM Student".format(agg,field))
        else:
            data = read_data("SELECT {}({}) FROM Student where {}".format(agg,field,x))
        # read_data returns a list of rows; the aggregate is the single cell.
        return data[0][0]

    @classmethod
    def avg(cls, field, **kwargs):
        # AVG(field) over rows matching the filters.
        ans = cls.aggregations('AVG', field, **kwargs)
        return ans

    @classmethod
    def min(cls, field, **kwargs):
        # MIN(field) over rows matching the filters.
        ans = cls.aggregations('MIN', field, **kwargs)
        return ans

    @classmethod
    def max(cls, field, **kwargs):
        # MAX(field) over rows matching the filters.
        ans = cls.aggregations('MAX', field, **kwargs)
        return ans

    @classmethod
    def sum(cls, field, **kwargs):
        # SUM(field) over rows matching the filters.
        ans = cls.aggregations('SUM', field, **kwargs)
        return ans

    @classmethod
    def count(cls, field = "",**kwargs):
        # COUNT(field) — with the default empty field this emits COUNT().
        ans = cls.aggregations('COUNT', field, **kwargs)
        return ans
| [
"noreply@github.com"
] | noreply@github.com |
75ef228fd5619a6cc49f30d58c3284d5f4dbb728 | 584dda592f5869a5ba2c0bbaee12401bcfe5454a | /telloner/apps/tellonym_api/tellonym/User.py | a40dd16a4d884433f06b7898a58c445447b60ab8 | [] | no_license | Rei-x/django-tellonym-api | d769d79549443f1729f79bbf4a498ba014198311 | 04b43f7c4f71188351ffe482b23b58c6aaa98128 | refs/heads/master | 2023-02-22T14:20:55.987434 | 2021-01-27T12:45:30 | 2021-01-27T12:45:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py |
class User:
    """Lightweight wrapper around a Tellonym user JSON payload.

    BUGFIX: removed a leftover debug ``print(input)`` that dumped the raw
    payload to stdout on every construction.
    """

    def __init__(self, input):
        # NOTE: the parameter name shadows the builtin `input`; kept for
        # backward compatibility with existing keyword callers.
        self.id = input['id']
        self.display_name = input['displayName']
        self.username = input['username']
        self.about_me = input['aboutMe']
        self.avatar_file_name = input['avatarFileName']
        self.is_verified = input['isVerified']
        self.is_active = input['isActive']

    def get_profile_picture(self):
        """Return the host/path of the small avatar image (no URL scheme)."""
        return 'userimg.tellonym.me/xs/' + self.avatar_file_name

    def get_profile_thumbnail(self):
        """Return the host/path of the avatar thumbnail (no URL scheme)."""
        return 'userimg.tellonym.me/thumb/' + self.avatar_file_name
| [
"wszyzdgo@gmail.com"
] | wszyzdgo@gmail.com |
8481ed2d0f149e2feca988ff41ea6b4cf7e1eef4 | ceb726ec9aaee5b6b914ca850aa4147d3ce0aedf | /addons/odoo/addons/sale/tests/test_access_rights.py | 492ff640ce31d43341918b07152f51c87de97ddc | [
"Apache-2.0"
] | permissive | BabakMahmoudi/parOdoo | 969ab3f2946243c71a54fd12a2d80667b2671927 | abef1218992751cd87019689963e62aa3c614f31 | refs/heads/master | 2022-12-10T13:28:57.753225 | 2020-08-31T11:20:49 | 2020-08-31T11:20:49 | 291,694,670 | 0 | 0 | Apache-2.0 | 2020-08-31T11:20:50 | 2020-08-31T11:18:48 | null | UTF-8 | Python | false | false | 7,269 | py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.exceptions import AccessError, UserError, ValidationError
from odoo.tests import tagged
from .test_sale_common import TestCommonSaleNoChart
@tagged('post_install', '-at_install')
class TestAccessRights(TestCommonSaleNoChart):
    """Check sale-order access rules for each user group: sales manager,
    salesperson, portal user and plain internal employee."""

    def setUp(self):
        super(TestAccessRights, self).setUp()

        Users = self.env['res.users'].with_context(no_reset_password=True)
        group_user = self.env.ref('sales_team.group_sale_salesman')

        # Create a users
        self.user_manager = Users.create({
            'name': 'Andrew Manager',
            'login': 'manager',
            'email': 'a.m@example.com',
            'groups_id': [(6, 0, [self.env.ref('sales_team.group_sale_manager').id])]
        })
        self.user_salesperson = Users.create({
            'name': 'Mark User',
            'login': 'user',
            'email': 'm.u@example.com',
            'groups_id': [(6, 0, [group_user.id])]
        })
        self.user_salesperson_1 = Users.create({
            'name': 'Noemie User',
            'login': 'noemie',
            'email': 'n.n@example.com',
            'groups_id': [(6, 0, [group_user.id])]
        })
        self.user_portal = Users.create({
            'name': 'Chell Gladys',
            'login': 'chell',
            'email': 'chell@gladys.portal',
            'groups_id': [(6, 0, [self.env.ref('base.group_portal').id])]
        })
        self.user_employee = Users.create({
            'name': 'Bert Tartignole',
            'login': 'bert',
            'email': 'b.t@example.com',
            'groups_id': [(6, 0, [self.env.ref('base.group_user').id])]
        })

        # Create a Sales Team
        self.sales_channel = self.env['crm.team'].with_context(tracking_disable=True).create({
            'name': 'Test Channel',
        })

        # Create the SO with a specific salesperson
        self.order = self.env['sale.order'].with_context(tracking_disable=True).create({
            'partner_id': self.partner_customer_usd.id,
            'user_id': self.user_salesperson.id
        })

    def test_access_sales_manager(self):
        """ Test sales manager's access rights """
        SaleOrder = self.env['sale.order'].with_context(tracking_disable=True)
        # Manager can see the SO which is assigned to another salesperson
        self.order.with_user(self.user_manager).read()
        # Manager can change a salesperson of the SO
        self.order.with_user(self.user_manager).write({'user_id': self.user_salesperson_1.id})
        # Manager can create the SO for other salesperson
        sale_order = SaleOrder.with_user(self.user_manager).create({
            'partner_id': self.partner_customer_usd.id,
            'user_id': self.user_salesperson_1.id
        })
        self.assertIn(sale_order.id, SaleOrder.search([]).ids, 'Sales manager should be able to create the SO of other salesperson')
        # Manager can confirm the SO
        sale_order.with_user(self.user_manager).action_confirm()
        # Manager can not delete confirmed SO
        with self.assertRaises(UserError):
            sale_order.with_user(self.user_manager).unlink()
        # Manager can delete the SO of other salesperson if SO is in 'draft' or 'cancel' state
        self.order.with_user(self.user_manager).unlink()
        self.assertNotIn(self.order.id, SaleOrder.search([]).ids, 'Sales manager should be able to delete the SO')

        # Manager can create a Sales Team
        india_channel = self.env['crm.team'].with_context(tracking_disable=True).with_user(self.user_manager).create({
            'name': 'India',
        })
        self.assertIn(india_channel.id, self.env['crm.team'].search([]).ids, 'Sales manager should be able to create a Sales Team')
        # Manager can edit a Sales Team
        india_channel.with_user(self.user_manager).write({'name': 'new_india'})
        # BUGFIX: assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual(india_channel.name, 'new_india', 'Sales manager should be able to edit a Sales Team')
        # Manager can delete a Sales Team
        india_channel.with_user(self.user_manager).unlink()
        self.assertNotIn(india_channel.id, self.env['crm.team'].search([]).ids, 'Sales manager should be able to delete a Sales Team')

    def test_access_sales_person(self):
        """ Test Salesperson's access rights """
        # Salesperson can see only their own sales order
        with self.assertRaises(AccessError):
            self.order.with_user(self.user_salesperson_1).read()
        # Now assign the SO to themselves
        self.order.write({'user_id': self.user_salesperson_1.id})
        self.order.with_user(self.user_salesperson_1).read()
        # Salesperson can change a Sales Team of SO
        self.order.with_user(self.user_salesperson_1).write({'team_id': self.sales_channel.id})
        # Salesperson can't create the SO of other salesperson
        with self.assertRaises(AccessError):
            self.env['sale.order'].with_user(self.user_salesperson_1).create({
                'partner_id': self.partner_customer_usd.id,
                'user_id': self.user_salesperson.id
            })
        # Salesperson can't delete the SO
        with self.assertRaises(AccessError):
            self.order.with_user(self.user_salesperson_1).unlink()
        # Salesperson can confirm the SO
        self.order.with_user(self.user_salesperson_1).action_confirm()

    def test_access_portal_user(self):
        """ Test portal user's access rights """
        # Portal user can see the confirmed SO for which they are assigned as a customer
        with self.assertRaises(AccessError):
            self.order.with_user(self.user_portal).read()
        self.order.write({'partner_id': self.user_portal.partner_id.id})
        self.order.action_confirm()
        # Portal user can't edit the SO
        with self.assertRaises(AccessError):
            self.order.with_user(self.user_portal).write({'team_id': self.sales_channel.id})
        # Portal user can't create the SO
        with self.assertRaises(AccessError):
            self.env['sale.order'].with_user(self.user_portal).create({
                'partner_id': self.partner_customer_usd.id,
            })
        # Portal user can't delete the SO which is in 'draft' or 'cancel' state
        self.order.action_cancel()
        with self.assertRaises(AccessError):
            self.order.with_user(self.user_portal).unlink()

    def test_access_employee(self):
        """ Test classic employee's access rights """
        # Employee can't see any SO
        with self.assertRaises(AccessError):
            self.order.with_user(self.user_employee).read()
        # Employee can't edit the SO
        with self.assertRaises(AccessError):
            self.order.with_user(self.user_employee).write({'team_id': self.sales_channel.id})
        # Employee can't create the SO
        with self.assertRaises(AccessError):
            self.env['sale.order'].with_user(self.user_employee).create({
                'partner_id': self.partner_customer_usd.id,
            })
        # Employee can't delete the SO
        with self.assertRaises(AccessError):
            self.order.with_user(self.user_employee).unlink()
| [
"babak@gnco.ir"
] | babak@gnco.ir |
402bd32071698d56c3451e82af17c292a4406371 | 64dd93c757173c57ffcb0810c66e339650b37a22 | /client/models.py | 7980945194f56eb06a495c1305489b132e69785d | [] | no_license | bannawandoor27/Django-Tenant-Single-URL | 0a09ce6df33c06ec80bc4a4f69ea01e1b93b5e78 | 47ca4c2b7ce1f71050b9ae896df98e5749082bfc | refs/heads/main | 2023-08-11T13:31:31.622577 | 2021-09-15T20:51:07 | 2021-09-15T20:51:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | from django.db import models
from django_tenants.models import TenantMixin, DomainMixin
import uuid
import os
from django_tenants.postgresql_backend.base import _check_schema_name
# Create your models here.
class Client(TenantMixin):
REQUIRED_FIELDS = ("tenant_name", "paid_until", "schema_name", "on_trial")
tenant_name = models.CharField(max_length=100, unique=True, null=False, blank=False)
tenant_uuid = models.UUIDField(default=uuid.uuid4, null=False, blank=False)
paid_until = models.DateField()
on_trial = models.BooleanField()
created_on = models.DateField(auto_now_add=True)
domain_url = models.URLField(blank=True, null=True, default=os.getenv("DOMAIN"))
# default true, schema will be automatically created and synced when it is saved
auto_create_schema = True
class Domain(DomainMixin):
pass
| [
"david@holdcb.com"
] | david@holdcb.com |
62840653bb6acafdea57d4f93202a81cccb65c4f | 488d6a8df1c50f232344308c6377e9bbe58eb012 | /Trabajo Practico 1 - B/errorController.py | 60c54ba0c325b51b043479d4b1fc66218c03c865 | [] | no_license | fjp2k/SistemasDeTransmisionDeDatos2017 | 97e719c759f98652b6796d40375d3fa1a81f8b69 | c301bdcad1d92cc419730dacbb17d21b0abcff9c | refs/heads/master | 2021-01-20T06:08:21.967764 | 2017-11-20T17:42:20 | 2017-11-20T17:42:20 | 101,487,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,453 | py | def controlar_trama(trama):
error_funcion = trama[2:4]
if error_funcion == '83':
return False
elif error_funcion == '86':
return False
elif error_funcion == '90':
return False
return True
def obtener_error(trama):
error_funcion = trama[2:4]
codigo_error = trama[4:6]
if error_funcion == '83':
mensaje_error = "Error en funcion 3: " + match_error_code(codigo_error)
return mensaje_error
elif error_funcion == '86':
mensaje_error = "Error en funcion 6: " + match_error_code(codigo_error)
return mensaje_error
elif error_funcion == '90':
mensaje_error = "Error en funcion 16: " + match_error_code(codigo_error)
return mensaje_error
return ""
def match_error_code(codigo_error):
if codigo_error == "01":
return "ILLEGAL FUNCTION"
if codigo_error == "02":
return "ILLEGAL DATA ADDRESS"
if codigo_error == "03":
return "ILLEGAL DATA VALUE"
if codigo_error == "04":
return "SLAVE DEVICE FAILURE"
if codigo_error == "05":
return "ACKNOWLEDGE"
if codigo_error == "06":
return "SLAVE DEVICE BUSY"
if codigo_error == "08":
return "MEMORY PARITY ERROR"
if codigo_error == "0A":
return "GATEWAY PATH UNAVAILABLE"
if codigo_error == "0B":
return "GATEWAY TARGET DEVICE FAILED TO RESPOND"
return "Unknown Error" | [
"fpalma2k@gmail.com"
] | fpalma2k@gmail.com |
29f193740bef122fbd90749abed73ecb40569459 | b3a2beaab1ac676c96e93a48d4f35ff6ed6799d0 | /anyex/async/btcchina.py | 1adec6030b7a567203d310d05d8ea27f4920560c | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | ttwishing/anyex | 40c06cf34e4d8f96bb968e8b7be3d2da5e6023f8 | cfd1f2f04ab992b790add4843aafff91e5773cbf | refs/heads/master | 2020-05-23T12:07:58.615432 | 2019-05-15T05:09:46 | 2019-05-15T05:09:46 | 186,751,745 | 0 | 0 | MIT | 2019-05-15T04:57:08 | 2019-05-15T04:57:08 | null | UTF-8 | Python | false | false | 11,676 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/anyex/anyex/blob/master/CONTRIBUTING.md#how-to-contribute-code
from anyex.async.base.exchange import Exchange
import base64
import hashlib
class btcchina (Exchange):
def describe(self):
return self.deep_extend(super(btcchina, self).describe(), {
'id': 'btcchina',
'name': 'BTCChina',
'countries': 'CN',
'rateLimit': 1500,
'version': 'v1',
'has': {
'CORS': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766368-465b3286-5ed6-11e7-9a11-0f6467e1d82b.jpg',
'api': {
'plus': 'https://plus-api.btcchina.com/market',
'public': 'https://data.btcchina.com/data',
'private': 'https://api.btcchina.com/api_trade_v1.php',
},
'www': 'https://www.btcchina.com',
'doc': 'https://www.btcchina.com/apidocs',
},
'api': {
'plus': {
'get': [
'orderbook',
'ticker',
'trade',
],
},
'public': {
'get': [
'historydata',
'orderbook',
'ticker',
'trades',
],
},
'private': {
'post': [
'BuyIcebergOrder',
'BuyOrder',
'BuyOrder2',
'BuyStopOrder',
'CancelIcebergOrder',
'CancelOrder',
'CancelStopOrder',
'GetAccountInfo',
'getArchivedOrder',
'getArchivedOrders',
'GetDeposits',
'GetIcebergOrder',
'GetIcebergOrders',
'GetMarketDepth',
'GetMarketDepth2',
'GetOrder',
'GetOrders',
'GetStopOrder',
'GetStopOrders',
'GetTransactions',
'GetWithdrawal',
'GetWithdrawals',
'RequestWithdrawal',
'SellIcebergOrder',
'SellOrder',
'SellOrder2',
'SellStopOrder',
],
},
},
'markets': {
'BTC/CNY': {'id': 'btccny', 'symbol': 'BTC/CNY', 'base': 'BTC', 'quote': 'CNY', 'api': 'public', 'plus': False},
'LTC/CNY': {'id': 'ltccny', 'symbol': 'LTC/CNY', 'base': 'LTC', 'quote': 'CNY', 'api': 'public', 'plus': False},
'LTC/BTC': {'id': 'ltcbtc', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC', 'api': 'public', 'plus': False},
'BCH/CNY': {'id': 'bcccny', 'symbol': 'BCH/CNY', 'base': 'BCH', 'quote': 'CNY', 'api': 'plus', 'plus': True},
'ETH/CNY': {'id': 'ethcny', 'symbol': 'ETH/CNY', 'base': 'ETH', 'quote': 'CNY', 'api': 'plus', 'plus': True},
},
})
async def fetch_markets(self):
markets = await self.publicGetTicker({
'market': 'all',
})
result = []
keys = list(markets.keys())
for p in range(0, len(keys)):
key = keys[p]
market = markets[key]
parts = key.split('_')
id = parts[1]
base = id[0:3]
quote = id[3:6]
base = base.upper()
quote = quote.upper()
symbol = base + '/' + quote
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privatePostGetAccountInfo()
balances = response['result']
result = {'info': balances}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
lowercase = currency.lower()
account = self.account()
if lowercase in balances['balance']:
account['total'] = float(balances['balance'][lowercase]['amount'])
if lowercase in balances['frozen']:
account['used'] = float(balances['frozen'][lowercase]['amount'])
account['free'] = account['total'] - account['used']
result[currency] = account
return self.parse_balance(result)
def create_market_request(self, market):
request = {}
field = 'symbol' if (market['plus']) else 'market'
request[field] = market['id']
return request
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['api'] + 'GetOrderbook'
request = self.create_market_request(market)
orderbook = await getattr(self, method)(self.extend(request, params))
timestamp = orderbook['date'] * 1000
return self.parse_order_book(orderbook, timestamp)
def parse_ticker(self, ticker, market):
timestamp = ticker['date'] * 1000
last = float(ticker['last'])
return {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high']),
'low': float(ticker['low']),
'bid': float(ticker['buy']),
'ask': float(ticker['sell']),
'vwap': float(ticker['vwap']),
'open': float(ticker['open']),
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': float(ticker['vol']),
'quoteVolume': None,
'info': ticker,
}
def parse_ticker_plus(self, ticker, market):
timestamp = ticker['Timestamp']
symbol = None
if market:
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['High']),
'low': float(ticker['Low']),
'bid': float(ticker['BidPrice']),
'ask': float(ticker['AskPrice']),
'vwap': None,
'open': float(ticker['Open']),
'last': float(ticker['Last']),
'change': None,
'percentage': None,
'average': None,
'baseVolume': float(ticker['Volume24H']),
'quoteVolume': None,
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['api'] + 'GetTicker'
request = self.create_market_request(market)
tickers = await getattr(self, method)(self.extend(request, params))
ticker = tickers['ticker']
if market['plus']:
return self.parse_ticker_plus(ticker, market)
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market):
timestamp = int(trade['date']) * 1000
return {
'id': trade['tid'],
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': None,
'price': trade['price'],
'amount': trade['amount'],
}
def parse_trade_plus(self, trade, market):
timestamp = self.parse8601(trade['timestamp'])
return {
'id': None,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': trade['side'].lower(),
'price': trade['price'],
'amount': trade['size'],
}
def parse_trades_plus(self, trades, market=None):
result = []
for i in range(0, len(trades)):
result.append(self.parse_trade_plus(trades[i], market))
return result
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['api'] + 'GetTrade'
request = self.create_market_request(market)
if market['plus']:
now = self.milliseconds()
request['start_time'] = now - 86400 * 1000
request['end_time'] = now
else:
method += 's' # trades vs trade
response = await getattr(self, method)(self.extend(request, params))
if market['plus']:
return self.parse_trades_plus(response['trades'], market)
return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = 'privatePost' + self.capitalize(side) + 'Order2'
order = {}
id = market['id'].upper()
if type == 'market':
order['params'] = [None, amount, id]
else:
order['params'] = [price, amount, id]
response = await getattr(self, method)(self.extend(order, params))
return {
'info': response,
'id': response['id'],
}
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
market = params['market'] # TODO fixme
return await self.privatePostCancelOrder(self.extend({
'params': [id, market],
}, params))
def nonce(self):
return self.microseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/' + path
if api == 'private':
self.check_required_credentials()
p = []
if 'params' in params:
p = params['params']
nonce = self.nonce()
request = {
'method': path,
'id': nonce,
'params': p,
}
p = ','.join(p)
body = self.json(request)
query = (
'tonce=' + nonce +
'&accesskey=' + self.apiKey +
'&requestmethod=' + method.lower() +
'&id=' + nonce +
'&method=' + path +
'¶ms=' + p
)
signature = self.hmac(self.encode(query), self.encode(self.secret), hashlib.sha1)
auth = self.encode(self.apiKey + ':' + signature)
headers = {
'Authorization': 'Basic ' + base64.b64encode(auth),
'Json-Rpc-Tonce': nonce,
}
else:
if params:
url += '?' + self.urlencode(params)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
| [
"yong2452@gmail.com"
] | yong2452@gmail.com |
4285a06223ef406e7b6a8cfcba809f60b3d98731 | 57eb2354f8fba9d46c8edcfac60c13fc0468d950 | /Lekhaka/deformer_noiser.py | af37dc110bc7fa9c610374b8ecf483f63c73effc | [] | no_license | rakeshvar/Lekhaka | 597e91e60c30c566e6f792af2d1378205f698087 | 1d2d31035fe8a29f002adb5a70d762669102a0f3 | refs/heads/main | 2023-06-16T11:18:30.121653 | 2021-07-09T08:35:56 | 2021-07-09T08:35:56 | 370,766,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,731 | py | import numpy as np
from scipy import ndimage as nd
from scipy.special import cosdg, sindg
def _summary(mat, name):
print(f"{name}\tshape:{mat.shape}\tmax:{mat.max():.2f} min:{mat.min():.2f}")
pass
class Deformer:
def __init__(self, translation=0, zoom=0, elastic_magnitude=0, sigma=1, angle=0, nearest=False, debug=False):
self.translation = translation
self.zoom = zoom
self.elastic_magnitude = elastic_magnitude
self.sigma = sigma
self.angle = angle
self.nearest = nearest
# Build a gaussian filter for elastic distortion
if elastic_magnitude:
self.nrounds = 2
nsds = 2
sigma //= self.nrounds
filt = np.exp(-.5 * np.linspace(-nsds, nsds, int(2*nsds*sigma+1)) ** 2)
filt /= filt.sum()
if debug:
print(f"Gaussian Filter Range: {filt.max():.4f}-{filt.min():.4f} "
f"Ratio:{filt.max()/filt.min():.2f} Sum:{filt.sum()}")
self.filt = filt
self.summary = _summary if debug else lambda _, __: None
def __str__(self):
print('Elastic Translation:{:} Zoom:{} Mag:{:d} Sig:{:d} Angle:{} Interpolation:{}'.format(
self.translation, self.zoom, self.elastic_magnitude, self.sigma, self.angle,
'Nearest' if self.nearest else 'Linear'))
def __call__(self, inpt):
# Degenerate Case
if not (self.elastic_magnitude or self.translation or self.angle or self.zoom):
return inpt
b, h, w = inpt.shape
_hwidx = np.indices((h, w)).astype('float')
target = np.stack([_hwidx for _ in range(b)])
self.summary(target, "initial traget")
if self.elastic_magnitude:
# Elastic
elast = self.elastic_magnitude * np.random.normal(size=(b, 2, h, w))
for _ in range(self.nrounds):
for ax in (-1, -2):
nd.correlate1d(elast, self.filt, axis=ax, output=elast)
target += elast
self.summary(elast, "elastic")
# Zoom and Rotate
if self.zoom or self.angle:
# Center at 'about' half way
origin = np.random.uniform(.4, .6, size=(b, 2, 1, 1)) * np.array((h, w)).reshape((1, 2, 1, 1))
target -= origin
self.summary(origin, "origin")
# Zoom
if self.zoom:
zoomer = np.exp(self.zoom * np.random.uniform(-1, size=(b, 2, 1, 1)))
target *= zoomer
self.summary(zoomer, "zoom")
# Rotate
if self.angle:
theta = self.angle * np.random.uniform(-1, size=b)
c, s = cosdg(theta), sindg(theta)
rotate = np.array([[c, -s], [s, c]])
rotate = np.moveaxis(rotate, -1, 0) # b x 2 x 2
for i in range(b):
target[i] = np.tensordot(rotate[i], target[i], axes=(0, 0))
self.summary(rotate, "rotate")
# Uncenter
target += origin
# Make sure you do not go below zero along the width (vertical axis because of Transpose)
least_vert_disp = target[:, 0, 0].min(axis=-1)
self.summary(least_vert_disp[:, None, None], "least_vert_disp")
target[:, 0] -= least_vert_disp[:, None, None]
if self.translation:
transln = self.translation * np.random.uniform(-1, size=(b, 2, 1, 1))
transln[:, 0] = -2 * np.abs(transln[:, 0]) # Along slab width translation is (0, 2translation)
target += transln
self.summary(transln, "translation")
for i in range(b):
self.summary(target[i, 0], f"{i} final traget y")
self.summary(target[i, 1], f"{i} final traget x")
transy = np.clip(target[:, 0], 0, h - 1 - .001)
transx = np.clip(target[:, 1], 0, w - 1 - .001)
output = np.empty_like(inpt)
if self.nearest:
vert = np.rint(transy).astype(int)
horz = np.rint(transx).astype(int)
for i in range(b):
output[i] = inpt[i, vert[i], horz[i]]
else:
topp = np.floor(transy)
left = np.floor(transx)
fraction_y = transy - topp
fraction_x = transx - left
topp = topp.astype('int32')
left = left.astype('int32')
for i in range(b):
output[i] = inpt[i, topp, left] * (1 - fraction_y) * (1 - fraction_x) + \
inpt[i, topp, left + 1] * (1 - fraction_y) * fraction_x + \
inpt[i, topp + 1, left] * fraction_y * (1 - fraction_x) + \
inpt[i, topp + 1, left + 1] * fraction_y * fraction_x
self.summary(inpt, "input")
self.summary(output, "output")
return output
class Noiser:
def __init__(self, num_blots=0, erase_fraction=.5, minsize=0, maxsize=0):
self.num_blots = num_blots
self.erase_fraction = erase_fraction
self.minsize = minsize
self.maxsize = maxsize
def __call__(self, inpt):
batch_sz, h, w = inpt.shape
size = batch_sz, self.num_blots
colors = np.random.binomial(n=1, p=1-self.erase_fraction, size=size)
xs = np.random.randint(h, size=size)
dxs = np.random.randint(self.minsize, self.maxsize, size=size)
ys = np.random.randint(w, size=size)
dys = np.random.randint(self.minsize, self.maxsize, size=size)
for i in range(batch_sz):
for x, dx, y, dy, c in zip(xs[i], dxs[i], ys[i], dys[i], colors[i]):
inpt[i, x:(x+dx), y:(y+dy)] = c
return inpt
| [
"rakeshvar@gmail.com"
] | rakeshvar@gmail.com |
201d456efa359e1836be4a554f33efd81f1843c1 | 552a03030160de2f096489e704f7b8088450f979 | /concesionario/apps/empleado/views.py | f578f11b90b50e634d7022b3f57a4dff3e76097f | [] | no_license | lisafbe/SIGIA | ce718e9ba5b37cabeaaabe879ca99dcbe6e12059 | a883b38e310af646fa8d6023e8f62bdb9e0a21d8 | refs/heads/master | 2021-01-10T08:46:08.055581 | 2016-02-20T18:58:54 | 2016-02-20T18:58:54 | 52,169,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | # -*- encoding: utf-8 -*-
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.views.generic import TemplateView
from .models import Empleado
from apps.sucursal.models import Sucursal
class EmpleadoListView(TemplateView):
def get(self,request,*args,**kwargs):
sucursal_id = kwargs['spk']
empleados = Empleado.objects.filter(sucursal_id=sucursal_id).exclude(user_id=request.user.id)
sucursal = Sucursal.objects.get(id=sucursal_id)
context = {
'sucursal':sucursal,
'empleados':empleados}
return render_to_response(
'empleado/empleado_list.html',
context,
RequestContext(request)) | [
"lisabetanco@gmail.com"
] | lisabetanco@gmail.com |
c6984060bdb66e9297a30262564f0ec5543acd5e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03146/s790644084.py | 7e0cb3ce3d0317c1b444b17f7e0a4ff736bda753 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | s = int(input())
a = s
prev = set()
for i in range(1, 1500000):
if a in prev:
print(i)
exit()
prev.add(a)
if a % 2 == 0:
a //= 2
else:
a = 3 * a + 1 | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
baaf7396d7d64ca02b696064862bf5652b225a14 | 568ed7fdc9ccbd7967dd2950669c68002b454869 | /yotta/test/cli/test.py | ccec43116468a2790ebad484c3f8dcd52ce643de | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | headlessme/yotta | ade06c41108dca045e295bd2e0fdb2b7baef8c89 | 947ab074b629c8f18ca91ab84ebaa29096b011c6 | refs/heads/master | 2021-01-17T11:10:07.569198 | 2015-12-08T11:45:12 | 2015-12-08T11:45:12 | 27,595,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,592 | py | #!/usr/bin/env python
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import unittest
import copy
# internal modules:
from yotta.lib.detect import systemDefaultTarget
from . import cli
from . import util
Test_Tests = {
'module.json':'''{
"name": "test-tests",
"version": "0.0.0",
"description": "Test yotta's compilation of tests.",
"author": "James Crosby <james.crosby@arm.com>",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
]
}''',
'source/foo.c':'''#include "stdio.h"
int foo(){
printf("foo!\\n");
return 7;
}''',
'test-tests/foo.h':'int foo();',
'test/a/bar.c':'#include "test-tests/foo.h"\nint main(){ foo(); return 0; }',
'test/b/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/b/b/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/c/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/c/b/a/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/d/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/d/a/b/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/e/a/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/e/b/a/a/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/f/a/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/f/a/b/a/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/g/a/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/g/a/a/b/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }'
}
Test_Fitler_Pass = copy.copy(Test_Tests)
Test_Fitler_Pass['module.json'] = '''{
"name": "test-tests",
"version": "0.0.0",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"scripts": {
"testReporter": [
"grep",
"!"
]
}
}'''
Test_Fitler_Fail = copy.copy(Test_Tests)
Test_Fitler_Fail['module.json'] = '''{
"name": "test-tests",
"version": "0.0.0",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"scripts": {
"testReporter": [
"grep",
"string that isnt in the output"
]
}
}'''
Test_Fitler_NotFound = copy.copy(Test_Tests)
Test_Fitler_NotFound['module.json'] = '''{
"name": "test-tests",
"version": "0.0.0",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"scripts": {
"testReporter": [
"commandthatshouldntexist"
]
}
}'''
class TestCLITest(unittest.TestCase):
@unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
def test_tests(self):
test_dir = util.writeTestFiles(Test_Tests, True)
output = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
output = self.runCheckCommand(['--target', systemDefaultTarget(), 'test'], test_dir)
self.assertIn('test-a passed', output)
self.assertIn('test-c passed', output)
self.assertIn('test-d passed', output)
self.assertIn('test-e passed', output)
self.assertIn('test-f passed', output)
self.assertIn('test-g passed', output)
util.rmRf(test_dir)
@unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
def test_testOutputFilterPassing(self):
test_dir = util.writeTestFiles(Test_Fitler_Pass, True)
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'test'], test_dir)
util.rmRf(test_dir)
@unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
def test_testOutputFilterFailing(self):
test_dir = util.writeTestFiles(Test_Fitler_Fail, True)
stdout, stderr, statuscode = cli.run(['--target', systemDefaultTarget(), 'test'], cwd=test_dir)
if statuscode == 0:
print(stdout)
print(stderr)
self.assertIn('test-a failed', '%s %s' % (stdout, stderr))
self.assertIn('test-c failed', '%s %s' % (stdout, stderr))
self.assertIn('test-d failed', '%s %s' % (stdout, stderr))
self.assertIn('test-e failed', '%s %s' % (stdout, stderr))
self.assertIn('test-f failed', '%s %s' % (stdout, stderr))
self.assertIn('test-g failed', '%s %s' % (stdout, stderr))
self.assertNotEqual(statuscode, 0)
util.rmRf(test_dir)
@unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
def test_testOutputFilterNotFound(self):
test_dir = util.writeTestFiles(Test_Fitler_NotFound, True)
stdout, stderr, statuscode = cli.run(['--target', systemDefaultTarget(), 'test'], cwd=test_dir)
if statuscode == 0:
print(stdout)
print(stderr)
self.assertNotEqual(statuscode, 0)
util.rmRf(test_dir)
def runCheckCommand(self, args, test_dir):
stdout, stderr, statuscode = cli.run(args, cwd=test_dir)
if statuscode != 0:
print('command failed with status %s' % statuscode)
print(stdout)
print(stderr)
self.assertEqual(statuscode, 0)
return '%s %s' % (stdout, stderr)
| [
"James.Crosby@arm.com"
] | James.Crosby@arm.com |
b71f4f4c0c82b54bf051e4b6b83878612d3b30c1 | dc9f2638209a9be235a1c4acc44fe2a26256c4b4 | /venv/projects/lib/python3.8/site-packages/pip/_vendor/msgpack/_version.py | 7f0f77b35e6f3f520b75e0ff6182498615a30fa0 | [] | no_license | alwinruby/RealWorld | 4f5fcaed68fdd2d9fc37f5973fec365195cb3e9e | ec446f96f3545cb847429b5e33cefdc4f00ce432 | refs/heads/main | 2023-08-13T10:28:40.528047 | 2021-10-10T14:58:23 | 2021-10-10T14:58:23 | 408,079,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20 | py | version = (1, 0, 2)
| [
"alwinsolanky@gmail.com"
] | alwinsolanky@gmail.com |
72c565718ed0cf9f285301d357f8ad4810cba575 | a1588ec1e4480c4ab58dccf49150066ce2ab1ee8 | /exampls/operationLogic.py | de1a5aa399a69ee2262eb544dd54396d4260ec34 | [] | no_license | silvermiguel96/pythonVentas | 9e9a60b9573e39d2113f91362bf82b3a9ab42153 | b785696dea666a49076d01ec9edecf8d054e09d4 | refs/heads/master | 2020-04-22T13:45:48.822291 | 2019-02-14T21:55:39 | 2019-02-14T21:55:39 | 170,420,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | x = 2
y = 3
x == y
y = 2
x == y
x != y
x > y
x < y
x >= y #false
x <= y #True
x = 2
y = 3
a = 5
b = 6
print('De acuerdo a los siguientes valores de variables')
print('x=',x)
print('y=',y)
print('a=',a)
print('b=',b)
print('Sabemos que...')
if x == y:
print ('"x" es igual que "y"')
else:
print ('"x" no es igual que "y"')
if x < y:
print('"x" es menor que "y"')
if x > y:
print('"x" es mayor que "y"')
if y < x:
print('"y" es menor que "x"')
if y > x:
print('"y" es mayor que "x"')
if x < y and a < b:
print('"x" es menor que "y" y "a" es menor que "b"')
if x < y or a > b:
print('"x" es menor que "y" o "a" es mayor que "b"')
if x > y or a < b:
print('"x" es mayor que "y" o "a" es mayor que "b"')
input() | [
"silvermiguel96@gmail.com"
] | silvermiguel96@gmail.com |
fc4c8a68056ed1b5b0241e8b4020194a7895889a | e560cf41cf47debaa297d43522f52961eb660c1c | /Python/3.- Logging.py | 1ed14805b8e569dba36b2fbfe62a59773fea1661 | [] | no_license | ActivandoIdeas/Concurrent-Programming | 6c2eb18981c62f09c1cc6de00392343c01158ac9 | bcd673dcd676b33c9f33c8b28dac3cb968d8d041 | refs/heads/master | 2022-12-16T08:15:01.130024 | 2020-09-16T03:56:26 | 2020-09-16T03:56:26 | 295,833,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | import logging
# Debug (10)
# Info (20)
# Warning (30)
# Error (40)
# Critical (50)
logging.basicConfig(
level=logging.DEBUG, # 10
format='%(filename)s %(asctime)s %(message)s %(funcName)s %(levelname)s %(lineno)s %(module)s %(name)s %(pathname)s %(thread)s %(threadName)s %(process)s %(processName)s',
datefmt='%H:%M:%S',
# filename='logging/messages.txt'
)
def messages():
logging.debug('This is a debug message')
logging.info('This is a info message')
logging.warning('This is a warning message')
logging.error('This is a error message')
logging.critical('This is a critical message')
if __name__ == '__main__':
messages()
| [
"eliasojedamedina@gmail.com"
] | eliasojedamedina@gmail.com |
295338183b59fe88a08317b8e639fd6a5734f638 | 1ee4c8d3208d1b51a72d30e4732a9b2082da605c | /sao_portal/asgi.py | 42ad8861fc2ad5d0afd93f540fdc60c77c34b824 | [] | no_license | abhiram-g/SAO_service_dashboard | 8336f52a9968019102884e24edc735e8e4f38bc6 | 4d2cde4cefe6c10bc644223981b67755cf6c1145 | refs/heads/master | 2022-10-15T10:23:30.537956 | 2020-06-08T12:43:51 | 2020-06-08T12:43:51 | 270,624,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for sao_portal project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sao_portal.settings')
application = get_asgi_application()
| [
"abc@gmail.com"
] | abc@gmail.com |
08ab74257fcfe8e582694e17d8f70578c069d383 | f15449e438b0b799a3866ba21243924ce0e4fa2d | /survey/models.py | e6565f3535ec711e92f3831b062f00dd86ac58f5 | [] | no_license | xmduhan/qisite | 46af79d0e4d1af814298862cfaa18c6f7ddf3a74 | 2c9d7513c3e0cd483341dc457a8d289e5e174f20 | refs/heads/master | 2021-01-17T08:44:29.826082 | 2020-02-07T11:22:29 | 2020-02-07T11:22:29 | 14,419,020 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 26,025 | py | # -*- coding: utf-8 -*-
from __future__ import division
from django.db import models
from django.db.models import F
import account.models
from datetime import datetime
from numstyle import NumStyle, defaultQuestionNumStyle, defaultBranchNumStyle
from django.core.exceptions import ValidationError
from django.core.signing import Signer
import copy
from dateutil.relativedelta import relativedelta
import operator
import re
from jieba.analyse import extract_tags
from qisite.definitions import MAX_TEXT_CONTENT_LENGTH
phonePattern = re.compile(r'^((13[0-9])|(15[^4,\D])|(14[57])|(17[0])|(18[0,0-9]))\d{8}$')
def validate_phone(phone):
if not phonePattern.match(phone):
raise ValidationError(u'phone:手机号码的格式不正确')
class TimeModel(models.Model):
createTime = models.DateTimeField("创建时间", default=datetime.now)
modifyTime = models.DateTimeField("修改时间", default=datetime.now)
class Meta:
abstract = True
class Paper(TimeModel):
def __unicode__(self):
return self.title
# PAPER_STYLE = ( ('F', '平展'), ('P', '分页'))
QUESTION_NUM_STYLE = (('123', '1.2.3.……'), ('(1)(2)(3)', '(1).(2).(3).……'), ('Q1Q2Q3', 'Q1.Q2.Q3.……'))
PAPER_TYPE = (('T', '模板'), ('I', '实例'))
code = models.CharField('编码', max_length=100, blank=True, null=True, default=None) # 用于在测试中找到对象
title = models.CharField('问卷标题', max_length=500)
description = models.CharField('问卷说明', max_length=500, blank=True)
# 题目集 question_set (ok) (已在Question中设置外键引用)
inOrder = models.BooleanField('顺序答题', default=False)
questionNumStyle = models.CharField(
'问题标号样式', max_length=50, choices=QUESTION_NUM_STYLE, default=defaultQuestionNumStyle)
lookBack = models.BooleanField('返回修改', default=False)
# style = models.CharField('展现方式', max_length=5, choices=PAPER_STYLE) #使用paging字段取代
# paging = models.BooleanField('分页答题', default=True) # 正在考虑用step字段取代
step = models.BooleanField('分步答题', default=False)
type = models.CharField('问题类型', choices=PAPER_TYPE, max_length=10, default='T')
survey = models.ForeignKey('Survey', related_name='paperReversed_set', verbose_name="调查", null=True,
blank=True) # 执行调查的反向链接,用于自动删除
createBy = models.ForeignKey(
account.models.User, verbose_name="创建者", related_name='paperCreated_set', blank=True, null=True)
modifyBy = models.ForeignKey(
account.models.User, verbose_name="修改者", related_name='paperModified_set', blank=True, null=True)
# 样本集 sample_set (ok) (已在sample中设置外键引用)
previewSurvey = models.ForeignKey(
'Survey', related_name='paperPreview_set', verbose_name="预览对象", null=True, blank=True, on_delete=models.SET_NULL)
def clean(self):
'''
说明:
1、createBy和modifyBy不能为空的校验放在这里,主要是考虑到我们经常需要创建一些测试用的Paper,如果这两个字段在
定义时就限定死成不能为空,则每次我们都还要多创建一个User,比较麻烦。
'''
if self.createBy is None:
raise ValidationError(u'创建者信息不能为空')
if self.modifyBy is None:
raise ValidationError(u'修改者信息不能为空')
# 处理那些向前跳转的选项
invalidBranchSet = Branch.objects.filter(
question__paper=self, question__ord__gte=F('nextQuestion__ord'))
invalidBranchSet.update(nextQuestion=None)
class Meta:
verbose_name = "问卷"
verbose_name_plural = "[01].问卷"
ordering = ["title"]
def getQuestionSetInOrder(self):
return self.question_set.order_by('ord')
def getNumStyleAvailable(self):
return Paper.QUESTION_NUM_STYLE
def getIdSigned(self):
signer = Signer()
return signer.sign(self.id)
def copy(self, user=None):
'''
拷贝问卷信息
'''
# 拷贝问题对象本身的信息
newPaper = copy.copy(self)
newPaper.createTime = datetime.now()
newPaper.modifyTime = datetime.now()
if user:
newPaper.createBy = user
newPaper.modifyBy = user
newPaper.id = None
newPaper.save()
# 号码问卷的所有问题
questionContrast = {}
for question in self.question_set.all():
newQuestion = question.copy(user)
newQuestion.paper = newPaper
newQuestion.save()
questionContrast[question] = newQuestion
# 将选项指向新拷贝出来的问题
for question in newPaper.question_set.all():
for branch in question.branch_set.all():
if branch.nextQuestion in questionContrast:
branch.nextQuestion = questionContrast[branch.nextQuestion]
branch.save()
return newPaper
def getSampleCount(self):
"""
获取文件采集到的样本数量
"""
return self.sample_set.count()
    def createPaperInstance(self, user):
        '''
        Create a survey-ready paper instance (type 'I') by copying this template
        paper (type 'T').  Raises when called on a non-template paper.
        '''
        if self.type != 'T':
            raise Exception('非模板Paper对象不能创建Instance')
        newPaper = self.copy(user)
        newPaper.type = 'I'
        newPaper.save()
        return newPaper
def isStepNeed(self):
"""
检查文件是否需要分步
"""
count = Branch.objects.filter(question__paper=self, nextQuestion__isnull=False).count()
return count != 0
class PaperCatalog(TimeModel):
    """A (possibly nested) catalog grouping papers; papers attach through PaperCatalogPaper."""
    name = models.CharField("目录名称", max_length=100)
    code = models.CharField("目录编码", max_length=50, unique=True)
    parent = models.ForeignKey('self', verbose_name="上级目录", blank=True, null=True)  # tree structure
    ord = models.IntegerField("排序号")
    paper_set = models.ManyToManyField(Paper, verbose_name='包含问卷', through='PaperCatalogPaper')
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='paperCatalogCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='paperCatalogModified_set')
    class Meta:
        verbose_name = "问卷目录"
        verbose_name_plural = "[02].问卷目录"
class PaperCatalogPaper(TimeModel):
    """Through model linking a Paper into a PaperCatalog with an explicit order."""
    paperCatalog = models.ForeignKey(PaperCatalog, verbose_name='对应的目录')
    paper = models.ForeignKey(Paper, verbose_name='对应的问卷')
    ord = models.IntegerField("排序号")
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='paperCatalogPaperCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='paperCatalogPaperModified_set')
    class Meta:
        verbose_name = "问卷目录-问卷"
        verbose_name_plural = "[03].问卷目录-问卷"
class Question(TimeModel):
    """
    A single question of a Paper.  Depending on `type` it is a single/multiple
    choice question (with Branch options), a free-text question or a score
    question; the two End* types are terminal markers rather than real questions.
    """
    QUESTION_TYPE = (
        ('Single', '单选题'), ('Multiple', '多选题'), ('Text', '问答题'), ('Score', '评分题'),
        ('EndValid', '有效结束'), ('EndInvalid', '无效结束')
    )
    # Types a user may actually create; the End* markers are excluded.
    QUESTION_TYPE_AVAILABLE = ('Single', 'Multiple', 'Text', 'Score')
    BRANCH_NUM_STYLE = (('ABC', 'A.B.C.……'), ('abc.', 'a.b.c.……'), ('123.', '1.2.3……'))
    text = models.CharField('文字', max_length=300)
    type = models.CharField('题型', max_length=100, choices=QUESTION_TYPE)
    ord = models.IntegerField("排序号")
    contentLength = models.IntegerField('内容长度', default=0)  # text questions only
    valueMin = models.FloatField('最小值', null=True, blank=True, default=0)  # score questions only
    valueMax = models.FloatField('最大值', null=True, blank=True, default=10)  # score questions only
    # branch_set: reverse FK declared on Branch
    confused = models.BooleanField('乱序', default=False)  # shuffle options when rendering
    branchNumStyle = models.CharField('标号样式', max_length=50, choices=BRANCH_NUM_STYLE, default=defaultBranchNumStyle)
    # nextQuestion looks redundant (branches carry the jump targets); kept for compatibility.
    nextQuestion = models.ForeignKey('self', verbose_name='下一题', blank=True, null=True, on_delete=models.SET_NULL)
    paper = models.ForeignKey(Paper, verbose_name='所属问卷', null=True, blank=True)
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='questionCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='questionModified_set')
    def clean(self):
        '''
        Model-level validation for Question.
        '''
        if self.type not in Question.QUESTION_TYPE_AVAILABLE:
            raise ValidationError(u'无效的问题类型')
        if self.type in ('Single', 'Multiple') and self.contentLength != 0:
            raise ValidationError(u'选择题不能有填写值长度')
        if self.type not in ('Single', 'Multiple') and self.confused:
            raise ValidationError(u'非选择题不能指定乱序选项')
    def setOrd(self, newOrd):
        """
        Move this question to position `newOrd`; the paper's other questions
        shift accordingly, and paper.clean() then drops branch jumps that the
        reorder made invalid (forward-only jumps are enforced there).

        Args:
            newOrd: the question's new 0-based position.
        """
        paper = Paper.objects.select_for_update().get(id=self.paper.id)
        ord = self.ord
        # Lock every sibling question for the duration of the reorder.
        questionList = list(paper.question_set.select_for_update().order_by('ord'))
        questionCount = len(questionList)
        if newOrd == ord:
            return
        if (newOrd > questionCount - 1) or (newOrd < 0):
            # TODO: raise a dedicated exception type here
            raise Exception()
        questionList.insert(newOrd, questionList.pop(ord))
        for i, q in enumerate(questionList):
            if q.ord != i:
                q.ord = i
                q.save()
        paper.clean()
    def getStemText(self):
        '''
        Stem (question text) accessor, used as an admin list column.
        '''
        return self.text
    getStemText.short_description = '题干信息'
    def getBranchSetInOrder(self):
        """Return this question's branches ordered by their `ord` field."""
        return self.branch_set.order_by('ord')
    def getNum(self):
        # Terminal marker questions render their type label instead of a number.
        if self.type in ('EndValid', 'EndInvalid'):
            return self.get_type_display()
        else:
            numStyle = NumStyle(self.paper.questionNumStyle)
            return numStyle.getNum(self.ord)
    def __unicode__(self):
        return u"(%d)(%s)%s" % (self.ord, self.type, unicode(self.text))
    class Meta:
        verbose_name = "问题"
        verbose_name_plural = "[04].问题"
        ordering = ["ord"]
    def getIdSigned(self):
        """Return this question's id signed with Django's Signer."""
        signer = Signer()
        return signer.sign(self.id)
    def getScoreStat(self, max=10):
        """
        Score distribution statistics: the `max` most frequent scores, as
        zip(scores, counts).  Fix: the `max` parameter was previously ignored
        (the slice was hard-coded to 10); the default keeps old behavior.
        """
        querySet = SampleItem.objects.filter(question=self)
        perScore = querySet.values('score').annotate(count=models.Count('score'))
        countByScore = {rec['score']: rec['count'] for rec in perScore}
        topScores = sorted(countByScore.items(), key=operator.itemgetter(1), reverse=True)[:max]
        return zip(*topScores)
    def getTextKeywords(self, n=10):
        """
        Extract the top `n` keywords from the free-text answers
        (extract_tags over the concatenated answer contents -- presumably
        jieba's API; confirm against the module imports).
        """
        querySet = SampleItem.objects.filter(question=self)
        text = ' '.join([rec['content'] for rec in querySet.values('content')])
        tags = extract_tags(text, topK=n)
        return tags
    def copy(self, user=None):
        '''
        Copy this question together with its branches; returns the new Question.
        '''
        # Copy the question object itself; clearing the pk forces an INSERT.
        newQuestion = copy.copy(self)
        newQuestion.createTime = datetime.now()
        newQuestion.modifyTime = datetime.now()
        if user:
            newQuestion.createBy = user
            newQuestion.modifyBy = user
        newQuestion.id = None
        newQuestion.save()
        # Copy the question's branches as well.
        for branch in self.branch_set.all():
            newBranch = branch.copy(user)
            newBranch.question = newQuestion
            newBranch.save()
        return newQuestion
class QuestionCatalog(TimeModel):
    """A (possibly nested) catalog grouping reusable questions; questions attach through QuestionCatalogQuestion."""
    name = models.CharField("目录名称", max_length=100)
    code = models.CharField("目录编码", max_length=50, unique=True)
    parent = models.ForeignKey('self', blank=True, null=True, verbose_name="上级目录")  # tree structure
    ord = models.IntegerField("排序号")
    question_set = models.ManyToManyField(Question, verbose_name='包含问题', through='QuestionCatalogQuestion')
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='questionCatalogCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者",
                                 related_name='questionCatalogModified_set')
    class Meta:
        verbose_name = "问题目录"
        verbose_name_plural = "[05].问题目录"
    def __unicode__(self):
        return '%s(%s)' % (self.name, self.code)
class QuestionCatalogQuestion(TimeModel):
    """Through model linking a Question into a QuestionCatalog with an explicit order."""
    questionCatalog = models.ForeignKey(QuestionCatalog, verbose_name='对应的目录')
    question = models.ForeignKey(Question, verbose_name='对应的问题')
    ord = models.IntegerField("排序号")
    createBy = models.ForeignKey(
        account.models.User, verbose_name="创建者", related_name='questionCatalogQuestionCreated_set')
    modifyBy = models.ForeignKey(
        account.models.User, verbose_name="修改者", related_name='questionCatalogQuestionModified_set')
    class Meta:
        verbose_name = "问题目录-问题"
        verbose_name_plural = "[06].问题目录-问题"
class Resource(TimeModel):
    """A media attachment (picture/audio/video) displayed with a question."""
    RESOURCE_TYPE = (('Picture', '图片'), ('Audio', '音频'), ('Video', '视频'))
    # NOTE(review): verbose_name '文字' on the two fields below looks like a
    # copy-paste slip (these hold type/url, not text) -- confirm before changing.
    resourceType = models.CharField('文字', max_length=50, choices=RESOURCE_TYPE)
    resourceUrl = models.CharField('文字', max_length=1000)
    width = models.FloatField("资源宽度")
    height = models.FloatField("资源高度")
    question = models.ForeignKey(Question, verbose_name="对应问题")
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='resourceCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='resourceModified_set')
    class Meta:
        verbose_name = "资源"
        verbose_name_plural = "[08].资源"
class Branch(TimeModel):
    """
    One selectable option ("branch") of a choice question, optionally jumping
    to another question when chosen.
    """
    text = models.CharField('文字', max_length=200)
    ord = models.IntegerField('排序号')
    nextQuestion = models.ForeignKey(
        # How should terminal outcomes be modelled here? (end-invalid / end-valid paper)
        'Question', verbose_name='下个问题', related_name='fromBranch', null=True, blank=True, on_delete=models.SET_NULL)
    question = models.ForeignKey(Question, verbose_name="问题")
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='branchCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='branchModified_set')
    class Meta:
        verbose_name = "题支"
        verbose_name_plural = "[09].题支"
    def getNum(self):
        """Rendered option label (A/a/1 ...) per the question's numbering style."""
        numStyle = NumStyle(self.question.branchNumStyle)
        return numStyle.getNum(self.ord)
    def getReachableQuestionList(self):
        # Questions that come after this branch's question (the valid jump targets).
        question = self.question
        paper = question.paper
        reachableQuestion = list(paper.question_set.filter(ord__gt=question.ord).order_by('ord'))
        return reachableQuestion
    def getSystemPredefined(self):
        # System predefined questions; assumes the 'SystemPredefined' catalog
        # exists (raises IndexError otherwise -- TODO confirm that is intended).
        systemPredefinedCatalog = QuestionCatalog.objects.filter(code='SystemPredefined')[0]
        systemPredefined = list(systemPredefinedCatalog.question_set.order_by('ord'))
        return systemPredefined
    def getIdSigned(self):
        """Return this branch's id signed with Django's Signer."""
        signer = Signer()
        return signer.sign(self.id)
    def copy(self, user=None):
        """Copy this branch as a new row; the caller re-points `question` afterwards."""
        newBranch = copy.copy(self)
        newBranch.createTime = datetime.now()
        newBranch.modifyTime = datetime.now()
        if user:
            newBranch.createBy = user
            newBranch.modifyBy = user
        newBranch.id = None
        newBranch.save()
        return newBranch
    def getSelectedCount(self):
        """
        How many sample items picked this option (times users selected it).
        """
        return self.sampleitem_set.count()
    def getSelectedPct(self):
        """
        Percentage (0-100) of the paper's samples that picked this option;
        None when the paper has no samples yet.
        """
        sampleCount = self.question.paper.sample_set.count()
        if sampleCount == 0:
            return None
        # Multiply by 100.0 first: this module is Python 2 (__unicode__ methods),
        # so the original count / sampleCount was integer division and the
        # "percentage" was always truncated to 0 for any partial selection.
        return self.getSelectedCount() * 100.0 / sampleCount
def oneYearLater():
    """Default Survey end time: one calendar year from the current moment."""
    now = datetime.now()
    return now + relativedelta(years=1)
class Survey(TimeModel):
    """An execution of a Paper: publication window, access rules and fees."""
    code = models.CharField('编码', max_length=100, blank=True, null=True, default=None)  # used to locate objects in tests
    paper = models.ForeignKey('Paper', related_name='survey_set', verbose_name="问卷", null=True, blank=True)
    # target customer list: targetcust_set (reverse FK declared on TargetCust)
    targetOnly = models.BooleanField('定向调查', default=False)
    custList = models.ForeignKey('CustList', verbose_name='客户清单', blank=True, null=True, default=None)
    state = models.CharField("状态", max_length=5, default='A')
    paused = models.BooleanField('暂停', default=False)
    shared = models.BooleanField('是否分享', default=False)
    viewResult = models.BooleanField('查看结果', default=True)
    # NOTE(review): verbose_name '查看结果' below looks like a copy-paste slip
    # (the field is about anonymity) -- confirm before changing the label.
    anonymous = models.BooleanField('查看结果', default=False)
    resubmit = models.BooleanField('是否允许重填', default=True)
    password = models.CharField("参与密码", max_length=10, blank=True)
    ipLimit = models.IntegerField("IP限制", default=5)
    macLimit = models.IntegerField("MAC限制", default=5)
    publishTime = models.DateTimeField("发布时间", default=datetime.now)
    endTime = models.DateTimeField("结束时间", default=oneYearLater)
    # participant constraints object set: on hold
    # NOTE(review): '查看结果' also looks wrong as the label for `pay` -- confirm.
    pay = models.BooleanField('查看结果', default=True)
    hardCost = models.FloatField('调查费', default=0)
    bonus = models.FloatField('奖金', default=0)
    fee = models.FloatField('手续费', default=0)
    validSampleLimit = models.IntegerField("有效样本上限", default=0)  # 0 means unlimited
    lastSmsSendTime = models.DateTimeField("最后一次推送短信时间", blank=True, null=True, default=None)
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='surveyCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='surveyModified_set')
    def getResubmitText(self):
        """Yes/no display text for `resubmit`."""
        return u'是' if self.resubmit else u'否'
    def getVeiwResultText(self):
        # NOTE(review): method name misspells "View"; kept for caller compatibility.
        return u'是' if self.viewResult else u'否'
    def getAnonymousText(self):
        """Yes/no display text for `anonymous`."""
        return u'是' if self.anonymous else u'否'
    def getSharedText(self):
        """Yes/no display text for `shared`."""
        return u'是' if self.shared else u'否'
    class Meta:
        verbose_name = "调查"
        verbose_name_plural = "[10].调查"
    def getIdSigned(self):
        """Return this survey's id signed with Django's Signer."""
        signer = Signer()
        return signer.sign(self.id)
    def __unicode__(self):
        if self.custList:
            name = self.custList.name
        else:
            name = 'None'
        return '<%s,%s>' % (self.paper.title, name)
class TargetCust(TimeModel):
    """A customer targeted by a specific survey; `token` authorises their access."""
    name = models.CharField('姓名', max_length=50)
    phone = models.CharField('手机号码', max_length=50)
    email = models.CharField('电子邮件', max_length=100)
    defineInfo_set = models.ManyToManyField('DefineInfo', verbose_name='附件信息', blank=True, null=True)
    # sample: the one-to-one relation is declared on Sample (ok)
    token = models.CharField('访问令牌', max_length=50)
    survey = models.ForeignKey(Survey, verbose_name="所属调查", related_name='targetCust_set')
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='targetCustCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='targetCustModified_set')
    class Meta:
        verbose_name = "目标客户"
        verbose_name_plural = "[11].目标客户"
    def __unicode__(self):
        return u'<%s,%s>' % (self.name, self.phone)
    def getIdSigned(self):
        """Return this target customer's id signed with Django's Signer."""
        signer = Signer()
        return signer.sign(self.id)
class Sample(TimeModel):
    """One answer sheet for a paper, from a targeted customer or an anonymous session."""
    # sample items: reverse FK declared on SampleItem
    targetCust = models.ForeignKey('TargetCust', verbose_name='清单项', null=True, blank=True)
    # session identifies the client for non-targeted surveys
    session = models.CharField('客户端会话标识', max_length=40, null=True, blank=True)
    user = models.ForeignKey(account.models.User, verbose_name="参与用户", null=True,
                             blank=True)  # consider adding a related_name here
    ipAddress = models.CharField('受访IP', max_length=50)
    # macAddress was dropped: the web client cannot actually obtain it
    finished = models.BooleanField('是否完成', default=True)
    # nextQuestion supports step-by-step answering: the question to resume at.
    # (lastQuestion was considered instead, but then resuming would require
    # re-evaluating the previous answer to find where to continue.)
    nextQuestion = models.ForeignKey('Question', verbose_name='下一题', null=True, blank=True, on_delete=models.SET_NULL)
    isValid = models.BooleanField('是否有效', default=True)
    paper = models.ForeignKey(Paper, verbose_name='所属问卷')
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='sampleCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='sampleModified_set')
    class Meta:
        verbose_name = "样本"
        verbose_name_plural = "[12].样本"
class SampleItem(TimeModel):
    """The answer to one question within a Sample (chosen branches, free text or score)."""
    question = models.ForeignKey('Question', verbose_name='问题')
    branch_set = models.ManyToManyField(Branch, verbose_name='已选')
    content = models.CharField('内容', max_length=MAX_TEXT_CONTENT_LENGTH, blank=True, null=True)
    score = models.FloatField('得分', default=0)
    sample = models.ForeignKey(Sample, verbose_name='所属样本')
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='sampleItemCreated_set',
                                 null=True, blank=True)
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='sampleItemModified_set',
                                 null=True, blank=True)
    class Meta:
        verbose_name = "样本项"
        verbose_name_plural = "[13].样本项"
class CustList(TimeModel):
    """A reusable list of customers that a targeted survey can be sent to."""
    name = models.CharField('清单名称', max_length=50)
    # NOTE(review): field name `descrition` misspells "description"; renaming
    # would require a schema migration plus caller updates, so it is kept.
    descrition = models.CharField('清单说明', max_length=200, blank=True, null=True, default='')
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='custListCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='custListModified_set')
    class Meta:
        verbose_name = "客户清单"
        verbose_name_plural = "[14].客户清单"
    def getIdSigned(self):
        """Return this list's id signed with Django's Signer."""
        signer = Signer()
        return signer.sign(self.id)
    def __unicode__(self):
        return self.name
class CustListItem(TimeModel):
    """One customer entry of a CustList."""
    name = models.CharField('客户名称', max_length=50)
    phone = models.CharField('手机号', max_length=50, validators=[validate_phone])
    email = models.CharField('电子邮件', max_length=100, blank=True, null=True, default='')
    custList = models.ForeignKey(CustList, verbose_name='所属清单', related_name="custListItem_set")
    defineInfo_set = models.ManyToManyField('DefineInfo', verbose_name='附件信息', blank=True, null=True)
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='custListItemCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='custListItemModified_set')
    class Meta:
        verbose_name = "客户清单项"
        verbose_name_plural = "[15].客户清单项"
    def getIdSigned(self):
        """Return this item's id signed with Django's Signer."""
        signer = Signer()
        return signer.sign(self.id)
    def __unicode__(self):
        return self.name
class DefineInfo(TimeModel):
    """A free-form name/value attribute attachable to customers (via M2M relations)."""
    name = models.CharField('信息名称', max_length=100)
    value = models.CharField('信息值', max_length=200)
    ord = models.IntegerField('排序号')
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='defineInfoCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='defineInfoModified_set')
    class Meta:
        verbose_name = "自定义信息"
        verbose_name_plural = "[16].自定义信息"
| [
"xmduhan@gmail.com"
] | xmduhan@gmail.com |
6a6035e8568deb8735087e5c391727eb03d1e06f | 9b9be4c8c1824c524556a074afaec0b989cf389e | /download.py | 5e9b29fb8bc0d4458de9d72ff46ee060abd7dc9b | [] | no_license | bezoadam/MetaIT | ad9abc459b932efc3d6c86226afaa710081eb71a | 23cf145d3ad796198d9e916f00c6a50f341d2749 | refs/heads/master | 2021-01-19T22:47:15.019640 | 2017-04-20T20:09:19 | 2017-04-20T20:09:19 | 88,870,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,374 | py | #! /usr/bin/env python3
import sys
import json
import requests
from operator import itemgetter
# sys.path.insert(1,'/usr/local/lib/python3.5/site-packages/')
from bs4 import BeautifulSoup
NHL_LINK = "https://www.ifortuna.cz/cz/sazeni/hokej/nhl"
PREMIER_LEAGUE_LINK = "https://www.ifortuna.cz/cz/sazeni/fotbal/evropska-liga"
if __name__ == '__main__':
    # Scrape the odds tables for both competitions and write them to kurz.json,
    # each competition's matches sorted by ascending rate.
    response_nhl = requests.get(NHL_LINK)
    response_premier = requests.get(PREMIER_LEAGUE_LINK)

    result = {}
    for response, name in zip([response_nhl, response_premier], ['NHL', "PREMIER_LEAGUE"]):
        matches = []
        soup = BeautifulSoup(response.content, 'html.parser')
        table = soup.find_all('table', attrs={'class': 'bet_table'})
        table_body = table[0].find_all('tbody')
        for row in table_body[0].find_all('tr'):
            bet = row.find_all('a', attrs={'class': 'add_bet_link'})
            match = row['data-gtm-enhanced-ecommerce-match']
            rate = bet[0]['data-rate']
            # The match attribute has the form "<team1> - <team2>".
            team1 = match[:match.find('-') - 1]
            team2 = match[match.find('-') + 2:]
            matches.append({'team1': team1, 'team2': team2, 'rate': rate})
        # Sort numerically: rates are scraped as strings, and the previous
        # plain string sort ordered '10.5' before '2.0'.
        matches.sort(key=lambda m: float(m['rate']))
        result[name] = matches

    # Dump the result directly; the old json.loads(json.dumps(...)) round-trip
    # was a no-op.
    with open('kurz.json', 'w') as outfile:
        json.dump(result, outfile, sort_keys=True, indent=4)
"bezoadam95@gmail.com"
] | bezoadam95@gmail.com |
0e848300eeaafad6eda8c0a708e85d39d6455cce | 5229fd82b398b4f1fc370dd874af780f41837c35 | /bot/triggers/commands/story.py | 4c10178151d808eb8de42b894985b5a220238436 | [] | no_license | kdung/jenova | 3264bd2b0dd6b5aa32cd4517a3f325d643f8e23a | df0589ee8a6f4e1da8183ee113bb727e7122b27b | refs/heads/master | 2020-12-30T15:42:10.101999 | 2017-05-12T08:19:38 | 2017-05-12T08:19:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,598 | py | """Trigger implementation to tell a story"""
import time
import random
from ev3bot.trigger import Trigger
from utils import tts
class Story(Trigger):
    """Trigger that picks a story (optionally filtered by tag) and reads it aloud."""

    def __init__(self):
        Trigger.__init__(self)
        # Set by stop() to abort an in-progress narration between lines.
        self.stopped = False

    def run(self, execution_context):
        """Run the action: choose a story, announce it and read its text file."""
        self.stopped = False
        openings = self.get_config('story.opening')
        no_story = self.get_config('story.no_story')
        pause_time = self.get_config('story.pause_time')
        stories = self.get_config('story.stories')

        tag = execution_context.event.get('tag', None)
        if tag is not None:
            # keep only the stories carrying the requested tag
            stories = [story for story in stories if tag in story.get('tags')]

        # Bail out when no story is available.  Fix: this check previously ran
        # only inside the tag branch, so an empty configured story list with no
        # tag crashed random.randint with an empty range.
        if not stories:
            execution_context.finish('no story')
            tts.say_random(no_story)
            return

        story = random.choice(stories)
        execution_context.finish('telling ' + story.get('name'))

        tts.say_random(openings)
        time.sleep(pause_time)
        if self.stopped:
            return

        with open('cache/stories/' + story.get('file')) as data_file:
            text = data_file.read()
        self.read_long_text(text)

    def read_long_text(self, text):
        """Read a possibly long text line by line, honouring stop() between lines."""
        for line in text.splitlines():
            if not self.stopped and line:
                tts.say([line])

    def stop(self):
        """Stop reading the story before the next line is spoken."""
        self.stopped = True
| [
"griever@Dzungs-MacBook-Pro.local"
] | griever@Dzungs-MacBook-Pro.local |
f6bb2899a6ff92ba5e477f3de7c6537b0ec6391e | 14f66af5aefd883ed49d3c3bd349581e64f2207d | /app/user/tests/test_user_api.py | fb932470d2aa8c492618d0ab71b698b099ab06dc | [
"MIT"
] | permissive | CompSci2013/recipe-app-api | 10e2dd253ba7b53c2a3a2ea719b22b874fb4df70 | b5546ad7b1d05492eacf2d086274e45a97a86f24 | refs/heads/main | 2023-04-25T18:48:36.906440 | 2021-05-31T14:32:13 | 2021-05-31T14:32:13 | 359,903,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,287 | py | from django.test import TestCase
from django.contrib.auth import get_user, get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
# Named URL routes under test, resolved once at import time.
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
def create_user(**params):
    """Create and return a user through the project's active user model."""
    user_model = get_user_model()
    return user_model.objects.create_user(**params)
class PublicUserApiTest(TestCase):
    """Test the users API (public endpoints, no authentication)."""

    def setUp(self):
        self.client = APIClient()

    def test_create_valid_user_success(self):
        """Test creating user with valid payload is successful"""
        payload = {
            'email': 'test@londonappdev.com',
            'password': 'testpass',
            'name': 'Test name'
        }
        res = self.client.post(CREATE_USER_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        user = get_user_model().objects.get(**res.data)
        self.assertTrue(user.check_password(payload['password']))
        # The password must never be echoed back in the response body.
        self.assertNotIn('password', res.data)

    def test_user_exists(self):
        """Test creating a user that already exists fails"""
        payload = {'email': 'test@londonappdev.com', 'password': 'testpass'}
        create_user(**payload)
        res = self.client.post(CREATE_USER_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_password_too_short(self):
        """Test that the password must be more than 5 characters"""
        payload = {'email': 'test@londonappdev.com', 'password': 'pw'}
        res = self.client.post(CREATE_USER_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        # The invalid request must not have created the user as a side effect.
        user_exists = get_user_model().objects.filter(
            email=payload['email']
        ).exists()
        self.assertFalse(user_exists)

    def test_create_token_for_user(self):
        """Test that a token is created for the user"""
        payload = {'email': 'test@londonappdev.com', 'password': 'testpass'}
        create_user(**payload)
        res = self.client.post(TOKEN_URL, payload)

        self.assertIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_200_OK)

    def test_create_token_invalid_credentials(self):
        """Test that token is not created if invalid credentials are given"""
        create_user(email='test@londonappdev.com', password='testpass')
        payload = {'email': 'test@londonappdev.com', 'password': 'wrong'}
        res = self.client.post(TOKEN_URL, payload)

        self.assertNotIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_token_no_user(self):
        """Test that token is not created if user doesn't exist"""
        payload = {'email': 'test@londonappdev.com', 'password': 'testpass'}
        res = self.client.post(TOKEN_URL, payload)

        self.assertNotIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_token_missing_field(self):
        """Test that email and password are required"""
        res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})

        # Fix: the original asserted ' token' (leading space), a key that can
        # never appear in the response, making the check vacuous.
        self.assertNotIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
| [
"kcollins2004@gmail.com"
] | kcollins2004@gmail.com |
4b3bcb583dfe4073fbaf60da96a44bc417c9ff61 | 1ddbe64e41ba648bb60a8758be6764e19b1c418a | /trunk/tygame-hall5-py/src/hall5/plugins/hallitem/_private/_actions/open.py | 722642fc9426119f820446f54c160267601f47ce | [] | no_license | zhaozw/freetime5 | 9bc3d0671a594822cc82e04b69c8016b7afd0554 | 99c47ad235583e765c35627ba34d4f496ccccbe4 | refs/heads/master | 2020-03-08T04:09:15.293616 | 2020-02-11T06:06:29 | 2020-02-11T06:06:29 | 127,913,013 | 0 | 0 | null | 2018-04-03T13:31:36 | 2018-04-03T13:31:35 | null | UTF-8 | Python | false | false | 8,612 | py | # -*- coding=utf-8 -*-
"""
@file : itemaction
@date : 2016-09-22
@author: GongXiaobo
"""
from hall5.plugins.hallitem._private._actions import _action
from hall5.plugins.hallitem._private._items.box import TYBoxItem
from tuyoo5.core.typlugin import pluginCross
from tuyoo5.game import tycontent
from tuyoo5.game._private._tycontent import TYContentItem, TYEmptyContent
from tuyoo5.plugins.item import assetutils, items
from tuyoo5.plugins.item.itemexceptions import TYItemConfException
class TYItemActionBoxOpenResult(items.TYItemActionResult):
    """Result of opening a box: the granted asset list plus the reward popup todotask."""
    def __init__(self, action, item, message, gotAssetList, todotask):
        # code is fixed to 0 (success); failures return a plain TYItemActionResult.
        super(TYItemActionBoxOpenResult, self).__init__(action, item, 0, message, todotask)
        self.gotAssetList = gotAssetList
class _TYItemBindings(object):
def __init__(self, items, params):
self.items = items
self.params = params
def getParam(self, paramName, defVal=None):
return self.params.get(paramName, defVal)
@property
def failure(self):
return self.getParam('failure', '')
@classmethod
def decodeFromDict(cls, d):
params = d.get('params', {})
if not isinstance(params, dict):
raise TYItemConfException(d, 'TYItemBindings.params must be dict')
items = TYContentItem.decodeList(d.get('items', []))
return cls(items, params)
# 处理items
def consume(self, gameId, item, userAssets, timestamp, eventId, intEventParam):
for contentItem in self.items:
assetKind, consumeCount, final = userAssets.consumeAsset(gameId,
contentItem.assetKindId,
contentItem.count,
timestamp,
eventId,
intEventParam)
if consumeCount == contentItem.count:
return True, (assetKind, consumeCount, final)
return False, None
class TYItemActionBoxOpen(_action.HallItemAction):
    """
    "Open a box" item action: charges an optional price, consumes the box,
    grants the content configured for the current open count and optionally
    creates a follow-up item.
    """
    TYPE_ID = 'common.box.open'
    def __init__(self):
        super(TYItemActionBoxOpen, self).__init__()
        self.itemBindings = None    # optional price charged on open (_TYItemBindings)
        self.contentList = None     # [(startTimes, stopTimes, content), ...]
        self.nextItemKindId = None  # kind id of the follow-up item, if configured
        self.nextItemKind = None    # resolved kind object for nextItemKindId
    def _decodeFromDictImpl(self, d):
        # Parse the action config: price bindings, contents, follow-up item kind.
        bindings = d.get('bindings')
        if bindings:
            self.itemBindings = _TYItemBindings.decodeFromDict(bindings)
        self.contentList = self._decodeContents(d)
        self.nextItemKindId = d.get('nextItemKindId')
        if self.nextItemKindId is not None and not isinstance(self.nextItemKindId, int):
            raise TYItemConfException(d, 'TYItemActionBoxOpen.nextItemKindId must be int')
    def _decodeContents(self, d):
        '''
        Parse the `contents` config list.  Each entry carries an openTimes
        range {start, stop} selecting for which open counts the content
        applies (a negative bound means unbounded).
        '''
        contentList = []
        contents = d.get('contents')
        if not isinstance(contents, list) or not contents:
            raise TYItemConfException(d, 'TYItemActionBoxOpen.contents must be not empty list')
        for contentConf in contents:
            openTimes = contentConf.get('openTimes', {'start': 0, 'stop': -1})
            if not isinstance(openTimes, dict):
                raise TYItemConfException(contentConf, 'TYItemActionBoxOpen.openTimes must be dict')
            startTimes = openTimes.get('start')
            stopTimes = openTimes.get('stop')
            if (not isinstance(startTimes, int)
                or not isinstance(stopTimes, int)):
                raise TYItemConfException(openTimes, 'TYItemActionBoxOpen.openTimes.start end must be int')
            if 0 <= stopTimes < startTimes:
                raise TYItemConfException(openTimes, 'TYItemActionBoxOpen.openTimes.stop must ge start')
            content = tycontent.decodeFromDict(contentConf)
            contentList.append((startTimes, stopTimes, content))
        return contentList
    def _initWhenLoaded(self, itemKind, itemKindMap, assetKindMap):
        # Resolve nextItemKindId against the loaded kind map; fail fast on a bad id.
        if self.nextItemKindId:
            nextItemKind = itemKindMap.get(self.nextItemKindId)
            if not nextItemKind:
                raise TYItemConfException(self.conf, 'TYItemActionBoxOpen._initWhenLoad unknown nextItemKind %s' % (
                    self.nextItemKindId))
            self.nextItemKind = nextItemKind
    def canDo(self, gameId, clientId, userBag, item, timestamp):
        # A box can be opened as long as it has not expired.
        return not item.isDied(timestamp)
    def doAction(self, gameId, clientId, userAssets, item, timestamp, params):
        """
        Open `item` for the user: charge the price (or return a pay todotask
        when it cannot be paid), consume the box, send the configured rewards
        and return a TYItemActionBoxOpenResult carrying the reward popup.
        """
        assert (isinstance(item, TYBoxItem))
        userBag = userAssets.getUserBag()
        if item.isDied(timestamp):
            return items.TYItemActionResult(None, None, -30, '道具已经过期', None)
        if self.itemBindings:
            ok, _assetTuple = self.itemBindings.consume(gameId,
                                                        item,
                                                        userAssets,
                                                        timestamp,
                                                        'ITEM_USE',
                                                        item.kindId)
            if not ok:
                # Cannot pay the price: hand back a todotask leading to the pay order.
                return _action._makeTodoWithPayOrder(self.itemBindings,
                                                     gameId,
                                                     userAssets.userId,
                                                     clientId)
        if not item.itemKind.singleMode:
            # mutually exclusive items are removed outright when opened
            userBag.removeItem(gameId, item, timestamp, 'ITEM_USE', item.kindId)
        else:
            # persist the item with one more open counted
            item.openTimes += 1
            item.original = 0
            userBag.consumeItemUnits(gameId, item, 1, timestamp, 'ITEM_USE', item.kindId)
        sendItems = self._getContent(item).getItems()
        assetItemList = userAssets.sendContentItemList(gameId,
                                                       sendItems,
                                                       1,
                                                       True,
                                                       timestamp,
                                                       'ITEM_USE',
                                                       item.kindId)
        # create the follow-up item if configured
        if self.nextItemKind:
            userBag.addItemUnitsByKind(gameId,
                                       self.nextItemKind,
                                       1,
                                       timestamp,
                                       0,
                                       'ITEM_USE',
                                       item.kindId)
        # build the reward list shown in the client popup
        rewardsList = []
        for assetItemTuple in assetItemList:
            '''
            0 - assetItem
            1 - count
            2 - final
            '''
            assetItem = assetItemTuple[0]
            reward = {'name': assetItem.displayName, 'pic': assetItem.pic, 'count': assetItemTuple[1],
                      'kindId': assetItem.kindId}
            rewardsList.append(reward)
        rewardTodotask = pluginCross.halltodotask.makeTodoTaskShowRewards(rewardsList)
        # message text describing what was obtained
        gotContent = assetutils.buildContentsString(assetItemList)
        # substitution parameters for the notification message
        replaceParams = {'item': item.itemKind.displayName, 'gotContent': gotContent}
        _mail, message, _changed = _action._handleMailAndMessageAndChanged(gameId,
                                                                           userAssets,
                                                                           self,
                                                                           assetItemList,
                                                                           replaceParams)
        # TGHall.getEventBus().publishEvent(TYOpenItemEvent(gameId, userBag.userId, item, assetItemList))
        return TYItemActionBoxOpenResult(self, item, message, assetItemList, rewardTodotask)
    def _getContent(self, item):
        # Pick the content whose openTimes range covers the item's (0-based)
        # completed-open count; a negative bound disables that side of the range.
        if self.contentList:
            openTimes = max(item.openTimes - 1, 0)
            for startTimes, stopTimes, content in self.contentList:
                if (startTimes < 0 or openTimes >= startTimes) and (stopTimes < 0 or openTimes <= stopTimes):
                    return content
        return TYEmptyContent()
| [
"tuyoo@tuyoodeMac-mini-8.local"
] | tuyoo@tuyoodeMac-mini-8.local |
8c3118deb13c8c994393b2003f7d577b34e10853 | 0479655f53b20aabd58e74fab15d7a0d278bd0f5 | /scripts/modules/enumerators.py | 7767ef0c42ae6ae865e130491fb2e739654393d2 | [
"MIT"
] | permissive | gianheck/MeshSim | a717e54fa92bafea5b4522b54d02f9967c95d7b4 | f2ff91634e1d7db2946fa86ab4384047dbfe0747 | refs/heads/master | 2022-04-07T11:06:38.212254 | 2020-02-27T22:45:46 | 2020-02-27T22:45:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,437 | py | #!/usr/bin/env python
"""
Small module to enumerate sets of variable values.
For simulation, we often want to enumerate the values in a
grid, i.e. look at the cartesian product of possible values. Sometimes the
set is slightly more complicated, but usually not much so. In such
cases, the explicit listing of parameter values together with the
cartesian_ext enumerator, as described below, comes in handy.
Enumerators are implemented as python generators that generate the
parameter sets sequentially.
The following types of enumerators are available:
* cartesian
* cartesian_ext
* union
* list
* singleton
For a brief explanation of the enumerators, read up the documentation of
the corresponding enum_* functions below.
The get_enumerator_from_dict() function builds one of these enumerators
from a plain dict description (e.g. deserialized JSON); see its docstring
for the accepted syntax.
"""
import sys
def enum_cartesian(dict_vars):
    """Create a cartesian enumerator from a dict of vars

    Takes a dict of the form

    { 'var_a': [ list of var_a values ],
      'var_b': [ list of var_b values ],
      ...
    }

    The cartesian() generator then emits dicts with all the possible
    combinations; e.g. if var_a can have values 1, 2, 3, and var_b can have
    values 'a', 'b', there will be six pairs.
    """
    # An empty spec enumerates nothing (itertools.product would otherwise
    # yield one empty combination, so guard explicitly).
    if not dict_vars:
        return
    import itertools
    # Fix a key order once so every emitted dict is built consistently; the
    # hand-rolled recursion this replaces varied the first key slowest, which
    # itertools.product also does.
    keys = list(dict_vars.keys())
    for combo in itertools.product(*(dict_vars[k] for k in keys)):
        yield dict(zip(keys, combo))
def enum_cartesian_ext(enums):
    """Create a cartesian_ext enumerator from a list of enumerators.

    A cartesian_ext enumerator takes N enumerators; each of those
    enumerators is assumed to enumerate disjoint parameter sets. For
    example one enumerator could enumerate all the dicts with values for
    parameters a and b, and another one could list all the dicts with
    values for parameters c and d. Then every combination of values for
    a and b is joined with every combination of values for c and d.
    """
    if not enums:
        return
    # Materialize the last enumerator: it is iterated once per combination
    # of the preceding enumerators.
    head, tail = enums[:-1], list(enums[-1])
    if not head:
        # Single enumerator: pass its parameter sets through unchanged.
        for params in tail:
            yield params
        return
    for left in enum_cartesian_ext(head):
        for right in tail:
            # Earlier enumerators win on key collisions, matching the
            # copy-then-update order of the original implementation.
            merged = dict(right)
            merged.update(left)
            yield merged
def enum_union(enums):
"""Produce a union enumerator from a python list of enumerators.
This takes a list of enumerators, and first enumerates all the
parameter sets in the first enumerator, followed by those in the
second enumerator, etc. In other words, it produces the union of
all the parameter sets. (Duplicates are not removed.)
"""
for e in enums:
for d in e:
yield d
def enum_list(list_of_dicts):
"""Produce a list enumerator from a python list.
The list enumerator returns each item in the explicitly provided
list, one after another. In other words, The input is a list of
dicts (the dicts containing parameter->value assignments), and one
of those dicts will be yielded by each iteration.
"""
for d in list_of_dicts:
yield d
def enum_singleton(vars_dict):
"""Produce a singleton enumerator.
The singleton enumerator produces just a single set of values: the
one provided. This is syntactic sugar, the same can be achieved
with a list enumerator or a cartesian enumerator, but in each case
with a bit more baggage.
"""
yield vars_dict
###
def get_enumerator_from_dict(d):
"""Generically produce an enumerator from a python dict.
This is typically used after deserializing JSON into python objects
to get the actual enumerator.
cartesian enumerator syntax:
{ "type": "cartesian", "vars": { "var_a": [ ... ], ... } }
cartesian_ext enumerator syntax:
{ "type": "cartesian_ext", "enums": [ { ... }, { ... }, ... ]
union enumerator syntax:
{ "type": "union", "enums": [ { ... }, { ... }, ... ]
list enumerator syntax:
{ "type": "list", "list": [ { ... }, ... ] }
singleton enumerator syntax:
{ "type": "singleton", "vars": { "var_a": value_a, ... } }
"""
assert type(d) == dict;
tp = d["type"]
if tp == "cartesian":
return enum_cartesian(d["vars"])
elif tp == "cartesian_ext":
enums = [ get_enumerator_from_dict(x) for x in d["enums"] ]
return enum_cartesian_ext(enums)
elif tp == "union":
enums = [ get_enumerator_from_dict(x) for x in d["enums"] ]
return enum_union(enums)
elif tp == "list":
return enum_list(d["list"])
elif tp == "singleton":
return enum_singleton(d["vars"])
sys.stderr.write("Error: Not a known enumerator type: `%s'\n" % tp)
return None
def _usage():
print("""Parameter set enumerator.
usage: enumerators.py [-h] <json-string-or-filename>
enumerators.py is typically used as a python module in other tools
(e.g., stagesim) to create enumerators from dicts. But It can be
run on the command line for testing purposes. With no options
given, and just a JSON string or the name of a file containing JSON,
it will produce the enumeration, and list all the combinations, one
per line, on the command line.
Options:
-h print this help and exit
-t run tests and exit
""")
sys.exit(0)
def _tests():
ex_r_pairs = [ ("""
{
"type": "cartesian_ext",
"enums": [
{ "type": "singleton",
"vars": {
}
},
{ "type": "cartesian_ext",
"enums": [
{ "type": "cartesian",
"vars": {
"a": [ 1, 2, 3, 4 ]
}
},
{ "type": "singleton",
"vars": {
}
}
]
}
]
}""", [ {'a': 1}, {'a': 2}, {'a': 3}, {'a': 4} ]) ]
for example, result in ex_r_pairs:
d = json.loads(example)
r_received = list(get_enumerator_from_dict(d))
if r_received != result:
print("Failure in example:", example)
print("Expected:", result)
print("Got:", r_received)
sys.exit(1)
print("All tests passed OK.")
sys.exit(0)
if __name__ == "__main__":
import json
import getopt
opts, args = getopt.getopt(sys.argv[1:], "ht")
for o, a in opts:
if o == '-h':
_usage()
elif o == '-t':
_tests()
if len(args) != 1:
sys.stderr.write("Error: Need exactly one argument on command "
+ "line: description string or filename.\n")
sys.exit(1)
arg = args[0]
# load JSON
if arg[0] == '{':
descr = json.loads(arg)
else:
descr = json.load(open(arg, 'r'))
# Get the combinations
for d in get_enumerator_from_dict(descr):
s = ""
for k in sorted(d.keys()):
s += "%s=%s " % (k, d[k])
print(s)
| [
"lminder@gmx.net"
] | lminder@gmx.net |
b9f098dd72ef674afb3bd21f560cc6124c6f2a47 | d159a228eebf3a050e75f72f8e49c64e19a09eb8 | /ui/SwitchButton.py | 24bed49fc5f28fb547fceaffa54b56c9b1941a60 | [] | no_license | LK007CX/General-Industrial-Smart-Monitoring-V2.0 | ca7f34d3f26e6e09f73a77f7b9bf4aca19590968 | ae16825504ab4b081779867513ea7a068acff7b1 | refs/heads/main | 2023-06-20T09:57:16.338101 | 2021-07-19T14:28:33 | 2021-07-19T14:28:33 | 343,361,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,626 | py | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
import sys
from PyQt5.QtCore import pyqtSignal, QTimer, QRect, QRectF, QSize, Qt
from PyQt5.QtGui import QColor, QFont, QPainter, QPainterPath, QPen
from PyQt5.QtWidgets import QPushButton, QMainWindow, QApplication
"""
TO DO LIST
"""
class SwitchButton(QPushButton):
checkedChanged = pyqtSignal(bool)
def __init__(self, parent=None):
super(SwitchButton, self).__init__(parent)
self.checked = False
self.bgColorOff = QColor(233, 233, 235)
self.bgColorOn = QColor(86, 200, 94)
self.sliderColorOff = QColor(255, 255, 255)
self.sliderColorOn = QColor(255, 255, 255)
self.textColorOff = QColor(143, 143, 143)
self.textColorOn = QColor(255, 255, 255)
self.textOff = "OFF"
self.textOn = "ON"
self.space = 3
self.rectRadius = 5
self.step = self.width() / 50
self.startX = 0
self.endX = 0
self.timer = QTimer(self)
self.timer.timeout.connect(self.updateValue)
self.setFont(QFont("Microsoft Yahei", 10))
def updateValue(self):
"""
Update value.
:return: None
"""
if self.checked:
if self.startX < self.endX:
self.startX = self.startX + self.step
else:
self.startX = self.endX
self.timer.stop()
else:
if self.startX > self.endX:
self.startX = self.startX - self.step
else:
self.startX = self.endX
self.timer.stop()
self.update()
def mousePressEvent(self, event):
"""
Mouse press event.
:param event: event
:return: None
"""
self.checked = not self.checked
self.checkedChanged.emit(self.checked)
self.step = self.width() / 50
if self.checked:
self.endX = self.width() - self.height()
else:
self.endX = 0
self.timer.start(5)
def setChecked(self, boolean):
"""
Set self checked or unchecked.
:param boolean: checked?
:return: None
"""
if self.checked == boolean:
return
self.checked = not self.checked
self.checkedChanged.emit(self.checked)
self.step = self.width() / 50
if self.checked:
self.endX = self.width() - self.height()
else:
self.endX = 0
self.timer.start(5)
def paintEvent(self, event):
"""
Paint event.
:param event: event
:return: None
"""
painter = QPainter()
painter.begin(self)
painter.setRenderHint(QPainter.Antialiasing)
self.drawBg(event, painter)
self.drawSlider(event, painter)
painter.end()
def drawText(self, event, painter):
"""
Draw text.
:param event: event
:param painter: painter
:return: None
"""
painter.save()
if self.checked:
painter.setPen(self.textColorOn)
painter.drawText(0, 0, self.width() / 2 + self.space * 2, self.height(), Qt.AlignCenter, self.textOn)
else:
painter.setPen(self.textColorOff)
painter.drawText(self.width() / 2, 0, self.width() / 2 - self.space, self.height(), Qt.AlignCenter,
self.textOff)
painter.restore()
def drawBg(self, event, painter):
"""
Draw background.
:param event: event
:param painter: painter
:return: None
"""
painter.save()
painter.setPen(Qt.NoPen)
if self.checked:
painter.setBrush(self.bgColorOn)
else:
painter.setBrush(self.bgColorOff)
rect = QRect(0, 0, self.width(), self.height())
radius = rect.height() / 2
circleWidth = rect.height()
path = QPainterPath()
path.moveTo(radius, rect.left())
path.arcTo(QRectF(rect.left(), rect.top(), circleWidth, circleWidth), 90, 180)
path.lineTo(rect.width() - radius, rect.height())
path.arcTo(QRectF(rect.width() - rect.height(), rect.top(), circleWidth, circleWidth), 270, 180)
path.lineTo(radius, rect.top())
painter.drawPath(path)
painter.restore()
def drawSlider(self, event, painter):
"""
Draw slider.
:param event: event
:param painter: painter
:return: None
"""
painter.save()
if self.checked:
painter.setPen(QPen(Qt.white, 1, Qt.SolidLine))
painter.setBrush(self.sliderColorOn)
else:
painter.setPen(QPen(Qt.white, 1, Qt.SolidLine))
painter.setBrush(self.sliderColorOff)
rect = QRect(0, 0, self.width(), self.height())
sliderWidth = rect.height() - self.space * 2
sliderRect = QRect(self.startX + self.space, self.space, sliderWidth, sliderWidth)
painter.drawEllipse(sliderRect)
painter.restore()
class MainWindow(QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.switchBtn = SwitchButton(self)
self.switchBtn.resize(QSize(50, 30))
self.switchBtn.checkedChanged.connect(self.getState)
self.setStyleSheet('''background-color: white;''')
def getState(self, checked):
print("checked=", checked)
if __name__ == "__main__":
app = QApplication(sys.argv)
form = MainWindow()
form.show()
sys.exit(app.exec_())
| [
"1317108121@qq.com"
] | 1317108121@qq.com |
fbb9c8ae7ff641259199c43c4776c6e6207c9d4a | bddd137df94aeefe3b2dddda450cb3830c4d612a | /数据/raw data/20170816price_radio_2012/2_128/tsc_model.py | faa4fa79ffe20f5d75bfc9d8e1eaf3cae1bfd247 | [] | no_license | yangyiqwer/vue-sp | e211f05965930086edb0b6625177ec1f0a9557c6 | 44f09352dc6aac01ebb749f55635714c3cf11b53 | refs/heads/master | 2021-01-01T19:04:16.109666 | 2017-08-27T15:04:26 | 2017-08-27T15:04:26 | 98,498,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,624 | py | import numpy as np
import pandas as pd
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
from tensorflow.python.framework import ops
from tensorflow.python.ops import clip_ops
from tensorflow.contrib.rnn import LSTMCell
from tensorflow.python.ops.rnn import static_rnn
def read_form_csv():
res = []
time_step = 30
f=open('000001SZ_2012.csv')
df=pd.read_csv(f)
price = np.array(df['price'])
for i in range(1, len(price) - time_step):
tmp = []
if price[i + time_step] - price[i + time_step - 1] > 0:
tmp.append(1)
else:
tmp.append(0)
for j in range(time_step):
tmp.append((price[i + j] - price[i + j - 1]) / price[i + j - 1] * 10)
res.append(tmp.copy())
return np.array(res)
def load_data():
need_test = True
"""Input:
direc: location of the UCR archive
ratio: ratio to split training and testset
dataset: name of the dataset in the UCR archive"""
#data_train = np.loadtxt('500cnn_train.txt',delimiter=',')
#data_test_val = np.loadtxt('500cnn_test.txt',delimiter=',')
#data = np.loadtxt(os.path.join(os.getcwd(),'000001SZ_all_price.txt'),delimiter=',')
data = read_form_csv()
if(need_test):
data_test_val = data[-100:]
data_train = data[:-100]
else:
data_train = data
'''
ratio = (ratio*N).astype(np.int32)
ind = np.random.permutation(N)
X_train = DATA[ind[:ratio[0]],1:]
X_val = DATA[ind[ratio[0]:ratio[1]],1:]
X_test = DATA[ind[ratio[1]:],1:]
# Targets have labels 1-indexed. We subtract one for 0-indexed
y_train = DATA[ind[:ratio[0]],0]-1
y_val = DATA[ind[ratio[0]:ratio[1]],0]-1
y_test = DATA[ind[ratio[1]:],0]-1
return X_train,X_test,y_train,y_test
'''
X_train = data_train[:, 1:]
y_train = data_train[:, 0]
X_test = data_test_val[:, 1:]
y_test = data_test_val[:, 0]
return X_train,X_test,y_train,y_test
def sample_batch(X_train,y_train,batch_size):
""" Function to sample a batch for training"""
N,data_len = X_train.shape
ind_N = np.random.choice(N,batch_size,replace=False)
X_batch = X_train[ind_N]
y_batch = y_train[ind_N]
return X_batch,y_batch
class Model():
def __init__(self,config):
num_layers = config['num_layers']
hidden_size = config['hidden_size']
max_grad_norm = config['max_grad_norm']
self.batch_size = config['batch_size']
sl = config['sl']
learning_rate = config['learning_rate']
num_classes = config['num_classes']
"""Place holders"""
self.input = tf.placeholder(tf.float32, [None, sl], name = 'input')
#self.input = tf.placeholder(tf.float32, [None, sl, input_size], name = 'input')
self.labels = tf.placeholder(tf.int64, [None], name='labels')
self.keep_prob = tf.placeholder("float", name = 'Drop_out_keep_prob')
with tf.name_scope("LSTM_setup") as scope:
def single_cell():
return tf.contrib.rnn.DropoutWrapper(LSTMCell(hidden_size),output_keep_prob=self.keep_prob)
cell = tf.contrib.rnn.MultiRNNCell([single_cell() for _ in range(num_layers)])
initial_state = cell.zero_state(self.batch_size, tf.float32)
#!!!modify
input_list = tf.unstack(tf.expand_dims(self.input,axis=2),axis=1)
outputs,_ = static_rnn(cell, input_list, dtype=tf.float32)
output = outputs[-1]
#Generate a classification from the last cell_output
#Note, this is where timeseries classification differs from sequence to sequence
#modelling. We only output to Softmax at last time step
with tf.name_scope("Softmax") as scope:
with tf.variable_scope("Softmax_params"):
softmax_w = tf.get_variable("softmax_w", [hidden_size, num_classes])
softmax_b = tf.get_variable("softmax_b", [num_classes])
logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
#Use sparse Softmax because we have mutually exclusive classes
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,labels=self.labels,name = 'softmax')
self.cost = tf.reduce_sum(loss) / self.batch_size
with tf.name_scope("Evaluating_accuracy") as scope:
correct_prediction = tf.equal(tf.argmax(logits,1),self.labels)
self.res = tf.argmax(logits,1)
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
#h1 = tf.summary.scalar('accuracy',self.accuracy)
#h2 = tf.summary.scalar('cost', self.cost)
"""Optimizer"""
with tf.name_scope("Optimizer") as scope:
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars),max_grad_norm) #We clip the gradients to prevent explosion
optimizer = tf.train.AdamOptimizer(learning_rate)
gradients = zip(grads, tvars)
self.train_op = optimizer.apply_gradients(gradients)
# Add histograms for variables, gradients and gradient norms.
# The for-loop loops over all entries of the gradient and plots
# a histogram. We cut of
# for gradient, variable in gradients: #plot the gradient of each trainable variable
# if isinstance(gradient, ops.IndexedSlices):
# grad_values = gradient.values
# else:
# grad_values = gradient
#
# tf.summary.histogram(variable.name, variable)
# tf.summary.histogram(variable.name + "/gradients", grad_values)
# tf.summary.histogram(variable.name + "/gradient_norm", clip_ops.global_norm([grad_values]))
#Final code for the TensorBoard
self.merged = tf.summary.merge_all()
self.init_op = tf.global_variables_initializer()
print('Finished computation graph')
| [
"yangyiqwer@gmail.com"
] | yangyiqwer@gmail.com |
db7db6a90704df75b54f315bea68f0ad7f3365c1 | 649bd422025e421d86025743eac324c9b882a2e8 | /exam/1_three-dimensional_atomic_system/dump/phasetrans/temp101_6000.py | ca5589222090cec51eb4387d80afbfad76d2ebaa | [] | no_license | scheuclu/atom_class | 36ddee1f6a5995872e858add151c5942c109847c | 0c9a8c63d9b38898c1869fe8983126cef17662cd | refs/heads/master | 2021-01-21T10:52:28.448221 | 2017-03-07T23:04:41 | 2017-03-07T23:04:41 | 83,489,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68,814 | py | ITEM: TIMESTEP
6000
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
1.4321272860081535e-01 4.7056787271390377e+01
1.4321272860081535e-01 4.7056787271390377e+01
1.4321272860081535e-01 4.7056787271390377e+01
ITEM: ATOMS id type xs ys zs
8 1 0.121128 0.0652153 0.0529452
35 1 0.0586052 0.120368 0.0662597
130 1 0.0478415 0.0644832 0.129102
165 1 0.124033 0.12342 0.120496
161 1 0.98823 0.125918 0.128182
391 1 0.190985 0.0048602 0.439043
12 1 0.24819 0.0670862 0.0611532
39 1 0.187074 0.122063 0.0533896
43 1 0.320293 0.130682 0.0544349
134 1 0.181293 0.0560446 0.125742
138 1 0.30732 0.0530883 0.121272
169 1 0.240007 0.10949 0.126835
1165 1 0.371986 0.497731 0.127201
512 1 0.876989 0.44083 0.433068
1291 1 0.316116 0.496974 0.311832
16 1 0.371795 0.0612295 0.0620073
47 1 0.437826 0.114021 0.0581376
142 1 0.430052 0.062614 0.122352
173 1 0.379179 0.124915 0.138167
177 1 0.495709 0.118015 0.114163
511 1 0.941227 0.373121 0.434656
1051 1 0.813842 0.494019 0.0542743
20 1 0.503697 0.0626443 0.0466291
24 1 0.618396 0.0614018 0.0596878
51 1 0.563683 0.121955 0.0584759
146 1 0.560756 0.0636515 0.116751
181 1 0.621046 0.121178 0.119958
28 1 0.748168 0.0555409 0.063096
55 1 0.691085 0.129398 0.0688364
59 1 0.813312 0.120681 0.0650308
150 1 0.685922 0.0616512 0.124457
154 1 0.808926 0.0625564 0.132809
185 1 0.748522 0.118407 0.129589
1045 1 0.632401 0.496465 0.00307537
257 1 0.985749 0.0019644 0.252104
4 1 0.99584 0.0682345 0.0673613
32 1 0.876888 0.0629551 0.0639142
63 1 0.928141 0.122593 0.0568666
158 1 0.928595 0.0571168 0.124497
189 1 0.875148 0.112629 0.126521
1285 1 0.134889 0.494434 0.257063
113 1 0.499521 0.382165 0.00639871
40 1 0.121127 0.175363 0.063895
67 1 0.0682582 0.24445 0.0545415
72 1 0.119785 0.316021 0.0664481
162 1 0.0596738 0.182325 0.11949
194 1 0.0635595 0.30876 0.125828
197 1 0.119146 0.235746 0.126522
36 1 0.00873541 0.176155 0.0563818
1301 1 0.625385 0.498166 0.242573
27 1 0.816269 0.00160164 0.0667978
44 1 0.254646 0.186829 0.0541968
71 1 0.183945 0.242923 0.050513
75 1 0.320405 0.2448 0.0546557
76 1 0.255848 0.302572 0.063998
166 1 0.189254 0.190654 0.123266
170 1 0.321352 0.185042 0.115355
198 1 0.178342 0.310604 0.124489
201 1 0.250149 0.24938 0.117744
202 1 0.324996 0.307847 0.115514
1437 1 0.871202 0.493117 0.372296
1183 1 0.952448 0.50141 0.182558
1167 1 0.433169 0.496037 0.195564
174 1 0.438422 0.183023 0.124459
48 1 0.388053 0.173805 0.0595832
79 1 0.441226 0.248243 0.0616888
80 1 0.379786 0.300443 0.0584279
205 1 0.379228 0.242368 0.127794
206 1 0.429667 0.310109 0.120297
84 1 0.488685 0.316129 0.0569946
1309 1 0.884009 0.500873 0.235989
52 1 0.492207 0.182649 0.0507947
209 1 0.500796 0.234277 0.115626
56 1 0.624645 0.185926 0.0744435
83 1 0.56474 0.259103 0.063541
88 1 0.620886 0.312571 0.0573471
178 1 0.563889 0.183501 0.125628
210 1 0.557067 0.314044 0.125164
213 1 0.624564 0.258809 0.119893
60 1 0.74487 0.188774 0.0692651
87 1 0.684781 0.256381 0.0639363
91 1 0.809947 0.244391 0.0600154
92 1 0.753095 0.309474 0.0640282
182 1 0.684525 0.192522 0.126194
186 1 0.811141 0.18199 0.121576
214 1 0.685344 0.31175 0.125652
217 1 0.753052 0.25301 0.119086
218 1 0.807026 0.324013 0.124366
287 1 0.938339 0.0102788 0.315231
193 1 0.00648118 0.238458 0.132031
68 1 0.00756776 0.308302 0.0598156
64 1 0.868943 0.182474 0.0616799
95 1 0.945237 0.235997 0.0773487
96 1 0.866704 0.306592 0.074907
190 1 0.92692 0.17961 0.13134
221 1 0.875069 0.2353 0.120871
222 1 0.934342 0.31433 0.112717
99 1 0.0618397 0.373871 0.0677851
104 1 0.127296 0.433875 0.0660925
226 1 0.0634204 0.427211 0.126115
229 1 0.127749 0.375214 0.123966
1417 1 0.247161 0.488042 0.378628
510 1 0.946002 0.431991 0.368591
103 1 0.189705 0.366519 0.0628518
107 1 0.312201 0.368755 0.0717985
108 1 0.253562 0.428358 0.0606608
230 1 0.195211 0.432487 0.119114
233 1 0.248939 0.365739 0.121965
234 1 0.31071 0.438854 0.127608
155 1 0.807621 0.00434102 0.197672
269 1 0.375204 0.00295794 0.255217
141 1 0.377144 0.00650305 0.123111
111 1 0.431994 0.379364 0.0619769
112 1 0.377306 0.438418 0.0688109
237 1 0.380627 0.371326 0.115958
238 1 0.437714 0.44024 0.128722
116 1 0.491601 0.440328 0.064435
241 1 0.493594 0.381498 0.120745
534 1 0.684606 0.0677011 0.497799
1283 1 0.0711745 0.491009 0.304388
115 1 0.559624 0.369957 0.0625803
120 1 0.62473 0.439067 0.0674431
242 1 0.558531 0.434985 0.122181
245 1 0.623 0.372693 0.115498
2 1 0.0597747 0.0666446 -0.0018904
145 1 0.499772 0.00365486 0.116262
1433 1 0.751504 0.494536 0.368606
389 1 0.11701 0.00597755 0.359588
119 1 0.691012 0.37475 0.0589997
123 1 0.817019 0.372982 0.0550826
124 1 0.754011 0.43874 0.0616733
246 1 0.694249 0.435861 0.117448
249 1 0.751083 0.379071 0.115771
250 1 0.818031 0.438744 0.125211
538 1 0.807618 0.0519969 0.497404
617 1 0.255228 0.37739 0.495101
509 1 0.883631 0.385582 0.378427
225 1 1.00178 0.359418 0.117514
100 1 0.00298638 0.427398 0.06825
127 1 0.938144 0.370269 0.0579974
128 1 0.883877 0.433222 0.0587752
253 1 0.876317 0.37671 0.118641
254 1 0.937414 0.431481 0.127322
1169 1 0.497299 0.495151 0.12319
135 1 0.189169 -0.00090506 0.1859
136 1 0.120549 0.065911 0.185594
163 1 0.0562221 0.128368 0.182479
258 1 0.0596169 0.0667815 0.237037
264 1 0.108614 0.0711829 0.309163
291 1 0.053496 0.137016 0.30447
293 1 0.121067 0.12736 0.247731
132 1 0.981324 0.0641198 0.188972
289 1 0.992882 0.125668 0.2463
54 1 0.688222 0.190013 0.00134316
1031 1 0.193836 0.492392 0.0561465
140 1 0.249307 0.0534475 0.18891
167 1 0.18331 0.129856 0.181925
171 1 0.319054 0.124426 0.193673
262 1 0.183877 0.0718001 0.249468
266 1 0.305653 0.0642806 0.252152
268 1 0.246009 0.0679596 0.311467
295 1 0.180614 0.12368 0.322482
297 1 0.248296 0.128553 0.257276
299 1 0.311311 0.122931 0.314736
3 1 0.0444232 0.00474053 0.0720287
109 1 0.383211 0.366447 0.00378107
89 1 0.749516 0.248164 0.00572867
1039 1 0.430071 0.499077 0.0728967
144 1 0.380092 0.0648068 0.185115
175 1 0.443059 0.127202 0.195953
270 1 0.434443 0.0580826 0.25566
272 1 0.375384 0.0559552 0.314034
301 1 0.375799 0.12496 0.249942
303 1 0.436468 0.133556 0.311093
276 1 0.500308 0.0703962 0.309802
148 1 0.499375 0.0581931 0.182404
143 1 0.436467 0.00489952 0.179232
153 1 0.739173 0.00304366 0.128371
1293 1 0.370919 0.490759 0.250121
305 1 0.49286 0.129961 0.25537
152 1 0.612463 0.0592013 0.192506
179 1 0.552254 0.125039 0.181771
274 1 0.554553 0.0587076 0.252637
280 1 0.611748 0.0590055 0.311719
307 1 0.560414 0.135425 0.3073
309 1 0.611341 0.121031 0.24354
273 1 0.494943 0.00735931 0.247505
263 1 0.186061 0.00750506 0.309957
156 1 0.750532 0.0654744 0.202114
183 1 0.678135 0.118851 0.188105
187 1 0.808758 0.130888 0.185069
278 1 0.683576 0.0619224 0.249834
282 1 0.812881 0.0718269 0.258829
284 1 0.739986 0.0561687 0.302256
311 1 0.676152 0.122 0.302808
313 1 0.756053 0.119526 0.253378
315 1 0.814276 0.122225 0.320888
638 1 0.937118 0.437881 0.489462
609 1 0.997218 0.383571 0.497383
260 1 1.00077 0.0598909 0.300806
160 1 0.86137 0.0630755 0.192195
191 1 0.927397 0.128784 0.194914
286 1 0.931028 0.0659774 0.249095
288 1 0.881471 0.0704887 0.317239
317 1 0.869844 0.129723 0.253029
319 1 0.941499 0.130241 0.306729
502 1 0.684708 0.428444 0.375944
168 1 0.120738 0.184627 0.189778
195 1 0.0629465 0.253956 0.184077
200 1 0.12685 0.314629 0.1888
290 1 0.0560866 0.189238 0.246466
296 1 0.124888 0.185823 0.316385
323 1 0.0685434 0.251252 0.315974
325 1 0.123807 0.247371 0.260307
328 1 0.1278 0.32047 0.315551
292 1 0.990068 0.191519 0.29956
164 1 0.993496 0.189351 0.189716
196 1 0.998118 0.313547 0.192619
322 1 0.0749752 0.314431 0.252122
172 1 0.253633 0.180008 0.178928
199 1 0.179287 0.245765 0.18862
203 1 0.31666 0.245505 0.187571
294 1 0.180983 0.189005 0.252656
298 1 0.307984 0.187991 0.26037
300 1 0.241511 0.179661 0.321388
326 1 0.189631 0.301119 0.24891
327 1 0.187416 0.251404 0.313083
329 1 0.251477 0.245258 0.231845
330 1 0.306948 0.301314 0.245889
331 1 0.299118 0.249107 0.307883
332 1 0.245593 0.316458 0.322579
204 1 0.246783 0.304336 0.17844
176 1 0.3768 0.18862 0.191603
207 1 0.441886 0.258177 0.178856
208 1 0.375406 0.310951 0.190509
302 1 0.435887 0.206017 0.250153
304 1 0.374011 0.182432 0.311216
333 1 0.372571 0.248956 0.258777
334 1 0.435445 0.311531 0.24245
335 1 0.433158 0.260026 0.313605
336 1 0.36535 0.310332 0.309661
337 1 0.498704 0.260114 0.250191
180 1 0.493058 0.196904 0.184407
212 1 0.495117 0.320505 0.181935
340 1 0.4928 0.320328 0.312942
308 1 0.496909 0.197152 0.307168
184 1 0.620009 0.191185 0.183044
211 1 0.563756 0.252591 0.188711
216 1 0.623246 0.312842 0.193273
306 1 0.554787 0.1885 0.240768
312 1 0.624717 0.190809 0.306772
338 1 0.561157 0.317049 0.247719
339 1 0.555498 0.266503 0.313382
341 1 0.630568 0.25877 0.249182
344 1 0.625745 0.322038 0.304517
314 1 0.810226 0.190021 0.25251
188 1 0.744975 0.180104 0.18884
215 1 0.685088 0.257996 0.189504
219 1 0.81252 0.251556 0.179636
220 1 0.756545 0.321944 0.187859
310 1 0.687626 0.181407 0.25209
316 1 0.749004 0.184778 0.318247
342 1 0.694204 0.323102 0.248087
343 1 0.688369 0.256054 0.320795
345 1 0.760125 0.253363 0.247136
346 1 0.812104 0.316969 0.253226
347 1 0.811411 0.250973 0.313083
348 1 0.746109 0.314529 0.313484
324 1 0.992186 0.313155 0.311799
321 1 0.00807945 0.255823 0.245881
192 1 0.862071 0.187784 0.186276
223 1 0.935978 0.255975 0.181534
224 1 0.874632 0.30868 0.168438
318 1 0.932307 0.183715 0.24662
320 1 0.879842 0.182687 0.314147
349 1 0.87128 0.253988 0.248517
350 1 0.938413 0.310918 0.250528
351 1 0.934406 0.252762 0.314453
352 1 0.874857 0.310205 0.307123
277 1 0.616868 -0.00267472 0.253056
569 1 0.746805 0.119592 0.498992
385 1 0.999467 0.000229777 0.371311
227 1 0.0719792 0.368813 0.190951
232 1 0.130389 0.436107 0.191244
354 1 0.0740025 0.436355 0.247832
355 1 0.0616367 0.370269 0.309927
357 1 0.131959 0.372375 0.249437
360 1 0.128174 0.430318 0.313971
228 1 0.00799869 0.428548 0.183531
356 1 0.0052846 0.437063 0.301342
15 1 0.430579 0.00747275 0.0572639
231 1 0.197739 0.371336 0.187143
235 1 0.314983 0.367143 0.173173
236 1 0.252667 0.42798 0.186932
358 1 0.196409 0.429214 0.246545
359 1 0.184342 0.376592 0.324591
361 1 0.255661 0.371645 0.247239
362 1 0.307247 0.438688 0.25153
363 1 0.308764 0.374031 0.320278
364 1 0.2561 0.439748 0.318348
506 1 0.808548 0.439684 0.370675
505 1 0.754403 0.374737 0.368943
507 1 0.812861 0.360161 0.435154
625 1 0.494369 0.370427 0.497217
239 1 0.43563 0.379492 0.190583
240 1 0.368895 0.431072 0.185821
365 1 0.36374 0.370127 0.250241
366 1 0.436851 0.43946 0.255998
367 1 0.4291 0.385911 0.312882
368 1 0.360999 0.43222 0.3125
372 1 0.497518 0.440881 0.312139
106 1 0.323538 0.427381 0.00438207
369 1 0.489447 0.378472 0.251187
244 1 0.493323 0.438804 0.190328
243 1 0.556918 0.375787 0.190831
248 1 0.627653 0.441538 0.176577
370 1 0.557412 0.433714 0.243819
371 1 0.550651 0.373447 0.307205
373 1 0.617932 0.385291 0.24921
376 1 0.619139 0.444814 0.310797
137 1 0.245452 -0.000687656 0.122808
614 1 0.180397 0.43821 0.498999
247 1 0.683 0.38139 0.188766
251 1 0.818103 0.377411 0.180721
252 1 0.752535 0.445803 0.183858
374 1 0.683436 0.441921 0.256636
375 1 0.68652 0.371526 0.312362
377 1 0.749849 0.380634 0.245944
378 1 0.811659 0.450142 0.238764
379 1 0.81434 0.383388 0.301672
380 1 0.748821 0.43311 0.311193
495 1 0.431179 0.37015 0.437404
542 1 0.93188 0.0490275 0.496337
353 1 1.00114 0.373768 0.244386
255 1 0.944165 0.368505 0.184717
256 1 0.878409 0.43519 0.187829
381 1 0.878593 0.371312 0.240778
382 1 0.947383 0.439829 0.240379
383 1 0.943962 0.374637 0.308113
384 1 0.876645 0.438515 0.312045
114 1 0.552691 0.442736 0.0140531
413 1 0.883924 0.00982582 0.375202
503 1 0.687019 0.374773 0.43651
386 1 0.0537825 0.0681364 0.362356
392 1 0.116826 0.0628668 0.433433
419 1 0.0494314 0.131186 0.435066
421 1 0.110048 0.126035 0.37221
388 1 0.987355 0.0586155 0.435113
417 1 0.00123889 0.130761 0.369666
390 1 0.182057 0.0573305 0.376315
394 1 0.30643 0.0577327 0.377751
396 1 0.249731 0.0588082 0.435368
423 1 0.182291 0.119968 0.441863
425 1 0.247246 0.119125 0.377243
427 1 0.307347 0.129285 0.441677
545 1 0.996348 0.128058 0.494262
494 1 0.428387 0.436813 0.37727
526 1 0.442734 0.0660786 0.499029
626 1 0.559785 0.433314 0.495576
398 1 0.4307 0.0645666 0.382829
400 1 0.373774 0.0652292 0.440701
429 1 0.378573 0.129331 0.373288
431 1 0.441164 0.127871 0.442911
433 1 0.507252 0.128962 0.371812
26 1 0.807021 0.0586015 0.00269561
395 1 0.313517 -0.000957738 0.436622
508 1 0.75383 0.427335 0.428589
159 1 0.925029 0.00938259 0.188258
404 1 0.504537 0.0725638 0.440706
402 1 0.556546 0.0667685 0.378907
408 1 0.62587 0.0675687 0.43018
435 1 0.573564 0.132694 0.440911
437 1 0.618656 0.133571 0.376668
1425 1 0.50076 0.500056 0.3784
497 1 0.49223 0.382296 0.370653
553 1 0.252025 0.12881 0.4961
406 1 0.677708 0.0668947 0.365429
410 1 0.815704 0.0540681 0.373372
412 1 0.742694 0.0611798 0.438586
439 1 0.689886 0.128695 0.435334
441 1 0.756876 0.122177 0.380631
443 1 0.815578 0.129123 0.437048
594 1 0.553594 0.308052 0.496484
1311 1 0.936992 0.494754 0.319739
1287 1 0.189396 0.489062 0.313738
414 1 0.950559 0.0744779 0.366125
416 1 0.86926 0.0572342 0.439301
445 1 0.87777 0.114251 0.382571
447 1 0.93858 0.137559 0.437642
530 1 0.566645 0.0587052 0.498126
418 1 0.0659617 0.189206 0.376669
424 1 0.117717 0.1841 0.440256
450 1 0.0531448 0.30954 0.371959
451 1 0.0549244 0.249404 0.444509
453 1 0.117126 0.253963 0.383274
456 1 0.125371 0.319386 0.454215
449 1 0.00264226 0.239201 0.365422
452 1 0.997653 0.308206 0.431584
1431 1 0.68659 0.495015 0.440719
422 1 0.179979 0.190496 0.383419
426 1 0.307464 0.186983 0.377407
428 1 0.240088 0.190402 0.43697
454 1 0.181067 0.313639 0.377801
455 1 0.18452 0.253998 0.431553
457 1 0.249475 0.241847 0.37612
458 1 0.315338 0.294557 0.373428
459 1 0.313059 0.242878 0.439579
460 1 0.252717 0.31343 0.433354
1173 1 0.638616 0.499143 0.121875
430 1 0.448002 0.192673 0.379607
432 1 0.377153 0.183201 0.436683
461 1 0.380607 0.244487 0.380418
462 1 0.430941 0.318761 0.371433
463 1 0.433157 0.245247 0.445837
464 1 0.373193 0.308902 0.436731
436 1 0.504288 0.185764 0.455589
465 1 0.499425 0.262478 0.372246
468 1 0.496512 0.31859 0.432987
557 1 0.375883 0.122012 0.493476
500 1 0.495956 0.437053 0.43507
440 1 0.631405 0.189371 0.436318
466 1 0.558206 0.315572 0.376345
472 1 0.625791 0.321161 0.441019
469 1 0.620524 0.25425 0.372468
434 1 0.557807 0.189072 0.369724
467 1 0.564864 0.249469 0.436851
470 1 0.675451 0.31516 0.375975
442 1 0.813444 0.199138 0.382928
471 1 0.686071 0.251731 0.4438
444 1 0.746241 0.18851 0.44419
473 1 0.747145 0.261432 0.377315
476 1 0.743968 0.314887 0.442497
438 1 0.680696 0.197774 0.373411
474 1 0.818398 0.328331 0.367029
475 1 0.804203 0.262633 0.436794
22 1 0.683516 0.0701632 -0.000595372
420 1 0.00190908 0.194073 0.42975
479 1 0.944014 0.250534 0.424619
448 1 0.870052 0.199608 0.433559
446 1 0.939496 0.181517 0.369623
477 1 0.874169 0.25861 0.372188
480 1 0.877117 0.307083 0.441013
478 1 0.936767 0.3164 0.367778
267 1 0.307545 0.00168665 0.32149
481 1 0.00876619 0.375028 0.383104
484 1 0.00728894 0.436296 0.436439
485 1 0.119328 0.373231 0.380754
483 1 0.0622024 0.367245 0.443661
488 1 0.109383 0.431936 0.441308
482 1 0.0619555 0.444351 0.368627
1537 1 -0.00309395 0.499975 0.495049
498 1 0.564503 0.444728 0.369663
501 1 0.618187 0.374247 0.375203
492 1 0.24397 0.449296 0.45114
491 1 0.310236 0.374075 0.4266
486 1 0.179455 0.439234 0.387218
490 1 0.318449 0.44216 0.376773
487 1 0.184983 0.372144 0.442645
489 1 0.247473 0.394729 0.384257
58 1 0.802085 0.177155 0.00544861
496 1 0.384038 0.445178 0.443134
57 1 0.748161 0.12212 0.00866366
504 1 0.621196 0.435119 0.428186
499 1 0.558088 0.3785 0.440002
493 1 0.373807 0.372853 0.375848
1171 1 0.55377 0.496383 0.19
1163 1 0.311153 0.495238 0.188333
70 1 0.177878 0.316652 0.000495599
1153 1 0.0116227 0.489476 0.128232
1155 1 0.060959 0.498221 0.190081
634 1 0.812936 0.434853 0.491969
1161 1 0.243017 0.494859 0.122145
1281 1 0.998394 0.498183 0.252641
387 1 0.0517238 0.00653301 0.433633
1027 1 0.0739598 0.501202 0.0629198
1439 1 0.939695 0.494508 0.425331
1035 1 0.308177 0.490806 0.0610102
61 1 0.866456 0.118663 -0.000554736
93 1 0.880638 0.24985 0.00169254
149 1 0.620237 -0.00126225 0.130658
1159 1 0.191283 0.495113 0.188278
151 1 0.683904 0.00191618 0.187628
399 1 0.429822 0.00292457 0.437849
117 1 0.624343 0.373781 -0.00145932
285 1 0.871717 0.000172731 0.256449
1295 1 0.421051 0.498963 0.311546
131 1 0.0531605 0.0053344 0.185422
126 1 0.955795 0.4394 -5.17223e-05
1055 1 0.951098 0.490323 0.0767626
1415 1 0.17092 0.497121 0.443776
621 1 0.373982 0.37778 0.493722
261 1 0.119443 0.0126827 0.241378
11 1 0.319937 0.000832331 0.0534228
550 1 0.179007 0.184848 0.494279
401 1 0.488769 0.00194453 0.376001
9 1 0.246131 0.00544418 0.00587372
574 1 0.935456 0.195013 0.49442
637 1 0.886806 0.377513 0.492745
1157 1 0.137115 0.490309 0.121206
33 1 0.992392 0.12339 0.00182719
521 1 0.255566 0.0023313 0.496699
581 1 0.124519 0.245942 0.492752
570 1 0.813371 0.188503 0.495417
598 1 0.68666 0.312012 0.500355
81 1 0.493713 0.239485 0.00128045
573 1 0.870499 0.126781 0.495379
1033 1 0.253668 0.495279 0.000537149
586 1 0.306419 0.306281 0.496415
1029 1 0.124148 0.491168 0.00619924
65 1 0.998343 0.250753 0.00790032
610 1 0.0502485 0.441977 0.500613
86 1 0.68722 0.317975 0.00798476
522 1 0.315063 0.0674079 0.49551
41 1 0.252257 0.128845 0.00200848
520 1 0.119835 0.0637772 0.56574
547 1 0.0591587 0.121069 0.568235
642 1 0.0554993 0.0552552 0.62429
677 1 0.123094 0.124835 0.623211
1943 1 0.675019 0.497362 0.937691
585 1 0.251169 0.242296 0.506156
524 1 0.252753 0.0583012 0.562912
551 1 0.181296 0.121024 0.559598
555 1 0.318584 0.116161 0.560324
646 1 0.182016 0.0649942 0.624475
650 1 0.308449 0.0619911 0.621688
681 1 0.257227 0.126792 0.616806
1553 1 0.502145 0.494538 0.506979
1687 1 0.695296 0.498581 0.6987
528 1 0.387262 0.0591382 0.561706
559 1 0.437786 0.130648 0.560292
654 1 0.435578 0.0577267 0.629732
685 1 0.372009 0.124008 0.62748
689 1 0.496027 0.12536 0.619562
656 1 0.364451 0.0600731 0.684318
927 1 0.926861 0.00351331 0.937106
1673 1 0.257468 0.50226 0.61985
532 1 0.493015 0.0619454 0.563527
536 1 0.617966 0.0628884 0.562155
563 1 0.554837 0.128405 0.568813
658 1 0.554329 0.0602531 0.625487
693 1 0.626436 0.119805 0.628102
993 1 0.991459 0.36687 0.874766
1555 1 0.561645 0.498555 0.579625
540 1 0.747177 0.0591899 0.561169
567 1 0.680128 0.122321 0.562953
571 1 0.8135 0.122767 0.566831
662 1 0.681071 0.0626324 0.614664
666 1 0.811642 0.0613672 0.615466
697 1 0.755275 0.12569 0.621261
622 1 0.433423 0.438594 0.512506
561 1 0.499945 0.121437 0.506363
590 1 0.431405 0.308288 0.504784
516 1 0.995092 0.0604302 0.561585
673 1 0.995614 0.117362 0.623887
544 1 0.868427 0.0648344 0.562265
575 1 0.933935 0.125747 0.560074
670 1 0.933231 0.0643969 0.625429
701 1 0.878627 0.13157 0.619698
102 1 0.193575 0.432934 0.999707
552 1 0.117526 0.180751 0.565197
579 1 0.0594201 0.252219 0.564802
584 1 0.122946 0.308461 0.55941
674 1 0.0561646 0.191799 0.632381
706 1 0.0630737 0.3165 0.624192
709 1 0.118152 0.243577 0.625321
548 1 0.995401 0.189678 0.570533
1939 1 0.571934 0.48665 0.941628
558 1 0.41873 0.184782 0.501295
1551 1 0.434916 0.496069 0.566739
678 1 0.184937 0.178635 0.620043
556 1 0.244423 0.178301 0.556535
583 1 0.180627 0.244545 0.562667
587 1 0.309306 0.241575 0.569152
588 1 0.249067 0.314236 0.564872
682 1 0.310904 0.1797 0.635495
710 1 0.182336 0.315833 0.616618
713 1 0.24617 0.25033 0.623385
714 1 0.309488 0.314203 0.622789
597 1 0.620437 0.247334 0.50112
78 1 0.438844 0.307532 0.996077
523 1 0.317618 0.00261417 0.562412
560 1 0.368433 0.179915 0.56983
591 1 0.432574 0.24729 0.566486
592 1 0.385416 0.321327 0.576436
686 1 0.438242 0.185379 0.62331
717 1 0.372335 0.257232 0.63601
718 1 0.439826 0.312139 0.635549
596 1 0.485641 0.316356 0.564585
789 1 0.629985 0.00705505 0.740836
589 1 0.367408 0.2522 0.501974
564 1 0.49151 0.188616 0.548636
721 1 0.499051 0.248695 0.625324
568 1 0.620972 0.193809 0.566599
595 1 0.563127 0.246439 0.566868
600 1 0.620452 0.317905 0.560042
690 1 0.553849 0.189245 0.619316
722 1 0.543962 0.317274 0.631323
725 1 0.618184 0.261781 0.62329
549 1 0.115613 0.119159 0.499778
1691 1 0.828755 0.501707 0.683091
578 1 0.0541539 0.316497 0.505623
46 1 0.436898 0.172751 0.987513
572 1 0.747747 0.17792 0.548397
599 1 0.694357 0.256602 0.562521
603 1 0.809002 0.24409 0.56289
604 1 0.755142 0.318717 0.566431
694 1 0.687169 0.183875 0.622526
698 1 0.817132 0.190522 0.624604
726 1 0.689541 0.308267 0.624803
729 1 0.7487 0.245104 0.619449
730 1 0.816087 0.315685 0.631417
97 1 0.00188806 0.369834 0.995072
605 1 0.872275 0.252869 0.496779
602 1 0.809398 0.311953 0.504586
705 1 0.993059 0.25725 0.626483
580 1 0.998467 0.31308 0.564426
576 1 0.879581 0.189076 0.559245
607 1 0.943016 0.25026 0.560466
608 1 0.874254 0.310278 0.563093
702 1 0.935642 0.184559 0.6307
733 1 0.876963 0.244126 0.624763
734 1 0.926958 0.311509 0.623562
1563 1 0.818482 0.49739 0.549145
1801 1 0.24886 0.498415 0.756852
554 1 0.315123 0.185091 0.502692
611 1 0.0571109 0.385069 0.569049
616 1 0.124659 0.444053 0.560875
738 1 0.0712745 0.441387 0.63199
741 1 0.132667 0.381432 0.621999
122 1 0.82207 0.43867 0.99202
1665 1 0.987441 0.496093 0.618838
615 1 0.185776 0.377842 0.554423
619 1 0.318452 0.377879 0.563638
620 1 0.25144 0.446113 0.559029
742 1 0.184753 0.447184 0.615145
745 1 0.248397 0.375241 0.620284
746 1 0.307497 0.440569 0.622943
30 1 0.93003 0.0596986 1.00029
903 1 0.191036 0.00597635 0.942456
613 1 0.11076 0.375913 0.505086
42 1 0.31647 0.184369 0.993057
577 1 0.996276 0.245172 0.500465
623 1 0.43018 0.387916 0.572468
624 1 0.367145 0.449408 0.570104
749 1 0.369686 0.382169 0.631941
750 1 0.432221 0.441091 0.631895
669 1 0.872975 0.00144212 0.616933
94 1 0.933651 0.308971 0.994316
1815 1 0.687873 0.501298 0.815291
34 1 0.0667826 0.184811 0.995736
628 1 0.503254 0.441291 0.569354
753 1 0.492418 0.384416 0.627831
627 1 0.552884 0.372235 0.553377
632 1 0.62689 0.42666 0.576364
754 1 0.565748 0.426948 0.637854
757 1 0.626858 0.367649 0.628403
50 1 0.561015 0.179968 1.00101
518 1 0.184426 0.0644955 0.504002
533 1 0.622743 0.000560708 0.511438
631 1 0.693294 0.371702 0.565348
635 1 0.817984 0.374549 0.559543
636 1 0.750454 0.439924 0.556398
758 1 0.69315 0.443583 0.627097
761 1 0.761408 0.385047 0.620453
762 1 0.820608 0.445938 0.618602
1539 1 0.0655218 0.497715 0.56799
1693 1 0.884606 0.496526 0.621404
105 1 0.25124 0.37801 0.997035
737 1 -0.00057837 0.366293 0.625668
612 1 0.988928 0.439466 0.553093
639 1 0.929367 0.373893 0.565341
640 1 0.873453 0.435026 0.558282
765 1 0.872595 0.384766 0.623974
766 1 0.931711 0.430021 0.620508
1813 1 0.623914 0.492649 0.741322
1024 1 0.880258 0.433346 0.937343
10 1 0.312523 0.0624103 0.994291
629 1 0.62847 0.381267 0.505089
1023 1 0.936953 0.376731 0.94997
648 1 0.113825 0.0545436 0.690208
675 1 0.0603347 0.121957 0.691358
770 1 0.0569529 0.0570899 0.750625
776 1 0.118291 0.0608467 0.81617
803 1 0.0517804 0.121157 0.822514
805 1 0.113885 0.127705 0.758637
652 1 0.244835 0.0571359 0.692317
679 1 0.178342 0.114133 0.693801
683 1 0.306287 0.119561 0.685358
774 1 0.184934 0.0576834 0.752516
778 1 0.322929 0.0631418 0.75279
780 1 0.257579 0.0580108 0.819117
807 1 0.186627 0.122531 0.8248
809 1 0.248147 0.118886 0.753704
811 1 0.314834 0.115056 0.812325
546 1 0.0606903 0.188196 0.503472
1022 1 0.938925 0.433225 0.878135
1021 1 0.86756 0.371045 0.885668
593 1 0.499718 0.252626 0.51099
1803 1 0.303707 0.497562 0.823013
118 1 0.69332 0.436452 1.00033
687 1 0.443612 0.122656 0.689811
782 1 0.429262 0.0526603 0.735975
784 1 0.377658 0.0531615 0.812375
813 1 0.373913 0.129444 0.738401
815 1 0.429873 0.118575 0.808833
660 1 0.50287 0.0723015 0.679757
98 1 0.0678466 0.433639 1.00268
517 1 0.122397 0.00696856 0.501706
817 1 0.496928 0.122495 0.749476
788 1 0.504204 0.0687492 0.814091
664 1 0.618958 0.0647374 0.684558
691 1 0.562694 0.123759 0.694457
786 1 0.561015 0.0647371 0.747644
792 1 0.620867 0.0518753 0.802119
819 1 0.559631 0.134735 0.803813
821 1 0.624002 0.124843 0.749845
1020 1 0.752436 0.44067 0.938309
1018 1 0.817299 0.434969 0.872877
668 1 0.751486 0.0573695 0.677129
695 1 0.698872 0.119674 0.675827
699 1 0.816246 0.122531 0.676618
790 1 0.688947 0.0690316 0.742328
794 1 0.821138 0.0600399 0.742503
796 1 0.757068 0.0546041 0.802652
823 1 0.694468 0.12461 0.800627
825 1 0.76873 0.11914 0.738842
827 1 0.812906 0.123411 0.811755
643 1 0.0501908 -5.92363e-05 0.69151
1019 1 0.806723 0.37407 0.937156
1017 1 0.747042 0.370264 0.875502
772 1 0.997006 0.0535071 0.806574
644 1 0.99044 0.0594366 0.68687
801 1 0.993002 0.126721 0.756536
672 1 0.875619 0.055494 0.682362
703 1 0.939192 0.124165 0.693334
798 1 0.936071 0.0737716 0.757909
800 1 0.868291 0.0635776 0.807685
829 1 0.875926 0.128239 0.751585
831 1 0.935429 0.12974 0.812774
797 1 0.874154 -0.000404513 0.742287
707 1 0.0556263 0.251774 0.696711
680 1 0.118708 0.180913 0.688949
712 1 0.119378 0.304266 0.692997
802 1 0.0522728 0.183499 0.750654
808 1 0.116978 0.186122 0.813042
834 1 0.0650235 0.307965 0.757849
835 1 0.0548158 0.250911 0.815633
837 1 0.12425 0.246501 0.755488
840 1 0.125705 0.310745 0.819122
804 1 0.991222 0.182067 0.816051
836 1 1.00023 0.307786 0.812498
684 1 0.242969 0.179123 0.689339
711 1 0.180529 0.242474 0.678691
715 1 0.296806 0.243836 0.680483
716 1 0.239636 0.315628 0.686475
806 1 0.184215 0.190923 0.747853
810 1 0.303037 0.186442 0.74637
812 1 0.249264 0.181989 0.806442
838 1 0.186832 0.308927 0.739618
839 1 0.188015 0.244417 0.806587
841 1 0.241576 0.255045 0.738396
842 1 0.30402 0.307238 0.74574
843 1 0.304737 0.2462 0.802932
844 1 0.24659 0.30372 0.801595
688 1 0.377882 0.192435 0.679763
719 1 0.42971 0.252844 0.697357
720 1 0.378546 0.323547 0.697075
814 1 0.433406 0.1907 0.754445
816 1 0.373991 0.184227 0.814978
845 1 0.36159 0.241251 0.742076
846 1 0.439457 0.313678 0.759209
847 1 0.428156 0.245794 0.816796
848 1 0.371921 0.309123 0.796938
852 1 0.497733 0.317274 0.81326
849 1 0.501191 0.252682 0.74863
820 1 0.507865 0.199137 0.805287
724 1 0.496163 0.316897 0.695156
692 1 0.506025 0.18433 0.688146
728 1 0.617866 0.318996 0.688022
696 1 0.619979 0.186953 0.672651
723 1 0.553482 0.250761 0.687949
818 1 0.567617 0.191631 0.747144
824 1 0.630337 0.190765 0.807133
850 1 0.559062 0.320363 0.75347
851 1 0.568675 0.248824 0.806972
853 1 0.631708 0.25038 0.735735
856 1 0.620022 0.308044 0.802511
700 1 0.759986 0.189419 0.69596
727 1 0.692694 0.247834 0.675482
731 1 0.810419 0.25353 0.689822
732 1 0.75463 0.309717 0.68561
822 1 0.68979 0.179495 0.744208
826 1 0.815414 0.183742 0.756285
828 1 0.751554 0.185565 0.814467
854 1 0.694726 0.300839 0.74541
855 1 0.686532 0.250381 0.814821
857 1 0.753074 0.241206 0.760806
858 1 0.809271 0.302237 0.756024
859 1 0.809098 0.250797 0.819608
860 1 0.748941 0.304101 0.818034
833 1 0.988967 0.246046 0.759601
708 1 -0.000648709 0.31016 0.703955
676 1 0.989317 0.191394 0.687461
704 1 0.870712 0.190609 0.685498
735 1 0.932599 0.248919 0.692461
736 1 0.872093 0.313203 0.685977
830 1 0.931533 0.186894 0.753077
832 1 0.87135 0.187078 0.817477
861 1 0.866211 0.247445 0.75095
862 1 0.924115 0.311442 0.753703
863 1 0.935829 0.254208 0.826497
864 1 0.874317 0.310591 0.825187
1015 1 0.698176 0.375684 0.934376
739 1 0.0648202 0.365561 0.695658
744 1 0.126109 0.437821 0.692199
866 1 0.0631873 0.445961 0.754806
867 1 0.0576614 0.375231 0.821462
869 1 0.128385 0.37367 0.747725
872 1 0.127149 0.438641 0.808126
740 1 0.00762817 0.442297 0.685412
45 1 0.37536 0.121871 0.988677
743 1 0.188738 0.385028 0.680488
747 1 0.308033 0.38307 0.694514
748 1 0.249818 0.440298 0.676815
870 1 0.191717 0.441336 0.753417
871 1 0.186077 0.365591 0.806973
873 1 0.247503 0.375908 0.750256
874 1 0.306751 0.436128 0.757529
875 1 0.304613 0.373249 0.808096
876 1 0.243436 0.439564 0.814886
121 1 0.749315 0.37218 0.994082
917 1 0.637129 0.00186735 0.871762
783 1 0.446339 0.00754966 0.808645
751 1 0.443511 0.375644 0.693761
752 1 0.373353 0.442778 0.706978
877 1 0.374673 0.3796 0.750305
878 1 0.441376 0.436791 0.757235
879 1 0.426521 0.376895 0.814979
880 1 0.369537 0.450914 0.819809
884 1 0.502073 0.440177 0.821472
756 1 0.505577 0.442892 0.686381
881 1 0.502094 0.382964 0.760509
755 1 0.554593 0.375637 0.693089
760 1 0.63257 0.426779 0.681343
882 1 0.556195 0.436149 0.745466
883 1 0.557147 0.371748 0.820254
885 1 0.632913 0.37856 0.75214
888 1 0.619287 0.43493 0.811332
759 1 0.696879 0.376751 0.691475
763 1 0.814532 0.365617 0.697428
764 1 0.760953 0.441024 0.701171
886 1 0.69019 0.433457 0.756655
887 1 0.684685 0.361231 0.80705
889 1 0.756194 0.368779 0.754083
890 1 0.816155 0.445431 0.757736
891 1 0.809353 0.382274 0.814087
892 1 0.746689 0.440346 0.822191
1014 1 0.682119 0.431244 0.871374
865 1 -0.00151426 0.378904 0.757896
868 1 0.00159753 0.443874 0.81449
767 1 0.937642 0.370173 0.69594
768 1 0.868468 0.426908 0.693294
893 1 0.868265 0.376459 0.759626
894 1 0.931537 0.433944 0.745147
895 1 0.936999 0.37364 0.810156
896 1 0.877553 0.436407 0.822302
925 1 0.873147 0.00157866 0.873838
898 1 0.0441245 0.067285 0.877138
904 1 0.119615 0.0603659 0.929353
931 1 0.0558632 0.124028 0.936689
933 1 0.113612 0.114486 0.875652
543 1 0.92614 0.00243669 0.562732
562 1 0.565983 0.183092 0.50808
902 1 0.183436 0.0508095 0.867198
906 1 0.313537 0.061398 0.878783
908 1 0.250061 0.0728919 0.936797
935 1 0.184442 0.121592 0.929019
937 1 0.257689 0.120666 0.867335
939 1 0.314672 0.116284 0.932108
1679 1 0.440455 0.50034 0.688669
514 1 0.0603306 0.0604205 0.503237
1945 1 0.747331 0.497516 0.878108
910 1 0.434963 0.0596289 0.870788
912 1 0.374922 0.0574321 0.934782
941 1 0.372887 0.12224 0.869312
943 1 0.434435 0.114608 0.930962
14 1 0.435882 0.0618483 0.992538
779 1 0.31124 0.000349178 0.812338
1557 1 0.624325 0.498373 0.509046
38 1 0.194198 0.180954 0.994616
531 1 0.547816 -0.000153135 0.566435
945 1 0.5055 0.123571 0.876586
916 1 0.502718 0.0627393 0.930874
914 1 0.568543 0.0633929 0.863703
920 1 0.63258 0.0643072 0.930952
947 1 0.570213 0.122656 0.928598
949 1 0.63264 0.118429 0.853587
1016 1 0.633409 0.42634 0.938382
918 1 0.694462 0.065798 0.87168
922 1 0.807459 0.0657772 0.869592
924 1 0.755491 0.0624861 0.941103
951 1 0.689895 0.120165 0.937106
953 1 0.751307 0.124852 0.871552
955 1 0.809578 0.126412 0.936329
633 1 0.744771 0.378775 0.499537
1005 1 0.380786 0.375211 0.88466
1013 1 0.627975 0.371356 0.869614
900 1 0.991491 0.0593802 0.93756
929 1 0.994028 0.128707 0.885548
785 1 0.498197 0.00951886 0.738618
926 1 0.934134 0.0690403 0.876179
928 1 0.856908 0.066904 0.93986
957 1 0.874849 0.125452 0.874807
959 1 0.933141 0.120112 0.942502
930 1 0.0577081 0.197398 0.877452
936 1 0.116042 0.180194 0.928881
963 1 0.0540585 0.245158 0.940799
965 1 0.125272 0.246244 0.883441
968 1 0.134066 0.309705 0.930987
962 1 0.066298 0.309335 0.874474
932 1 0.995966 0.18644 0.944953
630 1 0.682558 0.442322 0.501293
934 1 0.190878 0.192453 0.869788
938 1 0.312118 0.189564 0.871147
940 1 0.253238 0.179844 0.925541
966 1 0.191256 0.306509 0.871184
967 1 0.18959 0.236171 0.931716
969 1 0.254958 0.250541 0.871906
970 1 0.303994 0.310541 0.867256
971 1 0.311016 0.242291 0.944254
972 1 0.244239 0.313281 0.934956
1012 1 0.496427 0.433214 0.927664
897 1 0.986831 0.00667214 0.871907
18 1 0.565764 0.0663264 0.993265
519 1 0.190338 0.00535165 0.564955
973 1 0.365133 0.252218 0.87657
944 1 0.373352 0.179551 0.928091
942 1 0.44442 0.180798 0.872679
976 1 0.374159 0.311074 0.940807
975 1 0.432 0.242576 0.929726
974 1 0.426388 0.307556 0.864764
977 1 0.500321 0.24881 0.875372
948 1 0.505926 0.183328 0.936436
911 1 0.444172 0.00517051 0.929622
1007 1 0.436166 0.383614 0.940868
37 1 0.130416 0.129221 0.991501
980 1 0.493353 0.304659 0.928653
946 1 0.574055 0.189073 0.871532
984 1 0.630269 0.307443 0.950235
978 1 0.564141 0.306639 0.878085
981 1 0.628796 0.254657 0.868795
979 1 0.564383 0.244506 0.942346
952 1 0.636187 0.192021 0.930282
1010 1 0.564945 0.439115 0.873727
1667 1 0.0701592 0.498124 0.693048
987 1 0.811943 0.235417 0.942296
985 1 0.742316 0.242781 0.878576
950 1 0.688699 0.174694 0.872383
956 1 0.743815 0.179828 0.9373
988 1 0.75894 0.304991 0.94273
986 1 0.811981 0.314427 0.878015
982 1 0.689257 0.314335 0.875063
954 1 0.816133 0.178457 0.87669
983 1 0.694903 0.253975 0.944776
1001 1 0.242001 0.371996 0.871351
90 1 0.814218 0.31113 0.995905
1002 1 0.308414 0.423399 0.87319
601 1 0.753993 0.246355 0.499248
649 1 0.247924 -0.000308399 0.628309
964 1 0.00334504 0.318099 0.937463
960 1 0.881677 0.186489 0.934358
989 1 0.866473 0.248938 0.87831
990 1 0.930736 0.314976 0.89057
961 1 0.996002 0.25354 0.880072
992 1 0.86842 0.295631 0.938749
991 1 0.939569 0.246822 0.937195
958 1 0.931517 0.186353 0.876114
999 1 0.187941 0.3757 0.938903
1951 1 0.950878 0.495948 0.936368
998 1 0.187819 0.442672 0.872944
996 1 0.0088624 0.433209 0.932497
995 1 0.068402 0.375947 0.922987
997 1 0.134206 0.375372 0.861603
994 1 0.0748577 0.447505 0.885015
1000 1 0.130247 0.435772 0.940486
1011 1 0.567563 0.387504 0.941433
1009 1 0.485771 0.373655 0.877974
1008 1 0.379782 0.446366 0.944728
1006 1 0.43592 0.441111 0.863288
1003 1 0.31835 0.376435 0.940627
1004 1 0.249489 0.43662 0.934448
799 1 0.939283 0.00335348 0.801347
82 1 0.556013 0.314907 0.994799
1677 1 0.380245 0.501755 0.631277
795 1 0.815334 -3.384e-05 0.808232
74 1 0.313897 0.309797 0.997158
49 1 0.504302 0.120848 0.988645
1927 1 0.196792 0.503359 0.943409
1543 1 0.182396 0.502311 0.555831
582 1 0.192101 0.303435 0.500877
69 1 0.125911 0.23678 0.987644
651 1 0.309815 -0.00040935 0.69662
659 1 0.563911 0.000248238 0.683493
110 1 0.44057 0.451827 0.999982
101 1 0.119169 0.366648 0.998808
66 1 0.0674756 0.300394 0.995654
73 1 0.247952 0.257665 0.987939
606 1 0.943785 0.307159 0.50419
62 1 0.932253 0.18454 0.996779
525 1 0.374322 0.0122233 0.501617
566 1 0.680236 0.183741 0.501649
85 1 0.625778 0.241897 0.998402
77 1 0.380147 0.241822 0.993669
6 1 0.187402 0.063301 0.996112
5 1 0.121391 0.00920025 0.996098
17 1 0.486697 0.00259467 0.994905
565 1 0.618243 0.123341 0.504487
618 1 0.309394 0.437563 0.50075
53 1 0.623455 0.128579 0.996528
125 1 0.870885 0.364358 0.993431
1 1 0.996788 0.00648991 0.997064
529 1 0.49258 -0.00020734 0.503419
1032 1 0.142052 0.564631 0.0710253
1059 1 0.0624782 0.627405 0.0678674
1154 1 0.0777703 0.551974 0.12792
1189 1 0.135617 0.625038 0.130384
1185 1 0.0054275 0.627255 0.12531
1179 1 0.807393 0.502461 0.173732
1036 1 0.256067 0.554927 0.0557294
1063 1 0.193962 0.628147 0.0662972
1067 1 0.310687 0.634589 0.0718746
1158 1 0.201925 0.563145 0.125068
1162 1 0.304698 0.554521 0.117088
1193 1 0.251886 0.627222 0.131689
283 1 0.799015 0.997035 0.313004
1305 1 0.746333 0.505462 0.247897
1409 1 0.00529815 0.505601 0.367745
1649 1 0.49927 0.875833 0.493895
1303 1 0.682353 0.508023 0.312462
1026 1 0.0767728 0.563682 0.00937252
1421 1 0.373408 0.501669 0.379021
1040 1 0.368035 0.566529 0.0596128
1071 1 0.431155 0.616491 0.0671872
1166 1 0.441237 0.561903 0.132009
1197 1 0.372124 0.62159 0.127936
405 1 0.623791 0.992615 0.379961
1201 1 0.493944 0.623476 0.124782
281 1 0.743113 0.991057 0.243513
1044 1 0.488219 0.557916 0.0645556
1048 1 0.633557 0.558837 0.0588135
1075 1 0.557163 0.617582 0.0712744
1170 1 0.551598 0.558428 0.128441
1205 1 0.621038 0.623778 0.126209
271 1 0.434008 0.996864 0.309997
1078 1 0.685256 0.689323 -0.00214951
1203 1 0.563937 0.627204 0.188165
1047 1 0.697326 0.5042 0.064268
1073 1 0.494004 0.615989 0.00308877
1079 1 0.680345 0.622559 0.0492892
1052 1 0.75254 0.562707 0.0552931
1083 1 0.81611 0.625586 0.0593421
1174 1 0.691619 0.568475 0.116428
1178 1 0.819413 0.56223 0.11354
1209 1 0.761208 0.624154 0.120736
1137 1 0.511901 0.869225 0.000476706
1042 1 0.561652 0.572516 -0.00166775
1505 1 0.00335659 0.876534 0.368009
1129 1 0.257194 0.875178 0.0087104
1028 1 0.0095533 0.560518 0.0627493
1056 1 0.880047 0.573159 0.0523696
1087 1 0.933243 0.627034 0.067256
1182 1 0.946428 0.558791 0.119963
1213 1 0.876272 0.624834 0.13062
31 1 0.941864 0.984783 0.0561114
1175 1 0.693115 0.500918 0.184491
1065 1 0.246794 0.62671 0.00222067
1125 1 0.122366 0.875894 0.00242943
403 1 0.558447 0.995504 0.433348
1634 1 0.061111 0.948087 0.4871
1064 1 0.130847 0.689474 0.0608156
1091 1 0.0706429 0.754337 0.0663665
1096 1 0.129392 0.822084 0.0625236
1186 1 0.0731019 0.691792 0.117359
1218 1 0.0671951 0.820265 0.122208
1221 1 0.125847 0.761415 0.129815
1093 1 0.129534 0.753337 0.00915254
1538 1 0.0528006 0.552601 0.496271
1068 1 0.251834 0.698212 0.0745807
1095 1 0.193508 0.756465 0.0652815
1099 1 0.321203 0.747138 0.0512332
1100 1 0.259783 0.817614 0.0565703
1190 1 0.190065 0.69652 0.127109
1194 1 0.315249 0.682416 0.139171
1222 1 0.192355 0.81692 0.131336
1225 1 0.249945 0.759406 0.133328
1226 1 0.31636 0.817407 0.121192
513 1 1.00098 0.997614 0.49038
1598 1 0.948021 0.690867 0.491193
1629 1 0.885849 0.75174 0.48621
1072 1 0.375382 0.684404 0.0675691
1103 1 0.441118 0.739755 0.0684107
1104 1 0.387609 0.815007 0.0543299
1198 1 0.430937 0.681345 0.134452
1229 1 0.375313 0.752489 0.126169
1230 1 0.436693 0.808598 0.122005
1076 1 0.5113 0.683277 0.0633884
19 1 0.556007 0.999111 0.0698367
1233 1 0.504898 0.752855 0.119898
29 1 0.872971 0.992877 0.00708364
1041 1 0.497756 0.501895 0.00382555
1061 1 0.126913 0.6276 0.00734407
1108 1 0.504849 0.810253 0.0645411
1080 1 0.616449 0.685045 0.0616365
1107 1 0.565851 0.749917 0.0631325
1112 1 0.629009 0.812201 0.0613224
1202 1 0.564897 0.688899 0.125624
1234 1 0.567419 0.811189 0.126106
1237 1 0.626752 0.744922 0.128333
139 1 0.314321 1.00045 0.203431
407 1 0.678999 0.996256 0.440462
1177 1 0.751543 0.503629 0.120918
1084 1 0.751027 0.687156 0.0558953
1111 1 0.694573 0.750278 0.0566177
1115 1 0.815523 0.748129 0.063263
1116 1 0.758961 0.803223 0.0624831
1206 1 0.688129 0.680067 0.117185
1210 1 0.810676 0.686215 0.118943
1238 1 0.690529 0.814579 0.117354
1241 1 0.750532 0.75053 0.12181
1242 1 0.814099 0.813665 0.135864
1413 1 0.128293 0.503285 0.363724
1542 1 0.182162 0.564988 0.49542
1299 1 0.559741 0.505478 0.313644
1217 1 0.00871501 0.761979 0.121565
1060 1 0.999115 0.688382 0.0589984
1092 1 0.994776 0.816535 0.0649805
1088 1 0.875059 0.689337 0.0651387
1119 1 0.936844 0.751341 0.0671741
1120 1 0.873528 0.807041 0.0617875
1214 1 0.941637 0.683114 0.127408
1245 1 0.881099 0.746666 0.131109
1246 1 0.935988 0.813825 0.132076
1516 1 0.250316 0.945239 0.442319
1511 1 0.188142 0.882594 0.429089
1533 1 0.874467 0.876364 0.368749
1123 1 0.0642653 0.87612 0.0641983
1128 1 0.119835 0.939733 0.060244
1250 1 0.0596979 0.940737 0.131547
1253 1 0.126738 0.882014 0.122669
1124 1 0.00660952 0.93493 0.0609865
1249 1 0.00370518 0.878955 0.129255
1097 1 0.255059 0.751384 0.00563175
1110 1 0.69181 0.814673 0.00433819
1601 1 0.00273592 0.75781 0.491261
415 1 0.934364 0.988942 0.435402
1127 1 0.19022 0.881017 0.0609272
1131 1 0.313485 0.873871 0.0686081
1132 1 0.24689 0.939991 0.0716187
1254 1 0.190994 0.937491 0.126982
1257 1 0.251088 0.877459 0.127381
1258 1 0.313659 0.943683 0.115813
1633 1 0.996734 0.873515 0.500154
1534 1 0.931322 0.93069 0.378319
1514 1 0.31336 0.943344 0.385538
1135 1 0.439419 0.877206 0.0599912
1136 1 0.386234 0.938252 0.0637174
1261 1 0.380448 0.872022 0.121242
1262 1 0.443599 0.939454 0.128667
1140 1 0.503382 0.935706 0.0647758
397 1 0.377325 0.99907 0.374339
1535 1 0.931261 0.866504 0.430621
1536 1 0.87155 0.929996 0.442615
1265 1 0.504053 0.875383 0.128382
1139 1 0.567438 0.8746 0.0666947
1144 1 0.616627 0.940353 0.0629979
1266 1 0.557986 0.929475 0.122297
1269 1 0.624729 0.87204 0.131651
1508 1 0.996044 0.930583 0.430732
147 1 0.557494 0.994135 0.184024
1126 1 0.191563 0.94576 0.00554318
1289 1 0.249124 0.503193 0.25613
1143 1 0.67461 0.879818 0.069325
1147 1 0.814298 0.872915 0.0732205
1148 1 0.742764 0.931278 0.0700462
1270 1 0.676691 0.938139 0.123242
1273 1 0.746375 0.879664 0.128677
1274 1 0.809452 0.945851 0.136806
393 1 0.248287 0.993971 0.380502
1141 1 0.619632 0.87997 -0.00106732
1094 1 0.193874 0.813582 0.00363694
1509 1 0.126863 0.883809 0.371504
1297 1 0.499966 0.505592 0.251287
25 1 0.755641 0.992464 0.015439
1043 1 0.56723 0.504027 0.06642
1151 1 0.933699 0.880291 0.0652444
1152 1 0.869926 0.936168 0.0711224
1277 1 0.868995 0.879609 0.13238
1278 1 0.935183 0.940439 0.124242
1419 1 0.313822 0.505308 0.444114
1507 1 0.0618779 0.872047 0.435864
1160 1 0.14074 0.556122 0.198038
1187 1 0.0709308 0.621441 0.191973
1282 1 0.0586099 0.56293 0.246111
1288 1 0.117693 0.564904 0.307671
1315 1 0.0672605 0.62489 0.306282
1317 1 0.130875 0.627987 0.248009
1284 1 0.995419 0.565586 0.313497
1164 1 0.251377 0.553766 0.190179
1191 1 0.191927 0.62191 0.193184
1195 1 0.311883 0.612654 0.190306
1286 1 0.188421 0.561492 0.26297
1290 1 0.313956 0.54936 0.252438
1292 1 0.251314 0.554125 0.31734
1319 1 0.205411 0.633285 0.316198
1321 1 0.256187 0.618661 0.250319
1323 1 0.306609 0.620005 0.321033
1181 1 0.883115 0.506274 0.110137
1168 1 0.374721 0.557743 0.192014
1199 1 0.435498 0.621007 0.190809
1294 1 0.430228 0.558747 0.262624
1296 1 0.365205 0.562403 0.32043
1325 1 0.365451 0.620548 0.258964
1327 1 0.436384 0.620838 0.311852
1657 1 0.750348 0.863941 0.492676
1435 1 0.803586 0.506377 0.428963
1532 1 0.746396 0.933005 0.432098
1329 1 0.496421 0.6285 0.248782
1300 1 0.500653 0.558792 0.303215
1172 1 0.5042 0.566235 0.189136
1176 1 0.615263 0.556312 0.181874
1298 1 0.574122 0.56436 0.247659
1304 1 0.626326 0.565325 0.319309
1331 1 0.567668 0.62928 0.308285
1333 1 0.624949 0.632208 0.251267
259 1 0.0551979 0.996698 0.309369
1526 1 0.67747 0.938923 0.381779
1531 1 0.811783 0.873719 0.423889
1207 1 0.684228 0.625847 0.179066
1180 1 0.745571 0.56878 0.180094
1211 1 0.812992 0.620517 0.182691
1302 1 0.680113 0.562911 0.25706
1306 1 0.81179 0.553503 0.243133
1308 1 0.743143 0.556911 0.308426
1335 1 0.679218 0.629499 0.308111
1337 1 0.743981 0.619346 0.238062
1339 1 0.806818 0.623065 0.305462
1529 1 0.743947 0.876109 0.380053
1512 1 0.126799 0.948555 0.435683
1156 1 0.00610069 0.558672 0.180341
1313 1 0.993246 0.632379 0.248936
1184 1 0.870902 0.555527 0.185651
1215 1 0.935394 0.619336 0.184995
1310 1 0.929015 0.566422 0.244951
1312 1 0.877792 0.56177 0.301241
1341 1 0.868805 0.62396 0.242957
1343 1 0.935064 0.631167 0.309814
1192 1 0.121698 0.693353 0.184826
1219 1 0.0697026 0.754006 0.191466
1224 1 0.133318 0.831071 0.187952
1314 1 0.0693154 0.688841 0.250767
1320 1 0.137623 0.684013 0.308682
1346 1 0.0641328 0.810526 0.24604
1347 1 0.075807 0.749979 0.308715
1349 1 0.130187 0.758451 0.252805
1352 1 0.137011 0.818915 0.316688
1220 1 0.00525486 0.812819 0.18586
1345 1 0.0159319 0.747227 0.250539
1316 1 0.0112946 0.688693 0.31584
1188 1 0.0104933 0.697606 0.182169
1196 1 0.260798 0.69277 0.194544
1223 1 0.18878 0.753659 0.193305
1227 1 0.316293 0.759495 0.184314
1228 1 0.26174 0.818067 0.197043
1318 1 0.195934 0.692141 0.254482
1322 1 0.314374 0.69469 0.258181
1324 1 0.260628 0.702975 0.31942
1350 1 0.19107 0.815114 0.251266
1351 1 0.192676 0.750433 0.3091
1353 1 0.258946 0.757671 0.252374
1355 1 0.326057 0.758063 0.31659
1356 1 0.254001 0.814304 0.313123
1354 1 0.317115 0.821224 0.255744
1200 1 0.376797 0.686787 0.193751
1231 1 0.43915 0.747512 0.194494
1232 1 0.377957 0.807808 0.190159
1326 1 0.445126 0.690704 0.249064
1328 1 0.377693 0.694367 0.314988
1357 1 0.37767 0.758642 0.254395
1358 1 0.439092 0.81208 0.250002
1359 1 0.447814 0.746579 0.318054
1360 1 0.377196 0.826818 0.324077
1236 1 0.499612 0.815571 0.186533
1361 1 0.497456 0.754841 0.249038
1332 1 0.502607 0.683882 0.311039
1204 1 0.505019 0.683013 0.18928
1364 1 0.501318 0.817918 0.315131
1208 1 0.620316 0.692899 0.190046
1235 1 0.559529 0.756205 0.190494
1330 1 0.560609 0.69327 0.251458
1336 1 0.619023 0.690993 0.31346
1363 1 0.557353 0.765867 0.315858
1365 1 0.615553 0.751237 0.253433
1368 1 0.622608 0.822211 0.311955
1240 1 0.629518 0.807231 0.196882
1362 1 0.560141 0.816887 0.245742
1212 1 0.753132 0.687412 0.181006
1239 1 0.684992 0.745594 0.190469
1243 1 0.797241 0.755741 0.190604
1244 1 0.737754 0.810178 0.187659
1334 1 0.679792 0.688769 0.252573
1338 1 0.810997 0.684894 0.236617
1340 1 0.750692 0.685862 0.313213
1366 1 0.684486 0.821155 0.253374
1367 1 0.678987 0.752294 0.314271
1369 1 0.741025 0.754922 0.251487
1370 1 0.812403 0.815758 0.250417
1371 1 0.807115 0.754594 0.314835
1372 1 0.739 0.812274 0.311788
1348 1 -0.0005533 0.818543 0.306265
1216 1 0.87481 0.685943 0.184313
1247 1 0.9495 0.751865 0.191711
1248 1 0.879061 0.808614 0.184625
1342 1 0.929811 0.685651 0.247201
1373 1 0.868079 0.748519 0.2438
1374 1 0.932873 0.814753 0.242408
1375 1 0.941298 0.750545 0.293867
1376 1 0.870339 0.813334 0.309089
1344 1 0.8688 0.692982 0.305619
1251 1 0.0628911 0.872919 0.18631
1256 1 0.123714 0.937386 0.183578
1378 1 0.0600563 0.940736 0.245672
1379 1 0.0571404 0.881553 0.314627
1381 1 0.11808 0.869434 0.25664
1384 1 0.121835 0.94973 0.29822
1380 1 0.989983 0.93707 0.318727
1037 1 0.364839 0.504956 0.00470677
1570 1 0.0743476 0.698754 0.49429
1506 1 0.0628418 0.947092 0.376122
1427 1 0.564503 0.503744 0.441214
1255 1 0.192844 0.891013 0.195612
1260 1 0.254888 0.936001 0.199254
1382 1 0.184395 0.953601 0.24998
1383 1 0.194272 0.874958 0.306196
1385 1 0.257542 0.87888 0.255086
1386 1 0.318986 0.943215 0.261701
1387 1 0.308653 0.881234 0.32289
1388 1 0.248354 0.942484 0.309775
1259 1 0.325846 0.874205 0.187215
1527 1 0.682513 0.878743 0.437323
1263 1 0.434472 0.869067 0.180838
1264 1 0.374447 0.942209 0.185109
1389 1 0.380428 0.872098 0.251491
1390 1 0.440524 0.936713 0.245812
1391 1 0.435611 0.878588 0.310415
1392 1 0.376339 0.939058 0.31617
1393 1 0.497884 0.878028 0.245839
1396 1 0.493847 0.938057 0.315608
1530 1 0.808757 0.931992 0.369763
1268 1 0.501194 0.942646 0.184819
1267 1 0.558124 0.8841 0.194088
1272 1 0.618603 0.945142 0.189206
1394 1 0.557001 0.940706 0.252914
1395 1 0.557668 0.875501 0.313434
1397 1 0.62745 0.88727 0.25147
1400 1 0.622352 0.941148 0.320376
1271 1 0.684504 0.890287 0.190279
1275 1 0.812883 0.880113 0.187699
1276 1 0.748102 0.943257 0.181162
1398 1 0.679037 0.94834 0.250757
1399 1 0.677343 0.882749 0.3145
1401 1 0.752051 0.872164 0.246975
1402 1 0.812277 0.936438 0.255249
1403 1 0.816222 0.870421 0.312372
1404 1 0.745571 0.935589 0.314214
537 1 0.738172 0.997999 0.493782
1521 1 0.497194 0.877062 0.379644
129 1 0.988496 0.999008 0.124611
1252 1 0.996712 0.937402 0.185635
1377 1 0.992743 0.872723 0.24699
1279 1 0.928372 0.885651 0.187599
1280 1 0.861954 0.944158 0.195566
1405 1 0.869781 0.874384 0.246736
1406 1 0.92757 0.937798 0.256133
1407 1 0.93106 0.866937 0.303607
1408 1 0.878086 0.940659 0.319282
1423 1 0.439471 0.510393 0.43971
1410 1 0.0593114 0.571991 0.371723
1416 1 0.118284 0.56237 0.421069
1443 1 0.068024 0.62687 0.440063
1445 1 0.128818 0.629948 0.375934
1441 1 1.00004 0.632385 0.380039
1412 1 0.994161 0.565529 0.42305
1641 1 0.239064 0.885664 0.498748
1562 1 0.806014 0.564465 0.492997
1449 1 0.254187 0.622693 0.381187
1451 1 0.31666 0.637023 0.438726
1420 1 0.242775 0.555979 0.434123
1447 1 0.189321 0.621182 0.438081
1414 1 0.18702 0.566561 0.36365
1418 1 0.305456 0.558846 0.375816
1429 1 0.626771 0.502735 0.384475
1025 1 0.0116481 0.501746 0.0025856
1549 1 0.381136 0.504682 0.498944
1424 1 0.380127 0.566174 0.440325
1455 1 0.440051 0.628761 0.436842
1422 1 0.442782 0.555479 0.3735
1453 1 0.380697 0.631497 0.368801
1513 1 0.249 0.886853 0.377636
1428 1 0.499684 0.562704 0.439295
1457 1 0.497714 0.617553 0.365428
1461 1 0.614893 0.631821 0.381707
1426 1 0.563828 0.564632 0.371351
1432 1 0.627591 0.558517 0.45139
1459 1 0.560439 0.624704 0.44147
1606 1 0.18898 0.824763 0.495067
1510 1 0.186102 0.942335 0.374893
1430 1 0.682827 0.566985 0.376889
1463 1 0.68495 0.626314 0.43638
1436 1 0.750723 0.571205 0.430767
1434 1 0.812551 0.560272 0.369794
1465 1 0.741005 0.624165 0.374791
1467 1 0.812981 0.621896 0.424295
1525 1 0.619793 0.885257 0.37655
1519 1 0.4348 0.876507 0.437631
1528 1 0.620592 0.941948 0.453595
409 1 0.743151 1.00149 0.370157
1618 1 0.562948 0.805654 0.496955
1522 1 0.560084 0.933056 0.376313
1523 1 0.567135 0.878698 0.444339
1307 1 0.811691 0.499987 0.309272
1440 1 0.872636 0.560725 0.427356
1469 1 0.875205 0.62256 0.374743
1471 1 0.933586 0.629289 0.435385
1438 1 0.933898 0.559478 0.368707
1517 1 0.379843 0.883868 0.380607
1520 1 0.376357 0.939073 0.441256
275 1 0.558743 0.993523 0.321572
1448 1 0.133901 0.692978 0.428993
1480 1 0.133245 0.821985 0.438589
1475 1 0.0612961 0.767712 0.433284
1477 1 0.132258 0.747525 0.374197
1474 1 0.0750615 0.815547 0.373241
1442 1 0.0715881 0.696896 0.368128
1473 1 0.991506 0.754386 0.363594
1444 1 0.00467242 0.696828 0.434828
1476 1 0.00170583 0.819331 0.426739
1605 1 0.138957 0.754198 0.501922
23 1 0.684883 0.991911 0.064074
1581 1 0.377854 0.634776 0.496925
1446 1 0.194914 0.69013 0.36991
1482 1 0.310289 0.824739 0.381623
1479 1 0.193751 0.767323 0.439378
1481 1 0.254608 0.764286 0.375706
1483 1 0.316656 0.764359 0.442911
1450 1 0.321046 0.702708 0.386548
1484 1 0.247241 0.822785 0.436643
1478 1 0.189305 0.817295 0.371691
1452 1 0.249308 0.692026 0.429844
1488 1 0.377653 0.819791 0.446423
1487 1 0.448241 0.750344 0.434679
1486 1 0.443642 0.812811 0.381393
1454 1 0.443051 0.685575 0.368993
1485 1 0.378348 0.760019 0.377843
1456 1 0.380934 0.694083 0.441925
1492 1 0.502743 0.817485 0.434907
1524 1 0.497238 0.940085 0.438853
1518 1 0.440551 0.938182 0.376079
1515 1 0.31129 0.880995 0.446396
1460 1 0.503018 0.690783 0.434673
1489 1 0.504323 0.753478 0.377778
1493 1 0.619303 0.742957 0.383997
1496 1 0.627051 0.810891 0.435574
1458 1 0.55353 0.681835 0.376568
1464 1 0.626313 0.678907 0.445789
1491 1 0.565907 0.740074 0.435334
1490 1 0.566483 0.816036 0.375889
7 1 0.18233 0.99759 0.0676705
1661 1 0.871754 0.862182 0.498321
1468 1 0.757689 0.684557 0.433471
1494 1 0.679239 0.820086 0.374616
1462 1 0.678631 0.674814 0.374935
1500 1 0.74369 0.809473 0.434904
1497 1 0.735416 0.740749 0.377039
1495 1 0.684982 0.745403 0.438868
1466 1 0.812583 0.690302 0.362455
1499 1 0.80883 0.752418 0.434614
1498 1 0.800264 0.80984 0.375012
411 1 0.808924 0.991424 0.423366
541 1 0.867032 0.991984 0.499818
1470 1 0.938731 0.692411 0.379239
1472 1 0.87579 0.683671 0.441061
1502 1 0.934357 0.809788 0.368779
1501 1 0.880973 0.752683 0.374781
1504 1 0.866044 0.81445 0.427121
1503 1 0.941631 0.763038 0.432196
1565 1 0.870931 0.504616 0.485239
133 1 0.123651 0.995241 0.118709
265 1 0.25275 1.00158 0.257103
1566 1 0.941298 0.563869 0.487827
157 1 0.878357 0.996631 0.126229
1569 1 0.00851625 0.628704 0.497286
1613 1 0.386201 0.759037 0.500035
1411 1 0.0623519 0.50033 0.438144
1105 1 0.495534 0.7595 0.00153148
1069 1 0.366311 0.633344 0.0040036
1597 1 0.868569 0.620788 0.487702
1637 1 0.119868 0.87481 0.495638
1062 1 0.195628 0.700126 0.00328282
1574 1 0.197986 0.688434 0.49538
1662 1 0.933578 0.926204 0.49944
279 1 0.682251 0.998497 0.316257
1038 1 0.431032 0.565226 0.00351547
1609 1 0.260612 0.75008 0.49775
1573 1 0.121455 0.619351 0.496377
1090 1 0.0632996 0.811195 0.00562457
1030 1 0.188826 0.560964 0.00450386
1582 1 0.443137 0.691296 0.492981
1578 1 0.318504 0.700449 0.494459
1145 1 0.753989 0.871347 0.0115636
1122 1 0.0658472 0.946656 0.00477397
1577 1 0.250759 0.61543 0.496173
1149 1 0.87181 0.870867 0.00216349
1585 1 0.494378 0.620864 0.495195
1054 1 0.942117 0.569167 0.00108259
1642 1 0.321499 0.950352 0.500069
1561 1 0.755172 0.50551 0.496768
1077 1 0.612816 0.636218 0.0039648
1121 1 0.00901296 0.878398 0.0045925
1638 1 0.186984 0.946891 0.492806
1593 1 0.742015 0.62838 0.496865
1053 1 0.878629 0.504322 0.0106853
1070 1 0.434454 0.68477 0.00729365
1544 1 0.117365 0.564763 0.565718
1571 1 0.0607538 0.633836 0.554028
1666 1 0.0597668 0.558357 0.629197
1701 1 0.120495 0.625673 0.622309
1130 1 0.32138 0.943226 0.999682
1617 1 0.496114 0.753633 0.502425
539 1 0.799901 0.996553 0.555692
1805 1 0.376227 0.502108 0.759666
913 1 0.506586 0.997614 0.869351
1548 1 0.250445 0.564979 0.565709
1575 1 0.186502 0.62328 0.559828
1579 1 0.311595 0.636842 0.55328
1670 1 0.192498 0.569467 0.621088
1674 1 0.313364 0.577424 0.62313
1705 1 0.247178 0.627223 0.62322
535 1 0.689155 0.996455 0.566826
907 1 0.311634 1.00226 0.944072
1923 1 0.0690236 0.499849 0.945074
1552 1 0.378428 0.564364 0.553131
1583 1 0.438182 0.621202 0.559072
1678 1 0.433646 0.570047 0.625142
1709 1 0.37888 0.632759 0.628254
1713 1 0.508183 0.624206 0.62318
1933 1 0.375368 0.501188 0.880235
1556 1 0.499519 0.554072 0.561835
1560 1 0.630459 0.558607 0.57259
1587 1 0.566965 0.624874 0.562875
1682 1 0.563537 0.569537 0.628347
1717 1 0.624199 0.625627 0.629433
1689 1 0.754114 0.501779 0.631883
21 1 0.625541 0.991343 0.991901
1564 1 0.750574 0.5668 0.558165
1591 1 0.680833 0.621079 0.566592
1595 1 0.814289 0.628445 0.566971
1686 1 0.690163 0.558333 0.633624
1690 1 0.823725 0.569432 0.620796
1721 1 0.754888 0.612871 0.623594
1101 1 0.387087 0.755862 0.995543
1697 1 0.0118198 0.636868 0.624015
1540 1 0.00627776 0.570445 0.565105
1568 1 0.873636 0.564848 0.549628
1599 1 0.939378 0.623467 0.549946
1694 1 0.937267 0.571465 0.616451
1725 1 0.877353 0.630826 0.625026
781 1 0.373856 0.991 0.744044
775 1 0.188862 0.989035 0.810575
1576 1 0.132023 0.685893 0.550073
1603 1 0.0687134 0.757258 0.565444
1608 1 0.130649 0.814114 0.557001
1698 1 0.0711522 0.69268 0.623886
1730 1 0.0519441 0.819188 0.627644
1733 1 0.121088 0.757523 0.627438
1572 1 0.00168042 0.692501 0.558912
1729 1 0.996307 0.750608 0.612647
1604 1 0.00599888 0.821816 0.558075
1081 1 0.749456 0.625565 0.997817
1580 1 0.251296 0.691574 0.567165
1607 1 0.190521 0.760783 0.566189
1611 1 0.323002 0.748214 0.568272
1612 1 0.262175 0.806938 0.567916
1702 1 0.172964 0.688057 0.621801
1706 1 0.316668 0.683378 0.623007
1734 1 0.182446 0.810353 0.629224
1737 1 0.257619 0.755103 0.623948
1738 1 0.322303 0.806984 0.631251
1797 1 0.134043 0.505357 0.750265
655 1 0.441471 0.991448 0.679381
1683 1 0.569179 0.50262 0.678015
1584 1 0.387667 0.691597 0.562635
1615 1 0.443266 0.751899 0.558462
1616 1 0.374025 0.80687 0.561613
1710 1 0.438219 0.6827 0.632708
1741 1 0.394257 0.750998 0.623695
1742 1 0.441575 0.814699 0.61768
1588 1 0.499889 0.677936 0.555572
1745 1 0.511524 0.750939 0.635332
1620 1 0.503538 0.81649 0.554532
1714 1 0.558908 0.686681 0.63286
1592 1 0.619138 0.684372 0.576777
1619 1 0.553711 0.74765 0.568193
1624 1 0.629339 0.794853 0.564635
1746 1 0.571071 0.810769 0.625586
1749 1 0.619921 0.742508 0.635034
1795 1 0.0683146 0.505729 0.817566
1807 1 0.439782 0.508319 0.814185
1596 1 0.757036 0.678029 0.568162
1623 1 0.693311 0.741693 0.557393
1627 1 0.809707 0.7428 0.569344
1628 1 0.755124 0.801685 0.565053
1718 1 0.696087 0.67602 0.631694
1722 1 0.816068 0.687809 0.640747
1750 1 0.682338 0.809284 0.631691
1753 1 0.749353 0.742377 0.625977
1754 1 0.816617 0.8121 0.62337
663 1 0.684404 0.999124 0.678895
1545 1 0.249551 0.511721 0.503363
527 1 0.434748 0.996979 0.564599
1600 1 0.881331 0.684107 0.552783
1631 1 0.950726 0.747995 0.555816
1632 1 0.880404 0.792364 0.559268
1726 1 0.939089 0.687074 0.623821
1757 1 0.872433 0.747601 0.622803
1758 1 0.938671 0.810645 0.619243
909 1 0.378415 0.997517 0.879066
1118 1 0.947893 0.817147 0.99525
1635 1 0.0653635 0.88909 0.556566
1640 1 0.136223 0.947179 0.562546
1762 1 0.0608262 0.938907 0.624605
1765 1 0.126421 0.875152 0.622644
1761 1 0.997989 0.888772 0.624834
769 1 0.995274 0.994003 0.748859
1636 1 -0.000267055 0.936609 0.549225
647 1 0.183241 0.997666 0.687567
1695 1 0.944397 0.507494 0.692277
1639 1 0.19452 0.864337 0.569298
1643 1 0.311422 0.880653 0.55632
1644 1 0.25445 0.940466 0.563955
1766 1 0.191783 0.937122 0.619647
1769 1 0.255188 0.873726 0.628571
1770 1 0.313112 0.941958 0.636446
1929 1 0.263082 0.509152 0.888192
1089 1 0.00369592 0.748924 0.998552
1809 1 0.50115 0.501441 0.753304
1681 1 0.493828 0.508929 0.629726
1645 1 0.376117 0.885118 0.500398
1610 1 0.313287 0.819879 0.4973
1647 1 0.436982 0.878162 0.56583
1648 1 0.369089 0.941795 0.565211
1773 1 0.381016 0.869285 0.624958
1774 1 0.434961 0.933218 0.618828
1777 1 0.514558 0.871149 0.62473
1559 1 0.691308 0.502684 0.571832
1558 1 0.68689 0.558203 0.508424
1652 1 0.500552 0.927047 0.558365
1651 1 0.566963 0.870494 0.55678
1656 1 0.622287 0.9381 0.562955
1778 1 0.563261 0.932702 0.632551
1781 1 0.623715 0.874761 0.615306
2020 1 0.998136 0.93249 0.946167
2045 1 0.882115 0.879523 0.872993
1811 1 0.559864 0.504546 0.814102
1935 1 0.439356 0.5019 0.943121
777 1 0.251226 0.998578 0.756157
1655 1 0.690603 0.867184 0.562427
1659 1 0.804479 0.877097 0.562847
1660 1 0.742301 0.937599 0.565498
1782 1 0.68449 0.933446 0.630558
1785 1 0.745661 0.880867 0.622871
1786 1 0.81261 0.935945 0.622634
641 1 0.996263 0.988629 0.623421
1675 1 0.305825 0.500687 0.687884
1622 1 0.687315 0.808796 0.504991
1547 1 0.314804 0.511978 0.561866
773 1 0.123552 0.99774 0.75741
1663 1 0.925659 0.865324 0.560355
1664 1 0.866063 0.932211 0.557691
1789 1 0.874891 0.871986 0.625055
1790 1 0.934217 0.942473 0.623927
515 1 0.064742 0.990809 0.559189
899 1 0.0617481 0.992478 0.934584
1672 1 0.121874 0.559817 0.687698
1699 1 0.0667808 0.622855 0.686911
1794 1 0.0690116 0.561527 0.752866
1800 1 0.140158 0.562941 0.809025
1827 1 0.061283 0.627682 0.815833
1829 1 0.134213 0.617116 0.751023
1796 1 0.00625448 0.562965 0.807712
1825 1 0.0101491 0.624636 0.746183
1685 1 0.631375 0.50361 0.628973
1085 1 0.881438 0.630104 0.99375
1626 1 0.815982 0.811408 0.50177
1676 1 0.249957 0.572252 0.685231
1703 1 0.179877 0.630854 0.680127
1707 1 0.317997 0.631179 0.68848
1798 1 0.19698 0.56269 0.751547
1802 1 0.311625 0.573671 0.749752
1804 1 0.252964 0.564644 0.811784
1831 1 0.19469 0.631645 0.815855
1833 1 0.250069 0.632251 0.746887
1835 1 0.305571 0.629602 0.813815
2046 1 0.948228 0.937833 0.883591
905 1 0.250821 0.998733 0.87559
1680 1 0.372993 0.570283 0.688082
1711 1 0.444679 0.632788 0.697591
1806 1 0.440907 0.562008 0.750496
1808 1 0.373348 0.558692 0.818765
1837 1 0.381054 0.619783 0.750858
1839 1 0.435591 0.610416 0.81796
1812 1 0.497434 0.562089 0.825834
1106 1 0.567864 0.80969 0.995995
1841 1 0.504225 0.626003 0.762797
1684 1 0.502298 0.564194 0.69498
1653 1 0.626426 0.865011 0.502816
2047 1 0.933907 0.867862 0.930106
1688 1 0.628062 0.562432 0.69307
1715 1 0.558754 0.621653 0.70007
1810 1 0.562849 0.55668 0.756428
1816 1 0.635113 0.559295 0.803676
1843 1 0.564342 0.628641 0.824801
1845 1 0.613806 0.620028 0.759426
1049 1 0.751646 0.505848 0.995653
919 1 0.695794 0.997038 0.928604
793 1 0.751731 0.996782 0.743968
2048 1 0.87495 0.931921 0.936205
671 1 0.931629 0.996281 0.678543
1567 1 0.94008 0.516096 0.555311
1692 1 0.751975 0.563153 0.692285
1719 1 0.691287 0.619604 0.693965
1723 1 0.816542 0.619171 0.686149
1814 1 0.690153 0.554288 0.749897
1818 1 0.825053 0.564276 0.753932
1820 1 0.750337 0.557809 0.812269
1847 1 0.687534 0.622537 0.814955
1849 1 0.749028 0.615753 0.760289
1851 1 0.8146 0.620403 0.814987
1821 1 0.887921 0.509149 0.750126
1819 1 0.815611 0.502261 0.816156
1589 1 0.62783 0.629119 0.510549
1668 1 0.0107017 0.555185 0.684445
1696 1 0.887746 0.563863 0.684545
1727 1 0.94879 0.61122 0.68669
1822 1 0.940444 0.568559 0.758427
1824 1 0.879448 0.555082 0.817437
1853 1 0.884133 0.629858 0.755323
1855 1 0.942432 0.619894 0.820906
915 1 0.566458 0.999358 0.927098
1704 1 0.122797 0.6879 0.691249
1731 1 0.074062 0.749478 0.689337
1736 1 0.121369 0.819747 0.688963
1826 1 0.0635954 0.689118 0.747693
1832 1 0.120184 0.685088 0.810485
1858 1 0.062885 0.823623 0.745657
1859 1 0.061771 0.745433 0.816458
1861 1 0.124778 0.750743 0.750243
1864 1 0.126735 0.80437 0.823049
1857 1 0.00405109 0.756865 0.750765
1860 1 0.00148547 0.814645 0.810947
1700 1 0.00816868 0.689248 0.681307
1828 1 0.00208553 0.680257 0.809997
1862 1 0.188862 0.811479 0.760874
1740 1 0.254494 0.810147 0.68371
1708 1 0.257914 0.686626 0.68798
1735 1 0.186898 0.744438 0.687639
1739 1 0.313969 0.745826 0.692223
1830 1 0.195133 0.687243 0.744946
1834 1 0.306065 0.690951 0.758188
1836 1 0.250797 0.6954 0.818834
1863 1 0.186381 0.749762 0.813929
1865 1 0.25064 0.749033 0.75359
1866 1 0.311147 0.812042 0.739948
1867 1 0.31032 0.759955 0.813228
1868 1 0.245629 0.810629 0.811604
1744 1 0.386314 0.807617 0.685071
1712 1 0.380762 0.683505 0.699427
1743 1 0.441744 0.744572 0.702562
1838 1 0.441684 0.683537 0.76241
1840 1 0.370409 0.677397 0.811893
1869 1 0.373771 0.748301 0.749014
1870 1 0.436096 0.809647 0.75214
1871 1 0.447137 0.748919 0.817038
1872 1 0.374142 0.805161 0.813725
1716 1 0.50669 0.692026 0.695153
1876 1 0.511725 0.819109 0.823581
1748 1 0.513759 0.805389 0.69302
1844 1 0.498893 0.681377 0.820774
1873 1 0.504174 0.741272 0.764734
1720 1 0.619622 0.673743 0.691711
1747 1 0.569593 0.747808 0.705463
1752 1 0.618882 0.810699 0.698061
1842 1 0.563426 0.678592 0.758533
1848 1 0.62656 0.684678 0.813093
1874 1 0.564434 0.80994 0.75956
1875 1 0.569997 0.749692 0.818437
1877 1 0.636649 0.763562 0.755152
1880 1 0.628005 0.811714 0.817097
1724 1 0.752396 0.687659 0.692061
1751 1 0.684217 0.744751 0.690068
1755 1 0.813776 0.751287 0.683874
1756 1 0.745296 0.807072 0.677898
1846 1 0.67832 0.677916 0.7462
1850 1 0.81032 0.687261 0.753571
1852 1 0.748547 0.687826 0.807388
1878 1 0.694836 0.814721 0.746095
1879 1 0.687761 0.747451 0.815634
1881 1 0.749807 0.756097 0.740857
1882 1 0.816491 0.807044 0.757252
1883 1 0.81688 0.749219 0.811863
1884 1 0.752044 0.802506 0.805571
1732 1 0.00010023 0.806894 0.684898
1728 1 0.882227 0.685092 0.689245
1759 1 0.943321 0.750846 0.686643
1760 1 0.872113 0.804165 0.683458
1854 1 0.944831 0.687658 0.749951
1856 1 0.870787 0.678165 0.818789
1885 1 0.876372 0.744395 0.74326
1886 1 0.935673 0.813766 0.738774
1887 1 0.925094 0.751116 0.813933
1888 1 0.88972 0.824667 0.808924
1931 1 0.319767 0.503496 0.942309
1763 1 0.062022 0.87746 0.687691
1768 1 0.123538 0.932397 0.682748
1890 1 0.0570582 0.942902 0.747027
1891 1 0.0688268 0.871466 0.814956
1893 1 0.119682 0.877443 0.751067
1896 1 0.124039 0.939106 0.818814
1764 1 0.00560643 0.934365 0.689511
1889 1 0.997118 0.874484 0.755217
1892 1 0.00111534 0.934206 0.825172
1621 1 0.626607 0.735 0.499691
1947 1 0.817801 0.501628 0.939735
1767 1 0.194151 0.8673 0.696387
1771 1 0.309462 0.869246 0.688332
1772 1 0.246194 0.941583 0.693754
1894 1 0.179487 0.933368 0.748346
1895 1 0.180728 0.874747 0.811992
1897 1 0.247512 0.877582 0.750926
1898 1 0.312819 0.93475 0.748895
1899 1 0.310558 0.859064 0.812765
1900 1 0.263049 0.935468 0.819357
1775 1 0.442387 0.864554 0.681366
1776 1 0.371717 0.927681 0.688317
1901 1 0.375258 0.86948 0.748245
1902 1 0.453841 0.939449 0.747077
1903 1 0.44286 0.872322 0.814869
1904 1 0.396004 0.938088 0.801017
1905 1 0.495902 0.870015 0.75055
1908 1 0.503009 0.930584 0.812212
1780 1 0.49893 0.931095 0.679878
1541 1 0.114759 0.505733 0.504011
667 1 0.812751 0.997456 0.679115
1779 1 0.563722 0.881262 0.701796
1784 1 0.630153 0.93456 0.697679
1906 1 0.562976 0.944517 0.74669
1907 1 0.572023 0.882226 0.807575
1909 1 0.634571 0.865293 0.759034
1912 1 0.630294 0.938946 0.801772
787 1 0.555774 0.998952 0.799764
1911 1 0.696211 0.876378 0.814524
1783 1 0.68581 0.871497 0.688227
1787 1 0.808486 0.871672 0.679823
1788 1 0.749974 0.934036 0.687528
1910 1 0.692822 0.933372 0.747077
1913 1 0.755255 0.869399 0.750461
1914 1 0.813756 0.931836 0.746483
1915 1 0.821191 0.878079 0.807068
1916 1 0.750668 0.939549 0.808996
901 1 0.130932 0.989872 0.877467
665 1 0.751058 0.999222 0.61864
1925 1 0.129993 0.504924 0.887049
1791 1 0.934801 0.875427 0.687479
1792 1 0.868136 0.938344 0.681629
1917 1 0.875584 0.872701 0.740542
1918 1 0.932977 0.939959 0.74246
1919 1 0.943527 0.881191 0.816534
1920 1 0.871046 0.943096 0.814192
1590 1 0.690605 0.680965 0.507121
1922 1 0.0723241 0.561783 0.880817
1928 1 0.129359 0.568738 0.947931
1955 1 0.0750683 0.630869 0.936864
1957 1 0.136676 0.627999 0.875619
1150 1 0.930482 0.922361 0.995052
771 1 0.0585822 0.996878 0.828185
1114 1 0.819189 0.811662 1.00025
645 1 0.12744 0.999782 0.620905
1930 1 0.311405 0.571676 0.872204
1926 1 0.192516 0.561388 0.885214
1963 1 0.318251 0.62244 0.937971
1961 1 0.24674 0.623457 0.878098
1932 1 0.256205 0.570637 0.941882
1959 1 0.185862 0.623127 0.946872
2034 1 0.563049 0.932594 0.870842
1066 1 0.303285 0.679528 1.001
1965 1 0.374702 0.625733 0.873942
1934 1 0.437674 0.564139 0.883237
1936 1 0.375494 0.565033 0.941494
1967 1 0.4415 0.622969 0.947298
1969 1 0.500462 0.62869 0.880905
1082 1 0.813069 0.684871 0.995164
13 1 0.380195 0.99222 0.991026
2035 1 0.571382 0.876822 0.938606
1102 1 0.439052 0.817036 0.990257
2039 1 0.676799 0.876878 0.945154
1940 1 0.505192 0.567618 0.935929
1938 1 0.562719 0.558001 0.882562
1944 1 0.623026 0.559916 0.940914
1971 1 0.566483 0.626019 0.936229
1973 1 0.623499 0.609693 0.870559
1646 1 0.437173 0.939254 0.500203
657 1 0.49544 0.997812 0.619007
791 1 0.689555 0.998722 0.803598
2042 1 0.815815 0.932565 0.870591
1942 1 0.686436 0.559792 0.876538
1946 1 0.822213 0.561197 0.871365
1977 1 0.753564 0.622699 0.870634
1979 1 0.810395 0.617131 0.92901
1948 1 0.746853 0.566083 0.931023
1975 1 0.691023 0.62692 0.931906
1949 1 0.879105 0.50011 0.873942
1654 1 0.682899 0.93963 0.504364
1650 1 0.557599 0.946045 0.505276
1602 1 0.0647622 0.813027 0.506327
1924 1 0.013649 0.559237 0.935838
1953 1 0.00172223 0.614633 0.879386
1950 1 0.9387 0.560356 0.886513
1981 1 0.881592 0.623036 0.88251
1983 1 0.947504 0.626062 0.937341
1952 1 0.873729 0.564508 0.94248
1554 1 0.569628 0.559775 0.51168
923 1 0.808389 0.995236 0.940876
1960 1 0.131131 0.692544 0.946432
1989 1 0.132578 0.746047 0.878663
1954 1 0.0649695 0.686575 0.881688
1987 1 0.0707553 0.752804 0.934198
1992 1 0.125049 0.812251 0.947387
1986 1 0.0616538 0.81468 0.876346
1793 1 0.00219982 0.506279 0.751255
2040 1 0.625047 0.94004 0.929726
1995 1 0.320599 0.745274 0.94704
1958 1 0.19198 0.686922 0.876126
1994 1 0.314868 0.813245 0.881401
1991 1 0.193126 0.745229 0.942419
1990 1 0.190613 0.822843 0.879456
1993 1 0.258105 0.75253 0.88051
1964 1 0.255661 0.69013 0.935411
1962 1 0.311469 0.686226 0.874982
1996 1 0.248487 0.809279 0.940541
1998 1 0.431709 0.82221 0.876852
1966 1 0.435307 0.681628 0.860965
1968 1 0.37917 0.682212 0.939992
1999 1 0.43292 0.753513 0.917017
2000 1 0.379943 0.818181 0.936075
1997 1 0.377573 0.739194 0.86489
2004 1 0.504009 0.814045 0.929932
1972 1 0.496554 0.696921 0.943134
2001 1 0.505298 0.746784 0.876117
1976 1 0.635433 0.683804 0.936888
2005 1 0.631328 0.755073 0.879038
2003 1 0.560293 0.750657 0.935921
2008 1 0.631827 0.809978 0.937044
2002 1 0.568501 0.823113 0.880059
1970 1 0.570699 0.687531 0.883037
1625 1 0.751407 0.745274 0.499958
2007 1 0.687706 0.748828 0.934752
1974 1 0.693388 0.686529 0.875273
2009 1 0.753608 0.747305 0.878777
1978 1 0.813551 0.685856 0.870691
2006 1 0.690871 0.812692 0.871241
2011 1 0.80726 0.747819 0.932827
1980 1 0.747107 0.680304 0.934923
2010 1 0.808082 0.807058 0.869307
2012 1 0.749731 0.812109 0.936475
2038 1 0.693132 0.932595 0.87831
2016 1 0.873844 0.809443 0.937322
1982 1 0.942294 0.687019 0.872982
1984 1 0.870032 0.687954 0.932589
2015 1 0.938703 0.757411 0.939319
1985 1 0.00814689 0.74154 0.866171
2014 1 0.944204 0.804233 0.875985
2013 1 0.872679 0.749058 0.876629
1988 1 0.00830551 0.797113 0.934812
1956 1 0.00408883 0.68784 0.93343
653 1 0.375156 0.998548 0.626808
1941 1 0.628262 0.504356 0.869765
2030 1 0.432706 0.939562 0.872485
2036 1 0.495994 0.945278 0.93662
2032 1 0.372965 0.936987 0.930137
2021 1 0.136323 0.882252 0.886013
2018 1 0.0672623 0.930517 0.881535
2019 1 0.0714161 0.86351 0.939304
2024 1 0.127771 0.936813 0.94665
2017 1 0.994539 0.869848 0.879574
2044 1 0.750796 0.936004 0.937592
2029 1 0.374874 0.882301 0.865492
2043 1 0.807953 0.865233 0.92826
2037 1 0.629204 0.875219 0.877811
2026 1 0.310767 0.937104 0.880998
2023 1 0.184541 0.875215 0.943298
2025 1 0.254034 0.873924 0.879325
2022 1 0.19933 0.938089 0.882525
2028 1 0.252443 0.938424 0.94403
2027 1 0.311894 0.873393 0.939076
2033 1 0.502912 0.877257 0.882766
2041 1 0.754998 0.871828 0.869649
2031 1 0.433706 0.88008 0.933393
1133 1 0.376666 0.880639 0.994421
1586 1 0.558015 0.690761 0.500631
1823 1 0.942698 0.50141 0.821287
1937 1 0.504469 0.500033 0.882354
661 1 0.620089 0.991382 0.625478
1921 1 0.0145332 0.503372 0.877567
1799 1 0.198215 0.500757 0.819554
921 1 0.74905 0.98946 0.870167
1086 1 0.943692 0.683703 0.992984
1614 1 0.440617 0.81903 0.501631
1109 1 0.623178 0.746867 0.995033
1113 1 0.751595 0.756465 0.99521
1046 1 0.690069 0.564183 0.990447
1669 1 0.13407 0.507688 0.626742
1671 1 0.193311 0.506956 0.688238
1817 1 0.763635 0.506688 0.752941
1058 1 0.0679213 0.693432 0.993918
1098 1 0.320164 0.813797 0.995936
1034 1 0.310286 0.567838 0.997497
1074 1 0.555948 0.694886 0.997242
1138 1 0.553895 0.94636 0.998468
1630 1 0.945674 0.811588 0.506818
1050 1 0.814907 0.569474 0.996052
1142 1 0.697717 0.941403 0.993081
1057 1 0.0110402 0.61793 0.995355
1146 1 0.814856 0.926025 0.99801
1550 1 0.445715 0.557917 0.506874
1117 1 0.8825 0.747862 0.998005
1546 1 0.317883 0.56956 0.500444
1594 1 0.8116 0.689156 0.504245
1134 1 0.447604 0.931865 0.999193
1658 1 0.797676 0.930274 0.505095
| [
"scheuclu@gmail.com"
] | scheuclu@gmail.com |
5bcdd778d1e6f731488beb83daa9a83c0f4996a2 | b8e29b6e957b0a55571f7cffc4357666a43fb56e | /mxnet/insightface/insightface/src/symbols/fdpn.py | 0544f8347e0d24662ee3f007fe9468e1d3a22ddd | [
"MIT",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] | permissive | aliyun/alibabacloud-aiacc-demo | b9bbe565021757ecaea0e7d7209632cbdb5cc8ab | 2e49deeb38d12d4af4c5e50bb15d731c4bbf4cf1 | refs/heads/master | 2023-05-14T08:09:33.067050 | 2023-05-04T08:19:51 | 2023-05-04T08:19:51 | 228,604,743 | 38 | 14 | Apache-2.0 | 2022-06-22T02:41:01 | 2019-12-17T11:46:44 | Python | UTF-8 | Python | false | false | 9,635 | py | import mxnet as mx
import symbol_utils
bn_momentum = 0.9
def BK(data):
    """Wrap *data* in a BlockGrad node so no gradient flows back through it."""
    blocked = mx.symbol.BlockGrad(data=data)
    return blocked
# - - - - - - - - - - - - - - - - - - - - - - -
# Fundamental Elements
def BN(data, fix_gamma=False, momentum=bn_momentum, name=None):
    """Batch-normalization layer named '<name>__bn'.

    Bug fix: the original ignored its `momentum` argument and always passed
    the module-level bn_momentum to BatchNorm; the parameter is now honoured.
    Its default is still bn_momentum, so default behaviour is unchanged.
    """
    bn = mx.symbol.BatchNorm(data=data, fix_gamma=fix_gamma,
                             momentum=momentum, name=('%s__bn' % name))
    return bn
def AC(data, act_type='relu', name=None):
    """Activation layer named '<name>__<act_type>' (relu by default)."""
    layer_name = '%s__%s' % (name, act_type)
    return mx.symbol.Activation(data=data, act_type=act_type, name=layer_name)
def BN_AC(data, momentum=bn_momentum, name=None):
    """Batch norm followed by an activation (BN -> AC)."""
    normalized = BN(data=data, name=name, fix_gamma=False, momentum=momentum)
    activated = AC(data=normalized, name=name)
    return activated
def Conv(data, num_filter, kernel, stride=(1,1), pad=(0, 0), name=None, no_bias=True, w=None, b=None, attr=None, num_group=1):
    """Plain convolution wrapper around mx.symbol.Convolution.

    Optionally accepts pre-created weight (`w`) and bias (`b`) symbols so
    layers can share parameters.  Matching the original semantics:
    - `b` is only used when `w` is also given;
    - supplying `b` forces a bias (no_bias=False) regardless of the flag.

    The original duplicated the Convolution call in three nearly identical
    branches; the shared arguments are now built once.
    """
    kwargs = dict(data=data, num_filter=num_filter, num_group=num_group,
                  kernel=kernel, pad=pad, stride=stride,
                  name=('%s__conv' % name), no_bias=no_bias, attr=attr)
    if w is not None:
        kwargs['weight'] = w
        if b is not None:
            # An explicit bias symbol overrides the no_bias flag.
            kwargs['bias'] = b
            kwargs['no_bias'] = False
    return mx.symbol.Convolution(**kwargs)
# - - - - - - - - - - - - - - - - - - - - - - -
# Standard Common functions < CVPR >
def Conv_BN( data, num_filter, kernel, pad, stride=(1,1), name=None, w=None, b=None, no_bias=True, attr=None, num_group=1):
    """Convolution followed by batch normalization (Conv -> BN)."""
    conv_out = Conv(data=data, num_filter=num_filter, num_group=num_group,
                    kernel=kernel, pad=pad, stride=stride, name=name,
                    w=w, b=b, no_bias=no_bias, attr=attr)
    return BN(data=conv_out, name=('%s__bn' % name))
def Conv_BN_AC(data, num_filter, kernel, pad, stride=(1,1), name=None, w=None, b=None, no_bias=True, attr=None, num_group=1):
    """Convolution, batch norm, then activation (Conv -> BN -> AC)."""
    normalized = Conv_BN(data=data, num_filter=num_filter, num_group=num_group,
                         kernel=kernel, pad=pad, stride=stride, name=name,
                         w=w, b=b, no_bias=no_bias, attr=attr)
    return AC(data=normalized, name=('%s__ac' % name))
# - - - - - - - - - - - - - - - - - - - - - - -
# Standard Common functions < ECCV >
def BN_Conv( data, num_filter, kernel, pad, stride=(1,1), name=None, w=None, b=None, no_bias=True, attr=None, num_group=1):
    """Batch normalization followed by a convolution (BN -> Conv)."""
    normalized = BN(data=data, name=('%s__bn' % name))
    return Conv(data=normalized, num_filter=num_filter, num_group=num_group,
                kernel=kernel, pad=pad, stride=stride, name=name,
                w=w, b=b, no_bias=no_bias, attr=attr)
def AC_Conv( data, num_filter, kernel, pad, stride=(1,1), name=None, w=None, b=None, no_bias=True, attr=None, num_group=1):
    """Activation followed by a convolution (AC -> Conv)."""
    activated = AC(data=data, name=('%s__ac' % name))
    return Conv(data=activated, num_filter=num_filter, num_group=num_group,
                kernel=kernel, pad=pad, stride=stride, name=name,
                w=w, b=b, no_bias=no_bias, attr=attr)
def BN_AC_Conv(data, num_filter, kernel, pad, stride=(1,1), name=None, w=None, b=None, no_bias=True, attr=None, num_group=1):
    """Pre-activation conv block: batch norm, activation, then convolution."""
    normalized = BN(data=data, name=('%s__bn' % name))
    return AC_Conv(data=normalized, num_filter=num_filter, num_group=num_group,
                   kernel=kernel, pad=pad, stride=stride, name=name,
                   w=w, b=b, no_bias=no_bias, attr=attr)
def DualPathFactory(data, num_1x1_a, num_3x3_b, num_1x1_c, name, inc, G, _type='normal'):
    """One Dual Path Network block: 1x1 -> grouped 3x3 -> 1x1 convolutions
    feeding a residual (element-wise sum) path and a dense (concat) path.

    Parameters
    ----------
    data : symbol or [residual_part, dense_part] list from a previous block.
    num_1x1_a, num_3x3_b, num_1x1_c : channel counts of the three convs.
    inc : dense-path channel increment added by this block.
    G : group count for the 3x3 convolution.
    _type : 'proj' (stride 1, projection shortcut), 'down' (stride 2,
        projection shortcut) or 'normal' (stride 1, identity shortcut).

    Returns
    -------
    [summ, dense] pair of symbols.

    Fixes vs. the original:
    - string comparison with `==` instead of the fragile identity test `is`;
    - integer division for the padding so it stays an int under Python 3;
    - an explicit error for an unknown `_type` (previously a NameError).
    """
    kw = 3
    kh = 3
    # Integer division: pad values must be ints for mx.symbol.Convolution.
    pw = (kw - 1) // 2
    ph = (kh - 1) // 2

    # Block type selects the stride and whether a projection shortcut is used.
    if _type == 'proj':
        key_stride = 1
        has_proj = True
    elif _type == 'down':
        key_stride = 2
        has_proj = True
    elif _type == 'normal':
        key_stride = 1
        has_proj = False
    else:
        raise ValueError("unknown DPN block type: %r" % (_type,))

    # PROJ: merge the two incoming paths before feeding the block.
    if isinstance(data, list):
        data_in = mx.symbol.Concat(*[data[0], data[1]], name=('%s_cat-input' % name))
    else:
        data_in = data

    if has_proj:
        c1x1_w = BN_AC_Conv(data=data_in, num_filter=(num_1x1_c + 2 * inc), kernel=(1, 1),
                            stride=(key_stride, key_stride),
                            name=('%s_c1x1-w(s/%d)' % (name, key_stride)), pad=(0, 0))
        data_o1 = mx.symbol.slice_axis(data=c1x1_w, axis=1, begin=0, end=num_1x1_c,
                                       name=('%s_c1x1-w(s/%d)-split1' % (name, key_stride)))
        data_o2 = mx.symbol.slice_axis(data=c1x1_w, axis=1, begin=num_1x1_c, end=(num_1x1_c + 2 * inc),
                                       name=('%s_c1x1-w(s/%d)-split2' % (name, key_stride)))
    else:
        # Identity shortcut: reuse the incoming residual/dense parts directly.
        data_o1 = data[0]
        data_o2 = data[1]

    # MAIN: bottleneck 1x1 -> grouped 3x3 (carries the stride) -> 1x1.
    c1x1_a = BN_AC_Conv(data=data_in, num_filter=num_1x1_a, kernel=(1, 1), pad=(0, 0),
                        name=('%s_c1x1-a' % name))
    c3x3_b = BN_AC_Conv(data=c1x1_a, num_filter=num_3x3_b, kernel=(kw, kh), pad=(pw, ph),
                        name=('%s_c%dx%d-b' % (name, kw, kh)),
                        stride=(key_stride, key_stride), num_group=G)
    c1x1_c = BN_AC_Conv(data=c3x3_b, num_filter=(num_1x1_c + inc), kernel=(1, 1), pad=(0, 0),
                        name=('%s_c1x1-c' % name))
    # Split the output: first num_1x1_c channels go to the residual path,
    # the remaining `inc` channels extend the dense path.
    c1x1_c1 = mx.symbol.slice_axis(data=c1x1_c, axis=1, begin=0, end=num_1x1_c,
                                   name=('%s_c1x1-c-split1' % name))
    c1x1_c2 = mx.symbol.slice_axis(data=c1x1_c, axis=1, begin=num_1x1_c, end=(num_1x1_c + inc),
                                   name=('%s_c1x1-c-split2' % name))

    # OUTPUTS
    summ = mx.symbol.ElementWiseSum(*[data_o1, c1x1_c1], name=('%s_sum' % name))
    dense = mx.symbol.Concat(*[data_o2, c1x1_c2], name=('%s_cat' % name))
    return [summ, dense]
# Default DPN hyper-parameters. NOTE(review): get_symbol assigns its own
# depth-specific locals for all supported depths, so these module-level
# values (which match the DPN-131 configuration) appear to be unused
# defaults — confirm before relying on them.
k_R = 160
G = 40
# Blocks per stage (conv2..conv5).
k_sec = { 2: 4, \
 3: 8, \
 4: 28, \
 5: 3 }
# Dense-path channel increment per stage.
inc_sec= { 2: 16, \
 3: 32, \
 4: 32, \
 5: 128 }
def get_symbol(num_classes = 1000, num_layers=92, **kwargs):
    """Build a Dual Path Network (DPN) embedding symbol.

    Parameters
    ----------
    num_classes : int
        Size of the embedding produced by the final fc1 layer.
    num_layers : int
        DPN depth; one of 68, 92, 107, 131.
    kwargs :
        version_se, version_input, version_output (fc head type, default
        'E'), version_unit — network-variant switches shared with the
        other symbol builders in this project.

    Returns
    -------
    The fc1 embedding symbol.

    Fix vs. the original: the per-stage width `R = k_R*bw/256` now uses
    integer division so `num_filter` stays an int under Python 3.
    """
    # Depth-specific hyper-parameters: bottleneck width multiplier (k_R),
    # group count (G), blocks per stage (k_sec), dense-path increment (inc_sec).
    if num_layers == 68:
        k_R = 128
        G = 32
        k_sec = {2: 3, 3: 4, 4: 12, 5: 3}
        inc_sec = {2: 16, 3: 32, 4: 32, 5: 64}
    elif num_layers == 92:
        k_R = 96
        G = 32
        k_sec = {2: 3, 3: 4, 4: 20, 5: 3}
        inc_sec = {2: 16, 3: 32, 4: 24, 5: 128}
    elif num_layers == 107:
        k_R = 200
        G = 50
        k_sec = {2: 4, 3: 8, 4: 20, 5: 3}
        inc_sec = {2: 20, 3: 64, 4: 64, 5: 128}
    elif num_layers == 131:
        k_R = 160
        G = 40
        k_sec = {2: 4, 3: 8, 4: 28, 5: 3}
        inc_sec = {2: 16, 3: 32, 4: 32, 5: 128}
    else:
        raise ValueError("no experiments done on dpn num_layers {}, you can do it yourself".format(num_layers))

    version_se = kwargs.get('version_se', 1)
    version_input = kwargs.get('version_input', 1)
    assert version_input >= 0
    version_output = kwargs.get('version_output', 'E')
    fc_type = version_output
    version_unit = kwargs.get('version_unit', 3)
    print(version_se, version_input, version_output, version_unit)

    ## define Dual Path Network
    data = mx.symbol.Variable(name="data")
    # Input stem (conv1 + pooling) shared with the other symbol builders.
    conv1_x_x = symbol_utils.get_head(data, version_input, 128)

    # conv2: first block projects the shortcut, the rest are identity blocks.
    bw = 256
    inc = inc_sec[2]
    # Integer division keeps num_filter an int (true division would yield
    # a float under Python 3 and break mx.symbol.Convolution).
    R = (k_R * bw) // 256
    conv2_x_x = DualPathFactory(conv1_x_x, R, R, bw, 'conv2_x__1', inc, G, 'proj')
    for i_ly in range(2, k_sec[2] + 1):
        conv2_x_x = DualPathFactory(conv2_x_x, R, R, bw, ('conv2_x__%d' % i_ly), inc, G, 'normal')

    # conv3: first block downsamples (stride 2).
    bw = 512
    inc = inc_sec[3]
    R = (k_R * bw) // 256
    conv3_x_x = DualPathFactory(conv2_x_x, R, R, bw, 'conv3_x__1', inc, G, 'down')
    for i_ly in range(2, k_sec[3] + 1):
        conv3_x_x = DualPathFactory(conv3_x_x, R, R, bw, ('conv3_x__%d' % i_ly), inc, G, 'normal')

    # conv4
    bw = 1024
    inc = inc_sec[4]
    R = (k_R * bw) // 256
    conv4_x_x = DualPathFactory(conv3_x_x, R, R, bw, 'conv4_x__1', inc, G, 'down')
    for i_ly in range(2, k_sec[4] + 1):
        conv4_x_x = DualPathFactory(conv4_x_x, R, R, bw, ('conv4_x__%d' % i_ly), inc, G, 'normal')

    # conv5
    bw = 2048
    inc = inc_sec[5]
    R = (k_R * bw) // 256
    conv5_x_x = DualPathFactory(conv4_x_x, R, R, bw, 'conv5_x__1', inc, G, 'down')
    for i_ly in range(2, k_sec[5] + 1):
        conv5_x_x = DualPathFactory(conv5_x_x, R, R, bw, ('conv5_x__%d' % i_ly), inc, G, 'normal')

    # Merge the residual and dense paths before the embedding head.
    conv5_x_x = mx.symbol.Concat(*[conv5_x_x[0], conv5_x_x[1]], name='conv5_x_x_cat-final')
    before_pool = conv5_x_x
    fc1 = symbol_utils.get_fc1(before_pool, num_classes, fc_type)
    return fc1
| [
"ziqi.yzq@alibaba-inc.com"
] | ziqi.yzq@alibaba-inc.com |
593e2746323dfc3130f56fe671a34f9ddd763f58 | f55818124fe2e82e18a03174637c7166bfb5bda6 | /File Display.py | 0dae5fca52780dc144dc1925fbc03f7081870fac | [] | no_license | jfranck99/Python-Projects | 46b12aca9a0c99ecf7316dc3ae110b37173bf123 | b61c42030c17eb4171862ce81e99291d0f439b63 | refs/heads/main | 2023-04-12T05:30:03.713769 | 2021-05-12T18:28:56 | 2021-05-12T18:28:56 | 366,805,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | #Open the file
# Read integers (one per line) from numbers.txt and display each value.
# A context manager guarantees the file is closed even if int() raises,
# which the original open()/close() pair did not.
with open('numbers.txt', 'r') as myfile:
    # Read and display file
    for line in myfile:
        number = int(line)
        print(number)
| [
"noreply@github.com"
] | noreply@github.com |
f0cb718b6420d11174583a18554cb1278090ca7f | b67693ed0ab64bc540dd5d4d24a8fb81efc08031 | /build/catkin_tools_prebuild/catkin_generated/pkg.develspace.context.pc.py | f30dc76d7d900620f2d7aaa1c1354eee7c788d31 | [] | no_license | kbhakt/epicIMU | 35cc609404ae3661ec435e4fc19a74d8d7c3ce89 | 0904281e4956bac47732577d65e25afee69aff06 | refs/heads/main | 2023-06-06T11:22:28.041534 | 2021-06-22T13:25:51 | 2021-06-22T13:25:51 | 379,272,854 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context; values are substituted by the
# build template — do not edit by hand.
# Prefix prepended to the package name in generated output (empty = none).
CATKIN_PACKAGE_PREFIX = ""
# Exported include directories; the template injects a ';'-separated list
# (empty string here means no include dirs are exported).
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
# Space-separated catkin run dependencies of this package.
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
# Exported libraries, including any required linker prefix.
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
# Package name this context was generated for.
PROJECT_NAME = "catkin_tools_prebuild"
# Devel-space directory of the package.
PROJECT_SPACE_DIR = "/home/kbhakta/my_git/epicIMU/devel/.private/catkin_tools_prebuild"
# Version declared in the package.xml.
PROJECT_VERSION = "0.0.0"
| [
"kbhakta3@gatech.edu"
] | kbhakta3@gatech.edu |
2984f1f901892baa21d75ff5c8020bbf9ce301ea | 0a7aa62ff09fef26f1e3af5cafa95cd22ae146f9 | /bin/calc_edge_activity.py | 2b4c1452450044f4816605a10d38983a175ff8f6 | [] | no_license | Boston123456/MIDAS | bd32313d8aced4dfc4c9a77ca63f9229fe06f818 | fcacae06be387fd692bb5d33e40c56bf7a268e89 | refs/heads/master | 2022-04-10T08:34:52.626800 | 2020-04-09T12:05:17 | 2020-04-09T12:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,980 | py | import networkx as nx
import sys
import csv
import matplotlib.pyplot as plt
from graph_node import Graph_node
from centrality import calc_centrality
from calc_edge_act_parallel import *
def dic_return(temp_dic, temp_key):
    """Return temp_dic[temp_key] if the key exists, else 0.0.

    The original re-implemented dict.get with a membership test and a
    mutable accumulator; dict.get does the same in one lookup.
    """
    return temp_dic.get(temp_key, 0.0)
######################################################################
### Main part ###############
######################################################################
#arg1 : graph input file
#arg2 : expr data
#arg3 : max_exp file
#arg4 : edge info file
#arg5 : used edge status file from SPIA
#arg6 : edge weight (1 : file / 0 : no weight) (1 : ew / 0 : act_iht)
#arg7 : edge weight file (only when arg6 ==1)
#arg8 : centrality case (0 : betweeness / 1: close)
#arg9 : act_iht case (0 : A(1-B) / 1: max(A(1-B), (1-A)B))
#arg10 : edge weight output file
#arg11 : pa_id & subtype file (Pa1 c1 \n Pa2 c2)
#arg12 : pathway sas mean result file
# ---- Read the graph topology: source node -> space-separated target ids ----
with open(sys.argv[1], 'r') as graph_input:
    reader = csv.reader(graph_input, delimiter="\t")
    graph_dic = {}
    # print "[INFO] Make graph ..."
    for row in reader:
        s = row[0]
        t = row[1]
        graph_dic[s] = t
#expr data read: gene id -> list of per-sample expression values
expr_dic = {}
expr_file = open(sys.argv[2], 'r')
expr_file_reader = csv.reader(expr_file, delimiter="\t")
sample_num = 0
#save expr data
for row in expr_file_reader:
    id = str(row[0])
    # NOTE(review): len() on the result of map() only works on Python 2,
    # where map() returns a list; this script appears to target Python 2.
    val = map(float, row[1:])
    sample_num = len(val)
    expr_dic[id] = val
# max_expr dic: gene id -> maximum observed expression (used for scaling)
max_expr_dic= {}
max_expr_file = open(sys.argv[3], 'r')
max_expr_file_reader = csv.reader(max_expr_file, delimiter="\t")
for row in max_expr_file_reader:
    id = str(row[0])
    val = float(row[1])
    max_expr_dic[id]=val
#edge info file read: "entry1_entry2" -> relation status (header row skipped)
edge_info_dic = {}
edge_info_file = open(sys.argv[4], 'r')
edge_info_reader = csv.reader(edge_info_file, delimiter="\t")
next(edge_info_reader)
for row in edge_info_reader:
    entry1 = str(row[0])
    entry2 = str(row[1])
    status = str(row[2]) #activation, inhibition, and so on
    edge_id = entry1 + "_" + entry2
    edge_info_dic[edge_id] = status
edge_info_file.close()
#act_iht filter from SPIA: relation name -> beta coefficient (header skipped)
act_iht_filter_dic ={}
act_iht_filter_file = open(sys.argv[5], 'r')
act_iht_filter_reader = csv.reader(act_iht_filter_file, delimiter="\t")
next(act_iht_filter_reader)
for row in act_iht_filter_reader :
    rel = str(row[0])
    beta = int(row[1])
    act_iht_filter_dic[rel]=beta
act_iht_filter_file.close()
# edge weight: only read the weight file when arg6 == 1 (see header comments)
edge_weight_case = int(sys.argv[6])
edge_weight_dic={}
if edge_weight_case == 1 :
    edge_weight_file = open(sys.argv[7], 'r')
    edge_weight_reader = csv.reader(edge_weight_file, delimiter="\t")
    for row in edge_weight_reader :
        id = str(row[0])
        val = float(row[1])
        edge_weight_dic[id] =val
# class Graph_node Flow network: build a node object per gene, wiring
# in/out adjacency from graph_dic and attaching its expression level.
flow_graph_dic={}
for key in graph_dic:
    t = graph_dic[key].split(" ")
    if t[0] != "" :
        cur_node = Graph_node("temp", 0)
        if not key in flow_graph_dic:
            cur_node_exp_level = dic_return(expr_dic,key)
            cur_node = Graph_node(key, cur_node_exp_level)
        else:
            cur_node = flow_graph_dic[key]
        for elem in t:
            down_node = Graph_node("temp", 0)
            if elem in flow_graph_dic:
                down_node = flow_graph_dic[elem]
            else:
                down_node_exp_level = dic_return(expr_dic,elem)
                down_node = Graph_node(elem, down_node_exp_level)
            cur_node.add_out_node(elem)
            down_node.add_in_node(key)
            flow_graph_dic[key] = cur_node
            flow_graph_dic[elem] = down_node
# centrality: 0 = betweenness, 1 = closeness (see header comments)
centrality_case = int(sys.argv[8])
centrality_dic = calc_centrality(graph_dic, centrality_case)
# act_iht method: 0 = A(1-B), 1 = max(A(1-B), (1-A)B)
act_iht_case = int(sys.argv[9])
# parallel cores
# NOTE(review): the header comment says arg12 is the "pathway sas mean result
# file", but the code reads it as the worker-process count -- confirm usage.
p_cores = int(sys.argv[12])
# calc edge activity: main parallel computation over all edges/samples
(total_SAS_mean_results, edge_activity_dic) = calc_edge_activity_whole(flow_graph_dic, centrality_dic, edge_info_dic, act_iht_filter_dic,max_expr_dic, edge_weight_case, edge_weight_dic, act_iht_case, sample_num, p_cores)
#uni & act_iht output
#uni_result_output_file=sys.argv[10] + ".uni_result.txt.MGD"
act_iht_result_output_file=sys.argv[10] + ".act_iht_result.txt.MGD"
#uni_result_output = open(uni_result_output_file, 'w')
act_iht_result_output = open(act_iht_result_output_file, 'w')
#pa_info_file: patient id / subtype pairs, one per line
pa_info_file = open(sys.argv[11], 'r')
pa_info_reader = csv.reader(pa_info_file, delimiter="\t")
pa_id_list = []
subtype_list = []
for row in pa_info_reader:
    pa_id = str(row[0])
    subtype = str(row[1])
    pa_id_list.append(pa_id)
    subtype_list.append(subtype)
#header line: one column per patient
header_line = "Edge_id" + "\t" + "\t".join(pa_id_list) + "\n"
#uni_result_output.write(header_line)
act_iht_result_output.write(header_line)
# One output row per edge; x[1] is the act_iht value of each (uni, act_iht) pair.
for edge in edge_activity_dic.keys():
    #uni_result_output.write(edge + '\t'+'\t'.join(map(lambda x : str(x[0]), edge_activity_dic[edge])) + '\n')
    act_iht_result_output.write(edge + '\t'+'\t'.join(map(lambda x : str(x[1]), edge_activity_dic[edge])) + '\n')
| [
"noreply@github.com"
] | noreply@github.com |
f1516933ea445803defec8a1fa0c6335c45eb5e6 | 491d3ad04c852d2efe3e49842ccfcd20e40eab96 | /mysite/blog/admin.py | 6f0dd47e26f5ddf14bfd772d3edc6b2cfbd7becd | [] | no_license | marianwitkowski/python-24082019 | 746c9824c15c2072caceeac8a9b610d79c63f0f6 | df849d09aa7d9b7a08d8276a9c2b557d9f9d7ce7 | refs/heads/master | 2020-07-07T03:55:55.398961 | 2019-09-29T16:12:40 | 2019-09-29T16:12:40 | 203,239,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | from django.contrib import admin
from .models import Post
# Register your models here.
class PostAdmin(admin.ModelAdmin):
    """Admin configuration for the blog Post model."""
    # Columns shown in the changelist view.
    list_display = ('title', 'slug', 'status','created_on')
    # Sidebar filter on publication status.
    list_filter = ("status",)
    # Fields searched by the admin search box.
    search_fields = ['title', 'content']
    # Auto-fill the slug from the title while typing.
    prepopulated_fields = {'slug': ('title',)}
admin.site.register(Post) | [
"marian.witkowski@gmail.com"
] | marian.witkowski@gmail.com |
2adf1b16dc0fe58417825d349d3e29ccf10e3135 | a247e3a40bca426f604ee057319ae3f7fce5c22f | /django1/venv/bin/django-admin | 8279c65ae89eb5715eb0a7f394b21f42bacec363 | [
"MIT"
] | permissive | stephenndele/django-1 | fcb5cd2a8598b5d68855814fb588a231e06efc09 | 11be0289bc3b6b3234b1e34979f282bd06cbce2e | refs/heads/main | 2023-03-26T14:55:11.769279 | 2021-03-25T12:12:55 | 2021-03-25T12:12:55 | 349,335,362 | 0 | 2 | null | 2021-03-25T12:12:56 | 2021-03-19T07:22:05 | Python | UTF-8 | Python | false | false | 345 | #!/home/moringa/Desktop/moringa-school-projects/core/Django/django-playlist/django1/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
    # Setuptools-generated console-script wrapper: strip the "-script.pyw" /
    # ".exe" suffix Windows launchers append to argv[0], then hand control
    # to Django's management command dispatcher.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())
| [
"stephenndele09346@gmail.com"
] | stephenndele09346@gmail.com | |
d7833d20c9de724dea0ff27dce90bb80523ae797 | a22cc323b29f50da397d8363ac2521e3542a0fd7 | /tests/dpaycli/test_witness.py | a4b82ed9882df3b715a284b0fdf967a5516a4db1 | [
"MIT"
] | permissive | dpays/dpay-cli | 1a58c7dae45218e3b05b7e17ff5ce03e918d27b9 | dfa80898e1faea2cee92ebec6fe04873381bd40f | refs/heads/master | 2020-04-01T09:26:43.200933 | 2018-10-15T08:03:06 | 2018-10-15T08:03:06 | 153,075,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,801 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import super
import unittest
from parameterized import parameterized
from pprint import pprint
from dpaycli import DPay
from dpaycli.witness import Witness, Witnesses, WitnessesVotedByAccount, WitnessesRankedByVote
from dpaycli.instance import set_shared_dpay_instance
from dpaycli.nodelist import NodeList
# Well-known test WIF private key used to sign (but never broadcast) txs.
wif = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"


class Testcases(unittest.TestCase):
    """Integration tests for Witness / Witnesses against live dPay nodes.

    Every test is parameterized over two node sets ("normal" and "testnet");
    all transactions are built unsigned with nobroadcast=True, so nothing
    is ever submitted to the chain.
    """

    @classmethod
    def setUpClass(cls):
        # Build two DPay instances from the current public node list and
        # register one of them as the shared instance for the library.
        nodelist = NodeList()
        nodelist.update_nodes(dpay_instance=DPay(node=nodelist.get_nodes(normal=True, appbase=True), num_retries=10))
        cls.bts = DPay(
            node=nodelist.get_nodes(),
            nobroadcast=True,
            unsigned=True,
            keys={"active": wif},
            num_retries=10
        )
        cls.testnet = DPay(
            # node="https://testnet.timcliff.com",
            node=nodelist.get_nodes(),
            nobroadcast=True,
            unsigned=True,
            keys={"active": wif},
            num_retries=10
        )
        # from getpass import getpass
        # self.bts.wallet.unlock(getpass())
        set_shared_dpay_instance(cls.bts)
        cls.bts.set_default_account("test")

    # NOTE: "(...)" without a comma is just a string, not a tuple; the
    # parameterized library accepts bare strings as single parameters.
    @parameterized.expand([
        ("normal"),
        ("testnet"),
    ])
    def test_feed_publish(self, node_param):
        """feed_publish builds a feed_publish operation for the witness."""
        if node_param == "normal":
            bts = self.bts
        else:
            bts = self.testnet
        bts.txbuffer.clear()
        w = Witness("gtg", dpay_instance=bts)
        tx = w.feed_publish("4 BBD", "1 BEX")
        self.assertEqual(
            (tx["operations"][0][0]),
            "feed_publish"
        )
        op = tx["operations"][0][1]
        self.assertIn(
            "gtg",
            op["publisher"])

    @parameterized.expand([
        ("normal"),
        ("testnet"),
    ])
    def test_update(self, node_param):
        """update builds a witness_update operation with the given props."""
        if node_param == "normal":
            bts = self.bts
        else:
            bts = self.testnet
        bts.txbuffer.clear()
        w = Witness("gtg", dpay_instance=bts)
        props = {"account_creation_fee": "0.1 BEX",
                 "maximum_block_size": 32000,
                 "bbd_interest_rate": 0}
        tx = w.update(wif, "", props)
        self.assertEqual((tx["operations"][0][0]), "witness_update")
        op = tx["operations"][0][1]
        self.assertIn(
            "gtg",
            op["owner"])

    @parameterized.expand([
        ("normal"),
        ("testnet"),
    ])
    def test_witnesses(self, node_param):
        """Witnesses collection is non-empty and yields Witness objects."""
        if node_param == "normal":
            bts = self.bts
        else:
            bts = self.testnet
        w = Witnesses(dpay_instance=bts)
        w.printAsTable()
        self.assertTrue(len(w) > 0)
        self.assertTrue(isinstance(w[0], Witness))

    @parameterized.expand([
        ("normal"),
        ("testnet"),
    ])
    def test_WitnessesVotedByAccount(self, node_param):
        """Witness list voted for by account "gtg" is non-empty."""
        if node_param == "normal":
            bts = self.bts
        else:
            bts = self.testnet
        w = WitnessesVotedByAccount("gtg", dpay_instance=bts)
        w.printAsTable()
        self.assertTrue(len(w) > 0)
        self.assertTrue(isinstance(w[0], Witness))

    @parameterized.expand([
        ("normal"),
        ("testnet"),
    ])
    def test_WitnessesRankedByVote(self, node_param):
        """Vote-ranked witness list is non-empty."""
        if node_param == "normal":
            bts = self.bts
        else:
            bts = self.testnet
        w = WitnessesRankedByVote(dpay_instance=bts)
        w.printAsTable()
        self.assertTrue(len(w) > 0)
        self.assertTrue(isinstance(w[0], Witness))

    @parameterized.expand([
        ("normal"),
        ("testnet"),
    ])
    def test_export(self, node_param):
        """Witness.json() matches the raw RPC witness object field-by-field."""
        if node_param == "normal":
            bts = self.bts
        else:
            bts = self.testnet
        owner = "gtg"
        # Fetch the raw witness record via whichever RPC flavor the node speaks.
        if bts.rpc.get_use_appbase():
            witness = bts.rpc.find_witnesses({'owners': [owner]}, api="database")['witnesses']
            if len(witness) > 0:
                witness = witness[0]
        else:
            witness = bts.rpc.get_witness_by_account(owner)
        w = Witness(owner, dpay_instance=bts)
        keys = list(witness.keys())
        json_witness = w.json()
        # Fields that legitimately drift between the two fetches.
        exclude_list = ['votes', 'virtual_last_update', 'virtual_scheduled_time']
        for k in keys:
            if k not in exclude_list:
                if isinstance(witness[k], dict) and isinstance(json_witness[k], list):
                    self.assertEqual(list(witness[k].values()), json_witness[k])
                else:
                    self.assertEqual(witness[k], json_witness[k])
| [
"jaredricelegal@gmail.com"
] | jaredricelegal@gmail.com |
c89fddca810ff0a6a6816e86a3546737492b1e5e | 56b3f373de5189f128d84ea191cc5dfc6f88636a | /TrackProblems/trackproblems/controllers/__init__.py | 32c45f64e71509d5257896d8ff5c8bf2795962c0 | [
"Apache-2.0"
] | permissive | tongpa/TrackProblems | 0e301d01735ebc523de18553d670207b9a75427e | 41e827a21907f0e9e4cc036ec0f96ab379b94544 | refs/heads/master | 2021-01-02T08:53:47.648786 | 2017-09-11T09:56:39 | 2017-09-11T09:56:39 | 99,089,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | # -*- coding: utf-8 -*-
"""Controllers for the TrackProblems application."""
| [
"tong_pa@hotmail.com"
] | tong_pa@hotmail.com |
4dc1e5bc9c09fba76c3d8b83675b6caf6567e2f6 | 3da0a07e0c89f2cb0331752f531d8c2c3e728c0c | /testing/cjdemo/test_order.py | 51dd338c1244e82e0faae1b8522dfae580c649c8 | [] | no_license | yindan01/yindan | 4e1dffa418d7b4d90396b12b7c1fbda4e492d036 | ec596632c34e90acf1496128fa0d04f2906fd5b7 | refs/heads/master | 2022-12-22T22:37:32.177854 | 2020-09-22T14:46:52 | 2020-09-22T14:46:52 | 283,661,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | import pytest
# Placeholder test; the interesting part is the "last" mark, presumably
# consumed by a pytest ordering plugin -- confirm which plugin is installed.
@pytest.mark.last
def test_foo():
    assert True
# Placeholder test ordered "third" by the same (presumed) ordering plugin.
@pytest.mark.third
def test_bar():
    assert True
@pytest.mark.fourth
def test_aar():
assert True | [
"1130083087@qq.com"
] | 1130083087@qq.com |
4df0a799a412b846b2b31ff74dc86ea649ff1808 | dbffdb62709e36618162d708377bbc63ca76ff65 | /Towers.py | 6cd0a4f0bd63f0cffc4cd3b7f9465468b6458b8c | [] | no_license | Yupiter5505/Project-4th-sem- | 21f8dfc61ab30d77795d642a7a31e2677473286c | 201917745dc30a50a12ec62e8115ca6174a25693 | refs/heads/master | 2021-04-27T03:57:24.588626 | 2018-05-21T07:32:55 | 2018-05-21T07:32:55 | 122,723,661 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,190 | py | import pygame as pg
import SingleNumbers as SN
import random as rnd
import Effects
class Bullet:
    """A projectile fired by a tower, flying toward a destination tile."""

    def __init__(self, position: SN.Vector, direction: SN.Vector, damage, effect):
        # Current location, destination tile vector, hit damage, and the
        # optional status effect carried to the target.
        self.position = position
        self.direction = direction
        self.damage = damage
        self.effect = effect

    def update(self, dt):
        """Move the bullet toward its destination over a time step dt."""
        # Same arithmetic and evaluation order as before:
        # position + SingleVelocity * 20 * (direction - position) * dt
        remaining = self.direction - self.position
        step = SN.SingleVelocity * 20 * remaining * dt
        self.position = self.position + step
class TypeOfTower:
    """Static description of a tower kind (shared by all towers of that kind)."""
    def __init__(self, toe, damage, radius, rof, eob=None, aot=None):
        self.toe = toe  # Type Of Enemies this tower can target ('Ground'/'Fly'/'All')
        self.damage = damage  # damage per bullet
        self.radius = radius  # targeting radius in pixels
        self.rof = rof  # Rate Of Fire
        self.eob = eob  # Effect Of Bullet (status effect carried by bullets)
        self.aot = aot  # Ability Of Tower
class Tower:
    """A placed tower on the map: position, kind, upgrade levels, selection."""

    def __init__(self, type, x, y, upgrades=None, act=None):
        # NOTE: the parameter name "type" shadows the builtin; kept for
        # interface compatibility with existing callers.
        self.type = type  # a TypeOfTower instance
        self.x = x  # top-left pixel coordinates of the tile
        self.y = y
        self.upgrades = upgrades or [0, 0, 0]  # three upgrade counters
        self.act = act or False  # True while the tower is selected/active

    def activate(self):
        """Mark the tower as selected (draws outline + range circle)."""
        self.act = True

    def deactivate(self):
        """Deselect the tower."""
        self.act = False

    def upgrade(self, key, gold):
        """Spend 10 gold on the upgrade slot chosen by numpad key 1/2/other.

        Returns the (possibly reduced) gold amount.
        """
        if gold >= 10:
            if key == pg.K_KP1:
                self.upgrades[0] += 1
                gold -= 10
            elif key == pg.K_KP2:
                self.upgrades[1] += 1
                gold -= 10
            else:
                self.upgrades[2] += 1
                gold -= 10
            print('Всего золота: ', gold)
            return gold
        print('Недостаточно золота!')
        return gold

    def render_tower(self, canvas):
        """Draw the tower on *canvas*; selected towers also get an outline
        and their targeting-radius circle.

        NOTE(review): indentation reconstructed -- the range circle is drawn
        inside the `if self.act:` branch of each shape; confirm against the
        original file.
        """
        if self.type == GroundTower:
            # Ground tower: brown square.
            Color = (150, 75, 0)
            pg.draw.rect(canvas, Color, pg.Rect(self.x + 5, self.y + 5, SN.Tile_size - 10, SN.Tile_size - 10))
            if self.act:
                Color = (75, 38, 0)
                pg.draw.rect(canvas, Color, pg.Rect(self.x + 5, self.y + 5, SN.Tile_size - 10, SN.Tile_size - 10), 3)
                pg.draw.circle(canvas, (25, 25, 25), (self.x + SN.Tile_size // 2, self.y + SN.Tile_size // 2),
                               self.type.radius, 2)
        if self.type == FlyTower:
            # Anti-air tower: purple pentagon.
            Color = (102, 0, 255)
            pointlist = [
                (self.x + SN.Tile_size//2, self.y),
                (self.x, self.y + 2*SN.Tile_size//5),
                (self.x + SN.Tile_size//5, self.y + SN.Tile_size),
                (self.x + 4*SN.Tile_size//5, self.y + SN.Tile_size),
                (self.x + SN.Tile_size, self.y + 2*SN.Tile_size//5)
            ]
            pg.draw.polygon(canvas, Color, pointlist)
            if self.act:
                Color = (51, 0, 128)
                pg.draw.polygon(canvas, Color, pointlist, 3)
                pg.draw.circle(canvas, (25, 25, 25), (self.x + SN.Tile_size // 2, self.y + SN.Tile_size // 2),
                               self.type.radius, 2)
        if self.type == EffectTower:
            # Effect tower: grey-green triangle.
            Color = (124, 146, 124)
            pointlist = [
                (self.x + SN.Tile_size//2, self.y),
                (self.x, self.y + SN.Tile_size),
                (self.x + SN.Tile_size, self.y + SN.Tile_size)
            ]
            pg.draw.polygon(canvas, Color, pointlist)
            if self.act:
                Color = (62, 73, 62)
                pg.draw.polygon(canvas, Color, pointlist, 3)
                pg.draw.circle(canvas, (25, 25, 25), (self.x + SN.Tile_size // 2, self.y + SN.Tile_size // 2),
                               self.type.radius, 2)

    def shoot(self, enemies, bullets):
        """Append a Bullet for each in-range enemy whose enemies[i][1][1]
        value equals the maximum over all enemies; returns *bullets*.

        NOTE(review): semantics of enemies[i][1][0] / [1][1] (position
        vector / priority value?) are not visible here -- confirm.
        """
        pos = SN.Vector(self.x + SN.Tile_size // 2, self.y + SN.Tile_size // 2)
        m = 0
        # First pass: find the maximum enemies[i][1][1] value.
        for i in range(len(enemies)):
            if enemies[i][1][1] > m:
                m = enemies[i][1][1]
        # Second pass: fire at every maximal enemy inside the radius.
        for i in range(len(enemies)):
            dist = (pos * enemies[i][1][0]) ** 0.5
            if dist <= self.type.radius and enemies[i][1][1] == m:
                bullet = Bullet(pos, enemies[i][1][0], self.type.damage, self.type.eob)
                bullets.append(bullet)
        return bullets
# Baseline tower stats shared by all tower kinds.
SingleDamage = SN.SingleDamage
SingleRadius = SN.SingleRadius
SingleRate = SN.Tile_size
# The three buildable tower kinds. Note: EffectTower's status effect is
# chosen by rnd.choice ONCE, at module import time, not per bullet.
GroundTower = TypeOfTower('Ground', SingleDamage, SingleRadius, SingleRate)
FlyTower = TypeOfTower('Fly', SingleDamage, SingleRadius, SingleRate)
EffectTower = TypeOfTower('All', SingleDamage/2, SingleRadius, SingleRate, rnd.choice(Effects.ListOfEffects))
def BuildTower(gold, key, tile):
    """Try to build the tower selected by keyboard *key* on *tile*.

    :param gold: the player's current gold
    :param key: pygame key constant (K_g ground, K_f fly, K_e effect)
    :param tile: the map tile to build on
    :return: (tower_or_tile, remaining_gold) -- the original tile is
             returned unchanged when the build fails.
    """
    # if type(tile) != Tower:
    if gold >= SN.CoastOfTower:
        if key == pg.K_g:
            tower = Tower(GroundTower, tile.x, tile.y)
        elif key == pg.K_f:
            tower = Tower(FlyTower, tile.x, tile.y)
        elif key == pg.K_e:
            tower = Tower(EffectTower, tile.x, tile.y)
        else:
            # BUG FIX: previously an unknown key fell through and crashed
            # with UnboundLocalError on "tower"; treat it as "no build".
            return tile, gold
        gold -= SN.CoastOfTower
        print('Всего золота: ', gold)
        tower.activate()
        return tower, gold
    print('Недостаточно золота!')
    return tile, gold
| [
"noreply@github.com"
] | noreply@github.com |
f6a760119a4c4b2c583957abb4a7066cbb64a2eb | dc67e70a303f265ee6cb4c1a2d61fe811053fb3d | /beginner/066/A.py | cabb38041ad8e0ea035492830c9cef953fb894b2 | [] | no_license | cry999/AtCoder | d39ce22d49dfce805cb7bab9d1ff0dd21825823a | 879d0e43e3fac0aadc4d772dc57374ae72571fe6 | refs/heads/master | 2020-04-23T13:55:00.018156 | 2019-12-11T05:23:03 | 2019-12-11T05:23:03 | 171,214,066 | 0 | 0 | null | 2019-05-13T15:17:02 | 2019-02-18T04:24:01 | Python | UTF-8 | Python | false | false | 193 | py | def ringring(a: int, b: int, c: int)->int:
return sum(sorted([a, b, c])[:2])
if __name__ == "__main__":
a, b, c = map(int, input().split())
ans = ringring(a, b, c)
print(ans)
| [
"when.the.cry999@gmail.com"
] | when.the.cry999@gmail.com |
e62b2815fac299fb1b126e73cee5e4166cd51d2f | 4296c26a6669870671ae92d0588ad08c18590bdb | /imageboard_withLinuxPaths.py | cfa0f4ceb767bc03fd5d60932cfa9b32a793e4d7 | [] | no_license | neisor/imageboard | 4c4af1d8567ecb9892d6d5279029cfd0d7323881 | 18b6db42fe67ef4e65cc63c96addf08f42089be0 | refs/heads/master | 2022-12-12T09:06:40.527836 | 2020-09-11T12:04:43 | 2020-09-11T12:04:43 | 294,684,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,986 | py | # -*- coding: utf-8 -*-
from flask import Flask, flash, redirect, render_template, request, session, abort, jsonify
import sqlite3
import random
import os
from datetime import datetime
from werkzeug.utils import secure_filename
#Flask configuration variables
UPLOAD_FOLDER = os.getcwd() + r'/static/images'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
#Initiate Flask app
app = Flask(__name__, template_folder=os.getcwd(), static_folder=os.getcwd() + r'/static')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
#Set secret key for sessions
app.secret_key = b'\xd8b-\xcc\xab\xb2K\x29j\xe7\x23S\xd4\xbd\x9e\x0cq\xd2\xcc\x8d'
#Initiate DB connection
conn = sqlite3.connect('imageboard.db')
c = conn.cursor()
#Create table for posts
c.execute('''CREATE TABLE IF NOT EXISTS posts
(id integer primary key, time text, date text, user text, title text, posttext text, imagepath text)''')
#Create table for comments
c.execute('''CREATE TABLE IF NOT EXISTS comments
(id integer primary key, idofpost integer, numberofcommentofpost int, user text, commenttext text)''')
conn.commit()
conn.close()
#Function for checking allowed extensions of file when uploaded
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route("/", methods=["GET", "POST"])
def main():
#Define session for numberOfPosts to continuosly load content
if session.get('numberOfPosts'):
session.pop('numberOfPosts')
session['numberOfPosts'] = 0
numberOfPostsVariable = str(session.get('numberOfPosts'))
#Initiate DB connection
conn = sqlite3.connect('imageboard.db')
c = conn.cursor()
#Defines what to do when the request is GET
if request.method == 'GET':
#Load content from DB
if int(numberOfPostsVariable) == 0:
allPosts = c.execute('SELECT * FROM posts ORDER BY id DESC LIMIT 10')
else:
allPosts = c.execute('SELECT * FROM posts ORDER BY id DESC LIMIT 10 {number}'.format(number=numberOfPostsVariable))
return render_template('index.html', allPosts = allPosts)
#Defines what to do when the request is POST
if request.method == 'POST':
pass
@app.route("/upload", methods=["GET", "POST"])
def upload():
#Initiate DB connection
conn = sqlite3.connect('imageboard.db')
c = conn.cursor()
#Define what to do if method is GET
if request.method == 'GET':
return render_template('upload.html')
#Define what to do if method is POST
if request.method == 'POST':
#Get the data from the form
username = request.form['username']
title = request.form['nadpis']
post = request.form['prispevok']
#Check if username is empty, if yes set username to anonym
if username == "":
username = "anonym"
#Set ID of post (last post + 1)
try:
c.execute('SELECT id FROM posts ORDER BY id DESC LIMIT 1')
idOfPost = c.fetchall()
idOfPost = idOfPost[0]
idOfPost = int(idOfPost[0]) + 1
except:
#If no post exists yet, give the first post the id of number 1
idOfPost = 1
#Get current timestamp
actualTime = datetime.now()
actualTimeForInsertingIntoDB = str(actualTime.hour) + ':' + str(actualTime.minute) + ':' + str(actualTime.second)
actualDateForInsertingIntoDB = str(actualTime.day) + '.' + str(actualTime.month) + '.' + str(actualTime.year)
#Check if images have the correct file extension
if 'image' not in request.files:
flash('No image part')
return redirect('/upload', code=302)
image = request.files['image']
# if user does not select file, browser also submit an empty part without filename
if image.filename == '':
flash('No selected image')
return redirect('/upload', code=302)
if image and allowed_file(image.filename):
filename = secure_filename(image.filename)
filename = str(idOfPost) + '_' + filename
image.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
imageName = filename
else:
message= 'Nahratý neplatný súbor. Nahraj súbor .gif, .png, .jpg alebo .jpeg'
return render_template('/upload-message.html', message=message)
#Save the post into DB
data = [(idOfPost, actualTimeForInsertingIntoDB, actualDateForInsertingIntoDB, username, title, post, imageName)]
c.executemany('INSERT INTO posts VALUES (?,?,?,?,?,?,?)', data)
conn.commit()
conn.close()
return redirect('/', code=302)
@app.route("/load", methods=["POST"])
def load():
#Initiate DB connection
conn = sqlite3.connect('imageboard.db')
c = conn.cursor()
#Defines what to do when the request is POST
if request.method == 'POST':
session['numberOfPosts'] = int(session.get('numberOfPosts')) + 10
numberOfPostsVariable = str(session.get('numberOfPosts'))
c.execute('SELECT * FROM posts ORDER BY id DESC LIMIT 10 OFFSET ' + str(numberOfPostsVariable))
loadedPosts = c.fetchall()
return render_template('loadNewPosts.html', loadedPosts = loadedPosts)
#Show post route and function
@app.route("/post", methods=["GET", "POST"])
def mainPost():
return redirect('/', code=302)
#Show post route and function
@app.route("/post/<int:idofpost>", methods=["GET", "POST"])
def post(idofpost):
#Initiate DB connection
conn = sqlite3.connect('imageboard.db')
c = conn.cursor()
#Define what to do when request method is GET
if request.method == 'GET':
#Get post's details (image, text, headline, etc.)
c.execute('SELECT * FROM posts WHERE id LIKE ' + str(idofpost))
postDetails = c.fetchall()
#Get post's comments
c.execute('SELECT * FROM comments WHERE idofpost LIKE ' + str(idofpost) + ' ORDER BY id ASC')
comments = c.fetchall()
#Create a variable for post's id to create a URL or posting a comment on the webpage
idOfPostForButtonURL = postDetails[0]
idOfPostForButtonURL = idOfPostForButtonURL[0]
return render_template('show_post.html', postDetails = postDetails, comments = comments, idOfPostForButtonURL = idOfPostForButtonURL)
#Post a comment route and function
@app.route("/post_comment/<int:idofpost>", methods=["GET", "POST"])
def post_comment(idofpost):
#Initiate DB connection
conn = sqlite3.connect('imageboard.db')
c = conn.cursor()
#Define what to do when request method is GET
if request.method == 'GET':
return render_template('post_comment.html')
#Define what to do when request method is POST
if request.method == 'POST':
#Get data from HTML
user = session['usernameForComment'] = request.form['username']
commenttext = session['commentText'] = request.form['commentText']
idofpost = int(idofpost)
#Set ID of comment (last comment + 1)
try:
c.execute('SELECT id FROM comments ORDER BY id DESC LIMIT 1')
idofcomment = c.fetchall()
idofcomment = idofcomment[0]
idofcomment = int(idofcomment[0]) + 1
except:
idofcomment = 1
#Set number of comment of a post
try:
c.execute('SELECT numberofcommentofpost FROM comments WHERE idofpost LIKE ' + str(idofpost) + ' ORDER BY numberofcommentofpost DESC LIMIT 1')
numberofcommentofpost = c.fetchall()
numberofcommentofpost = numberofcommentofpost[0]
numberofcommentofpost = int(numberofcommentofpost[0]) + 1
except:
numberofcommentofpost = 1
#Check if username is empty, if yes set username to anonym
if user == "":
user = "anonym"
#Save the comment into DB
commentData = [(idofcomment, idofpost, numberofcommentofpost, user, commenttext)]
c.executemany('INSERT INTO comments VALUES (?,?,?,?,?)', commentData)
conn.commit()
conn.close()
#Redirect back to the post's URL
return redirect('/post/' + str(idofpost), code=302)
#Define route and function for o nas (about us)
@app.route("/o-nas", methods=["GET"])
def onas():
return render_template('o-nas.html')
@app.route('/<path:path>')
def catch_all(path):
    """Fallback for every unknown URL: send the visitor to the front page.

    NOTE(review): redirect with code=404 is unusual -- a 404 status with a
    Location header is not followed by browsers; confirm whether a plain
    302 redirect or an abort(404) was intended.
    """
    return redirect('/', code=404)
#Run Flask instance
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True, threaded=True) | [
"noreply@github.com"
] | noreply@github.com |
9f70e24acb6247d89104f02908ac2638143ee173 | ca4910e944cca453050299cb6c8e856c06a76fb0 | /blog/settings.py | 1175ab8bc4cd442f1245d312eacf024ca32835cc | [] | no_license | SonerArslan2019/djangoRESTAPI | f8e33cd8570f86f14810ef8fabea918503e0fc90 | 5398b578524fc5c6eb3b7ed51db68bc4f3687221 | refs/heads/master | 2023-04-18T01:45:45.093105 | 2021-04-24T09:50:22 | 2021-04-24T09:50:22 | 360,634,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,603 | py | from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load it from the environment before any public deployment.
SECRET_KEY = 'django-insecure-$eiq=w_$+n^n#iy6c45zc0hsni!wjycxipc!4yrx+zq+!(js43'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# REST framework / JWT configuration kept for reference, currently disabled.
# REST_FRAMEWORK = {
#     'DEFAULT_AUTHENTICATION_CLASSES': [
#         'rest_framework_simplejwt.authentication.JWTAuthentication',
#         'rest_framework.authentication.SessionAuthentication'
#     ],
#     'DEFAULT_THROTTLE_CLASSES': (
#         'rest_framework.throttling.ScopedRateThrottle',
#     ),
#     'DEFAULT_THROTTLE_RATES': {
#         'registerthrottle': '15/hour',
#         # 'hasan' : '5/hour'
#     }
# }
# SIMPLE_JWT = {
#     'ACCESS_TOKEN_LIFETIME': timedelta(minutes=15)
# }

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'post',
    'comment',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'blog.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'blog.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
# (all validators currently disabled)

# AUTH_PASSWORD_VALIDATORS = [
#     {
#         'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
#     },
#     {
#         'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
#     },
#     {
#         'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
#     },
#     {
#         'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
#     },
# ]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'

# User-uploaded media files.
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| [
"soner@arslanyapi.com.tr"
] | soner@arslanyapi.com.tr |
186088d2e2d202f72cb654fef5b82afc371e8bb8 | 1643b8c9dfec62cb41e1172a8602ffc8d510b01b | /00-学习/1.0-类型-常量-输出-分支.py | 25e68268c74f7d42d92296498c68941b5ecf6bbf | [] | no_license | yangyiko/helloworld | 56b922f0b26342f37e5c4dee1c002c31940fb02b | 40da8a23b3de304a092574a2856d26d3920b176b | refs/heads/master | 2020-04-16T18:53:44.712759 | 2019-01-15T11:40:22 | 2019-01-15T11:40:22 | 165,839,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,278 | py | """注释"""
#===============================================================================
# """注释"""
#===============================================================================
# 特殊注释
# encoding=utf-8
# coding=utf-8
# _*_ coding:utf-8 _*_
# _*_ coding:utf-8 _*_
# Python2.x 版本
from test.test_tools.test_unparse import elif1
from os import name
# 特殊注释
#!/usr/bin/python
#!/usr/bin/env python
# 单行注释 - 在此处写下下一行代码的含义
''' 多行行首添加#
注释信息
'''
"""
注释信息
"""
"""变量 比较运算"""
print ("========================================变量 比较运算====================================")
#===============================================================================
# """变量"""
# 比较运算 和 js || 很像
# 非布尔类型的值, 如果作为真假来判定, 一般都是非零即真, 非空即真
# 整个逻辑表达式的结果不一定只是True和False
#===============================================================================
a = [1];b = [1];c, d = 1, 1;e = f = 3;
print ("值比较="+ str(a == b))
print ("类型比较="+ str(a is b))
print ("id取值="+ str(id(d)))
#链式运算
print ('链式运算='+ str(c < 2 < f))
# not 非, 取反, 真 -> 假; 假 -> 真
b = False
#0是false 非0是true
#and 指最后一个真才是真 ,or是第一真就 是真了,和js很像
print ('false取反='+ str(not b))
print ('0 and True='+ str(0 and True))
print ('1 and 3='+ str(1 and 3))
print ('0 or False or 6='+ str(0 or False or 6))
"""输入 输出"""
#===============================================================================
#输入 输出
#raw_input 相当于是输入
#input 相当于是eval
#
#===============================================================================
print ("========================python2================输出====================================")
# 格式化输出
name = 'sz'
age = 18
# 我的名字是xxx, 年龄是xxx
print("我的名字是%s, 年龄是%d"%(name, age))
print("我的名字是{0}, 年龄是{1}".format(name, age))
# python2输出到文件中
# print >>open("test.txt", "w"), "12345"
#f = open("test.txt", "w")
import sys
# python3输出到文件中
f = open("test.txt", "w") #w只能写
print("xxxxxxxx", file=sys.stdout)
# 输出不自动换行 默认是 \n
print("abc", end="")
print("---abc", end="我是分隔符11111111111111111")
print(list("abc"))
# flush 参数的说明
# print("请输入账号", end="", flush=True)
# 休眠5s
from time import sleep
# sleep(5)
"""分支控制"""
#===============================================================================
#分支控制
#===============================================================================
print ("===================================分支控制====================================")
# age = input("请输入年龄")
# age = int(age)
age = 6
if 0<age<5:
print ('if else 控制='+'幼儿')
elif 5<age<10 :
print ('if else 控制='+'少年')
"""循环控制 pass"""
#===============================================================================
#循环控制
# for i in
# for range
# pass
# exit break时不会执行else
#===============================================================================
print ("===================================循环控制====================================")
# 遍历一个集合
# 字符串, 列表
notice = "社会我顺哥, 人狠话不多";pets = ["小花", "小黑", "小黄", "小红"]
for c in notice:
# print(c)
pass
else:
pass
# print("for else="+"循环完毕,如果使用break的话将不执行 else里面的内容")
for num in range(1,5):
if num%2==0:
print("for range1-5 偶数学习="+ str(num))
# while True :
"""数据类型"""
#===============================================================================
#数据类型
# int float
# oct
#str
#===============================================================================
print ("===================================数据类型====================================")
print("complex(1, 2)="+str(complex(1, 2)))
a =0b1111
if a is str:
print("0b1111是str="+str(type(a)))
else:
print("0b1111是int="+str(type(a)))
name="123456"
print("string方法="+name[1:3]) | [
"yangyiko@163.com"
] | yangyiko@163.com |
72d7de871b2fb085d76442aa9a24ad3405cd961b | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/cv/centerface/preprocess.py | a985c6b64428994c27265c5fcd6ff413bee6b92a | [
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 2,585 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""pre process for 310 inference"""
import os
import shutil
import cv2
import numpy as np
from src.model_utils.config import config
from dependency.centernet.src.lib.detectors.base_detector import CenterFaceDetector
def preprocess(dataset_path, preprocess_path):
    """Prepare images and pre-processing metadata for Ascend 310 inference.

    Walks ``dataset_path`` (one sub-directory per event, WIDER-FACE style),
    copies every image into ``preprocess_path/input`` and writes the
    detector's pre-processing meta for each image to
    ``preprocess_path/meta/meta/<name>.txt``.  Finally saves the collected
    image names and meta dicts as ``name_list.npy`` / ``meta_list.npy``.

    Args:
        dataset_path: root directory containing event sub-directories of images.
        preprocess_path: output root; ``input`` and ``meta/meta`` are created
            underneath it if missing.
    """
    event_list = os.listdir(dataset_path)
    input_path = os.path.join(preprocess_path, "input")
    meta_path = os.path.join(preprocess_path, "meta/meta")
    if not os.path.exists(input_path):
        os.makedirs(os.path.join(preprocess_path, "input"))
    if not os.path.exists(meta_path):
        os.makedirs(os.path.join(preprocess_path, "meta/meta"))
    # Detector is used only for its pre_process() transform; no model is loaded.
    detector = CenterFaceDetector(config, None)
    name_list = []
    meta_list = []
    i = 0  # running counter used only for progress printing
    for _, event in enumerate(event_list):
        file_list_item = os.listdir(os.path.join(dataset_path, event))
        im_dir = event
        for _, file in enumerate(file_list_item):
            im_name = file.split('.')[0]
            zip_name = '%s/%s' % (im_dir, file)
            img_path = os.path.join(dataset_path, zip_name)
            image = cv2.imread(img_path)
            # NOTE(review): the loop overwrites `meta` each iteration, so only
            # the meta for the LAST entry of config.test_scales is kept/saved.
            for scale in config.test_scales:
                _, meta = detector.pre_process(image, scale)
            # The raw image (not the pre-processed tensor) is copied verbatim.
            img_file_path = os.path.join(input_path, file)
            shutil.copyfile(img_path, img_file_path)
            meta_file_path = os.path.join(preprocess_path + "/meta/meta", im_name + ".txt")
            with open(meta_file_path, 'w+') as f:
                f.write(str(meta))
            name_list.append(im_name)
            meta_list.append(meta)
            i += 1
            print(f"preprocess: no.[{i}], img_name:{im_name}")
    np.save(os.path.join(preprocess_path + "/meta", "name_list.npy"), np.array(name_list))
    np.save(os.path.join(preprocess_path + "/meta", "meta_list.npy"), np.array(meta_list))
if __name__ == '__main__':
    # Entry point: input/output paths come from the shared model_utils config.
    preprocess(config.dataset_path, config.preprocess_path)
| [
"chenhaozhe1@huawei.com"
] | chenhaozhe1@huawei.com |
fa3dc1be669911bfd6a1f491293bb3b01b26b80d | 288a4eaad55d56f86d61d8354e4d35ca98b75308 | /Python /is_number_a_prime.py | a36ab6e29bd6d218fd640f74ae2dc840edae11d4 | [] | no_license | KaniahDunn/codewars-solutions | 003a9e40654203bf2bbf89a33164a7b6057de1e8 | 141cc2c9dcdba04641528208e3df20d50abef1a7 | refs/heads/master | 2021-06-25T23:25:47.658294 | 2021-03-16T01:24:50 | 2021-03-16T01:24:50 | 217,607,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | """
IS NUMBER A PRIME ?
Define a function that takes one integer argument and returns logical value true or false depending on if the integer is a prime.
Per Wikipedia, a prime number (or a prime) is a natural number greater than 1 that has no positive divisors other than 1 and itself.
Requirements
You can assume you will be given an integer input.
You can not assume that the integer will be only positive. You may be given negative numbers as well (or 0).
NOTE on performance: There are no fancy optimizations required, but still the most trivial solutions might time out. Numbers go up to 2^31 (or similar, depends on language version). Looping all the way up to n, or n/2, will be too slow.
Example
is_prime(1) /* false */
is_prime(2) /* true */
is_prime(-1) /* false */
"""
def is_prime(num):
    """Return True if *num* is a prime number, False otherwise.

    Handles zero and negative inputs (both non-prime).  Uses trial division
    only up to sqrt(num), checking 2 once and then odd candidates — the
    original looped over every integer below num, which the kata itself
    warns will time out for inputs near 2**31.
    """
    if num < 2:
        return False
    if num < 4:  # 2 and 3 are prime
        return True
    if num % 2 == 0:
        return False
    # Only odd divisors from 3 up to sqrt(num) need to be tested.
    i = 3
    while i * i <= num:
        if num % i == 0:
            return False
        i += 2
    return True
"kaniahdunn@gmail.com"
] | kaniahdunn@gmail.com |
4eaa794568039ff5217f0ae9c40798be17b54adb | 2eecc6419b28b5f5b1d84f0fd04861e7de05027d | /read_files.py | 1be2b02e96cb572e659e749ed22a5097f3e7bae5 | [] | no_license | priyanka36/RetinalDiseaseClassification | 61f49b16c9bc447c2454f1969e6830610ee6f5e0 | 3cf5932dc0dcec1632f90a7798215fee5df0ba2c | refs/heads/main | 2023-06-26T19:29:05.365292 | 2021-08-01T00:15:30 | 2021-08-01T00:15:30 | 390,621,647 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,762 | py | import os
import numpy as np
import cv2
from glob import glob
from tqdm import tqdm
import imageio
from albumentations import HorizontalFlip,VerticalFlip,ElasticTransform,GridDistortion,OpticalDistortion,CoarseDropout
def create_dir(path):
    """Create *path* (and any missing parents) if it does not already exist.

    Uses ``exist_ok=True`` instead of the original check-then-create pair
    (``os.path.exists`` followed by ``os.makedirs``), which could raise
    FileExistsError if the directory appeared between the two calls.
    """
    os.makedirs(path, exist_ok=True)
def load_data(path):
    """Collect DRIVE-style image/mask file paths under *path*.

    Returns ((train_images, train_masks), (test_images, test_masks)),
    each a sorted list of paths so images and masks align index-by-index.
    """
    def _collect(split, folder, pattern):
        return sorted(glob(os.path.join(path, split, folder, pattern)))

    train_images = _collect("training", "images", "*.tif")
    train_masks = _collect("training", "1st_manual", "*.gif")
    test_images = _collect("test", "images", "*.tif")
    test_masks = _collect("test", "1st_manual", "*.gif")
    return (train_images, train_masks), (test_images, test_masks)
def augment_data(images, masks, save_path, augment=False):
    """Resize (and optionally augment) image/mask pairs and write them to disk.

    For each (image, mask) pair: when ``augment`` is True, produces the
    original plus horizontal flip, two vertical flips (the second flip is
    duplicated — presumably one was meant to be a different transform; TODO
    confirm) and an optical distortion, i.e. 5 outputs per input; otherwise
    just the original.  Every output is resized to 512x512 and written under
    the hard-coded ``newdata/<test|train>/...`` tree.

    Args:
        images: list of image file paths (read with cv2, BGR).
        masks:  list of mask file paths (read with imageio.mimread, first frame).
        save_path: used only to derive the "test"/"train" split name from its
            second path component — the actual writes go to ``newdata/``.
            NOTE(review): callers pass "new_data/..." while writes target
            "newdata/..."; confirm which spelling is intended.
        augment: whether to generate the 4 extra augmented variants.
    """
    H= 512
    W= 512
    for idx,(x,y) in tqdm(enumerate(zip(images,masks)),total = len(images)):
        """ Extracting names"""
        print(x,y)
        # Base name without extension; assumes '/'-separated paths (POSIX only).
        names = str(x.split("/")[-1].split(".")[0])
        extension = ".jpg"  # NOTE(review): unused — outputs hard-code .jpg below
        x = cv2.imread(x,cv2.IMREAD_COLOR)
        y = imageio.mimread(y)[0]  # first frame of the (possibly multi-frame) GIF
        print(type(y))
        y = np.asarray(y)
        print(x.shape,y.shape)
        if augment == True:
            aug = HorizontalFlip(p=1.0)
            augmented = aug(image=x,mask=y)
            x1 = augmented["image"]
            y1 = augmented["mask"]

            aug = VerticalFlip(p=1.0)
            augmented = aug(image=x,mask=y)
            x2 = augmented["image"]
            y2 = augmented["mask"]

            # NOTE(review): identical to the previous VerticalFlip — x3/y3
            # duplicate x2/y2.
            aug = VerticalFlip(p=1.0)
            augmented = aug(image=x,mask=y)
            x3 = augmented["image"]
            y3 = augmented["mask"]

            aug = OpticalDistortion(p=1.0)
            augmented = aug(image=x,mask=y)
            x4 = augmented["image"]
            y4 = augmented["mask"]

            X = [x,x1,x2,x3,x4]
            Y = [y,y1,y2,y3,y4]
            print(len(X))
            print(len(Y))
        else:
            X=[x]
            Y=[y]
        index = 0  # suffix for augmented variants of the same source image
        for i,m in zip(X,Y):
            i = cv2.resize(i,(W,H))
            m = cv2.resize(m,(W,H))
            if len(X) == 1:
                tmp_image_name = f"{names}.jpg"
                tmp_mask_name = f"{names}.jpg"
            else:
                tmp_image_name= f"{names}_{index}.jpg"
                tmp_mask_name= f"{names}_{index}.jpg"
            image_path = os.path.join(save_path,"images",tmp_image_name)
            # Second path component of save_path selects the output split,
            # e.g. "new_data/test/" -> "test".
            test_or_train = str(image_path.split("/")[1])
            mask_path = os.path.join(save_path,"mask",tmp_mask_name)
            if test_or_train == "test":
                cv2.imwrite(f"newdata/{test_or_train}/images/{tmp_image_name}",i)
                cv2.imwrite(f"newdata/{test_or_train}/mask/{tmp_mask_name}",m)
            elif test_or_train == "train" :
                cv2.imwrite(f"newdata/{test_or_train}/images/{tmp_image_name}",i)
                cv2.imwrite(f"newdata/{test_or_train}/mask/{tmp_mask_name}",m)
            index += 1
if __name__ == "__main__":
    """Seeding"""
    np.random.seed(42)
    """Load the data"""
    # Hard-coded dataset root on the author's machine.
    data_path="/home/priyanka/RetinaBloodVessel/dataset"
    (train_X,train_Y),(test_x,test_y)=load_data(data_path)
    # Output tree actually written to by augment_data (hard-coded "newdata").
    create_dir("newdata/train/images")
    create_dir("newdata/train/mask")
    create_dir("newdata/test/images")
    create_dir("newdata/test/mask")
    # NOTE(review): save_path says "new_data/..." but augment_data writes under
    # "newdata/..." — only the split name ("test"/"train") is taken from it.
    augment_data(test_x, test_y, f"new_data/test/", augment=False)
    augment_data(train_X,train_Y,f"new_data/train/",augment=True)
| [
"poudelnipriyanka@gmail.com"
] | poudelnipriyanka@gmail.com |
701bd093bcd667f743047b4f0a4a5aac7dc88001 | 9b5d7dde000f5619ff4a752fc453f8ed5b2618e0 | /.history/main_20210924153614.py | d47094295983741c4ba3c32169bb49e2b9386307 | [] | no_license | Luyzr/EE7207_A1 | 4a2031221c5264ef70da516e370fbd9d6a068616 | 1c8592a0f8c7629848e494dd9dda8d14421a26a4 | refs/heads/master | 2023-08-14T10:10:51.113766 | 2021-09-29T10:58:00 | 2021-09-29T10:58:00 | 411,636,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,831 | py | import numpy as np
import math
import scipy.io
from sklearn.model_selection import KFold
# data_train.shape => (330, 33)
# label_train.shape => (330, 1)
# data_test.shape => (21, 33)
def Gaussian(x, c, t):
    """Gaussian RBF activation: exp(-||x - c||^2 / (2 * t^2))."""
    squared_distance = np.sum(np.square(x - c))
    return math.exp(-squared_distance / (2 * t ** 2))
def EstimateC(data, label, pn=30, pretrained=False):
    """Greedily select RBF centre vectors from the training samples.

    Starts with the single sample that minimises the squared training error,
    then repeatedly adds the remaining sample that reduces the error most,
    stopping when ``pn`` centres are reached, the error rises, or the error
    improvement drops to 0.15 or less.  The selected centres are saved to
    ``{m}_center_vector.npy`` and returned as an (m, d) array.

    Args:
        data: (n, d) training samples.
        label: (n, 1) training targets.
        pn: maximum number of centres to select.
        pretrained: if True, load ``{pn}_center_vector.npy`` instead of searching.
    """
    print('Getting center vector...')
    if pretrained:
        # NOTE(review): the file is saved under the ACTUAL centre count m but
        # loaded here under the REQUESTED pn — these must match to reload.
        return np.load('{}_center_vector.npy'.format(pn))
    n, d = data.shape
    e = np.zeros(n)  # e[i] = squared error when sample i is used as a centre
    # Indices of samples still available as candidate centres.
    # NOTE(review): hard-codes 330 — presumably the training-set size; breaks
    # if n != 330.  TODO: use range(n).
    candi = [i for i in range(0, 330)]
    # Pass 1: score every sample as a lone centre with a fixed width of 0.707.
    for i in range(0, n):
        c = data[i]
        o, w = EstimateOW(data, c, 0.707, label)
        f = np.dot(o, w)
        e[i] = 1/2 * np.sum(np.square(f - label))
    first = np.argmin(e)
    err = e[first]
    old_err = np.Inf
    c = data[first].reshape((1,-1))
    candi.pop(first)
    m = 1  # number of centres selected so far
    # print('round:{} error:{:.2f}\n'.format(m, err))
    # Greedy growth: stop at pn centres, if the error increases, or if the
    # improvement over the previous round is <= 0.15.
    while m < pn and err <= old_err and np.abs(err - old_err) > 0.15:
        m += 1
        old_err = err
        # Unscored entries stay at +inf so argmin only picks real candidates.
        e = np.Inf * np.ones(n)
        for k in range(0, n - m):
            i = candi[k]
            nc = np.concatenate((c, data[i].reshape(1,-1)), axis=0)
            t = EstimateT(nc, m)
            o, w = EstimateOW(data, nc, t, label)
            f = np.dot(o, w)
            e[i] = 1/2 * np.sum(np.square(f - label))
        first = np.argmin(e)
        err = e[first]
        c = np.concatenate((c, data[first].reshape(1,-1)), axis=0)
        candi.pop(candi.index(first))
        # print('round:{} error:{:.2f}\n'.format(m, err))
    print('Number of center vector:{}, saving'.format(m))
    np.save('{}_center_vector.npy'.format(m), c)
    return c
def EstimateT(c, m):
    """Heuristic Gaussian width: d_max / sqrt(2 m).

    d_max is the largest pairwise Euclidean distance among the first *m*
    rows of the centre matrix *c*.
    """
    pairwise = [np.sqrt(np.sum(np.square(c[i] - c[j])))
                for i in range(m)
                for j in range(i, m)]
    return max(pairwise) / np.sqrt(2 * m)
def getO(data, c, t):
    """Hidden-layer output matrix: o[i, j] = Gaussian(data[i], c[j], t)."""
    m = c.shape[0]
    n, d = data.shape
    rows = [[Gaussian(sample, center, t) for center in c] for sample in data]
    o = np.array(rows).reshape(n, m)
    return o
def EstimateOW(data, c, t, label):
    """Solve the linear output weights by least squares.

    Returns (o, w) where o is the hidden-layer matrix and
    w = (o^T o)^+ o^T label (Moore-Penrose pseudo-inverse).
    """
    n, d = data.shape   # kept to validate that data is 2-D
    m = c.shape[0]
    o = getO(data, c, t)
    gram = np.dot(o.T, o)
    projected = np.dot(np.linalg.pinv(gram), o.T)
    w = np.dot(projected, np.array(label))
    return o, w
def LinearRBF(data, label, pn, pretrained=False):
    """Build the RBF model: pick centres, estimate the width, solve weights."""
    centers = EstimateC(data, label, pn=pn, pretrained=pretrained)
    width = EstimateT(centers, centers.shape[0])
    _, weights = EstimateOW(data, centers, width, label)
    return centers, weights, width
def Dataloader():
    """Load the assignment's MATLAB arrays from the working directory.

    Returns (train_data, train_label, test_data).
    """
    def _load(filename, key):
        return scipy.io.loadmat(filename)[key]

    return (
        _load('data_train.mat', 'data_train'),
        _load('label_train.mat', 'label_train'),
        _load('data_test.mat', 'data_test'),
    )
def Train(data_train, label_train, pn=4, pretrained=False):
    """Fit the RBF network and print its training accuracy.

    Labels and network outputs are binarised with heaviside (threshold 0,
    ties -> 0.5) before comparison, so labels are expected to be +/-1.

    Args:
        data_train: (n, d) training samples.
        label_train: (n, 1) training targets.
        pn: maximum number of centre vectors to select.
        pretrained: if True, reload previously saved centres instead of searching.

    Returns:
        (c, w, t): centre vectors, output weights and Gaussian width.
    """
    c, w, t = LinearRBF(data_train, label_train, pn=pn , pretrained=pretrained)
    m, d = c.shape
    o = getO(data_train, c, t)
    f = np.dot(o, w)
    # Binarise targets and predictions: negative -> 0, positive -> 1.
    label_train = np.heaviside(label_train, 0.5)
    f = np.heaviside(f, 0.5)
    err = 0
    n, _ = label_train.shape
    for i in range(0, n):
        if label_train[i] != f[i]:
            err += 1
    print('Train accuracy is {:.2f}%'.format(100 * (1 - err/n)))
    return c, w, t
def Evaluate(data_test, label_test, c, w, t, mode='t'):
    """Run the trained RBF network on *data_test*.

    Args:
        data_test: (n, d) samples to classify.
        label_test: (n, 1) ground-truth labels, required only when mode == 't';
            may be None in 'e' mode.
        c, w, t: centres, weights and width returned by Train/LinearRBF.
        mode: 't' — test mode: print truth vs. prediction and return accuracy;
              'e' — evaluation mode: print predictions only, return None.

    Returns:
        Accuracy in [0, 1] for mode 't'; None for mode 'e' (and for any
        other mode value, which falls through without printing).
    """
    o = getO(data_test, c, t)
    f = np.dot(o, w)
    # Binarise predictions: negative -> 0, positive -> 1 (ties -> 0.5).
    f = np.heaviside(f, 0.5)
    err = 0
    if mode == 't':
        label_test = np.heaviside(label_test, 0.5)
        print('Truth is {}'.format(label_test.reshape(1, -1)))
        print('Result is {}'.format(f.reshape(1,-1)))
        n, _ = label_test.shape
        for i in range(0, n):
            if label_test[i] != f[i]:
                err += 1
        print('Test accuracy is {:.2f}%'.format(100 * (1 - err/n)))
        return 1 - err/n
    if mode == 'e':
        print('Result is {}'.format(f.reshape(1,-1)))
        return
if __name__ == "__main__":
    '''
    The parameters flow through the whole pipeline:
    pn - requested number of centre vectors (CVs)
    m  - number of CVs actually used during selection
    c  - the centre vectors
    w  - the output weights
    t  - the Gaussian width parameter
    '''
    print('Loading data')
    train_data, train_label, test_data = Dataloader()
    # BUG FIX: train_label is a numpy array, which has no list.count() method —
    # the original `train_label.count(1)` raised AttributeError on startup.
    # Count the +1 labels with a vectorised comparison instead.
    print('1:{}'.format(int(np.sum(train_label == 1))))

    # Use these settings for hyper-parameter tuning (cross-validated search).
    getCV = False
    pn = 2
    # # Use these settings to produce final results (tune first!).
    # getCV = True
    # pn = 15
    kf = KFold(5, shuffle=True, random_state=42)
    rr = 1
    if getCV:
        # Grid-search pn with 5-fold cross validation.
        best_pn = 0
        best_score = 0
        for pn in range(2, 20):
            scores = []
            rr = 1
            for train_index, test_index in kf.split(train_data):
                print('========================== The {}th experiment with pn={} =========================='.format(rr, pn))
                rr += 1
                data_train, label_train = train_data[train_index], train_label[train_index]
                data_test, label_test = train_data[test_index], train_label[test_index]
                print('Start Training...')
                c, w, t = Train(data_train, label_train, pn, pretrained=False)
                print('Start Evaluating..')
                score = Evaluate(data_test, label_test, c, w, t, mode='t')
                scores.append(score)
            mean_score = np.mean(np.array(scores))
            print('The mean score with pn={} is {}\n'.format(pn, mean_score))
            if mean_score > best_score:
                best_pn = pn
                best_score = mean_score
        print('The best pn is {}, with the best score: {}'.format(best_pn, best_score))
    else:
        # Final run: reload the saved centres and label the unlabeled test set.
        c, w, t = Train(train_data, train_label, pn, pretrained=True)
        print('pn is: {}; t is: {:.4f}'.format(pn, t))
        Evaluate(test_data, None, c, w, t, mode='e')
"gentlelu@outlook.com"
] | gentlelu@outlook.com |
91489aef1cfcb6675882a5ed78249f485727af5a | 975b2d421d3661e6770b601929d5f11d981d8985 | /msgraph/generated/models/access_package_request_state.py | 8d2207eae29267f561c8c719c8dc968d0f903cb0 | [
"MIT"
] | permissive | microsoftgraph/msgraph-sdk-python | a7c551b85daadeebf76ec4ae12668664ea639b42 | 27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949 | refs/heads/main | 2023-09-03T21:45:27.989672 | 2023-08-31T06:22:18 | 2023-08-31T06:22:18 | 534,665,999 | 135 | 18 | MIT | 2023-09-14T11:04:11 | 2022-09-09T14:00:17 | Python | UTF-8 | Python | false | false | 410 | py | from enum import Enum
class AccessPackageRequestState(str, Enum):
    """Lifecycle states of a Microsoft Graph access package assignment request.

    NOTE: each member is written with a trailing comma (e.g. "submitted",),
    which makes the assigned value a one-element tuple; Enum unpacks such a
    tuple into the member constructor, so each member's .value is still the
    plain string.
    """
    Submitted = "submitted",
    PendingApproval = "pendingApproval",
    Delivering = "delivering",
    Delivered = "delivered",
    DeliveryFailed = "deliveryFailed",
    Denied = "denied",
    Scheduled = "scheduled",
    Canceled = "canceled",
    PartiallyDelivered = "partiallyDelivered",
    UnknownFutureValue = "unknownFutureValue",
| [
"GraphTooling@service.microsoft.com"
] | GraphTooling@service.microsoft.com |
071f4455fe0d1f60f156ab41e827e9bf8ac15021 | 3fe0db879006076936376ef1b8b573f300e4a9f8 | /client/gui/resources_rc.py | e3f8f80c0ebffd774d80e97f838f0ab2558ad24c | [
"MIT"
] | permissive | Laikos38/ADS-TP-Seguridad | a3e6eef37a208341481b8781528acf77acb4bec8 | d6cf0403a6c2e4eaed3adc344c28b3e82354d001 | refs/heads/master | 2023-05-27T11:27:10.789356 | 2021-06-04T21:42:03 | 2021-06-04T21:42:03 | 362,307,644 | 0 | 0 | MIT | 2021-06-04T19:38:57 | 2021-04-28T02:00:26 | Python | UTF-8 | Python | false | false | 24,439 | py | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.15.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x12\xc6\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x01\x2c\x00\x00\x00\x32\x08\x06\x00\x00\x00\xe6\xd6\xe6\x2a\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x2e\x23\x00\x00\x2e\x23\
\x01\x78\xa5\x3f\x76\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x12\x53\x49\x44\
\x41\x54\x78\x9c\xed\x9d\x7b\x74\xdc\x65\x99\xc7\x3f\xcf\x3b\x33\
\xb9\x94\xa6\xdc\x5a\x4a\xa9\xbb\x5b\xd0\x52\x2d\xa2\xd4\x69\x93\
\x19\x14\x88\x36\xbd\x24\x99\x49\x10\x28\x88\x2e\xc7\xf5\xac\xc8\
\x22\x78\x41\x8e\xca\xea\xf1\x72\x16\x5d\x57\x59\x77\x61\x05\xef\
\x17\xf4\x80\x16\x2a\xd0\x26\x93\xa4\x37\x30\x2a\x65\x66\x92\x86\
\x0a\x85\x4a\x01\xb5\x8a\x48\x29\x68\x4d\xaf\xb9\xcc\xbc\xcf\xfe\
\x91\xdb\x64\x32\x93\xcc\x64\x66\x32\x49\xf9\x7d\xce\xc9\x39\xfd\
\xfd\xde\xdb\x77\xa6\xf3\x7b\x7e\xef\xe5\x79\x9f\x17\x1c\x1c\x1c\
\x1c\x1c\x1c\x1c\x1c\x1c\xf2\x8b\x0c\xfd\xc3\xbf\x4e\xcb\xb5\x87\
\x1f\x01\x6f\x2c\xa2\x9e\x7c\xb0\x17\xe5\xc1\x48\x48\xee\x2b\xb6\
\x10\x07\x07\x87\xfc\x22\x89\x17\x2b\xea\xf4\x4c\xb7\x8b\x6d\x08\
\x6f\x2e\x96\xa0\xc9\xa2\xf0\xaa\x28\x5f\x8e\xc3\x26\x03\x5f\x89\
\x34\xcb\x7b\x8a\xad\xc9\xc1\xc1\x21\xbf\x98\xc4\x8b\xce\x56\xd9\
\x1f\x8b\xb3\x4a\xe1\xc9\x62\x09\xca\x1a\xa5\x47\x95\x6f\xf4\xf6\
\xb3\x34\xdc\x2c\xb7\xbb\x84\x2f\x02\x73\x8a\x2d\xcb\xc1\xc1\x21\
\xff\x98\xe4\x1b\x9d\xad\xb2\x5f\x61\xb5\x2a\x4f\x14\x43\x50\xa6\
\xa8\xa2\x40\x8b\x35\x78\x23\xcd\xf2\xd1\x5d\x6d\xf2\x4a\x55\x50\
\x7d\xc0\xfb\x8a\xad\xcd\xc1\xc1\xa1\x30\xb8\x53\xdd\x8c\x36\xc9\
\xcb\xcb\x6a\x75\x55\x99\x87\xad\xc0\x05\x69\xca\xc6\x14\x6c\xe1\
\xa4\x8d\xcb\x33\x62\xf9\x54\xb8\x45\xb6\x8c\xdc\x52\x97\x08\xb7\
\x03\xae\x22\x69\x72\x70\x70\x28\x30\x29\x0d\x16\xc0\xae\x36\x79\
\x65\x59\xad\xae\x2e\x75\xb3\x59\x84\xb7\x25\xa7\xab\xf2\x04\x2e\
\xae\xb0\x87\x39\x5c\x58\x89\x63\xe9\x88\xd3\x4d\xbb\xc4\x12\xef\
\xf9\x82\x5c\x83\x52\x39\x7a\x56\xce\xc1\xc1\xe1\x44\x62\xc2\xc7\
\xdb\x1b\xd0\xb9\x25\x86\x36\x60\x79\x72\x9a\x2a\xbf\xd4\x18\x0d\
\xd1\x36\x39\x54\x10\x75\x19\xe2\xad\xd1\x93\x3d\xe5\x3c\x2d\xc2\
\xc2\x41\x5d\x6d\x91\x66\xa9\x2b\xa6\x26\x07\x07\x87\xfc\x33\x66\
\x0e\x2b\x99\xae\x90\xbc\x1a\x3f\xca\x5a\x94\xce\xe4\x34\x11\x2e\
\x31\x1e\x42\xde\x1a\x3d\xb9\x30\xf2\x32\xc3\x53\xc6\x67\x86\x8c\
\x95\x83\x83\xc3\x89\xcb\x84\x06\x0b\xa0\xe3\x61\xf9\xeb\x11\xcb\
\x1a\x55\x3a\x52\x24\x5f\x54\x52\x4e\xdb\x05\x97\xea\x29\x79\xd6\
\x96\x11\x2b\x56\xeb\x3f\x88\xe1\x23\xc5\x68\xdb\xc1\xc1\x61\x6a\
\xc9\xc8\x60\x01\xec\x6e\x91\x83\x6e\xcb\x5a\x85\xc8\x98\x44\xc1\
\x5f\x66\x69\x3b\xbf\x5e\x4f\xcd\xab\xba\x0c\x10\x37\xf3\x81\xf2\
\xa9\x6e\xd7\xc1\xc1\x61\xea\xc9\xd8\x60\x01\x3c\xda\x22\x07\x7b\
\x0c\xb5\xc0\x63\xc9\x69\x02\xbe\xd9\x86\x2d\x95\xef\xd6\xd3\xf3\
\xa6\xce\xc1\xc1\xc1\x21\x81\xb4\xab\x84\xe9\xf8\xcd\x46\xf9\xbb\
\xb7\x46\xeb\x3c\xe5\x84\x44\x78\xc7\xa8\x44\x61\x85\x89\xb1\x65\
\x59\xad\xd6\xee\x6a\x93\x57\xf2\xa6\x72\x8a\x58\x51\xa7\x17\x18\
\x17\x17\x0e\x5d\xab\xf2\x97\x8e\x90\x6c\xcc\xb4\xfc\xa2\x6a\x2d\
\x5b\x50\xc1\xfb\x6d\x82\x6b\x45\xec\x38\xf7\x76\x6d\x97\xee\xc1\
\x1a\xc5\x1f\xe4\x43\x36\x17\xd7\x0b\xe1\xb7\xd1\x26\xf9\x45\xba\
\xe4\xaa\x7a\x7d\x27\x86\x37\x65\x55\xa7\x25\x1a\x6d\x91\xae\x54\
\x49\x95\x01\xbd\xd4\x58\x5e\x8a\xb4\x4a\x34\x4b\xa5\xf8\x03\x5a\
\x89\xe2\x0e\xb7\xc8\xa8\x17\x9c\xaf\x5e\x6b\xd4\x70\x6e\xb6\xf5\
\x8d\x22\x85\xe6\xaa\x06\x5d\x85\xb2\x38\x9b\x6a\x54\xd9\xd1\x11\
\x92\x94\x3e\x87\x55\x41\xbd\x1c\x98\x3f\x74\x2d\x82\xaa\x12\x17\
\xa1\xd7\x5a\xba\xd5\xb2\xcf\x7d\x90\xbd\xe1\xb0\x1c\x9f\xa8\x9d\
\xaa\x46\x5d\x8a\xa5\x3a\xf1\x9e\x81\xb8\x2a\x71\x2b\x1c\x35\x86\
\x57\x4c\x3f\xcf\x3d\xd6\xca\x0b\x20\xe3\xba\x0b\x55\x06\xf5\x6c\
\x81\xda\x54\x75\xa9\xe1\x88\x2a\x07\xfa\x85\x67\x76\x35\xf1\x12\
\x88\x8e\xaf\x4c\xc5\x17\xe4\xfd\x65\x87\x59\xdf\xde\x2e\x3d\xe3\
\xe5\xf4\xaf\xd3\x72\x7a\x09\xa0\xd4\x28\x2c\x11\xe1\x64\x55\xac\
\xc0\x41\x85\x3f\x20\x3c\x1e\x57\x7e\xd5\xd9\xcc\x6f\x53\x7d\x86\
\xca\x3a\x3d\x57\x5c\xd4\x8c\xaf\x67\x34\x22\xfc\x39\xd2\x24\x4d\
\x43\xd7\x59\x1b\x2c\x80\xae\xed\xd2\x5d\x55\xab\xf5\xe2\x21\x04\
\x5c\x94\xd4\x80\xb7\xcc\xc3\x96\x65\xb5\xba\x66\xc6\x19\x2d\x17\
\x1e\x81\x3b\x45\x06\x57\x4f\x85\xbf\xbf\xbd\x41\x1f\xde\xd1\x24\
\x19\xb9\x6e\x9c\x39\x9b\x55\x0a\xdf\x32\x43\xe5\x95\x3f\x56\xc4\
\xf8\x6e\x42\x16\x03\xdc\x6e\x84\xb2\x1c\x54\xfe\x10\x48\x6b\xb0\
\x8c\xf0\xcf\x2a\x9c\x8d\xf2\xdb\x4c\x2b\x14\x17\xbf\x1f\xa7\xbe\
\x8f\x8b\x9b\x73\xab\x1a\x74\x59\xb4\x49\x5e\xce\x46\xa8\x1a\x82\
\x58\xca\x49\xee\x91\x1b\x5e\x27\x70\x5e\x5a\x3d\x70\x31\xc2\x41\
\x55\x76\x67\xa3\xd9\x28\x1f\x40\x58\xa8\xca\x53\x99\x6a\x34\xb0\
\x77\x1c\x1d\x9f\x02\x5e\x00\x06\x3e\xb7\x82\x01\x97\x2a\xa5\x2e\
\xc3\xa9\x18\xce\x61\x1e\x67\xf8\x1b\xb4\x45\xfb\xb8\x35\xb2\x59\
\xf6\xa5\x6d\xc7\xf2\x0e\x85\x1b\x81\x87\x86\xee\x29\xb8\x10\x4a\
\x0c\xcc\x41\x59\x68\xdd\x2c\xf1\x05\xe9\x51\xd5\xbb\xa2\x47\xf8\
\x4e\xb2\xeb\x4e\x82\xae\xb7\x18\xe1\xa3\xaa\x3c\x3c\x5c\x97\x60\
\x10\x5c\x02\x15\x46\x58\xe8\x82\x25\xbe\x20\x2f\xa0\xfa\xbf\x91\
\x10\x3f\x4d\x6b\xb8\xaa\x71\x01\x5f\xe9\x2e\xa7\x05\x48\x6b\xb0\
\xfc\x0d\xba\x56\x7b\xf8\x26\xf0\x0c\xc2\x43\xc0\x4f\xe2\x96\x23\
\xc6\x30\x5b\x2d\x8b\xc4\x70\x1e\x70\x85\x5b\xf8\x9a\x3f\xc0\x8a\
\x70\x88\x67\xc6\xe8\x76\x53\x69\xe0\x93\xaa\xb4\xa6\x6b\x67\x0c\
\x76\xb4\x8d\x9a\x94\xc1\x02\x88\xb6\xc9\xa1\xaa\x5a\x0d\x18\x37\
\x4d\x08\x97\x24\x25\x2f\x2b\x75\xb3\xad\xaa\x41\xd7\x64\xfb\x23\
\x2f\x26\x9d\xcd\x3c\xee\x0b\xf2\x0c\x0c\xf7\x50\x4e\x89\x2b\xb5\
\xc0\xfd\x99\x94\x17\xe1\x6a\x24\xc1\x55\x44\xb8\xbf\x3d\xcd\x8f\
\xae\x90\x88\x72\x6f\xb8\x59\x7e\x90\xb7\x0a\x95\x2e\x81\xbb\x40\
\xd7\x4d\xfc\xc6\x4e\xd0\x61\xd1\x54\x99\x23\xcd\x72\x37\x70\x77\
\xba\x72\xbe\xa0\xde\x25\xc2\x9e\x48\x93\xdc\x95\xbd\x54\xd6\x47\
\x9a\xe5\x5b\xd9\x96\x4b\x4b\x9c\xdb\xc6\xeb\x5d\xfa\x03\xba\x10\
\xc3\xe7\x29\x61\xa7\xaf\x51\xeb\x23\x9b\xc6\xed\x89\xee\x8e\x34\
\xcb\x67\xd3\x27\xab\x59\x11\xc4\xeb\x36\xfc\xb7\xbf\x82\x9a\x3e\
\xaf\x5e\xd9\xd5\x25\xfd\xa9\xb3\xd2\x11\x69\x96\x1b\xd2\xd5\xf4\
\x86\x5a\x2d\x3d\xdd\xf0\x6e\xe3\xe2\x0e\x7f\x03\xef\x0a\x37\xe9\
\xb5\x13\xf5\xdc\xd2\x51\x15\xd0\x3a\x94\xbb\xc5\x72\xcd\x68\x67\
\xed\x61\x76\x0c\xe7\xad\xd5\x39\xd1\xb6\x71\x7c\x33\x95\xa7\xc6\
\xd3\x3d\x11\x59\xcd\x61\x25\x13\x6d\x93\x43\x7f\x13\x82\xc0\x23\
\xc9\x69\x22\xbc\xd5\xc0\x36\xef\x1a\x5d\x90\x4b\x1b\x53\x8b\xc4\
\x05\xd6\x8f\xba\x23\x64\xb4\x89\xda\x5b\xa3\x27\xab\x30\xec\xfb\
\x35\xb8\x0b\xe0\x67\x79\x16\x58\x14\xe2\x96\x3b\x05\xe6\xfa\x03\
\x5c\x93\x55\x41\x21\x63\xe3\x36\x53\x09\x87\xe4\xc5\x70\x93\x5c\
\x07\xfc\x18\xe5\x67\xfe\x75\x9a\xc3\x02\x90\xd8\xce\x66\xe9\xec\
\x2e\x65\x15\xc2\x69\x9e\xb3\xf8\xc4\x64\x6b\x7a\xbe\x4d\x7a\xa3\
\x2d\xb2\x1e\xa1\x46\x95\xab\xfc\x41\x3e\x30\x99\x7a\x16\x55\x6b\
\x99\x31\xdc\x65\xe1\xfa\x34\xc6\x6a\x14\x03\x3e\x99\x99\xbf\xd4\
\xb2\x25\x27\x83\x05\xb0\xb7\x49\x0e\x77\x1f\xa2\x91\x84\xee\x69\
\x02\xe7\x97\x94\xb2\xed\xed\x0d\x7a\x56\xae\xed\x4c\x15\xfd\xc2\
\x7d\xc0\x70\xaf\x48\x95\xd5\xfe\x35\x7a\xda\x44\xe5\x5c\xe5\xd4\
\x09\x0c\xfb\xa3\x09\x3c\x1d\x6e\x4a\xde\x8f\x29\xf1\x58\x2f\xe7\
\xf6\x59\xfe\x29\xf9\x4f\xe1\xc1\xc4\x9c\x1a\xa7\x31\x55\x3e\x7a\
\xf9\x64\xce\x1f\x32\x4b\xc4\x85\x8d\xc7\xf9\x10\xc2\x7f\x78\x03\
\xfa\x8f\x99\x96\x2b\xd6\xbe\xad\x62\xe0\x82\x2f\x0a\xcc\xb5\xc7\
\xa9\xcf\xb5\xae\x3d\x1b\xa4\x4f\xe1\x33\x08\xd7\x51\xad\x93\x1e\
\x05\x01\x84\x37\xc9\x6e\x11\xee\x56\xf8\x30\x68\xd6\xfb\x40\xe6\
\xcf\xe1\x22\x55\x8e\x47\x9b\x47\x86\xb2\xc5\x24\xa7\x2f\x63\x88\
\x3d\xed\x72\xe4\x2d\xab\xb5\x71\x56\x19\x1b\x85\x31\x93\x6a\xe7\
\x59\xe5\x61\x7f\x40\x6b\xc2\x21\x79\x31\x1f\xed\x15\x92\x9d\x4d\
\xb2\xd7\x17\xd4\xc7\x45\xa8\x04\x10\xe1\x24\x2d\x25\x08\xfc\x78\
\xbc\x72\x06\xae\x4e\xbc\x56\xe5\xbe\x54\x5d\xf0\xce\xad\xf2\x42\
\xaa\xf2\xbe\xa0\x1e\x4b\xdc\x77\x20\xc2\xfe\xae\x90\xfc\x69\x12\
\x1f\x21\xef\x68\x1c\xd3\xd1\x2a\xcf\xfa\x82\xfa\xf5\x12\xe1\x7b\
\xa0\x6b\x33\x79\x8b\x8a\xa0\x27\x7e\x1f\x6b\x80\x1d\x4d\x72\xd8\
\x1f\xd4\xb0\x18\x56\x00\x3f\xcf\xb5\xbe\xfd\x87\xe8\x5a\x50\xc1\
\x29\xfe\xd9\xcc\x0f\x43\x4e\xcf\x8d\xc2\x56\x84\x6b\xbd\x01\xca\
\xbb\x42\x1c\xcb\xa6\xac\x28\x6f\x44\xe8\x9a\xec\x70\x32\xdf\xe4\
\xdc\xc3\x1a\xe2\xc9\xad\x72\xb4\xdf\xd2\x08\x8c\xed\x36\x0a\x6f\
\x54\xc3\x23\xd9\xbc\x9d\x8b\x8a\x72\x6f\xe2\xa5\x30\xfe\xb0\xd0\
\x1b\xd0\xb9\x02\xab\x12\xca\xc7\xfa\x63\xa3\x87\x96\x33\x19\x97\
\x6b\xc0\x94\x46\x9a\xb9\x0b\x70\xf9\x1b\xc8\x74\x0e\xe2\x35\x62\
\xae\x86\x79\x55\x94\xbc\xec\xfa\xd8\xd7\x4e\x9f\xc2\x31\x1b\xcf\
\xbd\x3e\x11\x5e\x15\x28\x71\x79\xb2\xf7\x57\x14\xc5\x8a\xe6\xcf\
\x4e\xe4\x4a\x5e\x85\x74\x85\xe4\x58\x9f\xe5\x32\x55\xda\x92\xd3\
\x04\xce\xf5\x08\xdb\x7d\x6b\x75\x51\x3e\xdb\x2c\x04\xfd\xfd\x6c\
\x50\x18\x9e\xec\x54\x58\xb9\xac\x56\xe7\xa5\xcb\xef\x81\x77\x93\
\xb8\xf2\x27\x74\x74\xb5\xc9\xef\x0a\x2c\x73\xca\xb0\x76\xe8\x77\
\x22\x36\x0e\xd7\x02\xb7\x2c\x6f\xd0\x25\x13\x17\x2c\xac\xae\x69\
\xc8\x5c\x2b\xfc\x2d\x1f\x15\x2d\xaa\xa6\x44\x60\x16\x90\xf3\x3e\
\x5d\x55\xe6\x02\xc7\x5d\xfb\xb3\xeb\x5d\x01\x58\xc3\xb3\x2a\x5c\
\x00\x3a\x2d\x8c\x56\xde\x45\x74\x85\xe4\x98\xbc\xca\xe5\xa9\x96\
\x2e\x45\x58\x2c\x25\x6c\xf7\x5d\x3a\xbd\x8d\x56\xd7\x16\x79\x49\
\x74\xc4\x75\x40\xc0\x53\xea\xe2\xf2\x74\xf9\xc5\x8c\xee\x81\x59\
\xcb\x4f\x0b\xa9\x6f\xaa\x31\x32\xf2\x3b\xe9\x68\x96\x3f\xa0\xdc\
\xea\x86\x1f\x78\xbd\xea\x19\xaf\x9c\xbc\x06\x26\xdd\x87\x58\x54\
\xad\x65\x08\x95\x9a\xb0\x62\x96\x0b\x67\xcc\x66\x09\x42\x4f\xec\
\x00\xf9\x58\x65\xbf\x44\xe1\xf1\x4c\xfc\xc5\x92\x79\xf9\x10\xbf\
\x06\xca\x7d\x0d\x59\x2e\xb8\x14\x88\x82\x58\xcd\x70\x58\x8e\xef\
\x3f\xcc\xe5\xaa\x34\xa7\x48\x7e\xbd\x58\x7e\xe1\xad\xd5\xd7\x17\
\xa2\xed\xbc\x91\x3c\x2c\x4c\xb3\x5a\xe8\x0f\xe8\x42\x94\x8b\x47\
\x8a\xd1\xd7\x17\xcf\xcc\x0d\x62\xa6\xa0\x3a\x3a\xaa\x47\xb8\x99\
\xef\xa1\x74\x7b\xce\x9a\x60\x01\xe0\x35\x64\xb0\xe6\xcf\xe1\x06\
\x94\x17\xe3\x2f\xb2\x35\x1f\xf5\xb9\x84\x9b\x55\xd9\x90\xd6\xad\
\x21\x43\x06\x57\xe9\xdf\x2f\x70\xc7\x64\xca\xef\x6b\x97\x1e\xab\
\x7c\x10\xb8\xc3\x17\xd4\x9b\x26\x7a\x49\x15\x9a\xbc\x4c\xba\xa7\
\x62\x5f\xbb\xf4\x50\xad\x57\x2e\xa8\x60\x3d\x42\x63\x52\xf2\xa2\
\x12\x0f\xdb\xab\x02\xba\x3a\x1a\x92\xe7\x0a\xa5\x21\x17\xfa\x7a\
\xd8\x54\x32\x8b\x23\xc0\x6c\x00\x15\x2e\xf4\x06\xf4\x1f\x93\x27\
\xc2\xad\xe1\x0a\x93\xf8\x3d\x2a\xdb\x8b\xed\x30\x6b\xa1\xd4\x1b\
\xd0\x59\x99\xe4\xfd\xeb\x11\xec\xbe\x09\x3c\x9c\xc7\x22\x36\x6e\
\xf4\xdf\xdc\x96\xf0\x8a\x3a\x6d\xed\x6c\x95\xdf\xa4\xd1\x31\xe5\
\xe1\xc9\x44\x0b\xfd\xd9\x93\x51\xa9\x6a\xe0\x2a\x03\x37\xc5\x5c\
\x04\x72\x35\x30\x8b\xaa\xb5\xec\xcc\x0a\x3e\x07\xbc\xbd\x37\x86\
\x2f\x97\xba\xaa\x6a\xf5\x75\xe2\xe1\xe7\xc0\x86\x70\xd3\xe4\x17\
\x02\x3a\x9a\xe5\x91\x15\xf5\x5a\xe7\x76\xf1\x5d\xcf\x59\xfc\x4b\
\xd5\x42\xbd\xed\xe5\x43\xfc\x7c\x32\xdf\x9d\x82\x2b\xd3\xff\x1f\
\x18\x18\xb1\x25\x5e\x17\xcc\x60\xc1\x80\xd1\x72\xd7\xea\x55\xf3\
\xdc\xac\x47\xb8\x34\x29\x79\x91\x31\x6c\xaf\xac\xd3\x55\x1d\xad\
\xf2\x6c\x21\x75\x4c\x86\xae\xed\xd2\xed\x0b\x6a\xab\x08\x57\xc2\
\xc0\xb0\xb0\x44\xb8\x0c\xb8\x3d\x31\x9f\x51\xae\x1a\xf5\x54\x6a\
\xf1\x87\x83\x46\xf8\x1f\x0f\x7c\x2d\x93\xbc\x0b\xe6\x10\xdd\x07\
\x2b\xc7\xcd\xa4\x63\xed\x4e\xe7\x26\x79\xa1\xaa\x41\x6f\x71\xbb\
\xf9\xe1\x1b\x6a\xd5\xff\x7c\x9b\xf4\x26\xe7\x91\x22\x4c\xba\xab\
\xf0\x55\x0f\x7c\x29\x93\xbc\x67\x56\xb0\x6b\x5f\xd2\x4e\x8d\x64\
\xc4\xf0\xfa\xe5\xf5\x7a\x74\xe8\xda\xe5\xc2\x18\x4b\x59\x5c\x99\
\x67\x0c\x6f\x16\x68\x44\x39\xd6\x2f\xac\xdc\xb9\x51\xd2\x7a\xcd\
\x03\x88\xe0\x19\x0e\xc5\x74\x90\x63\x15\x15\xe8\x61\x37\x27\x49\
\x29\xa7\x7b\x60\xb1\x1a\x2e\x46\xb8\x5a\xe0\xcf\xda\xc7\xca\x5d\
\x9b\xd3\xbf\xf8\x14\x4e\x59\x5e\xaf\xc3\x87\xc5\x94\x00\x56\x28\
\x11\xa1\xc2\x2a\xe7\x88\xe1\x12\x51\xde\xa9\x96\xaf\x47\x42\xdc\
\x99\xab\x6f\x54\x67\x8b\x3c\xb6\x74\xa9\xbe\xad\xe2\x0d\x5c\x26\
\xca\x75\x0b\xe6\xf0\xd5\x05\x41\xfd\x49\x5f\x8c\xef\x67\x33\x5f\
\x2b\xc2\x2a\x0f\x1c\xc8\x34\xff\xb2\x5a\x3d\x3b\xb1\x03\x50\x50\
\x83\x35\xe3\x11\xee\x85\x01\x83\x05\xa0\x03\xc3\xc2\x61\x83\x35\
\x38\xac\x5d\x31\x9c\x0e\xdd\x1a\x4f\x39\x0c\x9e\x5a\x94\x1b\x22\
\xf9\xf4\x74\x4f\x43\xb4\x49\xee\xf1\x37\x68\xe3\x5c\x37\x9f\x7f\
\x1e\xc6\x7a\x70\x17\xc7\xa9\xe1\xe3\x79\xf5\x74\x17\xfe\xdd\x63\
\x38\x98\x78\x4b\x05\xeb\x12\x16\xa9\xf0\x1c\x70\x43\xb8\x49\x76\
\x65\x58\x5b\xc0\x53\x3e\xe8\xa2\x50\x0e\xbd\x82\x96\x0c\x84\x1a\
\x3f\x08\xbc\x20\xca\xe3\x28\x1f\x08\x37\xcb\x2f\x33\xd0\x75\xa1\
\xc7\xc5\x37\x87\x35\x31\xd0\x9b\x55\x30\x46\x58\x6e\x2d\xd7\x1f\
\x3a\xc2\x8d\x7b\xda\xe5\x48\x86\xda\x26\x64\xcf\x1e\xe9\x63\x0f\
\xeb\x81\xf5\x55\x01\x5d\x8c\xe1\xda\x12\x0f\x8f\xf9\x82\x1a\x8d\
\xc3\xad\x9d\xcd\x32\x26\x66\xde\x18\x94\xcd\x91\x66\x09\x4e\x56\
\x43\x41\x0d\xd6\xa2\x6a\x2d\x1b\xec\x5d\x25\x0f\x09\x01\xf6\xc5\
\x63\xac\x99\x8e\xbd\xab\x21\xa4\x94\x6d\xf4\x72\x00\x38\x03\x40\
\x14\x6f\x55\x40\x17\x0f\x0d\x63\x3d\x6e\xae\x40\x46\x0d\x07\x5b\
\x8a\x1d\x7d\x75\xaa\x91\x1e\x6e\xa0\x94\xce\xca\x80\x86\x3a\x42\
\x12\x2e\xb6\x9e\x7c\x63\x95\x9b\xa3\xcd\x32\x66\x5e\xaa\xb2\x4e\
\x97\xbb\x5c\x6c\xec\x53\x52\xfa\xd5\xa5\x42\x95\x7b\xfa\x95\x1b\
\x87\xae\xbb\x63\xc4\x9f\x6f\xa3\x6f\x92\xbd\x9f\x1d\xe1\x26\x49\
\xf1\x5c\xa9\xf1\x05\x79\x04\x98\x9f\x4f\x63\x95\xcc\xe0\x33\xf0\
\xa9\xaa\x5a\xfd\x92\x71\x71\x9d\x5b\x68\xf1\x05\xf5\x9e\xfe\xbf\
\xf0\xe9\x5c\x87\xc5\xe3\x51\xb0\xa5\xca\xc1\xb1\xf8\xfd\xe9\x8c\
\x55\x5f\x3f\x35\xd3\xd9\x58\x01\x84\x37\xc8\x71\x60\x24\x5a\x83\
\xe0\x36\x86\x2b\x06\x2e\x54\x80\x75\x89\xf9\xad\x14\x7f\x38\x58\
\x10\xc6\x89\x2d\xf1\xd8\x56\x39\x80\xe1\xe3\x2e\xe1\x7b\x29\xe6\
\x26\x4e\xd8\x49\xf7\x8e\x56\xd9\xa9\xf0\xa0\x47\xf8\xaf\x8c\x0b\
\x09\xb1\xae\x90\x1c\x1b\xfa\x1b\x18\x46\xe7\x7b\x1b\x8b\xd8\x98\
\xe5\x46\x31\xdc\x3c\x15\x0b\x5b\xd1\x36\x39\x14\x0e\xc9\x6d\x7d\
\xca\x72\x11\x2e\x2a\x59\xc8\x9d\x85\x6c\xaf\x20\x06\xcb\xef\xd7\
\xf2\x33\x2b\x78\x40\x06\xf6\x19\x26\xf3\x3b\x35\xbc\x73\xc6\xf8\
\x29\x29\x3f\xd5\xd1\x43\x9b\x75\xa0\x52\x59\xc7\x62\x91\x91\x13\
\x85\x54\xd9\x7f\xe0\x50\xca\xed\x49\x27\x3c\xe1\x4d\xf2\x90\x2a\
\x3b\x4b\x0c\xff\x99\x94\x14\x2f\x8a\xa0\x29\xa2\xff\x38\x9f\x13\
\x58\xe5\x0f\x6a\xf2\xe6\xff\xa2\xb2\xb3\x45\x9e\x42\xf9\x8e\xc7\
\xcd\xff\x4d\x55\x9b\x5d\x21\xf9\x93\x85\x00\x70\xe9\xe0\x71\x7b\
\x05\x21\xef\x06\xcb\x1b\xd0\x59\x3a\x97\x07\x24\x61\x23\xf0\x10\
\xaa\x3c\xa7\x7d\xd4\x44\x36\xa6\x0f\xc3\x31\xdd\x08\x1f\x66\x07\
\xc2\x1f\x87\xae\x15\xde\xea\x0d\xb0\x44\x0c\x97\x31\xba\xef\xf1\
\x60\xee\x2b\x4e\x33\x17\xb7\x72\x13\x4a\xd0\x17\xd0\xea\xa1\x7b\
\x72\x82\x1b\xac\xae\xed\xd2\x6d\x2d\x9f\x56\xe1\x1b\x8b\xaa\x35\
\x97\x90\x41\x79\xe7\x58\x2f\x5f\x01\x16\xfb\x1b\x74\xdd\x84\x99\
\xf3\xc4\x60\x64\x96\x0d\x86\x94\xa3\xaa\xbc\x90\x57\x83\xe5\x0d\
\xe8\xac\x12\xc3\x03\x22\xa3\x83\x8b\x01\x28\x3c\xdb\xaf\xd4\x8c\
\x17\x33\x68\x5a\x32\x10\x1e\x66\xd8\xaf\x4a\xc0\x78\x84\xcb\xcc\
\xc0\x8a\x21\x30\x78\xa8\xeb\x89\x3a\x1c\xcc\x90\x47\x5b\xe4\xa0\
\x55\xae\xc7\xf0\x9d\xa5\xd5\x3a\xe0\x0a\x72\x82\x1b\x2c\x80\x68\
\x0b\xf7\x0b\xbc\xb8\x60\x0e\x37\x15\x5b\x4b\x22\x4f\x6e\x95\xa3\
\xc0\xc7\x14\xbe\x7e\x41\xf5\x14\x9e\xb7\xa0\x3c\x0f\x85\x3b\x10\
\x26\x6f\x06\xeb\x2d\xab\xf5\x24\x8f\x61\x13\xb0\x36\x45\xf2\x33\
\x62\x79\xd7\x74\xd9\xcc\x9b\x2d\xd6\x26\x0d\x0b\x85\x0f\xaa\xe0\
\x1d\xb9\xe4\x0f\x91\x26\x4e\xb8\x09\xe7\x21\x92\x1d\x47\xd3\x11\
\x0d\xc9\x56\x94\xed\x73\x2a\x06\x87\x86\x7a\xe2\x1b\x2c\x10\x1b\
\x8f\xf1\x31\x85\x4f\x4c\x37\x67\xe8\x48\xb3\xb4\xa1\x3c\x56\x3e\
\x87\x5b\xa7\xaa\x4d\x15\x4a\x94\xc2\x9d\x55\x9a\x17\x83\xb5\xb4\
\x5a\x67\x9f\x54\xca\xa6\x14\x91\x1a\x00\x9e\xc6\x32\x23\x22\x35\
\xa4\xa3\x23\x24\x4f\x08\x3c\x3d\x74\x2d\x70\xb6\x24\x7c\x77\x0a\
\xeb\xa7\xcb\x6e\xf6\x62\x73\xf0\x6f\xdc\x82\xb0\xd2\x17\xd0\xea\
\x13\x7d\x48\x38\x44\x47\xab\x3c\x8b\xe5\x4e\x8f\x9b\x6f\x4c\x26\
\x84\x4b\x21\x11\xe5\x66\xe0\x4a\x5f\xa3\x56\x4d\x51\x7b\x7e\x94\
\x4c\xdd\x3c\xb2\x26\x67\x83\xb5\xa4\x41\x2b\x4e\x9e\xc3\x26\x24\
\x85\xf3\xa1\xf2\x54\x5f\x2f\xab\x66\xb2\xb1\x1a\x26\xcd\x90\x4f\
\x15\x8d\xf3\xda\x1e\x0e\x26\xb2\x77\x87\x1c\x8e\xc3\xf5\x18\xee\
\x50\xa1\xa4\xd8\x7a\xa6\x8a\xfd\x47\xf8\xaa\x08\xe7\xf8\x02\x23\
\x7e\x7b\xd3\x81\xc1\x67\xef\x56\x51\xee\x2c\xf4\xb6\x9a\xaa\xa0\
\xfa\x10\x2e\xb4\xc7\x0b\x17\x3b\x2b\x27\x83\x55\x55\xab\x73\x4e\
\x1b\xd8\x2f\xf8\xae\xe4\x34\x55\x9e\xb0\x42\x4d\xd7\x16\x79\x29\
\x97\x36\xa6\x0b\xfd\xf1\xd1\x81\xfd\x86\x10\xe1\x37\x9d\xcd\xec\
\x29\x82\xa4\x69\x4b\x67\x93\xfc\x4a\x61\x8b\x30\xe2\x73\x74\xa2\
\xb3\xaf\x5d\x7a\x2c\x7c\x04\xc3\x6d\xc5\x38\xee\x6e\x3c\x4a\x0f\
\xf1\x6d\x85\x58\xc9\x59\x19\x87\x05\x02\xc0\xeb\x55\x8f\x2f\xa0\
\x5f\xc8\x24\x32\x87\x3f\xa0\x2b\x8d\xf0\x80\xb5\xdc\xd2\xf1\xb0\
\xfc\x75\xf2\x6a\xc7\x67\xd2\x8e\xa3\x55\xb5\x3a\xc7\xa4\x38\x84\
\x62\x90\x5d\xbd\x31\x66\xde\x21\x14\xe3\xb0\xb3\x45\x7e\xef\x0b\
\x6a\x64\xcc\x49\x41\x96\x9f\x15\x32\x24\xec\x64\x50\xe1\x7d\xbe\
\xa0\xbe\x2d\xe3\x02\x82\xed\x7f\x91\x4f\xe4\xd3\xe1\xcf\xbc\xc2\
\x17\x74\x1e\x6b\xa7\xda\x13\x4b\xe0\x3d\xbe\xe0\xc8\x96\x95\x0c\
\x0a\xd8\xfd\x87\xf8\x64\x3e\x56\x78\xa3\x4d\xb2\xcd\x1f\xd4\x47\
\x67\xbb\xb8\x95\x69\x64\xac\xdb\xdb\x25\x56\x55\xaf\x37\xe2\x62\
\xcb\x8a\x46\x7d\xa0\x73\x53\xea\x20\x92\xc9\xb8\x17\x72\x9a\x28\
\x17\x7b\xe0\xb3\xfe\x06\x7d\x02\xd8\x66\x2d\x8f\x1b\xf8\x53\xcc\
\xd2\x27\x2e\x4e\x13\xe1\x7c\x81\x46\x85\xf3\xc5\x72\x73\x34\x24\
\x3f\x19\xb7\x52\xe1\xcd\xbe\xa0\x66\x15\xab\x5f\x84\xef\x0f\xed\
\x26\x98\x94\xc1\xf2\xd6\xe8\xc9\xe2\x26\x04\x49\x0f\x2f\xa0\x4a\
\x57\x6f\x8c\x19\x79\xcc\xd7\x44\xc4\xe3\x7c\xc4\x6d\x58\x9a\x78\
\xaf\x8f\xfc\xec\xce\x17\xe5\x6e\xcb\xc8\xc4\xbd\xab\x8f\x7d\x93\
\xa9\xc7\x2a\xf7\xa0\xd9\x1d\xf3\x25\x16\xdb\x75\x0e\x96\x54\x07\
\x7d\x59\xbe\x84\x1d\x7b\x02\xca\x44\x84\xc3\x72\xdc\xdf\xa0\x97\
\x61\xa8\xc8\xb6\x6c\x3c\xce\x7d\x1e\x9b\x7d\x5c\x29\x2b\xfc\x28\
\xdb\x63\xbe\xc4\x62\x67\x1d\x48\x1d\xb9\x4b\xe1\x6b\xb6\x2f\xf3\
\xd3\x87\x06\x35\xdc\x84\xd2\xe8\xf5\xaa\x27\xf1\x05\x60\x0d\x8f\
\x4a\x9c\xbc\x38\x4a\x2b\x3c\x29\xca\xb7\xb3\x29\x13\x6d\x91\xae\
\xaa\xa0\xfe\xab\x0c\x1c\x5b\x36\x62\xb0\xda\x89\x4b\x23\x1f\x3e\
\x9c\x22\xee\xd6\xa0\x9b\xc2\x4a\x5f\xbd\xbe\x49\x84\x00\xc2\x85\
\x62\x68\x04\x4e\x75\x1b\x5c\xaa\x74\x8b\xb2\x57\x95\x8d\xd6\xc3\
\xba\x8e\xa6\xf1\x7b\x56\x1a\xa3\xc3\xba\xb8\x2d\x1b\xdd\x00\xfd\
\xfd\x23\xda\xb2\x9e\x20\xbc\xe0\x52\x3d\xa5\xdc\xd2\x02\x23\xe7\
\xf7\x8d\x28\xa2\x33\xee\xa6\xb6\xe3\xa1\xc2\x75\x09\x93\xa9\xac\
\xd3\xe5\x2e\x37\xa3\xf6\x30\xa9\xd2\x16\x69\x96\x31\x7e\x60\x0e\
\x0e\x0e\xf9\x40\x5d\xac\x03\x36\x60\xa7\x7a\x74\x91\x55\x0f\xeb\
\x1d\xf5\x7a\x6a\xcc\xd2\x0a\x63\xc3\x5e\x28\x44\x8e\x5a\xea\x76\
\x3f\x24\x07\x53\x14\x9d\x4a\x8e\xa3\xe4\x6f\xf3\xab\x83\x83\x43\
\x12\x12\x67\x43\x71\x5a\xce\xd8\x60\x9d\x5f\xaf\xa7\xc6\x0c\x9b\
\x85\x81\xc3\x19\x46\xa1\x84\x8f\x5a\xea\x77\xb7\x14\xd9\x58\x29\
\xbd\x16\xae\x8e\x86\xa4\xf8\x11\x13\x1c\x1c\x1c\xf2\x4e\x46\x06\
\xab\x72\xa5\x9e\xee\x32\xb4\x21\x23\xa1\x54\x12\xf8\x75\xdf\x71\
\x82\xbb\x87\x8f\x63\x2f\x12\x4a\x2f\xca\x7b\xa2\x21\xd9\x54\x54\
\x1d\x0e\x0e\x0e\x05\x63\x42\x83\xe5\x0d\xe8\x5c\x23\x6c\x26\xc1\
\xb3\x7b\x08\x55\x7e\xa9\x31\x1a\xba\xb6\x17\x39\xa4\x8a\xd2\x83\
\xf2\xde\x70\x48\x36\x4e\x9c\xd9\xc1\xc1\x61\xa6\x32\xee\xa4\xfb\
\xb2\x5a\x9d\x57\xea\x66\xb3\x08\x63\x96\xc8\x55\xe9\xa2\x9f\x2b\
\x6c\x7f\xe1\xdc\xf0\x33\x41\xca\x79\x93\x18\xce\x88\x34\xc9\x83\
\x13\xe7\x76\x70\x70\x98\xc9\xa4\x35\x58\xcb\x6a\x75\x5e\x99\x87\
\xad\x30\x12\x42\x25\x89\x98\x16\xfb\x20\x27\xa5\x1f\xcb\x7b\x23\
\x2d\xd2\x54\x54\x1d\x0e\x0e\x0e\x53\x42\x4a\x83\x55\xd5\xa0\xf3\
\x45\xd9\x22\xc2\x5b\xa7\x5a\x50\xc6\x0c\x4c\xb0\xbf\x2f\xda\x2c\
\x0f\x14\x5b\x8a\x83\x83\xc3\xd4\x30\x66\x6b\xce\x8a\x3a\x3d\x53\
\x60\xeb\x74\x37\x56\x08\xd7\x38\xc6\xca\xc1\xe1\xb5\xc5\xa8\x49\
\x77\xef\x1a\x5d\xe0\x76\xb3\x0d\x38\xaf\x48\x7a\x26\x44\xa1\x4f\
\x84\x6b\xc2\x4d\x52\x24\x4f\x10\x07\x07\x87\x62\x31\x3c\x24\xf4\
\xaf\xd3\x72\xed\xe1\x47\x90\xdd\xb6\x8e\x29\x46\x45\xf8\xb2\x63\
\xac\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\xf2\xc1\xff\
\x03\x12\x5c\xec\xfd\x42\xa6\x16\x08\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x02\x6c\
\x00\
\x00\x10\xbe\x78\x9c\xc5\x98\xcf\x6b\x13\x41\x14\xc7\x5f\x6a\x4a\
\x4b\x2f\x06\x6a\x6a\xfd\x01\xc9\x45\x2a\x22\x08\x82\x60\x9b\x62\
\xf7\x20\x78\x50\xb1\x1e\xb4\xe2\x25\x6d\x3c\x79\x16\xbc\x56\xff\
\x0a\xf1\xa0\xe4\x18\xe8\xa1\x7f\x82\x77\x41\x1d\x35\x56\x8f\x16\
\x11\x31\x22\x5e\xfc\x81\x89\xe9\xf3\x3b\xdd\xb7\xed\x23\xdd\x9d\
\x6c\xb2\x9b\x66\xcb\x27\xb3\xd9\x37\x9d\xef\x77\x67\x66\x67\xdf\
\x84\x28\x83\xbf\x62\x91\xec\x27\xd5\x72\x44\x53\x44\x74\x12\xe0\
\x12\x79\xe4\x5f\xdf\x3e\x10\x3b\x7e\xdf\x27\x38\x78\xb9\xb4\xc3\
\x30\x0e\x5e\x29\xdd\x03\x6b\xe0\x31\x78\x0a\x9e\xec\x33\x05\x50\
\x07\x0c\xda\x52\xee\x27\xb6\x0f\x72\xe0\xb9\x7c\xff\x03\xfe\x81\
\xe6\x00\xfd\xd8\xf6\x5b\xc2\x01\xf1\x30\x09\x5e\x74\xc4\xef\x02\
\x0f\x5c\x07\x57\xc0\xd5\x84\x5c\x96\xb6\x1a\xa2\xb3\x25\xda\x81\
\x87\x43\xe0\xa5\xf2\x69\xc0\x61\x89\x65\x12\xce\xb3\x0c\x57\xd0\
\x4e\xa5\x34\x86\xf3\x4d\x35\xde\x14\xe2\xe1\x95\xf2\xf0\x11\x9c\
\x90\xd8\x98\xd4\xeb\x87\x2c\x9e\x31\xeb\x61\x22\x42\x5f\x7b\xc8\
\x83\xd7\xca\xc3\xa6\xf2\x90\xd5\xff\x93\xf4\xfe\x25\x16\xe6\x61\
\x0a\xbc\x51\x1e\x3e\x81\x99\x7e\x3d\xb8\xf4\x1d\x1e\xec\xd8\xbf\
\x55\x1e\x3e\x83\x53\xfd\x78\xe8\xa6\xef\xf0\x30\xcd\xbb\xeb\x83\
\xe5\x0b\x38\xdd\xab\x87\x38\xfa\x0e\x0f\x47\xc0\x3b\xde\x7d\x66\
\xbe\x82\x33\xbd\x78\x88\xab\xdf\xc5\xc3\x86\xf2\xf0\x0d\x9c\x8d\
\xeb\xa1\x17\x7d\x87\x87\xa3\xe0\xbd\xf2\xf0\x1d\x9c\x8b\xe3\xa1\
\x57\x7d\x87\x87\x63\xe0\x83\x9a\x0f\x3f\xc0\x79\x89\x8d\xa4\x75\
\xff\x5d\x3c\xe4\xd5\x7c\xb0\x6b\xc8\x4f\x70\xd1\xe5\x61\x80\xfa\
\x4d\x29\x6f\xeb\x3a\x69\xe8\xc7\xe8\xff\x96\xcc\x83\xa5\xb4\xfb\
\x3f\xc6\xfc\x6b\x76\x68\xa7\x36\xff\x1c\xda\x1b\x1d\xda\x37\x25\
\x36\xca\xcb\xf3\xc4\x77\x66\x13\x3f\x7f\x11\xda\x7a\xfd\xd9\xab\
\xbd\x32\x87\x76\xe7\x13\xaf\x3f\x11\xda\x7a\xfd\x0d\xd1\x4e\x67\
\xfd\x8d\xd0\xd6\xef\x9f\xf0\x3e\xaf\x24\x7f\xff\x44\x68\xeb\xf7\
\x6f\xa0\x7d\x63\x47\xbb\xbc\x80\xb6\xa2\xc7\x3b\xae\x7e\x84\xb6\
\xce\x3f\xf6\x6a\x57\x2e\xd0\xf6\x98\xa7\x90\x7f\x84\x68\xeb\xfc\
\xab\x25\xe5\x2d\x89\x8d\xf3\x60\xf2\xaf\xb0\xfc\xd3\xc6\x7e\x83\
\x4b\xdc\x65\x6d\x49\x78\xff\x61\xf9\xf7\x5f\x29\xed\xfe\x64\x96\
\xfd\x9c\x39\x69\xee\xed\xca\xbf\xc3\xf6\x1f\xc1\xfe\x60\xd0\xfb\
\x8f\x02\x0f\x77\xff\x35\xdc\xfd\xe7\x90\xf7\xdf\x65\x66\x0e\xce\
\x1f\x90\xff\x7b\x41\xb5\xcc\x66\xb1\x48\x54\xcb\xb3\xa9\x4f\xb0\
\x69\x8c\xae\x9a\x5f\x23\xcf\x4c\xf3\x61\xd9\xb4\xbd\x75\xb3\x55\
\x5d\x32\xbc\x38\x63\xb8\x36\xe9\x83\x73\x7b\xad\xed\x3d\x42\x9d\
\x83\xa8\x5b\x30\x8d\xec\x82\xa9\x8f\xaf\x9a\x5a\xce\x6f\xab\x7a\
\x8d\x8d\x27\x1a\x5a\xfb\x3f\x1f\x6b\x46\xa0\
"
qt_resource_name = b"\
\x00\x05\
\x00\x6f\xa6\x4f\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x6f\
\x00\x04\
\x00\x07\x35\xdf\
\x00\x6c\
\x00\x6f\x00\x67\x00\x6f\
\x00\x17\
\x0c\x11\x24\xe7\
\x00\x76\
\x00\x74\x00\x2d\x00\x65\x00\x6e\x00\x74\x00\x65\x00\x72\x00\x70\x00\x72\x00\x69\x00\x73\x00\x65\x00\x5f\x00\x63\x00\x68\x00\x69\
\x00\x63\x00\x6f\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x06\x52\x90\xdf\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x6f\x00\x2e\x00\x69\x00\x63\x00\x6f\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\
\x00\x00\x00\x10\x00\x02\x00\x00\x00\x01\x00\x00\x00\x04\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x52\x00\x01\x00\x00\x00\x01\x00\x00\x12\xca\
\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x10\x00\x02\x00\x00\x00\x01\x00\x00\x00\x04\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x52\x00\x01\x00\x00\x00\x01\x00\x00\x12\xca\
\x00\x00\x01\x79\x23\xe8\x13\x6a\
\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x79\x23\xe8\x13\x6a\
"
# Pick the rcc resource-struct format matching the runtime Qt version:
# the layout changed in Qt 5.8 (v2 carries extra per-entry rows).
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2
def qInitResources():
    # Register the embedded resource data with Qt's resource system.
    QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    # Unregister the embedded resource data from Qt's resource system.
    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
# Auto-register on import (standard pyrcc-generated behavior).
qInitResources()
| [
"j.f.maurino@gmail.com"
] | j.f.maurino@gmail.com |
95869793a95931568444941801533d4d5e6cb5eb | d6be2453d1c4428a4b9d9f78ea80e7e1a39f0f5b | /src/utils.py | 20225ec0e46d35e08388cbfdfc634ce8c9a2e343 | [] | no_license | bcrestel/sls | 8f6a6356264747285fb193b2ebfa1c2914aa0fe3 | f0392135e5c4072e3341998651091c8455a882fb | refs/heads/master | 2020-12-15T16:51:03.663284 | 2020-10-06T14:22:58 | 2020-10-06T14:22:58 | 235,185,248 | 0 | 0 | null | 2020-01-20T19:47:07 | 2020-01-20T19:47:06 | null | UTF-8 | Python | false | false | 1,478 | py | import hashlib
import pickle
import json
import os
import itertools
import torch
import numpy as np
def hash_dict(dictionary):
    """Return a stable md5 hex digest for a dictionary.

    Keys are visited in sorted order so logically-equal dicts hash
    identically; nested dicts are hashed recursively.
    """
    parts = []
    for key in sorted(dictionary.keys()):
        value = dictionary[key]
        if isinstance(value, dict):
            value = hash_dict(value)
        parts.append("%s_%s_" % (str(key), str(value)))
    return hashlib.md5("".join(parts).encode()).hexdigest()
def save_pkl(fname, data):
    """Save data in pkl format.

    Writes to a ``<fname>_tmp.pkl`` file first and then renames it, so a
    crash mid-write never leaves a truncated file at the final path.
    """
    tmp_path = "%s_tmp.pkl" % fname
    with open(tmp_path, "wb") as handle:
        pickle.dump(data, handle)
    os.rename(tmp_path, fname)
def load_pkl(fname):
    """Load and return the content of a pkl file."""
    handle = open(fname, "rb")
    try:
        return pickle.load(handle)
    finally:
        handle.close()
def load_json(fname, decode=None):
    """Read a JSON file and return the decoded object.

    ``decode`` is accepted for interface compatibility but is unused.
    """
    with open(fname, "r") as handle:
        return json.load(handle)
def save_json(fname, data):
    """Write ``data`` to ``fname`` as indented JSON with sorted keys."""
    with open(fname, "w") as handle:
        json.dump(data, handle, indent=4, sort_keys=True)
def torch_save(fname, obj):
    """Save ``obj`` in torch format, atomically via a temp file + rename."""
    tmp_path = fname + ".tmp"
    torch.save(obj, tmp_path)
    os.rename(tmp_path, fname)
def read_text(fname):
    """Return the list of lines (newlines kept) of a UTF-8 text file."""
    with open(fname, "r", encoding="utf-8") as handle:
        return handle.readlines()
| [
"issam.laradji@gmail.com"
] | issam.laradji@gmail.com |
12dc8e0255927fe426d8d84f615a6566ebf6cdb7 | ef2b79578ebb2fd8cbccbd2af53fc60a0a188f5f | /poc/polls/admin.py | 2881a09dbab98ca870e3c66313c6043cfdb728e2 | [] | no_license | oscarmyepes/django-polls | 15e999fdbd49b60747118bba3db3a44d3372c3f7 | c503349243b20d55285086faba106bbff535c095 | refs/heads/master | 2023-04-27T01:54:38.938285 | 2020-01-04T03:40:32 | 2020-01-04T03:40:32 | 231,703,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | from django.contrib import admin
# Register your models here.
from .models import Question, Choice
class ChoiceInline(admin.TabularInline):
    # Inline tabular editor so Choices can be edited on the Question admin page.
    model = Choice
    extra = 3  # number of empty extra choice rows shown
class QuestionAdmin(admin.ModelAdmin):
    # Admin config for Question: grouped fields (pub_date collapsible),
    # inline Choice editing, list columns, date filter and text search.
    fieldsets = [(None, {'fields': ['question_text']}), ('Date information', {
        'fields': ['pub_date'], 'classes': ['collapse']}), ]
    inlines = [ChoiceInline]
    list_display = ('question_text', 'pub_date', 'was_published_recently')
    list_filter = ['pub_date']
    search_fields = ['question_text']
admin.site.register(Question, QuestionAdmin)
| [
"oscarmyepes@gmail.com"
] | oscarmyepes@gmail.com |
10912b08ab87df2a95943513ea9d012cdd01ef7b | b78ef5518bf4c735b86a8ecf50fba6cc95dd3fc8 | /django/09_DRF/drf/urls.py | bcbada83c30c75c633435b07318323fa2a5e78f5 | [] | no_license | AmberPark/TIL | 58ff9dcc4607ae199deb278d8f4971aafa6addac | b5a3455f15b3eeb35dc994116e06b74e67234669 | refs/heads/master | 2023-07-10T20:24:30.439653 | 2021-08-22T12:57:38 | 2021-08-22T12:57:38 | 332,991,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | """drf URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    path('admin/', admin.site.urls),
    # All application endpoints live under the /api/ prefix.
    path('api/', include('api.urls')),
]
| [
"amberso1996@gmail.com"
] | amberso1996@gmail.com |
a31faa28ea7fa887dcbc8ad53795258aa189f931 | 498e792e16ab1a74ac034c53177c4cccbeef2749 | /classification/resnet/train.py | 662ceca52750777835c1b05e25f7eaacf8d247aa | [] | no_license | ydwisroad/imageprocessingpytorch | f97bec4469c087f6bbbca5d42da180c95be8b13f | bd8d1af228619c9c6c9c1a2b880422f7d5048dd5 | refs/heads/master | 2023-07-29T05:05:11.145832 | 2022-02-21T23:32:03 | 2022-02-21T23:32:03 | 284,976,501 | 7 | 3 | null | 2023-07-24T01:08:22 | 2020-08-04T12:43:24 | Jupyter Notebook | UTF-8 | Python | false | false | 4,052 | py | import torch
import torch.nn as nn
from torchvision import transforms, datasets
import json
import matplotlib.pyplot as plt
import os
import torch.optim as optim
from model import resnet34, resnet101
# Train a ResNet-34 classifier on the flower_photos dataset.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

# Standard ImageNet-style augmentation/normalization for train and val.
data_transform = {
    "train": transforms.Compose([transforms.RandomResizedCrop(224),
                                 transforms.RandomHorizontalFlip(),
                                 transforms.ToTensor(),
                                 transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),
    "val": transforms.Compose([transforms.Resize(256),
                               transforms.CenterCrop(224),
                               transforms.ToTensor(),
                               transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])}

data_root = os.path.abspath(os.path.join(os.getcwd(), "../../data"))  # get data root path
image_path = data_root + "/flower_photos_simple/"  # flower data set path

train_dataset = datasets.ImageFolder(root=image_path+"train",
                                     transform=data_transform["train"])
train_num = len(train_dataset)

# {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
flower_list = train_dataset.class_to_idx
cla_dict = dict((val, key) for key, val in flower_list.items())
# Persist index->class mapping for later inference.
json_str = json.dumps(cla_dict, indent=4)
with open('class_indices.json', 'w') as json_file:
    json_file.write(json_str)

batch_size = 4
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size, shuffle=True,
                                           num_workers=0)
validate_dataset = datasets.ImageFolder(root=image_path + "val",
                                        transform=data_transform["val"])
val_num = len(validate_dataset)
validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                              batch_size=batch_size, shuffle=False,
                                              num_workers=0)

net = resnet34()
# To fine-tune from pretrained weights instead, load them here, e.g.:
# net.load_state_dict(torch.load("./resnet34-pre.pth"), strict=False)
# Replace the final fully-connected layer for the 5 flower classes.
inchannel = net.fc.in_features
net.fc = nn.Linear(inchannel, 5)
net.to(device)

loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.0001)

best_acc = 0.0
save_path = './resNet34.pth'
for epoch in range(10):
    # ---- train ----
    net.train()
    running_loss = 0.0
    for step, data in enumerate(train_loader, start=0):
        images, labels = data
        optimizer.zero_grad()
        logits = net(images.to(device))
        loss = loss_function(logits, labels.to(device))
        loss.backward()
        optimizer.step()
        # accumulate statistics
        running_loss += loss.item()
        # progress bar for the current epoch
        rate = (step+1)/len(train_loader)
        a = "*" * int(rate * 50)
        b = "." * int((1 - rate) * 50)
        print("\rtrain loss: {:^3.0f}%[{}->{}]{:.4f}".format(int(rate*100), a, b, loss), end="")
    print()

    # ---- validate ----
    net.eval()
    acc = 0.0  # accumulate number of correct predictions for this epoch
    with torch.no_grad():
        for val_data in validate_loader:
            val_images, val_labels = val_data
            outputs = net(val_images.to(device))
            predict_y = torch.max(outputs, dim=1)[1]
            acc += (predict_y == val_labels.to(device)).sum().item()
        val_accurate = acc / val_num
        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)
        # BUG FIX: the epoch-average loss divided by `step` (the last batch
        # index, i.e. batch_count - 1); divide by step + 1 instead.
        print('[epoch %d] train_loss: %.3f  test_accuracy: %.3f' %
              (epoch + 1, running_loss / (step + 1), val_accurate))

print('Finished Training')
| [
"wandf12345@163.com"
] | wandf12345@163.com |
c7d6ae9174a1d5de81776048a2bf38d10148c42d | 38c7e9a2752c03498d4807f263b60f7021f6667d | /src/doublebook/ebook.py | c08a9de4c04edcdb0884c40381c3e22df2176555 | [
"MIT"
] | permissive | plysytsya/doublebook | 2915ebbc8ae67cd77796776c990ffb3241af21f8 | 09dcd5399288c9544df928136a9e2f2e54639cbd | refs/heads/master | 2020-09-21T19:08:21.663868 | 2019-11-29T20:25:41 | 2019-11-29T20:25:41 | 224,893,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | from .text_tokenizer import TextTokenizer
class Ebook:
    """A plain-text book loaded from disk and split into sentences on init."""
    def __init__(self, path):
        # path: filesystem location of the text file to load.
        self.path = path
        self.read()
        self.tokenize()
    def read(self):
        # Load the whole file into memory, stripping newlines so the
        # tokenizer sees one continuous string.
        print("Reading text into memory.")
        with open(self.path) as file:
            self.content = file.read().replace('\n', '')
    def tokenize(self):
        # Delegate sentence splitting to the project's TextTokenizer.
        print("Tokenizing text into sentences.")
        self.sentences = TextTokenizer(self.content).tokenize()
| [
"pavlo.lysytsya@outfittery.de"
] | pavlo.lysytsya@outfittery.de |
977cd1f34ed3ff2b174cb7a5bb2ad1829606c277 | fbff973537eae45b724b23e9b6fc8692da959b21 | /app/core/config.py | 979658548ef83b1914a5730ab318dedd6ab5b824 | [
"MIT"
] | permissive | lsetiawan/cava-metadata | d4a8878480cd9da4bfa163b9d9c42d705a0fb263 | e45c469a4b5cbdebfba74ab0031fb94eb59fd724 | refs/heads/main | 2023-04-08T02:28:24.402853 | 2021-01-27T20:02:23 | 2021-01-27T20:02:23 | 358,033,596 | 0 | 0 | MIT | 2021-04-14T20:26:35 | 2021-04-14T20:26:35 | null | UTF-8 | Python | false | false | 1,418 | py | import os
import fsspec
# API SETTINGS
SERVICE_NAME = "Metadata Service"
SERVICE_ID = "metadata"
OPENAPI_URL = f"/{SERVICE_ID}/openapi.json"
DOCS_URL = f"/{SERVICE_ID}/"
SERVICE_DESCRIPTION = """Metadata service for Interactive Oceans."""
# Origins allowed to call the API cross-site (local dev + deployed fronts).
CORS_ORIGINS = [
    "http://localhost",
    "http://localhost:8000",
    "http://localhost:5000",
    "http://localhost:4000",
    "https://appdev.ooica.net",
    "https://app-dev.ooica.net",
    "https://app.interactiveoceans.washington.edu",
    "https://api-dev.ooica.net",
    "https://api.interactiveoceans.washington.edu",
]
# Repository root (two levels above this file).
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# API VERSION
CURRENT_API_VERSION = 2.0
# Redis configurations
REDIS_HOST = os.environ.get("REDIS_HOST", "localhost")
# NOTE(review): when set via env var this is a str, otherwise the int 6379 —
# confirm consumers handle both.
REDIS_PORT = os.environ.get("REDIS_PORT", 6379)
# OOI Configurations
BASE_URL = "https://ooinet.oceanobservatories.org"
M2M_URL = "api/m2m"
USERNAME = os.environ.get("OOI_USERNAME", "")
TOKEN = os.environ.get("OOI_TOKEN", "")
# File Systems Configurations
# NOTE: these fsspec filesystems are constructed eagerly at import time.
FILE_SYSTEMS = {
    "minio_s3": fsspec.filesystem(
        "s3", client_kwargs={"endpoint_url": "http://minio:9000"}
    ),
    "aws_s3": fsspec.filesystem(
        "s3",
        skip_instance_cache=True,
        use_listings_cache=False,
        config_kwargs={"max_pool_connections": 1000},
    ),
}
GOOGLE_SERVICE_JSON = os.environ.get("GOOGLE_SERVICE_JSON", "",)
DATA_BUCKET = 'ooi-data'
| [
"landungs@uw.edu"
] | landungs@uw.edu |
5bf7470e827eea42e7c8955e6c2fb564dbc45de9 | f453f183834e3bf587a120023615ed2ddd38c157 | /tsa/lib/encoders.py | 969cdf1f6c1712d900097659bf0862df709f2d35 | [
"MIT"
] | permissive | chbrown/topic-sentiment-authorship | 72c21638eb72888c370cd3b1b5f06504df09ce2e | e8cacf11b06583d9ed85ff790e1d5322e59f2fd6 | refs/heads/master | 2022-07-05T22:58:24.456139 | 2020-03-29T16:12:21 | 2020-03-29T16:12:21 | 13,025,589 | 0 | 0 | MIT | 2020-03-29T16:13:35 | 2013-09-23T02:53:40 | Jupyter Notebook | UTF-8 | Python | false | false | 492 | py | import json
from datetime import datetime
class JSONEncoder(json.JSONEncoder):
    """json.JSONEncoder that understands ``__json__()`` hooks and datetimes."""

    def default(self, obj):
        # Objects may supply their own JSON-ready representation.
        if hasattr(obj, '__json__'):
            return obj.__json__()
        # Datetimes serialize as ISO-8601 (no timezone component).
        if isinstance(obj, datetime):
            return obj.strftime('%Y-%m-%dT%H:%M:%S')
        # Anything else is passed back unchanged (same as original).
        return obj
# encoder = JSONEncoder()
# def json(obj):
# return encoder.encode(obj)
# c'mon, just DIY
def csv(obj):
    """Join the items of ``obj`` into a comma-separated string."""
    return ','.join(str(item) for item in obj)
| [
"io@henrian.com"
] | io@henrian.com |
f18101feaea2825e198453f972be02107ee83e77 | ed0a3ebb8d26ea8451e5fab3af65aa37fe343c13 | /joins/forms.py | 0cb18bfebd20b82af03de008bb8afb8ba08e7236 | [] | no_license | alisaleh65/first_app | 7fbf516bae300a11ab31f36c14d5002750c3d3fb | 90e2c8a4a44e1f8e62a5f49215bfc4d4c49d8c4b | refs/heads/master | 2021-01-20T22:19:58.806751 | 2016-06-26T08:54:29 | 2016-06-26T08:54:29 | 61,530,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | from django import forms
from .models import Join
class EmailForm(forms.Form):
    # Standalone email-capture form; the name is optional.
    name = forms.CharField(required=False)
    email = forms.EmailField()
class JoinForm(forms.ModelForm):
    # ModelForm over Join, exposing only the email field.
    class Meta:
        model = Join
        fields = ["email"]
| [
"alisaleh65@yahoo.com"
] | alisaleh65@yahoo.com |
f9b589aa7e5cb26eda1a3b56bc67249768ee6093 | 4b819b9c7aee9d60689f487557e437445101188d | /lanuch/accounts/views.py | e04d7ebbd2e15bedabf699d153c0170baa54e03b | [] | no_license | Damidara16/dev | c2fe90fb70d4644bdee964ce9b7b85bf9f71c99a | f3c8666bc32b19ffb623b83019fdbf404433ece8 | refs/heads/master | 2020-03-10T20:14:11.173397 | 2018-04-15T00:56:56 | 2018-04-15T00:56:56 | 129,565,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,485 | py | from django.shortcuts import render, redirect
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.models import User

from blog.models import waste

from .forms import RegistrationForm, EditProfileForm, AddInfo
from .forms import AddInfo as AddInfoForm
from .models import Author
def ViewProfile(request, author_pk):
    """Render a user's profile page.

    If the requested pk is the logged-in user's own id, render their own
    profile directly; otherwise look the user up by primary key.
    """
    # BUG FIXES: is_authenicated() was a typo (always AttributeError) and
    # User.objects.get(user=request.user) is not a valid User lookup.
    if author_pk == request.user.id and request.user.is_authenticated:
        user = request.user
    else:
        user = User.objects.get(pk=author_pk)
        # view counting was left disabled in the original:
        # user.views += 1; user.save()
    return render(request, 'accounts/profile.html', {'user': user})
def register(request):
    """Handle user sign-up with RegistrationForm.

    GET renders the registration form; a valid POST saves the new user
    and redirects home, an invalid POST redirects back to the form.
    """
    if request.method == 'POST':
        form = RegistrationForm(request.POST, request.FILES)
        if not form.is_valid():
            return redirect('/accounts/register')
        form.save()
        return redirect('/home')
    context = {
        'form': RegistrationForm(),
        'title': 'Change Your Password',
        'btnName': 'Register',
    }
    return render(request, 'accounts/edit.html', context)
'''
def jregister(request):
if request.method =='POST':
form = RegistrationForm(request.POST)
if form.is_valid():
form.save()
return redirect(reverse('accounts:home'))
else:
form = RegistrationForm()
args = {'form': form}
return render(request, 'accounts/reg_form.html', args)
'''
def EditProfile(request):
    """Let the logged-in user edit their own profile.

    BUG FIXES: request.Method/request.Post/request.User were wrongly
    capitalized (HttpRequest exposes lowercase ``method``, ``POST``, and
    ``user``), and the valid-POST branch ended in the broken ``return re``.
    """
    if request.method == 'POST':
        form = EditProfileForm(request.POST, instance=request.user)
        if form.is_valid():
            form.save()
            return redirect('/accounts/profile')
        # invalid POST: fall through and re-render the bound form
    else:
        form = EditProfileForm(instance=request.user)
    title = 'Edit Your Profile'
    btnName = 'Done editing'
    context = {'form': form, 'title': title, 'btnName': btnName}
    return render(request, 'accounts/edit.html', context)
def AddInfo(request):
    """Collect additional profile information from an authenticated user.

    BUG FIXES: the view is named AddInfo and shadowed the imported AddInfo
    form class, so ``AddInfo(request.POST)`` recursed into this view; use
    the AddInfoForm alias instead. Also fixed request.Method and the
    is_authenicated() typo, instantiated the form on GET (the original
    assigned the RegistrationForm *class*), and removed the dead
    ``instance = form.cleaned_data[...]`` reassignments.
    """
    if request.method == 'POST' and request.user.is_authenticated:
        form = AddInfoForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('/home/')
        return redirect('/accounts/add')
    form = AddInfoForm()
    title = 'Tell Us More'
    btnName = 'Finish'
    context = {'form': form, 'title': title, 'btnName': btnName}
    return render(request, 'accounts/edit.html', context)
def Changepassword(request):
    """Let the logged-in user change their password.

    BUG FIXES: request.Method/request.Post/request.User were wrongly
    capitalized, and PasswordChangeForm takes ``user=`` (it is not a
    ModelForm, so ``instance=`` raises TypeError).
    """
    if request.method == 'POST':
        form = PasswordChangeForm(data=request.POST, user=request.user)
        if form.is_valid():
            form.save()
            # Keep the current session valid after the password change.
            update_session_auth_hash(request, form.user)
            return redirect('/accounts/profile')
        return redirect('/accounts/Changepassword')
    form = PasswordChangeForm(user=request.user)
    title = 'Change Your Password'
    btnName = 'Change Password'
    context = {'form': form, 'title': title, 'btnName': btnName}
    return render(request, 'accounts/edit.html', context)
| [
"sajala8624@gmail.com"
] | sajala8624@gmail.com |
68f576c1e6ca803266988f8b0b0c5c830237888e | 6a2c43da788a27910bb11881e0e32b734b700e8e | /src/components/elements/groups_element.py | c1a3fbb027a3e7e22ddbb5efd5cf7daf6d68fa00 | [] | no_license | pitikdmitry/homework-4 | fa5ae5cd1e153dd98ccbbf299ee9ce0463efa275 | f456669dca4987b384f09bf4f00a1dcbc8e16467 | refs/heads/master | 2020-03-17T21:04:53.197276 | 2018-05-25T11:58:30 | 2018-05-25T11:58:30 | 133,942,398 | 0 | 0 | null | 2018-05-25T11:58:31 | 2018-05-18T10:53:20 | Python | UTF-8 | Python | false | false | 479 | py | from src.components.base_element import BaseElement
class GroupsElement(BaseElement):
    # XPath of the "Groups" item in the left nav bar when it is the
    # currently active (marked) section.
    MARKED_ITEM_NAV_BAR = '//a[@hrefattrs="st.cmd=userAltGroup&st._aid=NavMenu_User_AltGroups"]' \
                          '[@class="mctc_navMenuSec mctc_navMenuActiveSec"]'
    def is_marked(self):
        """
        Check for the existence of the marked groups item in the nav bar
        :return: Bool
        """
        return self.existence_of_element_by_xpath(self.MARKED_ITEM_NAV_BAR)
| [
"ya.zubarevanton@yandex.ru"
] | ya.zubarevanton@yandex.ru |
dbb9dfef04bde38e63f84dfddf9bbc7d5b6ad1a2 | c404b7f9d30cd47550b621f8f243dc4b1c2bdf8a | /a_byte_of_python/chapter10_backup_ver3.py | ffed5450115aa74531d2ba191a0cde7ab2edb502 | [] | no_license | ilxsh/python_learning | 768d7857cece0a48f05524393eb12e985b174302 | efbaa88d0339c21eb78cf96a81dd163ae377759f | refs/heads/master | 2021-01-10T10:19:12.965879 | 2015-12-24T18:32:07 | 2015-12-24T18:32:07 | 48,186,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,386 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Filename: backup_ver2.py
import os
import time
# 1. The files and directories to be backed up are specified in a list.
source = ['/home/test/backup1', '/home/test/backup2']
# On Windows use something like: source = [r'C:\Documents', r'D:\Work']

# 2. The backup must be stored in a main backup directory.
target_dir = '/home/bluewind/bak/'  # Remember to change this to a directory you use

# 3. The files are backed up into a zip file.
# 4. The backup subdirectory is named after the current date and time.
today = target_dir + time.strftime('%Y%m%d%H%M%S')
# The current time is the name of the zip archive.
now = time.strftime('%H%M%S')

# Take a comment from the user to create the name of the zip file.
comment = input('Enter a comment --> ')
if len(comment) == 0:
    target = today + os.sep + now + '.zip'
else:
    # BUG FIX: the original split this expression across two lines with no
    # continuation character, which is a SyntaxError; parenthesize it.
    target = (today + os.sep + now + '_' +
              comment.replace(' ', '_') + '.zip')

# Create the subdirectory if it isn't already there.
if not os.path.exists(today):
    os.mkdir(today)  # make directory
    print('Successfully created directory', today)

# 5. Use the zip command (Unix/Linux) to put the files in a zip archive.
zip_command = "zip -qr '%s' %s" % (target, ' '.join(source))

# Run the backup.
if os.system(zip_command) == 0:
    print('Successful backup to', target)
else:
    print('Backup FAILED')
| [
"David@jdsu.com"
] | David@jdsu.com |
dcd109650ffae5939c920e522bef889514d1f60b | 8e0dd6f0a1936a15a23831db614697d783031df5 | /10K/LDA_10K.py | 76ef8a9b080a529fe5d8b207b7818395629801f1 | [] | no_license | Russzheng/Financial-Reports-Analytics | e7a375917c255d382ba3e02c1865ed71f50f4811 | c741ec8d858f8e01b4c04a57df8052e1d7184767 | refs/heads/master | 2021-07-21T09:53:37.411259 | 2017-10-27T06:42:39 | 2017-10-27T06:42:39 | 103,638,473 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,731 | py | ####### GENSIM LDA for 10-K Filings #######
# GENSIM: wrapped python version of MALLET
# This gensim library is incredibly slow. sklearn
# library's perplexity calculation method is all wrong
# and the paper used MALLET. So we stick with Gensim.
# But it is really slow
# This file applies the LDA model using Gensim
import pandas as pd
import re
import nltk
from nltk.corpus import stopwords
import os, os.path
import numpy as np
from stemming.porter2 import stem
import matplotlib.pyplot as plt
from scipy.interpolate import spline
from gensim import corpora, models
from gensim.models import Phrases
import gensim
import random
import csv
import logging
import time
DATA_DIR = '/media/peng/New Volume/Back-up/Desktop/10K_processed'
common_words = set(['company', 'will', 'value', 'information', 'years', 'upon', 'company\'s',
'fiscal', 'rate', 'based', 'report', 'sales', 'management', 'services', 'form', 'costs', 'related',
'tax', 'ended', 'certain', 'market', 'credit', 'products', 'amount', 'period', 'net', 'including',
'opertions', 'securities', 'cash', 'time', 'statements', 'income', 'section', 'common', 'assets',
'shares', 'business', 'plan', 'year', 'date', 'interest', 'december', 'agreement', 'stock', 'may',
'financial', 'million', 'shall', 'style', 'block', 'display', 'color', 'border', 'bottom'])
def plot(topic_li, perplex_li, flag):
    """Plot perplexity vs. topic count as a smoothed curve and save it.

    topic_li: topic counts (x axis); perplex_li: perplexity values (y axis);
    flag: truthy -> save the test curve, falsy -> the train curve.
    """
    topic_li = np.array(topic_li)
    perplex_li = np.array(perplex_li)
    topic_smooth = np.linspace(topic_li.min(), topic_li.max(), 400)
    per_smooth = spline(topic_li, perplex_li, topic_smooth)
    # BUG FIX: start a fresh figure so successive calls do not overlay.
    plt.figure()
    plt.plot(topic_smooth, per_smooth)
    plt.xlabel('number of topics', fontsize=16)
    plt.ylabel('log(perplexity)', fontsize=16)
    # BUG FIX: save before show() — show() displays and releases the active
    # figure, so saving afterwards wrote an empty image.
    if flag:
        plt.savefig('test_log_perplex_li.png')
    else:
        plt.savefig('train_log_perplex_li.png')
    plt.show()
def train_test_split(x, test_pct):
    """Randomly split the items of ``x`` into (test, train) lists.

    Each item is assigned to the test split with probability ``test_pct``.
    Returns two plain lists. (The original called np.asarray on the results
    without using the return values — a no-op that was removed.)
    """
    results = [], []
    for item in x:
        # index 0 -> test, index 1 -> train
        results[0 if random.random() < test_pct else 1].append(item)
    test, train = results
    return test, train
def load_data(file_name):
    """Read one filing, normalize it, and return the filtered word list.

    Lowercases the text, strips everything but letters, splits on
    whitespace, and drops English stop words plus the corpus-specific
    common words. String handling here dominates the load time.
    """
    with open(file_name, 'r') as handle:
        text = handle.read().replace('\n', '')
    # Lowercase first, then replace non-letters with spaces.
    text = re.sub('[^a-zA-Z]', ' ', text.lower())
    tokens = text.split()
    # Sets give O(1) membership tests for the exclusion filter.
    excluded = set(stopwords.words('english')) | common_words
    return [token for token in tokens if token not in excluded]
def main():
    """Load up to 10K preprocessed filings, fit LDA models over a grid of
    topic counts with gensim, dump topic-word tables for two of them, and
    plot the train/test perplexity curves."""
    start_time = time.time()
    # how many data entries for our csv files, some double checking
    # prepare bag of words
    print('Dataloading Starts')
    # 10-40s for every 1K files loaded
    # when memoery is almost consumed, could take double the time
    words_li = []
    id_li = []
    for file_name in os.listdir(DATA_DIR):
        id_li.append(file_name)
        words_li.append(load_data(DATA_DIR + '/' + file_name))
        # Hard cap: stop after 10,000 documents.
        if len(id_li) >= 10000:
            break
        if len(id_li) % 1000 == 0:
            print(len(id_li), 'files processed')
            print('--- %s seconds ---' % (time.time() - start_time))
    data_size = len(id_li)
    print('Dataset size is :', data_size)
    # for some really bizzare cases
    #if len(words_li) != len(id_li):
    #    print('ERROR when loading data!')
    #    exit(9)
    # NOTE(review): return value of np.asarray is discarded — looks like a
    # leftover no-op.
    np.asarray(words_li)
    ###### GENSIM ######
    # 10% of documents held out as the test split.
    x_test, x_train = train_test_split(words_li, 0.1)
    # Create a dictionary representation of the documents.
    # Filter out words that occur less than 100 documents
    dictionary = corpora.Dictionary(x_train)
    dictionary.filter_extremes(no_below=100)
    # Bag-of-words features: each document becomes a list of (id, count).
    train_features = [dictionary.doc2bow(word) for word in x_train]
    test_features = [dictionary.doc2bow(word) for word in x_test]
    # Training models
    print('Training starts')
    # unsupervised LDA
    topic_li = []
    train_log_perplex_li = []
    test_log_perplex_li = []
    no_top_words = 20
    #no_topics = 150 # change the number based on different contributors, file length and etc.
    for i in [10,50,100,150,200,250,300,400]:
        print('Topic number:', i)
        model = gensim.models.ldamodel.LdaModel(train_features, num_topics=i, id2word = dictionary, passes=8)
        # Dump topic-word tables to CSV for two representative model sizes.
        if i == 10 or i == 150:
            data = model.print_topics(num_topics=-1, num_words=no_top_words)
            print(data)
            with open('topic_word_' + str(i) + '.csv','w') as out:
                csv_out = csv.writer(out)
                csv_out.writerow(['Topic_Number','Words'])
                for row in data:
                    csv_out.writerow(row)
                # redundant: the `with` block already closes the file
                out.close()
        topic_li.append(i)
        # Train-split perplexity: model.bound gives the variational bound;
        # np.exp2 of the negative per-token bound is the per-word perplexity.
        perplex = model.bound(train_features)
        print('Perplexity: %s'%perplex)
        per_word_perplex = np.exp2(-perplex / sum(cnt for document in train_features for _, cnt in document))
        print('Per-word Perplexity: %s' % per_word_perplex)
        train_log_perplex_li.append(per_word_perplex)
        # Same measurement on the held-out test split.
        perplex = model.bound(test_features)
        print('Perplexity: %s'%perplex)
        per_word_perplex = np.exp2(-perplex / sum(cnt for document in test_features for _, cnt in document))
        print('Per-word Perplexity: %s' % per_word_perplex)
        test_log_perplex_li.append(per_word_perplex)
    print('Training ends')
    # plotting: flag 0 -> train curve, flag 1 -> test curve
    plot(topic_li, train_log_perplex_li, 0)
    plot(topic_li, test_log_perplex_li, 1)
    ###### GENSIM ######
if __name__=='__main__':
main() | [
"noreply@github.com"
] | noreply@github.com |
1eafdd1f445b525cf93c63c5472861c04502650d | 8bfdfde9886c85e5354bd97c9c754b821249c803 | /lib/OpenTokSDK.py | 1249027fddc8935ef371b1780db4885930e7a462 | [] | no_license | merrypuck/hotpot | 89587a715e4968c613f5fb67894f9f4985d1175c | 1debfa76ca206257c08a00710ba8c96b8c1b0635 | refs/heads/master | 2021-05-27T01:52:54.682419 | 2012-05-12T04:29:17 | 2012-05-12T04:29:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,821 | py |
"""
OpenTok Python Library v0.90.0
http://www.tokbox.com/
Copyright 2010, TokBox, Inc.
Last modified: 2011-10-12
"""
import urllib
import urllib2
import datetime
import calendar
import time
import hmac
import hashlib
import base64
import random
TIMEOUT = 10
class OpenTokException(BaseException):
    """Generic OpenTok Error. All other errors extend this."""
class RequestError(OpenTokException):
    """Indicates an error during the request. Most likely an error connecting to the OpenTok API servers. (HTTP 500 error)"""
class AuthError(OpenTokException):
    """Indicates that the problem was likely with credentials. Check your API key and API secret and try again"""
class SessionProperties(object):
    """Bag of optional session properties, iterable as (api-name, value) pairs."""
    # All properties default to None (unset).
    echoSuppression_enabled = None
    multiplexer_numOutputStreams = None
    multiplexer_switchType = None
    multiplexer_switchTimeout = None
    p2p_preference = None
    def __iter__(self):
        # Map attribute names to the dotted API parameter names.
        d= {'echoSuppression.enabled' : self.echoSuppression_enabled,
            'multiplexer.numOutputStreams' : self.multiplexer_numOutputStreams,
            'multiplexer.switchType' : self.multiplexer_switchType,
            'multiplexer.switchTimeout' : self.multiplexer_switchTimeout,
            'p2p.preference' : self.p2p_preference,
            }
        # Python 2 dict.iteritems(): lazily yields all pairs, None values included.
        return d.iteritems()
class RoleConstants:
    """List of valid roles for a token."""
    SUBSCRIBER = "subscriber"  # Can only subscribe
    PUBLISHER = "publisher"    # Can publish, subscribe, and signal
    MODERATOR = "moderator"    # Can do the above along with forceDisconnect and forceUnpublish
class OpenTokSession(object):
    """Lightweight holder for an OpenTok session id."""

    def __init__(self, session_id):
        self.session_id = session_id
class OpenTokSDK(object):
    """
    Use this SDK to create tokens and interface with the server-side portion of the Opentok API.
    """
    TOKEN_SENTINEL = "T1=="
    SDK_VERSION = "tbpy-v0.91.2011-10-12"
    API_URL = "https://staging.tokbox.com/hl"
    # Uncomment this line when you launch your app
    API_URL = "https://api.opentok.com/hl";
    def __init__(self, api_key, api_secret):
        # The secret is stripped because stray whitespace would corrupt the HMAC signature.
        self.api_key = api_key
        self.api_secret = api_secret.strip()
    def generate_token(self, session_id=None, role=None, expire_time=None, connection_data=None, **kwargs):
        """
        Generate a token which is passed to the JS API to enable widgets to connect to the Opentok api.
        session_id: Specify a session_id to make this token only valid for that session_id.
        role: One of the constants defined in RoleConstants. Default is publisher, look in the documentation to learn more about roles.
        expire_time: Integer timestamp. You can override the default token expire time of 24h by choosing an explicit expire time. Can be up to 7d after create_time.
        """
        create_time = datetime.datetime.utcnow()
        if session_id is None:
            session_id = ''
        if not role:
            role = RoleConstants.PUBLISHER
        data_params = dict(session_id=session_id,
            create_time=calendar.timegm(create_time.timetuple()),
            role=role,
            )
        if expire_time is not None:
            # Accept either a datetime or a raw numeric timestamp.
            if isinstance(expire_time, datetime.datetime):
                data_params['expire_time'] = calendar.timegm(expire_time.timetuple())
            else:
                data_params['expire_time'] = expire_time
            if type(data_params['expire_time']) != int and \
                type(data_params['expire_time']) != long and \
                type(data_params['expire_time']) != float:
                raise OpenTokException("Expire time must be a number")
            if data_params['expire_time'] < time.time():
                raise OpenTokException("Expire time must be in the future")
            # 604800 seconds = 7 days
            if data_params['expire_time'] > time.time() + 604800:
                raise OpenTokException("Expire time must be in the next 7 days")
        if connection_data is not None:
            if len(connection_data) > 1000:
                raise OpenTokException("Connection data must be less than 1000 characters")
            data_params['connection_data'] = connection_data
        # Random nonce makes every generated token unique.
        data_params['nonce'] = random.randint(0,999999)
        data_string = urllib.urlencode(data_params, True)
        sig = self._sign_string(data_string, self.api_secret)
        token_string = "%s%s" % (self.TOKEN_SENTINEL, base64.b64encode("partner_id=%s&sdk_version=%s&sig=%s:%s" % (self.api_key, self.SDK_VERSION, sig, data_string)))
        return token_string
    def create_session(self, location='', properties={}, **kwargs):
        """
        Create a new session in the OpenTok API. Returns an OpenTokSession object with a session_id property.
        location: IP address of the user requesting the session. This is used for geolocation to choose which datacenter the session will live on.
        properties: An instance of the SessionProperties object. Fill in the fields that you are interested in to use features of the groups API. Look in the documentation for more details. Also accepts any dict-like object.
        """
        #ip_passthru is a deprecated argument and has been replaced with location
        if 'ip_passthru' in kwargs:
            location = kwargs['ip_passthru']
        params = dict(api_key=self.api_key)
        params['location'] = location
        params.update(properties)
        dom = ''
        try:
            dom = self._do_request("/session/create", params)
        except RequestError:
            raise
        except Exception, e:
            raise RequestError("Failed to create session: %s" % str(e) )
        try:
            # An <error> element in the response indicates the server rejected the call.
            error = dom.getElementsByTagName('error')
            if error:
                error = error[0]
                raise AuthError("Failed to create session (code=%s): %s" % (error.attributes['code'].value, error.firstChild.attributes['message'].value))
            session_id = dom.getElementsByTagName('session_id')[0].childNodes[0].nodeValue
            return OpenTokSession(session_id)
        except Exception, e:
            raise OpenTokException("Failed to generate session: %s" % str(e))
    def _sign_string(self, string, secret):
        # HMAC-SHA1 signature over the urlencoded parameter string.
        return hmac.new(secret, string.encode("utf-8"), hashlib.sha1).hexdigest()
    def _do_request(self, url, params):
        # Perform one HTTP request to the OpenTok API and return the parsed XML DOM.
        import xml.dom.minidom as xmldom
        if '_token' in params: #Do token auth if _token is present, partner auth normally
            auth_header = ('X-TB-TOKEN-AUTH', params['_token'])
            del params['_token']
        else:
            auth_header = ('X-TB-PARTNER-AUTH', "%s:%s" % (self.api_key, self.api_secret))
        method = "POST" if params else "GET"
        data_string = urllib.urlencode(params, True)
        context_source = [
            ('method', method),
            ('Content-Type', 'application-xml'),
            ('Content-Length', len(data_string)),
            auth_header
        ]
        req_string = self.API_URL + url
        try:
            opener = urllib2.build_opener()
            opener.addheaders = context_source
            if data_string:
                request = urllib2.Request(url=req_string, data=data_string)
            else: #GET if no data_string
                request = urllib2.Request(url=req_string)
            try:
                response = opener.open(request, timeout=TIMEOUT)
            except TypeError: #Python2.6 added the timeout keyword, if it doesn't get accepted, try without it
                response = opener.open(request)
            dom = xmldom.parseString(response.read())
            response.close()
        except urllib2.HTTPError, e:
            raise RequestError("Failed to send request: %s" % str(e))
        return dom
| [
"t@tinabeans.com"
] | t@tinabeans.com |
4c308631d8558f3143ccd5dbbebe96c450af9f82 | 5b10b05d22c17836aa139239bd6d2e0c7c7d8427 | /5c1t/Algebra in Algorithms/task-10/all.py | 6a21a428e54addf4dfa6a630c2299db51c4abf75 | [] | no_license | a1ip/mipt-materials | f362218c095df61a9ade282726009909589c0e23 | 5e9d8cc5d32922e939d2f4c30d0250bb5352699f | refs/heads/master | 2023-03-21T12:54:21.519985 | 2018-05-21T17:33:37 | 2018-05-21T17:36:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,728 | py | from copy import deepcopy
import numpy as np
from collections import Counter, namedtuple
class Node:
    """Abstract circuit gate; concrete subclasses implement evaluate()."""
    def evaluate(self, *args, **kwargs):
        raise NotImplementedError()
class InputNode(Node):
    """Leaf gate carrying the name of a circuit input variable."""
    def __init__(self, name):
        self._name = name
    def evaluate(self, *args, **kwargs):
        # Input gates perform no computation; their value comes from outside.
        pass
    def get_name(self):
        return self._name
class NotNode(Node):
    """Single-input NOT gate."""
    def evaluate(self, x, *args, **kwargs):
        return not x
class AndNode(Node):
    """Two-input AND gate."""
    def evaluate(self, x, y, *args, **kwargs):
        # Mirrors Python's `and`: returns x when x is falsy, otherwise y.
        return y if x else x
class OrNode(Node):
    """Two-input OR gate."""
    def evaluate(self, x, y, *args, **kwargs):
        # Mirrors Python's `or`: returns x when x is truthy, otherwise y.
        return x if x else y
class LogicalCircuit:
    """Boolean circuit represented as a DAG: a list of Node gates plus a
    parallel list of per-gate input-index lists."""
    def __init__(self, nodes, inputs, terminal_node=None, eliminate_or_nodes=False):
        self._nodes = nodes
        self._terminal_node = terminal_node or (len(nodes) - 1)
        self._inputs = inputs
        if eliminate_or_nodes:
            # Rewrite OR gates so the circuit contains only NOT/AND gates
            # (required by the Barrington construction downstream).
            self._eliminate_or_nodes()
    def get_nodes(self):
        return self._nodes
    def get_inputs(self):
        return self._inputs
    def get_terminal_node(self):
        return self._terminal_node
    def copy(self):
        return LogicalCircuit(*map(deepcopy, [self._nodes, self._inputs, self._terminal_node]))
    def get_topological_order(self):
        # Post-order DFS over input edges yields a valid topological order
        # (all of a gate's inputs appear before the gate itself).
        def traverse_dfs(node, visited, nodes_black):
            visited[node] = True
            for node_input in self._inputs[node]:
                if not visited[node_input]:
                    traverse_dfs(node_input, visited, nodes_black)
            nodes_black.append(node)
        topological_order = []
        is_visited = [False] * len(self._nodes)
        for node_idx in range(len(self._nodes)):
            if not is_visited[node_idx]:
                traverse_dfs(node_idx, is_visited, topological_order)
        return topological_order
    def _add_node(self, node, inputs=None):
        # Append a new gate and return its index.
        inputs = inputs or []
        self._nodes.append(node)
        self._inputs.append(inputs)
        return len(self._nodes) - 1
    def _eliminate_or_nodes(self):
        # De Morgan: x OR y == NOT(NOT x AND NOT y). Each OR gate is turned
        # into a NOT whose input is a freshly appended AND of two NOTs.
        node_indices = list(range(len(self._nodes)))
        for node_idx in node_indices:
            node = self._nodes[node_idx]
            if isinstance(node, OrNode):
                inputs = self._inputs[node_idx]
                node_not_x_idx = self._add_node(NotNode(), [inputs[0]])
                node_not_y_idx = self._add_node(NotNode(), [inputs[1]])
                node_and_idx = self._add_node(AndNode(), [node_not_x_idx, node_not_y_idx])
                self._nodes[node_idx] = NotNode()
                self._inputs[node_idx] = [node_and_idx]
        return self
class Permutation:
    """Permutation of {0..n-1} backed by a numpy index array.

    Fix: ``np.int`` (a deprecated alias of the builtin ``int``, removed in
    NumPy 1.24) is replaced by ``int`` — behavior is identical on all NumPy
    versions.
    """
    @staticmethod
    def identity(size):
        """Return the identity permutation on `size` elements."""
        return Permutation(np.fromiter(range(size), dtype=int))
    def __init__(self, permutation):
        # Reject arrays with repeated elements — they are not permutations.
        if len(Counter(permutation)) < permutation.shape[0]:
            raise ValueError('Elements of the permutation are not unique')
        self._p = np.array(permutation)
    def __mul__(self, right):
        # Composition: (self * right)[x] == self[right[x]].
        result = np.zeros(self._p.shape, dtype=int)
        for x in range(self._p.shape[0]):
            result[x] = self[right[x]]
        return Permutation(result)
    def __call__(self, elements, *args, **kwargs):
        # Apply the permutation to an array, moving elements[i] to position p[i].
        result = np.zeros(elements.shape, dtype=elements.dtype)
        for idx, element in enumerate(elements):
            result[self._p[idx]] = element
        return result
    def __eq__(self, other):
        return np.all(self._p == other.get())
    def __getitem__(self, item):
        return self._p[item]
    def __len__(self):
        return len(self._p)
    def is_identity(self):
        """True when this permutation maps every element to itself."""
        return self.identity(len(self._p)) == self
    def get(self):
        return self._p
    def invert(self):
        """Return the inverse permutation (self * self.invert() is identity)."""
        result = np.zeros(self._p.shape, dtype=self._p.dtype)
        for x in range(self._p.shape[0]):
            result[self._p[x]] = x
        return Permutation(result)
    def calc_conjugate(self, other):
        """Find gamma with other == gamma * self * gamma^-1.

        Walks both cycles in lockstep; presumably both permutations are
        single cycles of equal length — TODO confirm with callers.
        """
        result = np.zeros(self._p.shape, dtype=self._p.dtype)
        x_from, x_to = 0, 0
        for i in range(self._p.shape[0]):
            if i > 0:
                x_from, x_to = self._p[x_from], other[x_to]
            result[x_from] = x_to
        assert other == Permutation(result) * self * Permutation(result).invert()
        return Permutation(result)
class BranchingProgram:
    """Layered branching program: `graph[node]` maps a boolean edge label to
    the next node, `labels[node]` is either a variable name (internal node)
    or a bool (output sink)."""
    @staticmethod
    def build_from_permuting_branching_program(pbp_program, remove_unreachable=True, reduce_outputs=True):
        # Unroll a width-|sigma| permuting BP into an explicit layered graph:
        # one layer of |sigma| nodes per instruction plus a final output layer.
        sigma = pbp_program.get_sigma()
        # Entry point: any position actually moved by sigma distinguishes
        # "accept" (moved) from "reject" (fixed) at the final layer.
        node_input = next(node for node in range(len(sigma)) if node != sigma[node])
        node_count = ((len(pbp_program.instructions) + 1) * len(sigma))
        graph = [None] * node_count
        # All final-layer nodes default to the True sink ...
        node_labels = [None if node < node_count - len(sigma) else True for node in range(node_count)]
        # ... except the one reached when the overall permutation is identity.
        node_output_false = len(pbp_program.instructions) * len(sigma) + node_input
        node_labels[node_output_false] = False
        # Instructions are traversed in reverse so layer 0 is the entry layer.
        for layer_idx, (var, perm_false, perm_true) in enumerate(pbp_program.instructions[::-1]):
            for node_idx_in_layer in range(len(sigma)):
                node = layer_idx * len(sigma) + node_idx_in_layer
                graph[node] = {
                    False: (layer_idx + 1) * len(sigma) + perm_false[node_idx_in_layer],
                    True: (layer_idx + 1) * len(sigma) + perm_true[node_idx_in_layer]
                }
                node_labels[node] = var
        # Sinks have no outgoing edges.
        graph[-len(sigma):] = [{False: None, True: None} for _ in range(len(sigma))]
        return BranchingProgram(graph, node_labels, node_input, remove_unreachable, reduce_outputs)
    def __init__(self, graph, labels, node_input=0, remove_unreachable=True, reduce_outputs=True):
        self._graph = graph
        self._node_labels = labels
        self._node_input = node_input
        if remove_unreachable:
            self._remove_unreachable()
        if reduce_outputs:
            self._reduce_outputs()
    def _remove_unreachable(self):
        # Keep only nodes reachable from the entry node, then renumber
        # compactly (entry node becomes 0).
        def traverse_dfs(node, visited):
            visited[node] = True
            for node_target in self._graph[node].values():
                if node_target is not None and not visited[node_target]:
                    traverse_dfs(node_target, visited)
        is_reachable = [False] * len(self._graph)
        traverse_dfs(self._node_input, is_reachable)
        nodes_reachable = [node for node in range(len(self._graph)) if is_reachable[node]]
        node_renumbering = {node: node_idx_new for node_idx_new, node in enumerate(nodes_reachable)}
        graph = [{
            False: node_renumbering[self._graph[node][False]] if self._graph[node][False] is not None else None,
            True: node_renumbering[self._graph[node][True]] if self._graph[node][True] is not None else None,
        } for node in nodes_reachable]
        node_labels = [self._node_labels[node] for node in nodes_reachable]
        self._graph = graph
        self._node_labels = node_labels
        self._node_input = 0
    def _reduce_outputs(self):
        # Collapse all boolean sinks into exactly one False sink and one True
        # sink, re-pointing every edge accordingly.
        outputs = {node for node in range(len(self._graph)) if isinstance(self._node_labels[node], bool)}
        output_false = next(output for output in outputs if not self._node_labels[output])
        outputs_true = outputs - {output_false}
        output_false_new = min(outputs)
        output_true_new = output_false_new + 1
        self._graph = self._graph[:output_true_new + 1]
        for node in range(len(self._graph)):
            for edge_type in [False, True]:
                if self._graph[node][edge_type] in outputs_true:
                    self._graph[node][edge_type] = output_true_new
                elif self._graph[node][edge_type] == output_false:
                    self._graph[node][edge_type] = output_false_new
        self._node_labels = self._node_labels[:len(self._graph)]
        self._node_labels[output_false_new], self._node_labels[output_true_new] = False, True
    def get_labels(self):
        return self._node_labels
    def get_graph(self):
        return self._graph
    def get_input(self):
        return self._node_input
    def evaluate(self, var_values):
        # Walk from the entry node, following the edge labelled by each
        # variable's value, until a boolean sink is reached.
        node = self._node_input
        while not isinstance(self._node_labels[node], bool):
            node = self._graph[node][var_values[self._node_labels[node]]]
        return self._node_labels[node]
class PermutingBranchingProgram:
    """Barrington-style width-5 permuting branching program.

    Each Instruction reads one variable and applies `perm_false` or
    `perm_true` depending on its value; the program accepts when the composed
    permutation equals `sigma` and rejects when it is the identity.
    """
    Instruction = namedtuple('Instruction', ['var', 'perm_false', 'perm_true'])
    @staticmethod
    def build_from_circuit(circuit, sigma=None):
        # Translate a NOT/AND circuit bottom-up (topological order) into a
        # permuting BP per gate; the terminal gate's program is the result.
        node_to_program = {}
        sigma = sigma or Permutation(np.array([1, 2, 3, 4, 0]))  # a 5-cycle
        topological_order = circuit.get_topological_order()
        for node_idx in topological_order:
            node = circuit.get_nodes()[node_idx]
            if isinstance(node, InputNode):
                # Single instruction: identity on False, sigma on True.
                node_to_program[node_idx] = PermutingBranchingProgram([
                    PermutingBranchingProgram.Instruction(node.get_name(),
                                                          Permutation.identity(5),
                                                          sigma)
                ], sigma)
            elif isinstance(node, NotNode):
                node_input = circuit.get_inputs()[node_idx][0]
                node_to_program[node_idx] = node_to_program[node_input].invert()
            elif isinstance(node, AndNode):
                node_inputs = circuit.get_inputs()[node_idx]
                node_to_program[node_idx] = node_to_program[node_inputs[0]].intersect(node_to_program[node_inputs[1]])
            else:
                raise ValueError('Unsupported node type')
        return node_to_program[circuit.get_terminal_node()]
    def __init__(self, instructions, final_permutation):
        self.instructions = instructions
        self._sigma = final_permutation
    def get_sigma(self):
        return self._sigma
    def change_sigma(self, sigma_new):
        # Re-target the program so it computes sigma_new instead of sigma,
        # by conjugating with gamma where sigma_new == gamma*sigma*gamma^-1.
        instructions = [None] * len(self.instructions)
        if len(instructions) == 1:
            # Single-instruction program: just swap in the new permutation on
            # whichever branch carried the old sigma.
            if self.instructions[0].perm_false == Permutation.identity(len(self.instructions[0].perm_false.get())):
                instructions[0] = self.Instruction(self.instructions[0].var,
                                                   self.instructions[0].perm_false,
                                                   sigma_new)
            else:
                instructions[0] = self.Instruction(self.instructions[0].var,
                                                   sigma_new,
                                                   self.instructions[0].perm_true)
        else:
            # Multiply gamma into the first instruction and gamma^-1 into the
            # last, leaving everything in between untouched.
            gamma = self._sigma.calc_conjugate(sigma_new)
            gamma_inverted = gamma.invert()
            instructions[0] = self.Instruction(self.instructions[0].var,
                                               gamma * self.instructions[0].perm_false,
                                               gamma * self.instructions[0].perm_true)
            instructions[1:-1] = self.instructions[1:-1]
            instructions[-1] = self.Instruction(self.instructions[-1].var,
                                                self.instructions[-1].perm_false * gamma_inverted,
                                                self.instructions[-1].perm_true * gamma_inverted)
        return PermutingBranchingProgram(instructions, sigma_new)
    def invert(self):
        # NOT gate: compute sigma^-1 then post-multiply by sigma, so accept
        # and reject (sigma vs identity) swap roles.
        instructions = self.change_sigma(self._sigma.invert()).instructions
        instructions[-1] = self.Instruction(instructions[-1].var,
                                            instructions[-1].perm_false * self._sigma,
                                            instructions[-1].perm_true * self._sigma)
        return PermutingBranchingProgram(instructions, self._sigma)
    def intersect(self, other, preserve_sigma=True, non_commuting_sigma=None):
        # AND gate via the commutator: [sigma1, sigma2] is non-identity only
        # when both subprograms accept (requires non-commuting sigmas).
        sigma_inverted = self._sigma.invert()
        other_sigma_inverted = other.get_sigma().invert()
        if (self._sigma * other.get_sigma() * sigma_inverted * other_sigma_inverted).is_identity():
            # The sigmas commute; re-target `other` onto a non-commuting 5-cycle.
            non_commuting_sigma = non_commuting_sigma or Permutation(np.array([2, 4, 1, 0, 3]))
            other_sigma_inverted = non_commuting_sigma.invert()
            if (self._sigma * non_commuting_sigma * sigma_inverted * other_sigma_inverted).is_identity():
                raise ValueError('Commuting sigma provided')
            other = other.change_sigma(non_commuting_sigma)
        # Concatenate: P1, P2, P1^-1, P2^-1 — the commutator program.
        instructions = [None] * (2 * (len(self.instructions) + len(other.instructions)))
        left, right = 0, len(self.instructions)
        instructions[left:right] = self.instructions
        left, right = right, right + len(other.instructions)
        instructions[left:right] = other.instructions
        left, right = right, right + len(self.instructions)
        instructions[left:right] = self.change_sigma(sigma_inverted).instructions
        left, right = right, right + len(other.instructions)
        instructions[left:right] = other.change_sigma(other_sigma_inverted).instructions
        result = PermutingBranchingProgram(instructions,
                                           self._sigma * other.get_sigma() * sigma_inverted * other_sigma_inverted)
        if preserve_sigma:
            result = result.change_sigma(self._sigma)
        return result
def parse_node(node_str):
    """Parse one circuit-description line into (gate, input-index list).

    Format: "<TYPE> [name-if-VAR] <input indices...>".
    Raises ValueError for an unknown gate type.
    """
    tokens = node_str.split()
    kind = tokens[0]
    inputs_start_idx = 1
    if kind == 'VAR':
        node = InputNode(tokens[1])
        inputs_start_idx = 2
    elif kind == 'OR':
        node = OrNode()
    elif kind == 'AND':
        node = AndNode()
    elif kind == 'NOT':
        node = NotNode()
    else:
        raise ValueError('Wrong node type in input')
    return node, [int(token) for token in tokens[inputs_start_idx:]]
def solve(nodes, node_inputs):
    # Pipeline: NOT/AND-only circuit -> width-5 permuting BP (Barrington's
    # theorem) -> plain branching program with reduced outputs.
    circuit = LogicalCircuit(nodes, node_inputs, len(nodes) - 1, eliminate_or_nodes=True)
    permuting_program = PermutingBranchingProgram.build_from_circuit(circuit)
    return BranchingProgram.build_from_permuting_branching_program(permuting_program)
if __name__ == '__main__':
    # Read the circuit: first line is the gate count, then one gate per line.
    node_count = int(input())
    node_inputs = [None] * node_count
    nodes = [None] * node_count
    for idx in range(node_count):
        nodes[idx], node_inputs[idx] = parse_node(input())
    branching_program = solve(nodes, node_inputs)
    graph, labels = branching_program.get_graph(), branching_program.get_labels()
    # Print each node: a bare TRUE/FALSE for sinks, otherwise
    # "<var> <false-edge target> <true-edge target>".
    for node in range(len(graph)):
        label = labels[node]
        if isinstance(label, bool):
            label = str(label).upper()
            print(f'{label}')
        else:
            print(f'{label} {graph[node][False]} {graph[node][True]}')
| [
"sautin1@yandex.ru"
] | sautin1@yandex.ru |
da82a7c906d4c100176b5994979f58c4b7d290da | ebc356d79226ffbb56438436b4abd13dc0594767 | /2018/pset6/cash/cash.py | e0e3f0d9757e1fe739dc7b043414089485a2c071 | [] | no_license | KevinStoneCode/CS50_2018 | 2f7aafe5b7f21bbecd3f91c3fa9b2827bc646df4 | 4030b9c9d884e30350d77466d82443a9de955840 | refs/heads/master | 2022-08-29T08:11:22.732539 | 2018-09-06T10:12:59 | 2018-09-06T10:12:59 | 145,199,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | from cs50 import get_float
# Prompt until the user supplies a non-negative amount of change.
change = get_float("Change owed: ")
while change < 0:
    change = get_float("Change owed: ")

# Work in integer cents to avoid float rounding issues, then count coins
# greedily from the largest denomination down (quarters, dimes, nickels,
# pennies) — optimal for US coinage.
cents = round(change * 100)
coins = 0
for denomination in (25, 10, 5, 1):
    used, cents = divmod(cents, denomination)
    coins += used
print(coins)
"kwshih0212@gmail.com"
] | kwshih0212@gmail.com |
6b340d1d189577e17193f55e983367c6b759bdae | 459cc494fc3555b52c12b1861db0abfbd07a54b5 | /0x07-python-test_driven_development/2-main.py | 5af2d947114baf0589dcb6a498621a89a69bd5d4 | [] | no_license | ymcastellar/holbertonschool-higher_level_programming | a5dd910d9b1c52d21a2bed557147c3c9a5aa3ccf | f5bd4d7e6b7ada991b5d81cea2bb14212abb8888 | refs/heads/master | 2022-12-22T13:47:07.154798 | 2020-09-24T21:17:20 | 2020-09-24T21:17:20 | 259,443,832 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | #!/usr/bin/python3
# The module name starts with a digit, so it must be loaded via __import__.
matrix_divided = __import__('2-matrix_divided').matrix_divided
matrix = [
    [1, 2, 3],
    [4, 5, 6]
]
print(matrix_divided(matrix, 3))
# Printing the original matrix between calls checks that matrix_divided
# does not mutate its argument.
print(matrix)
print(matrix_divided(matrix, 3))
"yoycas@hotmail.com"
] | yoycas@hotmail.com |
0c1a3d07a07d072f99c7a29312d13587a4198ea3 | f049ed97c00301ac9400bad7d53ad35909837ec2 | /doc_extractor/extractor/views_19.1.2021.py | c9dddd4be533a18412baf89a396f71f07555c397 | [] | no_license | vijay867777/sgk_git | 54afb4f826e65c8ef3f04698fc6fdda29e01587a | cc8856ebfd3639266bc2260f3d6a63047b50ff8a | refs/heads/main | 2023-02-24T22:08:55.310228 | 2021-02-03T07:12:17 | 2021-02-03T07:12:17 | 335,535,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,854 | py | # Django Libraries
# import concurrent.futures
from django.shortcuts import render
from django.http import HttpResponse , JsonResponse
from .models import *
# Custom Libraries
import pandas as pd
import numpy as np
import joblib
from laserembeddings import Laser
# from sentence_transformers import SentenceTransformer , models
from sklearn.neural_network import MLPClassifier # works great -- neural network
from langid import classify
from langdetect import detect
# from fastlangid.langid import LID
import os
import re
import pdfplumber
from docx2json import convert
import json
from docx import Document
import smbclient
import mammoth
from bs4 import BeautifulSoup
# Rest framework import
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.decorators import api_view,permission_classes,authentication_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import SessionAuthentication, BasicAuthentication , TokenAuthentication
# Other file import
from environment import MODE
if MODE == 'local':
from .local_constants import *
else:
from .dev_constants import *
from .excel_processing import *
from .msd_processing import *
# Label set emitted by the food/packaging classifier; doc_extractor keeps
# only predictions whose (lowercased) label appears here.
categories = ['nutrition','ingredients','allergen statement','shelf_life_statement',
              'storage instruction','address',
              # 'warning statement',
              "gtin_number","serial_number","lot_number","expiry_date",'form_content',
              'usage instruction','pc_number','general classification',"eu_number"]
# Label set for the MSD (pharma leaflet) section classifier.
msd_categories = ['name','active_substance','excipients','form_content','method_route','warning','expiry_date',
                  'storage_instructions','precautions','marketing_company','unique_identifier','classification',
                  'usage_instruction','braille_info','mfg_date','manufacturer','packing_site','appearance',
                  'product_info','label_dosage','box_info']
# Initialize Laser
# model_path = r"/Users/VIJAYKANAGARAJ/PycharmProjects/Schawk_document_xml/labse"
# model = SentenceTransformer(model_path)
# Initialize Laser
# Shared multilingual sentence encoder used by all classifiers below.
laser = Laser(path_to_bpe_codes,path_to_bpe_vocab,path_to_encoder)
# langid = LID()
# @authentication_classes([SessionAuthentication, BasicAuthentication])
# @api_view()
# @permission_classes([IsAuthenticated])
# @authentication_classes([TokenAuthentication])
# def extractor(request):
# content = {'message': 'Hello, World!'}
# return Response(content)
# return render(request,'extractor/index.html')
# Create your views here.
def model_training():
    """Train, persist and return the MLP label-content classifier.

    Reads the labelled spreadsheet at `input_excel`, embeds the text column
    with LASER, fits an MLP and dumps it to `model_location`.
    """
    training_df = pd.read_excel(input_excel).sample(frac=1)  # shuffle rows
    embeddings = laser.embed_sentences(training_df['text'], lang='en')
    network = MLPClassifier(hidden_layer_sizes=(70,), solver='adam', max_iter=500,
                            activation='tanh', random_state=0, shuffle=True)
    network.fit(embeddings, training_df['category'])
    joblib.dump(network, model_location)
    return network
def classifier(request):
    """Django view: classify the `text` query parameter and render the result.

    Renders the input form when no text is supplied. Loads the persisted
    model (training it on first use), embeds the text with LASER in the
    detected language, and accepts the top prediction only when it is well
    separated from the runner-up probability.
    """
    text = request.GET.get('text','')
    if text:
        pass
    else:
        return render(request, 'extractor/index_classifier.html')
    model = None
    if os.path.exists(model_location):
        model = joblib.load(model_location)
    else:
        model = model_training()
    # lang_detected = detect(text)
    lang_detected = classify(text)[0]
    # print('lang----->',lang_detected)
    # print(text)
    prediction = model.predict(laser.embed_sentences([text],lang=lang_detected))
    probability = model.predict_proba(laser.embed_sentences([text],lang=lang_detected))
    # Ascending in-place sort: the winner is last, runner-up second to last.
    probability[0].sort()
    max_probability = max(probability[0])
    # Accept only when the winner leads the runner-up by more than 0.35.
    if (max_probability-0.35) > probability[0][-2]:
        pred_output = prediction[0]
    else:
        pred_output = 'None'
    # print(probability)
    print('{}-------------->{}'.format(max(probability[0]),pred_output))
    result = {'probability':max(probability[0]),'output':pred_output,'actual_output':prediction[0],'text':text}
    # return HttpResponse(pred_output)
    # return render(request,'extractor/doc_result.html',{'result':dict})
    return render(request,'extractor/index_result.html',result)
def prediction(text):
    """Classify one sentence into a label-content category.

    Returns a dict with the top probability, the thresholded output
    ('None' when confidence <= 0.63) and the raw model prediction.
    """
    model = joblib.load(model_location) if os.path.exists(model_location) else model_training()
    lang_detected = classify(text)[0]
    print(text)
    # LASER embedding is deterministic, so one embedding serves both calls.
    embedding = laser.embed_sentences([text], lang=lang_detected)
    predicted = model.predict(embedding)
    probabilities = model.predict_proba(embedding)
    probabilities[0].sort()
    top_probability = max(probabilities[0])
    pred_output = predicted[0] if top_probability > 0.63 else 'None'
    print('{}-------------->{}'.format(top_probability, pred_output))
    return {'probability': top_probability, 'output': pred_output, 'actual_output': predicted[0]}
def doc_extractor(request):
    """Django view: extract labelled categories from a PDF or DOCX document.

    Expects the document file name in the ``file`` query parameter and
    renders the upload form when it is absent. PDFs are processed either
    table-by-table (3+ tables on the first page) or as free text; DOCX/DOC
    files are converted to JSON first. Each sentence is run through the
    regex parsers (unique identifiers) and the ML classifier, and only
    labels present in `categories` are rendered.

    Fixes: both PDF branches called ``Regex_parsers(sentence)`` without the
    required ``regex_patterns`` argument (TypeError at runtime), and the
    'Nutrition' trim raised KeyError when no nutrition rows were found.
    """
    final = {}
    file_name = request.GET.get('file', 'no file')
    if file_name == 'no file':
        return render(request, 'extractor/index.html')
    file = document_location + file_name
    doc_format = os.path.splitext(file_name)[1].lower()
    if doc_format == ".pdf":
        if not os.path.exists(file):
            return HttpResponse('File not found')
        pdf = pdfplumber.open(file)
        no_of_pages = len(pdf.pages)
        tables = len(pdf.pages[0].extract_tables())
        if tables > 2:
            print('type 1 --- tables')
            for page_no in range(no_of_pages):
                page = pdf.pages[page_no]
                extracted_table = page.extract_tables()
                # Flatten each table row into one space-joined sentence.
                text = [" ".join(list(filter(None, content))).replace('\n', ' ')
                        for table in extracted_table for content in table]
                for sentence in text:
                    # Fix: pass the pattern dict (was missing, raising TypeError).
                    unique_identifiers = Regex_parsers(sentence, regex_patterns)
                    if unique_identifiers:
                        final = {**final, **unique_identifiers}
                    result = prediction(sentence)['output']
                    if result != 'None':
                        final.setdefault(result, []).append(sentence)
            # Drop the trailing nutrition row (usually a table footer), but
            # never crash when the key is absent.
            for nutrition_key in ('Nutrition', 'nutrition'):
                if nutrition_key in final and len(final[nutrition_key]) > 1:
                    final[nutrition_key] = final[nutrition_key][:-1]
            extracted_categories = {key: val for key, val in final.items() if key.lower() in categories}
            return render(request, 'extractor/doc_result.html', {'result': extracted_categories})
        else:
            print('type-2-paragraph')
            for page_no in range(no_of_pages):
                page = pdf.pages[page_no]
                extracted_text = page.extract_text()
                text = sentence_tokennizer(extracted_text)
                for sentence in text:
                    # Fix: pass the pattern dict (was missing, raising TypeError).
                    unique_identifiers = Regex_parsers(sentence, regex_patterns)
                    if unique_identifiers:
                        final = {**final, **unique_identifiers}
                    result = prediction(sentence)['output']
                    final.setdefault(result, []).append(sentence)
            extracted_categories = {key: val for key, val in final.items() if key.lower() in categories}
            return render(request, 'extractor/doc_result.html', {'result': extracted_categories})
    elif doc_format in ('.docx', '.doc'):
        doc = convert(file, sepBold=True)
        doc_to_json = json.loads(doc)
        # Prefer the non-bold text; fall back to the full text when empty.
        text = doc_to_json['nonbold'] or doc_to_json['text']
        for sentence in text:
            unique_identifiers = Regex_parsers(sentence, regex_patterns)
            if unique_identifiers:
                final = {**final, **unique_identifiers}
            result = prediction(sentence)['output']
            final.setdefault(result, []).append(sentence)
        extracted_categories = {key: val for key, val in final.items() if key.lower() in categories}
        return render(request, 'extractor/doc_result.html', {'result': extracted_categories})
    else:
        return HttpResponse('This file format not supported currently')
def sentence_tokennizer(text):
    """Split extracted PDF text into rough sentence tokens.

    First breaks the text on blank-ish lines (newline, one whitespace char,
    newline), then splits each segment on a period followed by whitespace
    and a newline. Returns the flattened list of pieces.
    """
    pieces = []
    for segment in re.split(r"\n\s\n", text):
        pieces.extend(re.split(r"\.\s\n", segment))
    return pieces
def Regex_parsers(text, regex_patterns):
    """Scan `text` with every named pattern and collect the first match.

    Returns {pattern_name: [stripped first match]} for patterns that hit;
    names with no match (or a whitespace-only match) are omitted.
    """
    unique_number = {}
    print('regex---->', text)
    flags = re.IGNORECASE | re.MULTILINE
    for key, pattern in regex_patterns.items():
        matches = re.findall(pattern, text, flags)
        if matches:
            matches = str(matches[0]).strip()
        if matches:
            print("---------************{}".format(matches))
            unique_number[key] = [matches]
    return unique_number
def Regex_parsers_generator(text, regex_patterns):
    """Yield (pattern_name, first_match) for every pattern hit in each entry.

    `text` is an iterable of strings; each string is tried against every
    pattern, so a name can be yielded more than once.
    """
    print('regex---->', text)
    flags = re.IGNORECASE | re.MULTILINE
    for entry in text:
        for name, pattern in regex_patterns.items():
            hits = re.findall(pattern, entry, flags)
            if hits:
                yield name, hits[0]
def msd_data_extractor(list, regex_heading_msd):
    """Group consecutive lines under the last heading that matched the regex.

    Lines before the first heading are dropped unless they include the final
    line. NOTE: a section closed by a later heading is joined with newlines,
    while the final section is joined with spaces — preserved as-is.
    """
    sections = {}
    buffer = []
    heading = ''
    last_idx = len(list) - 1
    for idx, raw in enumerate(list):
        line = str(raw)
        if re.findall(regex_heading_msd, line):
            try:
                if heading != '':
                    sections[heading] = '\n'.join(buffer)
                heading = line
                buffer.clear()
            except:
                pass
        else:
            buffer.append(line)
            if idx == last_idx:
                sections[heading] = ' '.join(buffer)
    return sections
def msd_prediction(text):
    """Classify an MSD leaflet section heading.

    Returns a dict with the top probability, the thresholded output
    ('None' when confidence <= 0.60) and the raw model prediction.
    """
    if os.path.exists(msd_model_location):
        model = joblib.load(msd_model_location)
    else:
        model = msd_model_training()
        print('new model trained')
    lang_detected = classify(text)[0]
    print(text)
    # LASER embedding is deterministic, so one embedding serves both calls.
    embedding = laser.embed_sentences([text], lang=lang_detected)
    predicted = model.predict(embedding)
    probabilities = model.predict_proba(embedding)
    probabilities[0].sort()
    top_probability = max(probabilities[0])
    pred_output = predicted[0] if top_probability > 0.60 else 'None'
    print('{}-------------->{}'.format(top_probability, pred_output))
    return {'probability': top_probability, 'output': pred_output, 'actual_output': predicted[0]}
def msd_model_training():
    """Train, persist and return the MSD section classifier.

    Reads the labelled spreadsheet at `msd_input_excel`, embeds the text
    column with LASER, fits an MLP and dumps it to `msd_model_location`.
    """
    training_df = pd.read_excel(msd_input_excel).sample(frac=1)  # shuffle rows
    embeddings = laser.embed_sentences(training_df['text'], lang='en')
    network = MLPClassifier(hidden_layer_sizes=(70,), solver='adam', max_iter=500,
                            activation='tanh', random_state=0, shuffle=True)
    network.fit(embeddings, training_df['category'])
    joblib.dump(network, msd_model_location)
    return network
# @api_view()
# @permission_classes([IsAuthenticated])
# @authentication_classes([TokenAuthentication])
def msd(request):
    """Django view: extract classified sections from one or more MSD DOCX files.

    For each file name in the ``file`` query parameters: extracts
    heading->text sections, classifies each heading, pulls unique
    identifiers via regex, splits multilingual text on the '$$' separator
    and tags each fragment with its detected language. Returns a JSON dict
    keyed by the file's position in the request.
    """
    final_json = {}
    # getting value from query string
    file_name_list = request.GET.getlist('file','no file')
    print('file_list',file_name_list)
    if file_name_list == 'no file':
        return render(request, 'extractor/index_msd.html')
        # return Response({'status':'0'})
    else:
        pass
    for file_index , file_name in enumerate(file_name_list):
        final = {}
        cate_tmp = {}
        lang_final = set()
        doc_format = os.path.splitext(file_name)[1].lower()
        if doc_format == '.docx':
            # Reading file from storage (local path or SMB share depending on MODE)
            if MODE == 'local':
                file = document_location + file_name
                extracted = text_extraction(file)
            else:
                file = file_name
                extracted = text_extraction(file,method='SMB')
                # file = get_file_smb(r"{}".format(file_name))
            # Classify each section heading; group the section's text under the label.
            for key,value in extracted.items():
                if "".join(value).strip() != '':
                    result = msd_prediction(key)['output'] # classifier
                    if result != 'None':
                        if result in final.keys():
                            final[result].extend([val.replace("\n",' ').strip() for val in value])
                        else:
                            final[result] = [val.replace("\n",' ').strip() for val in value]
                    else:
                        pass
            # Pull GTIN/lot/serial/etc. out of the unique_identifier section.
            unique = {}
            if 'unique_identifier' in final:
                # unique = Regex_parsers(str(final['unique_identifier']),regex_patterns)
                for key , identifier in Regex_parsers_generator(final['unique_identifier'],regex_patterns):
                    unique[key] = [str(identifier).strip()]
                final.pop('unique_identifier')
            else:
                pass
            for cate , value in final.items():
                if cate in msd_categories_lang:
                    # Multilingual category: split on the '$$' paragraph marker and
                    # tag each fragment with its detected language. Fragments of
                    # <= 2 words are treated as topic prefixes and merged into the
                    # next long fragment.
                    for t in value:
                        if '$$' in t:
                            list_text = t.split('$$')
                            topic = ''
                            for index, text in enumerate(list_text):
                                text = text.replace('$$',' ')
                                if len(str(text).split()) > 2:
                                    # Long fragment: prepend any accumulated topic prefix.
                                    text = ' '.join((topic,text)).strip()
                                    topic = ''
                                    lang = detect(text)
                                    lang_final.add(lang)
                                    if cate in cate_tmp:
                                        cate_tmp[cate].append({lang: text})
                                    else:
                                        cate_tmp[cate] = [{lang: text}]
                                else:
                                    # Short fragment: keep accumulating; flush at the end.
                                    topic = ' '.join((topic,text)).strip()
                                    if index == len(list_text)-1:
                                        lang = detect(topic)
                                        lang_final.add(lang)
                                        if cate in cate_tmp:
                                            cate_tmp[cate].append({lang: topic})
                                        else:
                                            cate_tmp[cate] = [{lang: topic}]
                                        topic = ''
                                    else:
                                        pass
                        else:
                            lang = detect(t)
                            lang_final.add(lang)
                            if cate in cate_tmp:
                                cate_tmp[cate].append({lang: t})
                            else:
                                cate_tmp[cate] = [{lang: t}]
                elif cate in msd_categories_lang_exception:
                    # print('^^^^$$$$', value)
                    # Language-tagged but without topic merging: just strip the markers.
                    for t in value:
                        t = t.replace('$$',' ')
                        lang = detect(t)
                        lang_final.add(lang)
                        if cate in cate_tmp:
                            cate_tmp[cate].append({lang: t})
                        else:
                            cate_tmp[cate] = [{lang: t}]
                else:
                    # print('cate------>',cate)
                    cate_tmp[cate] = value
            status = {'status':'1','language': list(lang_final),'file_name':[file_name]}
            extracted_categories = {**status,**cate_tmp,**unique}
            final_json[file_index] = extracted_categories
            # return render(request, 'extractor/doc_result.html', {'result': extracted_categories})
        else:
            # Unsupported extension: report failure for this file only.
            status = {'status': '0','file_name': [file_name]}
            final_json[file_index] = status
            # return JsonResponse(status)
    # return Response(final_json)
    return JsonResponse(final_json)
def get_file_smb(file_name):
    """Open *file_name* over SMB in binary mode and return the file object.

    A stale cached SMB session can make the first open fail after a
    disconnect, so on any error the connection cache is reset and the
    open is retried once.

    Returns:
        The open smbclient file object, or ``''`` (the sentinel initial
        value) when both attempts fail. Callers must treat a falsy
        result as "file unavailable".
    """
    data = ''
    try:
        data = smbclient.open_file(r"{}".format(file_name), mode='rb',
                                   username=smb_username, password=smb_password)
        print('file found')
    except Exception:
        # Likely a stale cached connection: reset and retry once.
        smbclient.reset_connection_cache()
        try:
            data = smbclient.open_file(r"{}".format(file_name), mode='rb',
                                       username=smb_username, password=smb_password)
        except Exception:
            # Preserve the original best-effort contract: the old
            # ``finally: return data`` swallowed a failed retry and
            # returned the '' sentinel instead of raising.
            data = ''
    return data
def text_extraction(file,method=None):
    """Convert a .docx file to HTML and group its text under its headings.

    Args:
        file: a path/share name (when ``method == 'SMB'``) or a file-like
            object readable by mammoth.
        method: ``'SMB'`` to open the file over SMB with a one-shot retry
            after resetting the connection cache; anything else passes
            *file* straight to mammoth.

    Returns:
        dict mapping heading text -> list of '$$'-joined paragraph runs
        that appeared under that heading.
    """
    tmp = []        # paragraphs accumulated under the current heading
    final = {}      # heading text -> ['$$'-joined paragraph runs]
    key = ''        # current heading ('' until the first heading is seen)
    if method == 'SMB':
        try:
            with smbclient.open_file(r"{}".format(file), mode='rb', username=smb_username, password=smb_password) as f:
                html = mammoth.convert_to_html(f).value
            print('file found')
        except:
            # Stale SMB session: reset the cache and retry once.
            smbclient.reset_connection_cache()
            with smbclient.open_file(r"{}".format(file), mode='rb', username=smb_username, password=smb_password) as f:
                html = mammoth.convert_to_html(f).value
            print('file found')
    else:
        html = mammoth.convert_to_html(file).value
    '''
    soup = BeautifulSoup(html,'html.parser')
    paragraphs = soup.find_all('p')
    # list = [ele.text for ele in paragraphs]
    list = [ele.next for ele in paragraphs]
    '''
    soup = BeautifulSoup(html, 'html.parser')
    # Both paragraphs and list items carry body text.
    paragraphs = soup.find_all(['p','li'])
    # -----
    for i, text in enumerate(paragraphs):
        text = str(text)
        # Drop empty strings accumulated from blank paragraphs.
        if '' in tmp:
            tmp.remove('')
        if re.findall(regex_heading_msd, text):
            # A new heading: flush whatever was accumulated under the
            # previous heading, then start collecting under the new one.
            try:
                if key and (key not in final):
                    if tmp:
                        final[key] = ['$$'.join(tmp)]
                elif key in final:
                    # Repeated heading: append another joined run.
                    if tmp:
                        final[key].append('$$'.join(tmp))
                else:
                    pass
                key = re.sub(r'<.*?>', '', text)  # heading text, tags stripped
                # print(key)
                tmp.clear()
            except:
                pass
        else:
            if i == len(paragraphs) - 1:
                # Last element: append it, then flush the final group
                # (there is no following heading to trigger the flush).
                text = text.strip()
                tmp = [t for t in tmp if t]
                if text and not re.findall(r"Panel\s\d", text):
                    # Normalize <strong> to <b>, then strip every other tag
                    # except the whitelisted single-letter ones (b/e/m/s...).
                    text = text.replace('<strong>', '<b>').replace('</strong>', '</b>')
                    text = re.sub(r"<(\/?[^/bems]).*?>", '', text)
                    tmp.append(text)
                if key not in final:
                    if tmp:
                        final[key] = ['$$'.join(tmp)]
                elif key in final:
                    if tmp:
                        final[key].append('$$'.join(tmp))
                else:
                    pass
            else:
                text = text.strip()
                tmp = [t for t in tmp if t]
                if text and not re.findall(r"Panel\s\d", text):  # filter out heading like 'big panel 1'
                    text = text.replace('<strong>', '<b>').replace('</strong>', '</b>')
                    text = re.sub(r"<(\/?[^/bems]).*?>", '', text)
                    tmp.append(text)
    # return final , max(lang,key=lang.count)
    # print(final)
    return final
def extractor(request):
    """Dispatch each ``?file=`` query parameter to a format-specific extractor.

    With no ``file`` parameter, renders the upload page. Otherwise builds
    ``{index: extraction_result}`` for every requested file and returns it
    as JSON.

    Args:
        request: Django HttpRequest; file paths come from
            ``request.GET.getlist('file')``.

    Returns:
        HttpResponse (rendered page) or JsonResponse with per-file results.
    """
    final_json = {}
    # getlist returns the default *string* (not a list) when the key is absent.
    file_name_list = request.GET.getlist('file', 'no file')
    print('file_list', file_name_list)
    if file_name_list == 'no file':
        return render(request, 'extractor/index_msd.html')
    for file_index, file_name in enumerate(file_name_list):
        doc_format = os.path.splitext(file_name)[1].lower()
        if doc_format == '.xlsx':
            final_json[file_index] = Excel_extraction(file_name).main()
        elif doc_format == '.docx':
            final_json[file_index] = msd_extraction().main(file_name)
        else:
            # NOTE(review): unsupported formats are silently omitted from
            # the response (no entry at this index) -- original behavior kept.
            print('format not supported')
    return JsonResponse(final_json)
# def dataset_to_mangodb(request):
# from pymongo import MongoClient
# client = MongoClient('172.28.42.150',27017)
# db = client['dataset']
# collection = db['msd']
# data = [msd_dataset(category=i['category'], text=i['text'], language_code=i['language_code'],
# language=i['language'],
# type=i['type']) for i in collection.find({})]
# if data:
# msd_dataset.objects.bulk_create(data)
# return HttpResponse('success')
# else:
# return HttpResponse('Failure')
def dataset_to_mangodb(request, django_model, mongo_table):
    """Copy every document from MongoDB collection *mongo_table* into the
    Django model *django_model* with a single bulk insert.

    Args:
        request: unused (kept for view-style call compatibility).
        django_model: Django model class whose fields match the Mongo
            documents (category, text, language_code, language,
            category_actual).
        mongo_table: name of the collection inside the 'dataset' database.

    Returns:
        HttpResponse 'success' when rows were inserted, 'Failure' when the
        collection was empty.
    """
    from pymongo import MongoClient
    # NOTE(review): hard-coded Mongo host -- should move to settings/env.
    client = MongoClient('172.28.42.150', 27017)
    db = client['dataset']
    collection = db[mongo_table]
    data = [django_model(category=i['category'], text=i['text'],
                         language_code=i['language_code'],
                         language=i['language'],
                         category_actual=i['category_actual'])
            for i in collection.find({})]
    if data:
        # BUG FIX: previously called msd_dataset.objects.bulk_create(),
        # ignoring the django_model parameter entirely.
        django_model.objects.bulk_create(data)
        return HttpResponse('success')
    else:
        return HttpResponse('Failure')
dataset_to_mangodb(msd_contents,'msd_contents')
| [
"VIJAYKANAGARAJ@CHENMACL16.local"
] | VIJAYKANAGARAJ@CHENMACL16.local |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.