The dataset schema, one row per column (length ranges and class counts as reported by the viewer; ⌀ marks a nullable column):

| column | dtype | lengths / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |

The sample records below are shown one field per line.
blob_id: e9f868ff815024c1e16fbad4fecdd0555aa18e30
directory_id: 77c5e35467b91bb10967a141f7caa3a3c0b4095f
path: /associate_name_with_cat_reduce.py
content_id: a29676a9d7f78ae6f5f9930e603d8adcd190b9b8
detected_licenses: []
license_type: no_license
repo_name: yan9liu/title_search
snapshot_id: cab9cd19841dbf895dc165f0172e079129c8650d
revision_id: b155c4c3b9a4e306a121e89247f4bad72c3bbe65
branch_name: refs/heads/master
visit_date: 2021-01-09T21:52:24.787824
revision_date: 2016-01-04T07:32:40
committer_date: 2016-01-04T07:32:40
github_id: 48975494
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 785
extension: py
content:

import sys

def reduce():
    # Hadoop-streaming-style reducer: stdin arrives as "pid\tvalue" lines
    # grouped by pid; values containing ':' are names, the rest categories.
    cats = []
    names = []
    pre_pid = None
    for line in sys.stdin:
        splits = line.decode('utf-8').strip().split('\t', 1)
        pid = splits[0]
        if pid != pre_pid:
            if cats and names:
                output(pre_pid, cats, names)
            cats = []
            names = []
            pre_pid = pid
        if ':' in splits[1]:
            names.append(splits[1])
        else:
            cats.append(splits[1])
    if cats and names:
        output(pre_pid, cats, names)

def output(pid, cats, names):
    # emit the cross product of categories and names for one pid (Python 2 print)
    for name in names:
        for cat in cats:
            out_s = pid + "\t" + cat + "\t" + name
            print out_s.encode('utf-8')

if __name__ == "__main__":
    reduce()
authors: ["liu.ocean@outlook.com"]
author_id: liu.ocean@outlook.com

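The reducer above is Python 2 (`print` statement, byte strings from stdin). A minimal sketch of the same logic ported to Python 3, assuming the same Hadoop-streaming contract taken from the code itself (tab-separated `pid\tvalue` lines, grouped by pid, every line well-formed):

```python
import sys

def output(pid, cats, names):
    # Emit the cross product of categories and names for one product id.
    for name in names:
        for cat in cats:
            print(pid + "\t" + cat + "\t" + name)

def reduce():
    # Python 3 stdin already yields str, so no .decode() is needed.
    cats, names, pre_pid = [], [], None
    for line in sys.stdin:
        pid, value = line.rstrip("\n").split("\t", 1)
        if pid != pre_pid:
            if cats and names:
                output(pre_pid, cats, names)
            cats, names, pre_pid = [], [], pid
        # Values containing ':' are names; everything else is a category.
        (names if ":" in value else cats).append(value)
    if cats and names:
        output(pre_pid, cats, names)

if __name__ == "__main__":
    reduce()
```

With a pre-sorted, tab-separated file (a hypothetical `input.tsv`), it can be exercised locally as `sort input.tsv | python3 associate_name_with_cat_reduce.py`.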
blob_id: 6dd5458bc61691ad70c9076ff0135c5343b37efb
directory_id: cc343475ca61c167944a0c0f59f531ca4f43b5c8
path: /NetworksLabCode/httpclient.py
content_id: 197f5da8fc07afac1bef6611ebba7a87d3d40e8d
detected_licenses: []
license_type: no_license
repo_name: sambathkumar02/Python
snapshot_id: 55f20f47c333eb5bb3947c8b4d14495c9fb210dc
revision_id: ce569a18c4f35de70aeba0007bbf4bc48730b3d4
branch_name: refs/heads/master
visit_date: 2023-03-15T05:25:50.531048
revision_date: 2021-03-18T11:08:21
committer_date: 2021-03-18T11:08:21
github_id: 257019558
star_events_count: 2
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 451
extension: py
content:

import socket

# Minimal HTTP/1.1 GET over a raw TCP socket.
client = socket.socket()
server = ('saranathan.ac.in', 80)
client.connect(server)
header = b'GET / HTTP/1.1\r\n'
header += b'Host: saranathan.ac.in:80\r\n'
header += b'Accept: text/html\r\n'
header += b'Connection: close\r\n'
header += b'\r\n'
client.sendall(header)  # sendall, unlike send, guarantees the whole request goes out
response = b''
while True:
    buffer = client.recv(4096)
    if not buffer:  # server closed the connection
        break
    response += buffer
print(response.decode())
client.close()
authors: ["noreply@github.com"]
author_id: sambathkumar02.noreply@github.com

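For comparison, the same request can be made with the standard library's `http.client`, which handles request framing and partial writes automatically. This is a sketch, not part of the original lab code; the host and header values are copied from the snippet above:

```python
import http.client

# http.client adds the Host header and frames the request for us.
conn = http.client.HTTPConnection('saranathan.ac.in', 80, timeout=10)
conn.request('GET', '/', headers={'Accept': 'text/html', 'Connection': 'close'})
resp = conn.getresponse()
print(resp.status, resp.reason)
print(resp.read().decode(errors='replace'))
conn.close()
```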
blob_id: 860ed367c2aca7e4dd6deba69f2855fdacc19f00
directory_id: f49c866f416a0c2cf89347aa2e0814553f4b5d52
path: /train_eval_for_mrc.py
content_id: 6c3a3cc2610e4fbcbbd86c58408dec97802e41b3
detected_licenses: []
license_type: no_license
repo_name: thunderboom/NER_MRC
snapshot_id: 3fec4a4a821592fe9d092ac2b3b9b167dee25cb5
revision_id: a0f6a9a77ed7ed5d6ff4d46d114b83871480b1e7
branch_name: refs/heads/main
visit_date: 2022-12-31T17:06:08.782889
revision_date: 2020-10-27T10:04:31
committer_date: 2020-10-27T10:04:31
github_id: 307647627
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 14300
extension: py
content:

# coding: UTF-8
import os
import logging
import numpy as np
import torch
import torch.nn as nn
from sklearn import metrics
from seqeval.metrics import f1_score, precision_score, recall_score, classification_report
import time
from utils import extract_flat_spans_batch
from models.loss import DiceLoss
from torch.nn.modules import BCEWithLogitsLoss
from transformers import AdamW, get_linear_schedule_with_warmup

logger = logging.getLogger(__name__)


def compute_loss(config, start_logits, end_logits, span_logits,
                 start_labels, end_labels, match_labels, start_label_mask, end_label_mask):
    batch_size, seq_len = start_logits.size()
    start_float_label_mask = start_label_mask.view(-1).float()
    end_float_label_mask = end_label_mask.view(-1).float()
    match_label_row_mask = start_label_mask.bool().unsqueeze(-1).expand(-1, -1, seq_len)
    match_label_col_mask = end_label_mask.bool().unsqueeze(-2).expand(-1, seq_len, -1)
    match_label_mask = match_label_row_mask & match_label_col_mask
    match_label_mask = torch.triu(match_label_mask, 0)  # start should be less than or equal to end
    if config.span_loss_candidates == "all":
        # naive mask
        float_match_label_mask = match_label_mask.view(batch_size, -1).float()
    else:
        # use only predicted or gold start/end positions to compute the match loss
        start_preds = start_logits > 0
        end_preds = end_logits > 0
        if config.span_loss_candidates == "gold":
            match_candidates = ((start_labels.unsqueeze(-1).expand(-1, -1, seq_len) > 0)
                                & (end_labels.unsqueeze(-2).expand(-1, seq_len, -1) > 0))
        else:
            match_candidates = torch.logical_or(
                (start_preds.unsqueeze(-1).expand(-1, -1, seq_len)
                 & end_preds.unsqueeze(-2).expand(-1, seq_len, -1)),
                (start_labels.unsqueeze(-1).expand(-1, -1, seq_len)
                 & end_labels.unsqueeze(-2).expand(-1, seq_len, -1))
            )
        match_label_mask = match_label_mask & match_candidates
        float_match_label_mask = match_label_mask.view(batch_size, -1).float()
    if config.loss_type == "bce":
        bce_loss = BCEWithLogitsLoss(reduction="none")
        start_loss = bce_loss(start_logits.view(-1), start_labels.view(-1).float())
        start_loss = (start_loss * start_float_label_mask).sum() / start_float_label_mask.sum()
        end_loss = bce_loss(end_logits.view(-1), end_labels.view(-1).float())
        end_loss = (end_loss * end_float_label_mask).sum() / end_float_label_mask.sum()
        match_loss = bce_loss(span_logits.view(batch_size, -1), match_labels.view(batch_size, -1).float())
        match_loss = match_loss * float_match_label_mask
        match_loss = match_loss.sum() / (float_match_label_mask.sum() + 1e-10)
    else:
        dice_loss = DiceLoss(with_logits=True, smooth=config.dice_smooth)
        start_loss = dice_loss(start_logits, start_labels.float(), start_float_label_mask)
        end_loss = dice_loss(end_logits, end_labels.float(), end_float_label_mask)
        match_loss = dice_loss(span_logits, match_labels.float(), float_match_label_mask)
    return start_loss, end_loss, match_loss


def model_train(config, model, train_iter, dev_iter):
    start_time = time.time()
    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": config.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0
        },
    ]
    t_total = len(train_iter) * config.num_train_epochs
    optimizer = AdamW(optimizer_grouped_parameters, lr=config.learning_rate)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=t_total * config.warmup_proportion, num_training_steps=t_total
    )
    # FocalLoss(gamma=2, alpha=1)  # tune gamma over 0, 1, 2, 3
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Train Num examples = %d", len(train_iter))
    logger.info(" Dev Num examples = %d", len(dev_iter))
    logger.info(" Num Epochs = %d", config.num_train_epochs)
    logger.info(" Instantaneous batch size GPU/CPU = %d", config.batch_size)
    logger.info(" Total optimization steps = %d", t_total)
    logger.info(" Train device:%s, id:%d", config.device, config.device_id)
    global_batch = 0  # number of batches processed so far
    dev_best_loss = float('inf')
    last_improve = 0  # batch at which the dev loss last improved
    flag = False  # set when training has gone a long time without improvement
    predict_all = np.array([], dtype=int)
    labels_all = np.array([], dtype=int)
    total_loss = 0.
    for epoch in range(config.num_train_epochs):
        logger.info('Epoch [{}/{}]'.format(epoch + 1, config.num_train_epochs))
        for i, (_, input_ids, attention_mask, token_type_ids, type_start_labels, type_end_labels,
                start_label_mask, end_label_mask, match_labels, type_) in enumerate(train_iter):
            global_batch += 1
            model.train()
            input_ids = torch.tensor(input_ids).type(torch.LongTensor).to(config.device)
            attention_mask = torch.tensor(attention_mask).type(torch.LongTensor).to(config.device)
            token_type_ids = torch.tensor(token_type_ids).type(torch.LongTensor).to(config.device)
            type_start_labels = torch.tensor(type_start_labels).type(torch.LongTensor).to(config.device)
            type_end_labels = torch.tensor(type_end_labels).type(torch.LongTensor).to(config.device)
            start_label_mask = torch.tensor(start_label_mask).type(torch.LongTensor).to(config.device)
            end_label_mask = torch.tensor(end_label_mask).type(torch.LongTensor).to(config.device)
            match_labels = torch.tensor(match_labels).type(torch.LongTensor).to(config.device)
            # model output
            start_logits, end_logits, span_logits = model(input_ids, attention_mask, token_type_ids)
            start_loss, end_loss, match_loss = compute_loss(
                config=config,
                start_logits=start_logits,
                end_logits=end_logits,
                span_logits=span_logits,
                start_labels=type_start_labels,
                end_labels=type_end_labels,
                match_labels=match_labels,
                start_label_mask=start_label_mask,
                end_label_mask=end_label_mask
            )
            loss = config.weight_start * start_loss + config.weight_end * end_loss + config.weight_span * match_loss
            model.zero_grad()
            total_loss += loss.item()  # accumulate a plain float so the graph can be freed
            loss.backward()
            optimizer.step()
            scheduler.step()  # update the learning-rate schedule once per optimizer step
            # [B, length], [B, length], [B, length, length]
            start_preds, end_preds, span_pred = start_logits > 0, end_logits > 0, span_logits > 0
            active_labels = extract_flat_spans_batch(start_pred=type_start_labels,
                                                     end_pred=type_end_labels,
                                                     match_pred=match_labels,
                                                     label_mask=start_label_mask,
                                                     pseudo_tag=type_)
            predic = extract_flat_spans_batch(start_pred=start_preds,
                                              end_pred=end_preds,
                                              match_pred=span_pred,
                                              label_mask=start_label_mask,
                                              pseudo_tag=type_)
            labels_all = np.append(labels_all, active_labels)
            predict_all = np.append(predict_all, predic)
            if global_batch % config.output == 0:
                # every config.output batches, log metrics on the train and dev sets
                true_label = labels_all
                predict_label = predict_all
                train_acc = metrics.accuracy_score(labels_all, predict_all)
                train_precision = precision_score(true_label, predict_label)
                train_recall = recall_score(true_label, predict_label)
                train_f1 = f1_score(true_label, predict_label)
                predict_all = np.array([], dtype=int)
                labels_all = np.array([], dtype=int)
                acc, precision, recall, f1, dev_loss = model_evaluate(config, model, dev_iter)
                if dev_loss < dev_best_loss:
                    dev_best_loss = dev_loss
                    improve = '*'
                    last_improve = global_batch
                else:
                    improve = ''
                time_dif = time.time() - start_time
                msg = '{0:>6}, Train Loss: {1:>.4f}, train_acc: {2:>.2%}, precision: {3:>.2%}, recall: {4:>.2%}, f1: {5:>.2%}' \
                      ' Val Loss: {6:>5.6f}, acc: {7:>.2%}, precision: {8:>.2%}, recall: {9:>.2%}, f1: {10:>.2%}, ' \
                      ' Time: {11} - {12}'
                logger.info(msg.format(global_batch, total_loss / config.output, train_acc, train_precision,
                                       train_recall, train_f1, dev_loss, acc, precision, recall, f1,
                                       time_dif, improve))
                total_loss = 0.
            if config.early_stop and global_batch - last_improve > config.require_improvement:
                # stop when the dev loss has not improved for config.require_improvement batches
                logger.info("No optimization for a long time, auto-stopping...")
                flag = True
                break
        if flag:
            break


def model_evaluate(config, model, data_iter, test=False):
    model.eval()
    loss_total = 0
    predict_all = np.array([], dtype=int)
    labels_all = np.array([], dtype=int)
    with torch.no_grad():
        for i, (_, input_ids, attention_mask, token_type_ids, type_start_labels, type_end_labels,
                start_label_mask, end_label_mask, match_labels, type_) in enumerate(data_iter):
            input_ids = torch.tensor(input_ids).type(torch.LongTensor).to(config.device)
            attention_mask = torch.tensor(attention_mask).type(torch.LongTensor).to(config.device)
            token_type_ids = torch.tensor(token_type_ids).type(torch.LongTensor).to(config.device)
            type_start_labels = torch.tensor(type_start_labels).type(torch.LongTensor).to(config.device)
            type_end_labels = torch.tensor(type_end_labels).type(torch.LongTensor).to(config.device)
            start_label_mask = torch.tensor(start_label_mask).type(torch.LongTensor).to(config.device)
            end_label_mask = torch.tensor(end_label_mask).type(torch.LongTensor).to(config.device)
            match_labels = torch.tensor(match_labels).type(torch.LongTensor).to(config.device)
            # model output
            start_logits, end_logits, span_logits = model(input_ids, attention_mask, token_type_ids)
            start_loss, end_loss, match_loss = compute_loss(
                config=config,
                start_logits=start_logits,
                end_logits=end_logits,
                span_logits=span_logits,
                start_labels=type_start_labels,
                end_labels=type_end_labels,
                match_labels=match_labels,
                start_label_mask=start_label_mask,
                end_label_mask=end_label_mask
            )
            loss = config.weight_start * start_loss + config.weight_end * end_loss + config.weight_span * match_loss
            loss_total += loss.item()
            # [B, length], [B, length], [B, length, length]
            start_preds, end_preds, span_pred = start_logits > 0, end_logits > 0, span_logits > 0
            active_labels = extract_flat_spans_batch(start_pred=type_start_labels,
                                                     end_pred=type_end_labels,
                                                     match_pred=match_labels,
                                                     label_mask=start_label_mask,
                                                     pseudo_tag=type_)
            predic = extract_flat_spans_batch(start_pred=start_preds,
                                              end_pred=end_preds,
                                              match_pred=span_pred,
                                              label_mask=start_label_mask,
                                              pseudo_tag=type_)
            labels_all = np.append(labels_all, active_labels)
            predict_all = np.append(predict_all, predic)
    true_label = labels_all
    predict_label = predict_all
    acc = metrics.accuracy_score(labels_all, predict_all)
    precision = precision_score(true_label, predict_label)
    recall = recall_score(true_label, predict_label)
    f1 = f1_score(true_label, predict_label)
    if test:
        report = classification_report(true_label, predict_label, digits=4)
        confusion = metrics.confusion_matrix(true_label, predict_label)
        return acc, precision, recall, f1, loss_total / len(data_iter), report, confusion
    return acc, precision, recall, f1, loss_total / len(data_iter)


def model_test(config, model, test_iter):
    # test!
    logger.info("***** Running testing *****")
    logger.info(" Test Num examples = %d", len(test_iter))
    start_time = time.time()
    acc, precision, recall, f1, test_loss, test_report, test_confusion = model_evaluate(config, model, test_iter, test=True)
    msg = 'Test Loss: {0:>5.4f}, Test acc: {1:>.2%}, precision: {2:>.2%} recall: {3:>.2%}, f1: {4:>.2%}'
    logger.info(msg.format(test_loss, acc, precision, recall, f1))
    logger.info("Precision, Recall and F1-Score...")
    logger.info(test_report)
    logger.info("Confusion Matrix...")
    logger.info(test_confusion)
    time_dif = time.time() - start_time
    logger.info("Time usage:%.6fs", time_dif)
authors: ["470810634@qq.com"]
author_id: 470810634@qq.com

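The heart of `compute_loss` above is the `[B, L, L]` candidate mask that keeps only span pairs with start ≤ end. A self-contained sketch with dummy all-ones masks shows what `torch.triu` contributes; `batch_size` and `seq_len` are arbitrary here, and the shapes are assumed from the code:

```python
import torch

batch_size, seq_len = 2, 5
start_label_mask = torch.ones(batch_size, seq_len, dtype=torch.long)
end_label_mask = torch.ones(batch_size, seq_len, dtype=torch.long)

# Broadcast the [B, L] start/end masks into a [B, L, L] grid of (start, end) pairs.
row = start_label_mask.bool().unsqueeze(-1).expand(-1, -1, seq_len)  # [B, L, L]
col = end_label_mask.bool().unsqueeze(-2).expand(-1, seq_len, -1)    # [B, L, L]

# triu with diagonal=0 zeroes every pair where end < start.
mask = torch.triu(row & col, 0)

print(mask[0].int())  # upper-triangular matrix of valid (start, end) pairs
```

With all-ones input masks, each batch entry prints an upper-triangular matrix; padding positions in a real batch would knock out whole rows and columns before `triu` is applied.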
blob_id: b0a496a2adad7d4299f3c94ceb3f5651a373a629
directory_id: ee8c4c954b7c1711899b6d2527bdb12b5c79c9be
path: /assessment2/amazon/run/core/controllers/manage.py
content_id: 436b9b54ecb5b87023cfad764e11bb94a803445a
detected_licenses: []
license_type: no_license
repo_name: sqlconsult/byte
snapshot_id: 02ac9899aebea4475614969b594bfe2992ffe29a
revision_id: 548f6cb5038e927b54adca29caf02c981fdcecfc
branch_name: refs/heads/master
visit_date: 2021-01-25T14:45:42.120220
revision_date: 2018-08-11T23:45:31
committer_date: 2018-08-11T23:45:31
github_id: 117135069
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 366
extension: py
content:

#!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for

controller = Blueprint('manage', __name__, url_prefix='/manage')

# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
#     if title == 'Republic':                       # TODO 2
#         return render_template('republic.html')   # TODO 2
#     else:
#         pass
authors: ["sqlconsult@hotmail.com"]
author_id: sqlconsult@hotmail.com

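The blueprint above is only defined, never attached to an application. A sketch of how it would typically be registered, with the commented-out route enabled; the `republic.html` template and the surrounding app are assumptions for illustration, not part of the repository:

```python
from flask import Blueprint, Flask, render_template

controller = Blueprint('manage', __name__, url_prefix='/manage')

@controller.route('/<string:title>', methods=['GET'])
def lookup(title):
    # Serves /manage/Republic; assumes templates/republic.html exists.
    if title == 'Republic':
        return render_template('republic.html')
    return ('Not found', 404)

app = Flask(__name__)
app.register_blueprint(controller)  # mounts every route under /manage

if __name__ == '__main__':
    app.run(debug=True)
```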
blob_id: a6feea4e0041758fbcfcdf08169d6272e1d4ea41
directory_id: bdba52c756cc09f192b720ea318510c265665dcd
path: /swagger_client/api/character_api.py
content_id: c1e1cd12abcec0f72554fd46436981b2dad6fbd7
detected_licenses: ["MIT"]
license_type: permissive
repo_name: rseichter/bootini-star
snapshot_id: 6b38195890f383615cc2b422c365ac28c5b87292
revision_id: a80258f01a05e4df38748b8cb47dfadabd42c20d
branch_name: refs/heads/master
visit_date: 2020-03-14T03:17:11.385048
revision_date: 2018-06-28T17:23:23
committer_date: 2018-06-28T17:23:23
github_id: 131416504
star_events_count: 0
fork_events_count: 0
gha_license_id: MIT
gha_event_created_at: 2018-05-01T14:26:04
gha_created_at: 2018-04-28T14:28:46
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 94170
extension: py
content:

# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online # noqa: E501
OpenAPI spec version: 0.8.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class CharacterApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_characters_character_id(self, character_id, **kwargs): # noqa: E501
"""Get character's public information # noqa: E501
Public information about a character --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCharactersCharacterIdOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get character's public information # noqa: E501
Public information about a character --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCharactersCharacterIdOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v4/characters/{character_id}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCharactersCharacterIdOk', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_agents_research(self, character_id, **kwargs): # noqa: E501
"""Get agents research # noqa: E501
Return a list of agents research information for a character. The formula for finding the current research points with an agent is: currentPoints = remainderPoints + pointsPerDay * days(currentTime - researchStartDate) --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_agents_research(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdAgentsResearch200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_agents_research_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_agents_research_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_agents_research_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get agents research # noqa: E501
Return a list of agents research information for a character. The formula for finding the current research points with an agent is: currentPoints = remainderPoints + pointsPerDay * days(currentTime - researchStartDate) --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_agents_research_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdAgentsResearch200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_agents_research" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_agents_research`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_agents_research`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/agents_research/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdAgentsResearch200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_blueprints(self, character_id, **kwargs): # noqa: E501
"""Get blueprints # noqa: E501
Return a list of blueprints the character owns --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_blueprints(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdBlueprints200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_blueprints_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_blueprints_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_blueprints_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get blueprints # noqa: E501
Return a list of blueprints the character owns --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_blueprints_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdBlueprints200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_blueprints" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_blueprints`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_blueprints`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v2/characters/{character_id}/blueprints/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdBlueprints200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_chat_channels(self, character_id, **kwargs): # noqa: E501
"""Get chat channels # noqa: E501
Return chat channels that a character is the owner or operator of --- This route is cached for up to 300 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_chat_channels(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdChatChannels200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_chat_channels_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_chat_channels_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_chat_channels_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get chat channels # noqa: E501
Return chat channels that a character is the owner or operator of --- This route is cached for up to 300 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_chat_channels_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdChatChannels200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_chat_channels" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_chat_channels`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_chat_channels`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/chat_channels/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdChatChannels200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_corporationhistory(self, character_id, **kwargs): # noqa: E501
"""Get corporation history # noqa: E501
Get a list of all the corporations a character has been a member of --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_corporationhistory(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdCorporationhistory200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_corporationhistory_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_corporationhistory_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_corporationhistory_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get corporation history # noqa: E501
Get a list of all the corporations a character has been a member of --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_corporationhistory_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdCorporationhistory200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_corporationhistory" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_corporationhistory`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_corporationhistory`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/corporationhistory/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdCorporationhistory200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_fatigue(self, character_id, **kwargs): # noqa: E501
"""Get jump fatigue # noqa: E501
Return a character's jump activation and fatigue information --- This route is cached for up to 300 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_fatigue(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCharactersCharacterIdFatigueOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_fatigue_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_fatigue_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_fatigue_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get jump fatigue # noqa: E501
Return a character's jump activation and fatigue information --- This route is cached for up to 300 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_fatigue_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCharactersCharacterIdFatigueOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_fatigue" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_fatigue`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_fatigue`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/fatigue/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCharactersCharacterIdFatigueOk', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_medals(self, character_id, **kwargs): # noqa: E501
"""Get medals # noqa: E501
Return a list of medals the character has --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_medals(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdMedals200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_medals_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_medals_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_medals_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get medals # noqa: E501
Return a list of medals the character has --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_medals_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdMedals200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_medals" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_medals`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_medals`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/medals/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdMedals200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_notifications(self, character_id, **kwargs): # noqa: E501
"""Get character notifications # noqa: E501
Return character notifications --- This route is cached for up to 600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_notifications(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdNotifications200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_notifications_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_notifications_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_notifications_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get character notifications # noqa: E501
Return character notifications --- This route is cached for up to 600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_notifications_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdNotifications200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_notifications" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_notifications`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_notifications`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v2/characters/{character_id}/notifications/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdNotifications200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_notifications_contacts(self, character_id, **kwargs): # noqa: E501
"""Get new contact notifications # noqa: E501
Return notifications about having been added to someone's contact list --- This route is cached for up to 600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_notifications_contacts(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdNotificationsContacts200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_notifications_contacts_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_notifications_contacts_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_notifications_contacts_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get new contact notifications # noqa: E501
Return notifications about having been added to someone's contact list --- This route is cached for up to 600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_notifications_contacts_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdNotificationsContacts200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_notifications_contacts" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_notifications_contacts`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_notifications_contacts`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/notifications/contacts/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdNotificationsContacts200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_portrait(self, character_id, **kwargs): # noqa: E501
"""Get character portraits # noqa: E501
Get portrait urls for a character --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_portrait(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCharactersCharacterIdPortraitOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_portrait_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_portrait_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_portrait_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get character portraits # noqa: E501
Get portrait urls for a character --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_portrait_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCharactersCharacterIdPortraitOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_portrait" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_portrait`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_portrait`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v2/characters/{character_id}/portrait/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCharactersCharacterIdPortraitOk', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_roles(self, character_id, **kwargs): # noqa: E501
"""Get character corporation roles # noqa: E501
Returns a character's corporation roles --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_roles(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCharactersCharacterIdRolesOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_roles_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_roles_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_roles_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get character corporation roles # noqa: E501
Returns a character's corporation roles --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_roles_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCharactersCharacterIdRolesOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_roles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_roles`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_roles`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v2/characters/{character_id}/roles/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCharactersCharacterIdRolesOk', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_standings(self, character_id, **kwargs): # noqa: E501
"""Get standings # noqa: E501
Return character standings from agents, NPC corporations, and factions --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_standings(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdStandings200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_standings_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_standings_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_standings_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get standings # noqa: E501
Return character standings from agents, NPC corporations, and factions --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_standings_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdStandings200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_standings" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_standings`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_standings`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/standings/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdStandings200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_stats(self, character_id, **kwargs): # noqa: E501
"""Yearly aggregate stats # noqa: E501
Returns aggregate yearly stats for a character --- This route is cached for up to 86400 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_stats(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdStats200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_stats_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_stats_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_stats_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Yearly aggregate stats # noqa: E501
Returns aggregate yearly stats for a character --- This route is cached for up to 86400 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_stats_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdStats200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_stats" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_stats`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_stats`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v2/characters/{character_id}/stats/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdStats200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_titles(self, character_id, **kwargs): # noqa: E501
"""Get character corporation titles # noqa: E501
Returns a character's titles --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_titles(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdTitles200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_titles_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_titles_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_titles_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get character corporation titles # noqa: E501
Returns a character's titles --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_titles_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdTitles200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_titles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_titles`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_titles`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/titles/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdTitles200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_names(self, character_ids, **kwargs): # noqa: E501
"""Get character names # noqa: E501
Resolve a set of character IDs to character names --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_names(character_ids, async=True)
>>> result = thread.get()
:param async bool
:param list[int] character_ids: A comma separated list of character IDs (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersNames200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_names_with_http_info(character_ids, **kwargs) # noqa: E501
else:
(data) = self.get_characters_names_with_http_info(character_ids, **kwargs) # noqa: E501
return data
def get_characters_names_with_http_info(self, character_ids, **kwargs): # noqa: E501
"""Get character names # noqa: E501
Resolve a set of character IDs to character names --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_names_with_http_info(character_ids, async=True)
>>> result = thread.get()
:param async bool
:param list[int] character_ids: A comma separated list of character IDs (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersNames200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_ids', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_names" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_ids' is set
if ('character_ids' not in params or
params['character_ids'] is None):
raise ValueError("Missing the required parameter `character_ids` when calling `get_characters_names`") # noqa: E501
if ('character_ids' in params and
len(params['character_ids']) > 1000):
raise ValueError("Invalid value for parameter `character_ids` when calling `get_characters_names`, number of items must be less than or equal to `1000`") # noqa: E501
if ('character_ids' in params and
len(params['character_ids']) < 1):
raise ValueError("Invalid value for parameter `character_ids` when calling `get_characters_names`, number of items must be greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'character_ids' in params:
query_params.append(('character_ids', params['character_ids'])) # noqa: E501
collection_formats['character_ids'] = 'csv' # noqa: E501
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/characters/names/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersNames200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_characters_affiliation(self, characters, **kwargs): # noqa: E501
"""Character affiliation # noqa: E501
Bulk lookup of character IDs to corporation, alliance and faction --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.post_characters_affiliation(characters, async=True)
>>> result = thread.get()
:param async bool
:param list[int] characters: The character IDs to fetch affiliations for. All characters must exist, or none will be returned. (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[PostCharactersAffiliation200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.post_characters_affiliation_with_http_info(characters, **kwargs) # noqa: E501
else:
(data) = self.post_characters_affiliation_with_http_info(characters, **kwargs) # noqa: E501
return data
def post_characters_affiliation_with_http_info(self, characters, **kwargs): # noqa: E501
"""Character affiliation # noqa: E501
Bulk lookup of character IDs to corporation, alliance and faction --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.post_characters_affiliation_with_http_info(characters, async=True)
>>> result = thread.get()
:param async bool
:param list[int] characters: The character IDs to fetch affiliations for. All characters must exist, or none will be returned. (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[PostCharactersAffiliation200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['characters', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_characters_affiliation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'characters' is set
if ('characters' not in params or
params['characters'] is None):
raise ValueError("Missing the required parameter `characters` when calling `post_characters_affiliation`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'characters' in params:
body_params = params['characters']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/characters/affiliation/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[PostCharactersAffiliation200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_characters_character_id_cspa(self, character_id, characters, **kwargs): # noqa: E501
"""Calculate a CSPA charge cost # noqa: E501
        Takes a source character ID in the URL and a set of target character IDs in the body, returns a CSPA charge cost  --- # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.post_characters_character_id_cspa(character_id, characters, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param list[int] characters: The target characters to calculate the charge for (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: float
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.post_characters_character_id_cspa_with_http_info(character_id, characters, **kwargs) # noqa: E501
else:
(data) = self.post_characters_character_id_cspa_with_http_info(character_id, characters, **kwargs) # noqa: E501
return data
def post_characters_character_id_cspa_with_http_info(self, character_id, characters, **kwargs): # noqa: E501
"""Calculate a CSPA charge cost # noqa: E501
        Takes a source character ID in the URL and a set of target character IDs in the body, returns a CSPA charge cost  --- # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.post_characters_character_id_cspa_with_http_info(character_id, characters, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param list[int] characters: The target characters to calculate the charge for (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: float
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'characters', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_characters_character_id_cspa" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `post_characters_character_id_cspa`") # noqa: E501
# verify the required parameter 'characters' is set
if ('characters' not in params or
params['characters'] is None):
raise ValueError("Missing the required parameter `characters` when calling `post_characters_character_id_cspa`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `post_characters_character_id_cspa`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'characters' in params:
body_params = params['characters']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v4/characters/{character_id}/cspa/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='float', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
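# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated client). The package name
# `esi_client` and the bare `CharacterApi()` construction are assumptions
# based on the usual swagger-codegen layout, not verified against this repo.
# Note that `async=True` only parses on Python <= 3.6: `async` became a
# reserved word in Python 3.7, which is why newer generators emit `async_req`.
#
# import esi_client
# api = esi_client.CharacterApi()
# names = api.get_characters_names([2112625428])                # synchronous
# thread = api.get_characters_names([2112625428], async=True)   # pre-3.7 only
# names = thread.get()                                          # join thread
# ---------------------------------------------------------------------------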
|
[
"github@seichter.de"
] |
github@seichter.de
|
4dd3029190c967aa1021f127e1f73e6489ab3fb7
|
b012b7ce6a371511de44eee63e8bcceb29ae4a23
|
/manage.py
|
2aee2cf4a1d44bbe7e9e6f5984a50da229840468
|
[] |
no_license
|
Srinivasu-Gillella/djtemplates1
|
f23974b7af13f64717eeb9d8547cc38e046e7383
|
f77b700e6a444362c2c4c68b7836bf6f417a1a96
|
refs/heads/master
| 2022-12-16T04:12:19.059026
| 2020-09-28T12:10:16
| 2020-09-28T12:10:16
| 299,296,765
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djtemplates1.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
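# Hedged usage note: as a standard Django entry point, this script is driven
# from the shell; the commands below are stock django-admin commands, shown
# only to illustrate how the file is typically used:
#   python manage.py migrate
#   python manage.py runserver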
|
[
"64829989+Srinivasu-Gillella@users.noreply.github.com"
] |
64829989+Srinivasu-Gillella@users.noreply.github.com
|
1d04d3d4f51fb6e30bcbf047d655a4f3121f14ce
|
73dc6b3fdb07592f10b8e02b7ca053deb61a2dc9
|
/msof_api/comments/admin.py
|
4e3be735e10ca856e607a6d04ccf86bad757bf99
|
[] |
no_license
|
likelion-kookmin/msof_api
|
4143c09f93b68d219aa20de3bd57ec544c2bdf32
|
f9fec7d31ebdb465a8935711da715db6d87c0fce
|
refs/heads/develop
| 2023-06-28T15:35:45.240871
| 2021-07-31T16:38:35
| 2021-07-31T16:38:35
| 347,298,658
| 3
| 1
| null | 2021-07-31T16:38:36
| 2021-03-13T07:02:56
|
Python
|
UTF-8
|
Python
| false
| false
| 784
|
py
|
"""# comments admin
- CommentAdmin
"""
from django.contrib import admin
from .models import Comment
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
"""## CommentAdmin
    - Configuration for the Comment model managed in the Django admin
"""
list_display = [
'author',
'question',
'parent',
'selected',
'content',
'status',
'liked_count',
'disliked_count',
]
list_editable = [
'status',
]
list_filter = [
'author',
'question',
'parent',
'selected',
'status',
]
search_fields = [
'content',
'author__name',
'question__title',
'question__content',
]
ordering = [
'-updated_at',
]
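# For reference, the decorator above is equivalent to the classic explicit
# registration call (standard Django API, shown here as a usage note):
#   admin.site.register(Comment, CommentAdmin)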
|
[
"singun11@kookmin.ac.kr"
] |
singun11@kookmin.ac.kr
|
652e8c01463ca031788666db93024bbc761ec75d
|
14856ffe01c711af7a41af0b1abf0378ba4ffde6
|
/Python/Fundamentals/Fun_with_Functions.py
|
4db600213841d74d4382c1514cc6f369abdc29a8
|
[] |
no_license
|
sharonanchel/coding-dojo
|
9a8db24eec17b0ae0c220592e6864510297371c3
|
d6c4a7efd0804353b27a49e16255984c4f4b7f2a
|
refs/heads/master
| 2021-05-05T18:17:48.101853
| 2017-06-23T23:53:51
| 2017-06-23T23:53:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
# Odd/Even
def odd_even():
for i in range(1,2001):
if i%2 == 0:
type = 'even'
else:
type = 'odd'
print 'Number is',i, 'This is an',type,'number.'
odd_even()
# Multiply
def multiply(arr, num):
for i in range(0,len(arr)):
arr[i] *= num
return arr
print multiply([2,4,10,16],5)
# Hacker Challenge
def layered_multiples(arr):
newerArray = []
for i in arr:
newArray = []
for x in range(0,i):
newArray.append(1)
newerArray.append(newArray)
return newerArray
print layered_multiples(multiply([2,4,5],3))
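# Worked example for the hacker challenge above (values follow directly from
# the code): multiply([2,4,5], 3) returns [6, 12, 15], so layered_multiples
# builds [[1]*6, [1]*12, [1]*15] -- three inner lists of ones of those lengths.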
|
[
"jao.colin@gmail.com"
] |
jao.colin@gmail.com
|
b32d0c2672ca5d2afe58d2b5c3af3ad37e89ffba
|
f23c9196b21e4ff189d2c8399f4a77de2813d2b2
|
/tests/Python + Tornado/simpleserver.py
|
1f5bb8f4f2c28b1d9f901d93d82e36fa91b26a74
|
[] |
no_license
|
gchatelet/web-performance-tests
|
bcf903135bfcdc46a485c5a0acb9175d125ab3a2
|
3e0b526132abf840dfbc9dd235a94dd4713f9c9b
|
refs/heads/master
| 2020-04-14T23:54:02.606248
| 2013-03-18T11:10:18
| 2013-03-18T11:10:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
import tornado.ioloop
import tornado.web
class GreetingResourceHandler(tornado.web.RequestHandler):
def get(self):
        # use a string default so the concatenation stays valid when 'name' is missing
        self.write("Hello, " + self.get_argument('name', 'World'))
application = tornado.web.Application([
(r"/", GreetingResourceHandler),
])
if __name__ == "__main__":
application.listen(8080)
tornado.ioloop.IOLoop.instance().start()
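# Hedged usage sketch: with the server running, a request such as
#   curl "http://localhost:8080/?name=Tornado"
# should return "Hello, Tornado" (host, port and handler taken from the
# code above; the name value is illustrative).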
|
[
"chatelet.guillaume@gmail.com"
] |
chatelet.guillaume@gmail.com
|
cf384ca62ab9185a7636264fbac38c353fd2bb0c
|
a7c6cf9663f1db1ed407f70e3ee2578ee38b0eca
|
/main.py
|
e452961320167ed3fba64a2ba5ca1561ae0dfe9e
|
[
"MIT"
] |
permissive
|
theSlayer4089/EasyFNBotGlitch
|
bfce879c92c56a239b698b4802c45a04e1452053
|
fd214dc3984092b3a845b7ab6960ba389893dc50
|
refs/heads/master
| 2020-12-05T22:36:01.536330
| 2020-01-05T16:45:38
| 2020-01-05T16:45:38
| 232,265,289
| 1
| 0
|
MIT
| 2020-01-07T07:12:19
| 2020-01-07T07:12:19
| null |
UTF-8
|
Python
| false
| false
| 8,429
|
py
|
import json,fortniteAPI,functions,MultipleClients,os,UpdateCheck
from functools import partial
from Events import ready,friends,party,message
from threading import Thread
try:
    import fortnitepy,asyncio,aiofiles
    from termcolor import colored
    from flask import Flask
except ImportError:
    # install the missing requirements, then retry the imports so the names
    # below are actually bound before they are used
    os.system("pip3 install --user -r requirements.txt")
    import fortnitepy,asyncio,aiofiles
    from termcolor import colored
    from flask import Flask
Settings = json.loads(open("Settings.json").read())
Languages = ["ar","de","es-419","es","en","fr","it","ja","ko","pl","pt-BR","ru","tr","zh-CN","zh-Hant"]
fortniteClient = fortnitepy.Client(email=Settings["Email"],password=Settings["Password"],status="Join my Discord\nIf you want your own bot\nhttps://discord.gg/jxgZH6Z\nOr Follow me on Twitter\n@LupusLeaks")
fortniteClient.Settings = Settings
fortniteClient.Clients = {}
fortniteClient.RemovingFriends = False
default_party_member = []
default_party = {}
#Default language
if Settings["Default item search language"] in Languages:
fortniteClient.DefaultLang = Settings["Default item search language"].lower()
else:
    print(f'ERROR: Couldn\'t find {Settings["Default item search language"]} as a language')
fortniteClient.DefaultLang = "en"
#Banner
SeasonLevel=1000
if Settings["Default season level"] and type(Settings["Default season level"]) == int:
SeasonLevel = Settings["Default season level"]
else:
    print(f'ERROR: {Settings["Default season level"]} is invalid, make sure you only use numbers')
default_party_member.append(partial(fortnitepy.ClientPartyMember.set_banner,season_level=SeasonLevel,icon=Settings["Default banner"],color=Settings["Default banner color"]))
#Platform + Privacy
if Settings["Platform"].upper() in fortnitepy.Platform.__members__:
fortniteClient.platform = fortnitepy.Platform[Settings["Platform"].upper()]
if Settings["Privacy"].upper() in fortnitepy.PartyPrivacy.__members__:
default_party["privacy"] = fortnitepy.PartyPrivacy[Settings["Privacy"].upper()]
#Cosmetics
#Backpack
if Settings["Default backpack"] and not Settings["Default pet"]:
Backpack = fortniteAPI.SGetBackpack(Settings["Default backpack"],fortniteClient.DefaultLang)
if not "status" in Backpack:
v = []
if Settings["Default backpack varaint channel name"] and Settings["Default backpack varaint name"] and Backpack["variants"]["en"]:
VariantChannelName = Settings["Default backpack varaint channel name"].upper()
Variant = Settings["Default backpack varaint name"].upper()
for variant in Backpack["variants"]["en"]:
if variant["type"].upper() == VariantChannelName:
for tag in variant["options"]:
if tag["name"].upper() == Variant:
v.append(functions.create_variant(variant["channel"],tag["tag"],item="AthenaBackpack"))
default_party_member.append(partial(fortnitepy.ClientPartyMember.set_backpack,asset=f'{str(Backpack["path"]).replace("FortniteGame/Content","/Game")}.{Backpack["id"]}',variants=v))
#Skin
if Settings["Default skin"]:
Skin = fortniteAPI.SGetSkin(Settings["Default skin"],fortniteClient.DefaultLang)
if not "status" in Skin:
v = []
if Settings["Default skin varaint channel name"] and Settings["Default skin varaint name"] and Skin["variants"]["en"]:
VariantChannelName = Settings["Default skin varaint channel name"].upper()
Variant = Settings["Default skin varaint name"].upper()
for variant in Skin["variants"]["en"]:
if variant["type"].upper() == VariantChannelName:
for tag in variant["options"]:
if tag["name"].upper() == Variant:
v.append(functions.create_variant(variant["channel"],tag["tag"],item="AthenaCharacter"))
default_party_member.append(partial(fortnitepy.ClientPartyMember.set_outfit,asset=f'{str(Skin["path"]).replace("FortniteGame/Content","/Game")}.{Skin["id"]}',variants=v))
#Pickaxe
if Settings["Default pickaxe"]:
Pickaxe = fortniteAPI.SGetPickaxe(Settings["Default pickaxe"],fortniteClient.DefaultLang)
if not "status" in Pickaxe:
v = []
if Settings["Default pickaxe varaint channel name"] and Settings["Default pickaxe varaint name"] and Pickaxe["variants"]["en"]:
VariantChannelName = Settings["Default pickaxe varaint channel name"].upper()
Variant = Settings["Default pickaxe varaint name"].upper()
for variant in Pickaxe["variants"]["en"]:
if variant["type"].upper() == VariantChannelName:
for tag in variant["options"]:
if tag["name"].upper() == Variant:
v.append(functions.create_variant(variant["channel"],tag["tag"],item="AthenaPickaxe"))
default_party_member.append(partial(fortnitepy.ClientPartyMember.set_pickaxe,asset=f'{str(Pickaxe["path"]).replace("FortniteGame/Content","/Game")}.{Pickaxe["id"]}',variants=v))
#Pet
if Settings["Default pet"]:
Pet = fortniteAPI.SGetPet(Settings["Default pet"],fortniteClient.DefaultLang)
if not "status" in Pet:
v = []
if Settings["Default pet varaint channel name"] and Settings["Default pet varaint name"] and Pet["variants"]["en"]:
VariantChannelName = Settings["Default pet varaint channel name"].upper()
Variant = Settings["Default pet varaint name"].upper()
			for variant in Pet["variants"]["en"]:
if variant["type"].upper() == VariantChannelName:
for tag in variant["options"]:
if tag["name"].upper() == Variant:
v.append(functions.create_variant(variant["channel"],tag["tag"],item="AthenaPetCarrier"))
default_party_member.append(partial(fortnitepy.ClientPartyMember.set_backpack,asset=f'{str(Pet["path"]).replace("FortniteGame/Content","/Game")}.{Pet["id"]}',variants=v))
fortniteClient.default_party_config = default_party
fortniteClient.default_party_member_config = default_party_member
@fortniteClient.event
async def event_ready():
fortniteClient.starting = True
fortniteClient.mainID = fortniteClient.user.id
tasks = []
for email,password in Settings["SubAccounts"].items():
if "@" in email:
tasks.append(MultipleClients.LoadAccount(fortniteClient,email,password))
if len(tasks) > 0:
print("Starting sub accounts!")
await asyncio.wait(tasks)
for Client in fortniteClient.Clients.values():
Friends = fortniteClient.has_friend(Client.user.id)
if not Friends:
try:
await fortniteClient.add_friend(Client.user.id)
except:
pass
Client.starting = False
await ready.Ready(fortniteClient)
fortniteClient.starting = False
@fortniteClient.event
async def event_friend_add(friend):
await friends.event_friend_add(fortniteClient, friend)
@fortniteClient.event
async def event_friend_remove(friend):
await friends.event_friend_remove(fortniteClient, friend)
@fortniteClient.event
async def event_friend_request(friend):
await friends.event_friend_request(fortniteClient, friend)
@fortniteClient.event
async def event_party_invite(invitation):
await party.event_party_invite(fortniteClient, invitation)
@fortniteClient.event
async def event_party_member_join(Member):
await party.event_party_member_join(fortniteClient,Member)
@fortniteClient.event
async def event_party_member_promote(old_leader, new_leader):
await party.event_party_member_promote(fortniteClient, old_leader,new_leader)
@fortniteClient.event
async def event_party_message(Message):
await message.Command(fortniteClient, Message)
@fortniteClient.event
async def event_friend_message(Message):
await message.Command(fortniteClient, Message)
app = Flask(__name__)
@app.route('/')
def Home():
return "Follow @LupusLeaks on Twitter"
Thread(target=app.run).start()
Thread(target=UpdateCheck.CheckVersion).start()
Thread(target=UpdateCheck.CheckItems).start()
try:
fortniteClient.run()
except fortnitepy.errors.AuthException:
print(colored("Invalid account credentials!","red"))
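# Hedged sketch of the Settings.json shape this script expects. The key names
# are taken verbatim from the lookups above (including their spelling); every
# value below is illustrative only, not taken from this repo:
# {
#   "Email": "bot@example.com", "Password": "...",
#   "Default item search language": "en",
#   "Default season level": 100,
#   "Default banner": "...", "Default banner color": "...",
#   "Platform": "PSN", "Privacy": "PUBLIC",
#   "Default skin": "", "Default skin varaint channel name": "",
#   "Default skin varaint name": "", "Default backpack": "",
#   "Default pickaxe": "", "Default pet": "",
#   "SubAccounts": {"alt@example.com": "password"}
# }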
|
[
"noreply@github.com"
] |
theSlayer4089.noreply@github.com
|
a959feaae80f94e9b538502bda97297c7f29dc52
|
0d7085e202a232b419e4a2d0efca07ec30b474e6
|
/src/storage.py
|
6070abacfb99463a396ca4d48626e45a122dec50
|
[] |
no_license
|
maconel/fastrun
|
8a0880cb4078d93da2f8ae24ab52044efb34e78b
|
131701aa6f95b9692965461d395d8a7d0e5f6c13
|
refs/heads/master
| 2021-01-01T19:39:28.499482
| 2011-10-30T13:37:06
| 2011-10-30T13:37:06
| 2,520,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,915
|
py
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
import pinyinlib
datafilepath = r'..\data\data.txt'
#name |path |cmd0 :cmd1 :cmd2 |priority
#记事本 ("Notepad")|c:\windows\notepad.exe|记事本:notepad.ex:jishibene|18  (matched by name, basename, or pinyin)
class Item(object):
def __init__(self, name, path, cmd, priority):
self.name = name
self.path = path
self.cmd = cmd
self.priority = priority
class Storage(object):
def __init__(self):
self.items = []
def load(self):
self.items = []
        with open(os.path.join(curfilepath(), datafilepath), 'rt') as f:
lineno = 0
for line in f:
fields = line.rstrip('\r\n').split('|')
if len(fields) != 4:
continue
self.items.append(Item(fields[0], fields[1], fields[2].lower().split(':'), int(fields[3])))
lineno += 1
def raise_priority(self, item):
item.priority += 1
        self.items.sort(key=lambda item: item.priority, reverse=True)
        with open(os.path.join(curfilepath(), datafilepath), 'wt') as f:
for item in self.items:
f.write(self.item_to_str(item))
f.write('\n')
def item_to_str(self, item):
return '|'.join((item.name, item.path, ':'.join(item.cmd), '%04d' % item.priority))
def add(self, name, path):
pinyinlist = pinyinlib.wordlist_to_pinyin(name)
item = Item(name, path, ':'.join((name, os.path.basename(path), ':'.join(pinyinlist))), 0)
self.items.append(item)
        with open(os.path.join(curfilepath(), datafilepath), 'at') as f:
f.write(self.item_to_str(item))
f.write('\n')
def curfilepath():
return os.path.dirname(os.path.abspath(os.path.join(os.getcwd(), __file__)))
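# Hedged usage sketch (the name/path pair is illustrative and mirrors the
# sample data-file line in the header comment above):
# if __name__ == '__main__':
#     s = Storage()
#     s.load()
#     s.add(u'notepad', r'c:\windows\notepad.exe')
#     if s.items:
#         s.raise_priority(s.items[0])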
|
[
"maconel.reg@gmail.com"
] |
maconel.reg@gmail.com
|
4199440910460a422c013a0c40e9ecddfe383267
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nnbelt.py
|
a561c79d95be99afc054e24528da2a296c42f2e6
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619
| 2015-09-23T11:54:06
| 2015-09-23T11:54:06
| 42,749,205
| 2
| 3
| null | 2015-09-23T11:54:07
| 2015-09-18T22:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
ii = [('LyelCPG2.py', 1), ('MarrFDI.py', 1), ('CoolWHM2.py', 2), ('KembFFF.py', 1), ('RogePAV.py', 4), ('RennJIT.py', 1), ('LeakWTI2.py', 1), ('LeakWTI3.py', 1), ('PettTHE.py', 1), ('TennAP.py', 1), ('PeckJNG.py', 1), ('BailJD2.py', 1), ('FitzRNS3.py', 2), ('WilkJMC2.py', 3), ('CarlTFR.py', 4), ('LyttELD.py', 1), ('BailJD1.py', 1), ('RoscTTI2.py', 1), ('CrokTPS.py', 1), ('ClarGE.py', 2), ('BuckWGM.py', 2), ('LyelCPG.py', 3), ('GilmCRS.py', 1), ('WestJIT2.py', 1), ('DibdTRL2.py', 1), ('CrocDNL.py', 3), ('FerrSDO2.py', 1), ('LeakWTI.py', 1), ('BachARE.py', 1), ('MereHHB3.py', 1), ('HowiWRL2.py', 3), ('BailJD3.py', 1), ('WilkJMC.py', 5), ('MartHRW.py', 4), ('MackCNH.py', 1), ('FitzRNS4.py', 5), ('CoolWHM3.py', 1), ('DequTKM.py', 5), ('FitzRNS.py', 3), ('BowrJMM.py', 1), ('LyttELD3.py', 1), ('RoscTTI.py', 2), ('LewiMJW.py', 1), ('JacoWHI2.py', 1), ('SomeMMH.py', 2), ('BrewDTO.py', 2), ('RogeSIP.py', 6), ('MartHRW2.py', 3), ('MartHSI.py', 3), ('DwigTHH.py', 1), ('BowrJMM2.py', 1), ('BowrJMM3.py', 2), ('BeckWRE.py', 1), ('KeigTSS.py', 1), ('HowiWRL.py', 1)]
|
[
"varunwachaspati@gmail.com"
] |
varunwachaspati@gmail.com
|
4d3d7789762162ec46b85085c7c5971a1c7ab7cc
|
aab2f73ce75434ae127a00c708ec14e29d99dfec
|
/fermi_level/carrier_con_instrinsic_semi.py
|
7f2d08137e2169990632993d6e2db1a9438df8a2
|
[] |
no_license
|
dadhikar/semiconductor_physics_and_modeling
|
29a7c008d93906d40d82a6ef4657da7c28830a6a
|
8e9c082daaf41b228641bc6741e04a491eb7a06f
|
refs/heads/master
| 2022-11-29T04:26:00.530412
| 2020-08-18T21:08:18
| 2020-08-18T21:08:18
| 256,613,677
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,986
|
py
|
"""
For an intrinsic semiconductor, calculate the electron density
in the conduction band. This involves evaluating the Fermi-Dirac
integral of order 1/2.
"""
# importing required libraries
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
mpl.rcParams['xtick.top'] = True
mpl.rcParams['ytick.right'] = True
from scipy.integrate import quad
def fermi_dirac_dist(e, ef, T):
"""
define Fermi-Dirac distribution function
where, e - energy level in eV
ef - fermi energy level in eV
T - temperature
return:
probability value for the distribution
"""
    kb = 8.617e-5 # Boltzmann constant in eV/K
if T == 0.0:
if e <= ef:
f2 = 1.0
else:
f2 = 0.0
else:
f1 = np.exp((e-ef)/(kb*T))
f2 = 1/(1+f1)
return f2
# T = np.linspace(2, 100, 20, endpoint=True)
# e = np.linspace(0.2, 0.4, 50, endpoint=True)
# print(T)
# f = fermi_dirac_dist(e, 0.3, 300)
# plt.plot(e, f)
# plt.show()
def density_of_states(e, ec):
"""
Density of states near the bottom of the conduction band
for low-enough carrier density and temperature
ec (in eV) - conduction band edge
e (in eV)- energy value close to ec
"""
    me = 0.5110e6 # electron rest mass energy (in eV)
factor = 0.91 # this will be material dependent
meff = factor*me # effective electron mass
h_cross = 6.582e-16 # in eV-s
f1 = (np.sqrt(2)/np.power(np.pi, 2))*np.power(meff, 1.5)
f2 = np.power(e-ec, 0.5)/np.power(h_cross, 3)
return f1*f2
# print(density_of_states(0.302, 0.3))
def fermi_dirac_integrand(x, xf):
"""
x = (e-ec)/kB*T
xf = (ef-ec)/kB*T
    ef = Fermi energy in eV
ec = conduction band edge in eV
kB = Boltzmann constant
T = Temperature
"""
return np.power(x, 0.5)/(1+np.exp(x-xf))
def fermi_dirac_integral(xf):
    """
    Evaluate the Fermi-Dirac integral of order 1/2 at the reduced Fermi
    level xf = (ef - ec)/(kB*T), integrating x from 0 to infinity.
    """
    integral_value, _ = quad(func=fermi_dirac_integrand, a=0, b=np.inf, args=(xf,),
                             full_output=0, epsabs=1.49e-08, epsrel=1.49e-08,
                             limit=50, points=None, weight=None, wvar=None,
                             wopts=None, maxp1=50, limlst=50)
return integral_value
fermi_integral = []
xf = np.linspace(-10, 10, 1000)
for x in xf:
integral_value = fermi_dirac_integral(x)
# print(xf, integral_value)
fermi_integral.append(integral_value)
plt.semilogy(xf, np.asarray(fermi_integral), 'ro', ms=2.5, label=r'Fermi-Dirac')
plt.semilogy(xf, 0.5*np.power(np.pi, 0.5)*np.exp(xf), 'ko', ms=2.5, label=r'Boltzmann approx.' )
plt.vlines(x=0.0, ymin= 1e-5, ymax= 30, colors='g', linestyles='--',linewidth=2.0)
plt.xlabel(r'(E$_{f}$ - E$_{c}$) / k$_{B}$T [no unit]')
plt.ylabel('Fermi-Dirac Integral [ab. unit]')
plt.xlim(-10, 10)
plt.ylim(1e-5, 25)
plt.legend()
#plt.text(0.55, 0.5, r'E$_{f}$ = 0.5 eV', c='r', fontsize=12)
plt.title(r'Intrinsic Semiconductor')
plt.show()
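# Context note (standard semiconductor statistics, added for clarity and not
# taken from this repo): the plotted integral enters the carrier density as
#   n = Nc * (2/sqrt(pi)) * F_{1/2}((Ef - Ec)/(kB*T)),
# which collapses to the Boltzmann form n ~ Nc * exp((Ef - Ec)/(kB*T)) when
# Ef lies well below Ec -- exactly the region where the two curves coincide.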
|
[
"dadhikar@dadhikar.local"
] |
dadhikar@dadhikar.local
|
5f6535ea4fadf155e1c96cc0508e31a1a8227986
|
9c0bb2bd2788bac007f857eca11a672751c8d808
|
/hello_world.py
|
a2a497ef8c45ee1717f3cbe8af39a268a1c98ac3
|
[] |
no_license
|
michaelorr/10gen
|
e52b5ff697fa845ab523e1268b38502b4bb61c61
|
e938d92a291c1986deb51de84043efff446bc170
|
refs/heads/master
| 2016-09-06T16:00:28.218176
| 2013-02-24T18:48:11
| 2013-02-24T18:48:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
from bottle import route, run, template
@route('/hello/:name')
def index(name='World'):
return template('<b>Hello {{name}}</b>', name=name)
run(host='localhost', port=8888)
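# Hedged usage sketch: with the server running,
#   curl http://localhost:8888/hello/Py
# should return "<b>Hello Py</b>" (route and port taken from the code above;
# the name segment is illustrative).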
|
[
"michael@orr.co"
] |
michael@orr.co
|
e5f68e03c3e1f9546ede6129f969da71ae852f20
|
2369cb94be22a6dcaf5694faf7b40a440d24c3ef
|
/rally_plugins/scenarios/kubernetes/replication_controllers.py
|
bc305f074bf4bebeb74cfd3c4dc2a36732466f0b
|
[
"Apache-2.0"
] |
permissive
|
xinhuihu/rally-plugins
|
c1c5c9a595c4fbe23e81923da224a7ddd06c15dc
|
a26fe046862b4fcf695dd237586134f81953d707
|
refs/heads/master
| 2020-08-09T13:38:53.745630
| 2019-08-02T10:33:57
| 2019-08-02T10:33:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,445
|
py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import scenario
from rally_plugins.scenarios.kubernetes import common as common_scenario
@scenario.configure(name="Kubernetes.create_and_delete_replication_controller",
platform="kubernetes")
class RCCreateAndDelete(common_scenario.BaseKubernetesScenario):
"""Kubernetes replication controller create and delete test.
    Choose created namespace, create replication controller with defined image
    and number of replicas, wait until it is running and delete it afterwards.
"""
def run(self, image, replicas, command=None, status_wait=True):
"""Create and delete replication controller.
:param replicas: number of replicas for replication controller
:param image: replication controller image
:param command: array of strings representing container command
        :param status_wait: wait for the replication controller status
"""
namespace = self.choose_namespace()
name = self.client.create_rc(
replicas=replicas,
image=image,
namespace=namespace,
command=command,
status_wait=status_wait
)
self.client.delete_rc(
name,
namespace=namespace,
status_wait=status_wait
)
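# A minimal rally task sketch for the scenario above (illustrative only; the
# exact image, runner and context options depend on your deployment):
# {
#     "Kubernetes.create_and_delete_replication_controller": [{
#         "args": {"image": "kubernetes/pause", "replicas": 2},
#         "runner": {"type": "constant", "times": 1, "concurrency": 1}
#     }]
# }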
@scenario.configure(
name="Kubernetes.create_scale_and_delete_replication_controller",
platform="kubernetes"
)
class CreateScaleAndDeleteRCPlugin(common_scenario.BaseKubernetesScenario):
"""Kubernetes replication controller scale test.
Create replication controller, scale it with number of replicas,
scale it with original number of replicas, delete replication controller.
"""
def run(self, image, replicas, scale_replicas, command=None,
status_wait=True):
"""Create RC, scale with replicas, revert scale and then delete it.
:param image: RC pod template image
:param replicas: original number of replicas
:param scale_replicas: number of replicas to scale
:param command: array of strings representing container command
        :param status_wait: wait for the replication controller status
"""
namespace = self.choose_namespace()
name = self.client.create_rc(
namespace=namespace,
replicas=replicas,
image=image,
command=command,
status_wait=status_wait
)
self.client.scale_rc(
name,
namespace=namespace,
replicas=scale_replicas,
status_wait=status_wait
)
self.client.scale_rc(
name,
namespace=namespace,
replicas=replicas,
status_wait=status_wait
)
self.client.delete_rc(
name,
namespace=namespace,
status_wait=status_wait
)
|
[
"prazumovsky@mirantis.com"
] |
prazumovsky@mirantis.com
|
fb7e3fc360eec3bf7e5029668cb7c5c927013bc2
|
aa43cbcef5414f240e72b6840b1acc462dccc528
|
/Functions exercises/18.py
|
147de8be9382b2833c8ca80cfe2d2ce048cf7bce
|
[] |
no_license
|
Jeevan5955/Python
|
730a7fd70eb9719e59173231a0530946010da45b
|
7cd46e46787d5ee51524a551f68975c4c9806e48
|
refs/heads/master
| 2022-11-20T11:43:56.586066
| 2020-07-27T05:51:38
| 2020-07-27T05:51:38
| 278,003,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
def count_primes(num):
primes = [2]
x = 3
if num < 2: # for the case of num = 0 or 1
return 0
while x <= num:
for y in range(3,x,2): # test all odd factors up to x-1
if x%y == 0:
x += 2
break
else:
primes.append(x)
x += 2
print(primes)
return len(primes)
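# e.g. count_primes(100) returns 25 (after printing the list of primes up to 100)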
|
[
"noreply@github.com"
] |
Jeevan5955.noreply@github.com
|
92d91153f08b294489b5212a168455b19ff4682c
|
cd8ca699f1f7ba14f731db76acc025ad97de01fe
|
/ref_sys/oper/migrations/0002_alter_invite_status.py
|
b5418854527b652a7a9dbde1148e21a3edcbc8d4
|
[] |
no_license
|
valbayzak/ref_system
|
8784fef3e85683208bed9a0bf3ae7bd632f44146
|
67fc708d5f28ed2c007a825faa17230460e05481
|
refs/heads/main
| 2023-06-03T18:22:01.572896
| 2021-06-21T16:53:17
| 2021-06-21T16:53:17
| 378,987,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
# Generated by Django 3.2.4 on 2021-06-21 14:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('oper', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='invite',
name='status',
field=models.PositiveSmallIntegerField(choices=[(1, 'ACTIVE'), (2, 'NOT_ACTIVE'), (3, 'ACCEPTED')], default=1, verbose_name='Статус'),
),
]
|
[
"85289417+vallbay@users.noreply.github.com"
] |
85289417+vallbay@users.noreply.github.com
|
5cf35eeab105fc35d939285aa6aaed87c88a0b92
|
a91b9cc658421d078520f73320bd4551b74ed51f
|
/dev3/demo3/models.py
|
cb564a1b90f3dcea0ac7262e543cc3dadbf0c4a0
|
[] |
no_license
|
2519325088/dev3
|
e0ea12a54a36dc722747dc693974a0ccd11a5bd9
|
847a49542c2612d7fc879438d65a542158c93f12
|
refs/heads/master
| 2020-05-15T02:19:58.605565
| 2019-04-19T03:19:56
| 2019-04-19T03:19:56
| 182,047,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
from django.db import models
# Create your models here.
class Problem(models.Model):
pname=models.CharField(max_length=200)
def __str__(self):
return self.pname
class Option(models.Model):
oname=models.CharField(max_length=100)
oshu=models.IntegerField(default=0)
pid=models.ForeignKey('Problem',on_delete=models.CASCADE)
def __str__(self):
return self.oname
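# Illustrative shell usage (assumes migrations have been applied):
# p = Problem.objects.create(pname='Which language do you prefer?')
# Option.objects.create(oname='Python', pid=p)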
|
[
"2519325088@qq.com"
] |
2519325088@qq.com
|
79c1cfdd225efbe367313be720d75fd7941a44b2
|
4eebce0d0c1132aed8227325bd58faf61a4010c7
|
/CONTEXT_178/d2.py
|
7a83e4ac92f0948ab14911f4a674624665be9101
|
[] |
no_license
|
xu1718191411/AT_CODE_BEGINNER_SELECTION
|
05836cfcc63dab2a0a82166c8f4c43c82b72686b
|
e4e412733d1a632ce6c33c739064fe036367899e
|
refs/heads/master
| 2021-07-17T00:59:46.315645
| 2020-09-22T06:14:27
| 2020-09-22T06:14:27
| 214,153,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
def calculate(n):
arr = [0 for i in range(2000 + 1)]
arr[3] = 1
arr[4] = 1
arr[5] = 1
for i in range(6, n + 1):
arr[i] = (arr[i - 2] + arr[i - 3] + arr[i - 4]) % (1000000000 + 7)
print(arr[n])
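# e.g. an input of 7 prints 3: arr[6] = 1 + 1 + 0 = 2 and arr[7] = arr[5] + arr[4] + arr[3] = 3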
calculate(int(input()))
|
[
"xu1718191411@gmail.com"
] |
xu1718191411@gmail.com
|
9906f5196c28ef9676151c8ed0c701f327159f25
|
96afce5cdd9c636f066830aa41d4eb9fce1a42d1
|
/pull_list_of_books.py
|
ffd62cd7061bac27e5b98af8e110f5dcf60eee5b
|
[] |
no_license
|
davehedengren/authors
|
d588026f8d29ac96f204bb89d196268ae9fb1faf
|
94c8a288ad5ea6c59313ce4a609ea7b0bc92e1b7
|
refs/heads/master
| 2016-09-06T15:19:03.836097
| 2014-09-20T02:15:36
| 2014-09-20T02:15:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
# Python 2 snippet; assumes my_key (a Goodreads API key), author_id and pg
# (the page number) are defined earlier in the surrounding script.
import requests
import xml.etree.ElementTree as ET
books = requests.get('https://www.goodreads.com/author/list.xml', params={'key':my_key,'id':author_id,'page':pg})
books2 = ET.fromstring(books.content)
fields=[0,4,13,14,15,19]
for book in books2[1][3]:
    x = unicode(str(author_id) + '|' + str(books2[1][1].text) + '|','utf-8','ignore')
    for f in fields:
        y = unicode(book[f].text)
        x += y +'|'
    print x
|
[
"davehedengren@gmail.com"
] |
davehedengren@gmail.com
|
0f537880f6377398a6e8bee99458346243422035
|
3eab5590af67374ac8fab14111c3c9f62da3b809
|
/aes-tests.py
|
c86125adb3d0b0671a05954e0875255f56fca8a5
|
[] |
no_license
|
ungear/cardinal
|
497ed16a0a348155a175d4efc510c3e72d0b5e9b
|
448ce9282d0fcf665fb5c49083367a8c4888c813
|
refs/heads/master
| 2021-09-04T02:08:32.286779
| 2018-01-14T12:37:26
| 2018-01-14T12:37:26
| 114,804,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,915
|
py
|
import aes
import unittest
TEST_MESSAGE_BLOCK = [0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70]
TEST_KEY_BYTES = [0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c]
class TestKeyExpansion(unittest.TestCase):
def testKeyScheduleLastKey(self):
keySchedule = aes.keyExpansion(TEST_KEY_BYTES)
lastKey = keySchedule[len(keySchedule)-1]
self.assertEqual(lastKey, 0xb6630ca6)
def testKeyScheduleLength(self):
keySchedule = aes.keyExpansion(TEST_KEY_BYTES)
self.assertEqual(len(keySchedule), 44)
def testKeyScheduleException(self):
with self.assertRaises(ValueError):
aes.keyExpansion(TEST_KEY_BYTES[:10:])
class TestCreateWord(unittest.TestCase):
def testWord(self):
self.assertEqual(aes.createWord(0xa1, 0x11, 0x3b, 0x59), 0xa1113b59)
class TestRotWord(unittest.TestCase):
def testWord(self):
self.assertEqual(aes.rotWord(0xa13c3b59), 0x3c3b59a1)
class TestSubWord(unittest.TestCase):
def testWord(self):
self.assertEqual(aes.subWord(0xa13c3b59), 0x32ebe2cb)
class TestCreateState(unittest.TestCase):
def testState(self):
state = aes.createState(TEST_MESSAGE_BLOCK)
expectedState = [
TEST_MESSAGE_BLOCK[0::4],
TEST_MESSAGE_BLOCK[1::4],
TEST_MESSAGE_BLOCK[2::4],
TEST_MESSAGE_BLOCK[3::4],
]
self.assertEqual(state, expectedState)
class TestSubBytes(unittest.TestCase):
def testSubBytes(self):
state = aes.createState(TEST_MESSAGE_BLOCK)
expectedBytes = [0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51]
expectedMutatedState = aes.createState(expectedBytes)
aes.subBytes(state)
self.assertEqual(state, expectedMutatedState)
class TestInvSubBytes(unittest.TestCase):
def testSubBytes(self):
state = aes.createState(TEST_MESSAGE_BLOCK)
expectedBytes = [0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, 0xd0]
expectedMutatedState = aes.createState(expectedBytes)
aes.invSubBytes(state)
self.assertEqual(state, expectedMutatedState)
class TestShiftRows(unittest.TestCase):
def testShiftRows(self):
state = aes.createState(TEST_MESSAGE_BLOCK)
expectedBytes = [0x61, 0x66, 0x6b, 0x70, 0x65, 0x6a, 0x6f, 0x64, 0x69, 0x6e, 0x63, 0x68, 0x6d, 0x62, 0x67, 0x6c]
expectedState = aes.createState(expectedBytes)
aes.shiftRows(state)
self.assertEqual(state, expectedState)
class TestInvShiftRows(unittest.TestCase):
def testInvShiftRows(self):
state = aes.createState(TEST_MESSAGE_BLOCK)
expectedBytes = [0x61, 0x6e, 0x6b, 0x68, 0x65, 0x62, 0x6f, 0x6c, 0x69, 0x66, 0x63, 0x70, 0x6d, 0x6a, 0x67, 0x64]
expectedState = aes.createState(expectedBytes)
aes.invShiftRows(state)
self.assertEqual(state, expectedState)
class TestMixColumns(unittest.TestCase):
def testMixColumns(self):
originalBytes = [0xd4, 0xbf, 0x5d, 0x30, 0xe0, 0xb4, 0x52, 0xae, 0xb8, 0x41, 0x11, 0xf1, 0x1e, 0x27, 0x98, 0xe5]
expectedBytes = [0x04, 0x66, 0x81, 0xe5, 0xe0, 0xcb, 0x19, 0x9a, 0x48, 0xf8, 0xd3, 0x7a, 0x28, 0x06, 0x26, 0x4c]
state = aes.createState(originalBytes)
expectedState = aes.createState(expectedBytes)
aes.mixColumns(state)
self.assertEqual(state, expectedState)
class TestInvMixColumns(unittest.TestCase):
def testInvMixColumns(self):
originalBytes = [0x04, 0x66, 0x81, 0xe5, 0xe0, 0xcb, 0x19, 0x9a, 0x48, 0xf8, 0xd3, 0x7a, 0x28, 0x06, 0x26, 0x4c]
expectedBytes = [0xd4, 0xbf, 0x5d, 0x30, 0xe0, 0xb4, 0x52, 0xae, 0xb8, 0x41, 0x11, 0xf1, 0x1e, 0x27, 0x98, 0xe5]
state = aes.createState(originalBytes)
expectedState = aes.createState(expectedBytes)
aes.invMixColumns(state)
self.assertEqual(state, expectedState)
class TestCipher(unittest.TestCase):
def testCipher(self):
inputBytes = [0x32, 0x43, 0xf6, 0xa8, 0x88, 0x5a, 0x30, 0x8d, 0x31, 0x31, 0x98, 0xa2, 0xe0, 0x37, 0x07, 0x34]
exampleCypherKeyBytes = [0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c]
expectedResultBytes = [0x39, 0x25, 0x84, 0x1d, 0x02, 0xdc, 0x09, 0xfb, 0xdc, 0x11, 0x85, 0x97, 0x19, 0x6a, 0x0b, 0x32]
inputState = aes.createState(inputBytes)
expectedState = aes.createState(expectedResultBytes)
keySchedule = aes.keyExpansion(exampleCypherKeyBytes)
result = aes.cipher(inputState, keySchedule)
self.assertEqual(result, expectedState)
def testCipher2(self):
inputBytes = [0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff]
exampleCypherKeyBytes = [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f]
expectedResultBytes = [0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30, 0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a]
inputState = aes.createState(inputBytes)
expectedState = aes.createState(expectedResultBytes)
keySchedule = aes.keyExpansion(exampleCypherKeyBytes)
result = aes.cipher(inputState, keySchedule)
self.assertEqual(result, expectedState)
class TestInvCipher(unittest.TestCase):
def testInvCipher(self):
inputBytes = [0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30, 0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a]
exampleCypherKeyBytes = [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f]
expectedResultBytes = [0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff]
inputState = aes.createState(inputBytes)
expectedState = aes.createState(expectedResultBytes)
keySchedule = aes.keyExpansion(exampleCypherKeyBytes)
result = aes.invCipher(inputState, keySchedule)
self.assertEqual(result, expectedState)
class TestTheWholeProcess(unittest.TestCase):
def testEncriptDecript(self):
plainText = 'idjpi23j023uc0j1-0i-soxl=kixq[wkz=21ks[qqwdqwd'
password = 'dke8qpend'
encodedText = aes.encode(password, plainText)
decodedText = aes.decode(password, encodedText)
self.assertEqual(decodedText, plainText)
class TestGetPasswordHash(unittest.TestCase):
def testHashLength(self):
password7 = '1234567'
password20 = '0123456789abcdef0123'
hash7 = aes.getPasswordHash(password7)
hash20 = aes.getPasswordHash(password20)
self.assertEqual(len(hash7), 16)
self.assertEqual(len(hash20), 16)
if __name__ == '__main__':
unittest.main()
|
[
"redeemer@inbox.ru"
] |
redeemer@inbox.ru
|
a1206366cfe0cff96c0e2306766ca9fd485e3b71
|
ec61946a176935044d08cf1244d2185f2460df32
|
/pyleecan/Methods/Machine/Lamination/comp_surface_axial_vent.py
|
67a12115e86fe145e22c9c128345b44e666dd252
|
[
"Apache-2.0"
] |
permissive
|
Lunreth/pyleecan
|
d3974a144cb8a6c332339ab0426f1630b7516fc9
|
1faedde4b24acc6361fa1fdd4e980eaec4ca3a62
|
refs/heads/master
| 2023-06-07T01:46:32.453763
| 2021-07-01T21:29:51
| 2021-07-01T21:29:51
| 383,880,732
| 1
| 0
|
Apache-2.0
| 2021-07-07T17:47:01
| 2021-07-07T17:47:01
| null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
# -*- coding: utf-8 -*-
def comp_surface_axial_vent(self):
"""Compute the Lamination axial vent
Parameters
----------
self : Lamination
A Lamination object
Returns
-------
Svent: float
Surface of the Lamination's axial ventilation [m**2]
"""
if len(self.axial_vent) > 0:
return sum([vent.comp_surface() for vent in self.axial_vent])
else:
return 0
|
[
"pierre.bonneel@eomys.com"
] |
pierre.bonneel@eomys.com
|
ab8a37c6cec2cd67dee3e609825cc30311aeeacd
|
6d8ed4233a766ed34f3e3924fcba241e11341cbc
|
/TestBase64.py
|
bccaf7d0f0bab3149626f3d0982c6fd43a30a27b
|
[] |
no_license
|
kuxingseng/learnPython
|
56b77b01ddfbc3c8483d8abec2bd1eea186b6f19
|
73a746a9329f0ba3bfabb7f5e47864364ed44391
|
refs/heads/master
| 2021-09-12T11:07:33.951552
| 2018-04-08T02:42:05
| 2018-04-08T02:42:05
| 113,518,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
# Base64 is a method for representing arbitrary binary data with 64 characters.
# Every 6 bits are converted into one character.
import base64
b64_encode = base64.b64encode(b'test')
print(b64_encode)
b64_decode = base64.b64decode(b64_encode)
print(b64_decode)
def safe_base64_decode(s):
    # pad to a multiple of 4: -len(s) % 4 gives how many b'=' are missing
    # (multiplying an int by a bytes object repeats the bytes that many times;
    # the original len(s) % 4 over-padded strings whose length % 4 == 3)
    s = s + -len(s) % 4 * b'='
    return base64.b64decode(s)
print(safe_base64_decode(b'dGVzdA'))
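# b'dGVzdA' has length 6; -6 % 4 == 2, so two b'=' are appended before decoding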
print(5 * 't')
|
[
"chshuai@hotmail.com"
] |
chshuai@hotmail.com
|
41496b02cff46275f641d32d95dd21b748f7f1e3
|
08ff60b74fe11b8aa3d01e69de58a12d44aa6c0b
|
/webServer/webServer.py
|
0a08cd1e39bef63318e3ee77a5247383260cec21
|
[] |
no_license
|
jim-stickney/aquariumController
|
cee7dc477ff6db64adce91911d90b0158f9b31c1
|
a3c4de39fafe21a209c4eeae4a7d4712b9e51eb6
|
refs/heads/master
| 2016-09-06T09:35:27.389195
| 2016-02-27T19:13:52
| 2016-02-27T19:13:52
| 33,930,196
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,875
|
py
|
from flask import Flask, render_template, Markup
import datetime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy
plt.ioff()
from dateutil import parser
import mpld3
import Pyro4
app = Flask(__name__)
data_logger = Pyro4.Proxy('PYRONAME:data.logger')
@app.route("/")
def hello():
times = []
temps = []
data = data_logger.getData()
if 'avgTemps' in data.keys():
temps += data['avgTemps']
for t in data['avgTimes']:
times.append(parser.parse(t))
if 'temps' in data.keys():
temps += data['temps']
for t in data['time']:
times.append(parser.parse(t))
temps = numpy.array(temps)
fig, ax = plt.subplots()
ax.cla()
ax.plot_date(times, temps, '-')
ax.set_xlabel("Time")
ax.set_ylabel("Temperature (in deg F)")
fig1, ax1 = plt.subplots()
ax1.cla()
nT = len(data['thermostatTime1'])
thermostatTimes = [0]*(nT+1)
thermostatState = numpy.zeros(nT+1)
for iii in range(nT):
thermostatState[iii] = int(data['thermostatState1'][iii] )
thermostatTimes[iii] = parser.parse(data['thermostatTime1'][iii])
thermostatTimes[-1] = datetime.datetime.now()
thermostatState[-1] = thermostatState[-2]
ax1.plot_date(thermostatTimes, thermostatState, '-')
nT = len(data['thermostatTime15'])
thermostatTimes = [0]*(nT+1)
thermostatState = numpy.zeros(nT+1)
for iii in range(nT):
thermostatState[iii] = int(data['thermostatState15'][iii] )
thermostatTimes[iii] = parser.parse(data['thermostatTime15'][iii])
thermostatTimes[-1] = datetime.datetime.now()
thermostatState[-1] = thermostatState[-2]
ax1.plot_date(thermostatTimes, thermostatState, '-')
ax1.set_xlabel("Time")
ax1.set_ylabel("Thermostat State")
nT = len(data['fillingTime0'])
fillingTimes = [0]*(nT+1)
fillingState = numpy.zeros(nT+1)
for iii in range(nT):
fillingState[iii] = int(data['fillingState0'][iii] )
fillingTimes[iii] = parser.parse(data['fillingTime0'][iii])
fillingTimes[-1] = datetime.datetime.now()
fillingState[-1] = fillingState[-2]
fig2, ax2 = plt.subplots()
ax2.cla()
ax2.plot_date(fillingTimes, fillingState, '-')
ax2.set_xlabel("Time")
ax2.set_ylabel("Filling State")
now = datetime.datetime.now()
timeString = now.strftime("%Y-%m-%d %H:%M")
templateData = {'title': "My aquarium status",
'time': timeString,
'tempFigure' : Markup(mpld3.fig_to_html(fig)),
'thermostatFigure' : Markup(mpld3.fig_to_html(fig1)),
'fillingFigure' : Markup(mpld3.fig_to_html(fig2)),
}
return render_template('main.html', **templateData)
app.run(host='0.0.0.0', port=80, debug=True)
|
[
"jim.stickney@gmail.com"
] |
jim.stickney@gmail.com
|
0886616bd81e0a2e31e16fed2ae9620947223dac
|
ae326c4e6a2b2d5b67fa8d175249ef90f6a3021a
|
/leo/external/rope/ropetest/refactor/extracttest.py
|
167f7984d254f4be25e2554d9f39807e0827d542
|
[
"GPL-2.0-only",
"GPL-1.0-or-later",
"MIT"
] |
permissive
|
frakel/leo-editor
|
f95e6c77d60485d80fddfbeaf35db961cf691177
|
b574118ee3b7ffe8344fa0d00dac603096117ac7
|
refs/heads/master
| 2020-03-28T10:40:24.621077
| 2018-10-23T14:39:31
| 2018-10-23T14:39:31
| 148,132,817
| 0
| 0
|
MIT
| 2018-09-10T09:40:18
| 2018-09-10T09:40:18
| null |
UTF-8
|
Python
| false
| false
| 42,781
|
py
|
try:
import unittest2 as unittest
except ImportError:
import unittest
import rope.base.codeanalyze
import rope.base.exceptions
from rope.refactor import extract
from ropetest import testutils
class ExtractMethodTest(unittest.TestCase):
def setUp(self):
super(ExtractMethodTest, self).setUp()
self.project = testutils.sample_project()
self.pycore = self.project.pycore
def tearDown(self):
testutils.remove_project(self.project)
super(ExtractMethodTest, self).tearDown()
def do_extract_method(self, source_code, start, end, extracted, **kwds):
testmod = testutils.create_module(self.project, 'testmod')
testmod.write(source_code)
extractor = extract.ExtractMethod(
self.project, testmod, start, end)
self.project.do(extractor.get_changes(extracted, **kwds))
return testmod.read()
def do_extract_variable(self, source_code, start, end, extracted, **kwds):
testmod = testutils.create_module(self.project, 'testmod')
testmod.write(source_code)
extractor = extract.ExtractVariable(self.project, testmod, start, end)
self.project.do(extractor.get_changes(extracted, **kwds))
return testmod.read()
def _convert_line_range_to_offset(self, code, start, end):
lines = rope.base.codeanalyze.SourceLinesAdapter(code)
return lines.get_line_start(start), lines.get_line_end(end)
def test_simple_extract_function(self):
code = "def a_func():\n print('one')\n print('two')\n"
start, end = self._convert_line_range_to_offset(code, 2, 2)
refactored = self.do_extract_method(code, start, end, 'extracted')
expected = "def a_func():\n extracted()\n print('two')\n\n" \
"def extracted():\n print('one')\n"
self.assertEquals(expected, refactored)
def test_extract_function_at_the_end_of_file(self):
code = "def a_func():\n print('one')"
start, end = self._convert_line_range_to_offset(code, 2, 2)
refactored = self.do_extract_method(code, start, end, 'extracted')
expected = "def a_func():\n extracted()\n" \
"def extracted():\n print('one')\n"
self.assertEquals(expected, refactored)
def test_extract_function_after_scope(self):
code = "def a_func():\n print('one')\n print('two')" \
"\n\nprint('hey')\n"
start, end = self._convert_line_range_to_offset(code, 2, 2)
refactored = self.do_extract_method(code, start, end, 'extracted')
expected = "def a_func():\n extracted()\n print('two')\n\n" \
"def extracted():\n print('one')\n\nprint('hey')\n"
self.assertEquals(expected, refactored)
def test_simple_extract_function_with_parameter(self):
code = "def a_func():\n a_var = 10\n print(a_var)\n"
start, end = self._convert_line_range_to_offset(code, 3, 3)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = "def a_func():\n a_var = 10\n new_func(a_var)\n\n" \
"def new_func(a_var):\n print(a_var)\n"
self.assertEquals(expected, refactored)
def test_not_unread_variables_as_parameter(self):
code = "def a_func():\n a_var = 10\n print('hey')\n"
start, end = self._convert_line_range_to_offset(code, 3, 3)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = "def a_func():\n a_var = 10\n new_func()\n\n" \
"def new_func():\n print('hey')\n"
self.assertEquals(expected, refactored)
def test_simple_extract_function_with_two_parameter(self):
code = 'def a_func():\n a_var = 10\n another_var = 20\n' \
' third_var = a_var + another_var\n'
start, end = self._convert_line_range_to_offset(code, 4, 4)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func():\n a_var = 10\n another_var = 20\n' \
' new_func(a_var, another_var)\n\n' \
'def new_func(a_var, another_var):\n' \
' third_var = a_var + another_var\n'
self.assertEquals(expected, refactored)
def test_simple_extract_function_with_return_value(self):
code = 'def a_func():\n a_var = 10\n print(a_var)\n'
start, end = self._convert_line_range_to_offset(code, 2, 2)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func():\n a_var = new_func()' \
'\n print(a_var)\n\n' \
'def new_func():\n a_var = 10\n return a_var\n'
self.assertEquals(expected, refactored)
def test_extract_function_with_multiple_return_values(self):
code = 'def a_func():\n a_var = 10\n another_var = 20\n' \
' third_var = a_var + another_var\n'
start, end = self._convert_line_range_to_offset(code, 2, 3)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func():\n a_var, another_var = new_func()\n' \
' third_var = a_var + another_var\n\n' \
'def new_func():\n a_var = 10\n another_var = 20\n' \
' return a_var, another_var\n'
self.assertEquals(expected, refactored)
def test_simple_extract_method(self):
code = 'class AClass(object):\n\n' \
' def a_func(self):\n print(1)\n print(2)\n'
start, end = self._convert_line_range_to_offset(code, 4, 4)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'class AClass(object):\n\n' \
' def a_func(self):\n' \
' self.new_func()\n' \
' print(2)\n\n' \
' def new_func(self):\n print(1)\n'
self.assertEquals(expected, refactored)
def test_extract_method_with_args_and_returns(self):
code = 'class AClass(object):\n' \
' def a_func(self):\n' \
' a_var = 10\n' \
' another_var = a_var * 3\n' \
' third_var = a_var + another_var\n'
start, end = self._convert_line_range_to_offset(code, 4, 4)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'class AClass(object):\n' \
' def a_func(self):\n' \
' a_var = 10\n' \
' another_var = self.new_func(a_var)\n' \
' third_var = a_var + another_var\n\n' \
' def new_func(self, a_var):\n' \
' another_var = a_var * 3\n' \
' return another_var\n'
self.assertEquals(expected, refactored)
def test_extract_method_with_self_as_argument(self):
code = 'class AClass(object):\n' \
' def a_func(self):\n' \
' print(self)\n'
start, end = self._convert_line_range_to_offset(code, 3, 3)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'class AClass(object):\n' \
' def a_func(self):\n' \
' self.new_func()\n\n' \
' def new_func(self):\n' \
' print(self)\n'
self.assertEquals(expected, refactored)
def test_extract_method_with_no_self_as_argument(self):
code = 'class AClass(object):\n' \
' def a_func():\n' \
' print(1)\n'
start, end = self._convert_line_range_to_offset(code, 3, 3)
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_extract_method_with_multiple_methods(self):
code = 'class AClass(object):\n' \
' def a_func(self):\n' \
' print(self)\n\n' \
' def another_func(self):\n' \
' pass\n'
start, end = self._convert_line_range_to_offset(code, 3, 3)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'class AClass(object):\n' \
' def a_func(self):\n' \
' self.new_func()\n\n' \
' def new_func(self):\n' \
' print(self)\n\n' \
' def another_func(self):\n' \
' pass\n'
self.assertEquals(expected, refactored)
def test_extract_function_with_function_returns(self):
code = 'def a_func():\n def inner_func():\n pass\n' \
' inner_func()\n'
start, end = self._convert_line_range_to_offset(code, 2, 3)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func():\n' \
' inner_func = new_func()\n inner_func()\n\n' \
'def new_func():\n' \
' def inner_func():\n pass\n' \
' return inner_func\n'
self.assertEquals(expected, refactored)
def test_simple_extract_global_function(self):
code = "print('one')\nprint('two')\nprint('three')\n"
start, end = self._convert_line_range_to_offset(code, 2, 2)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = "print('one')\n\ndef new_func():\n print('two')\n" \
"\nnew_func()\nprint('three')\n"
self.assertEquals(expected, refactored)
def test_extract_global_function_inside_ifs(self):
code = 'if True:\n a = 10\n'
start, end = self._convert_line_range_to_offset(code, 2, 2)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = '\ndef new_func():\n a = 10\n\nif True:\n' \
' new_func()\n'
self.assertEquals(expected, refactored)
def test_extract_function_while_inner_function_reads(self):
code = 'def a_func():\n a_var = 10\n' \
' def inner_func():\n print(a_var)\n' \
' return inner_func\n'
start, end = self._convert_line_range_to_offset(code, 3, 4)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func():\n a_var = 10\n' \
' inner_func = new_func(a_var)' \
'\n return inner_func\n\n' \
'def new_func(a_var):\n' \
' def inner_func():\n print(a_var)\n' \
' return inner_func\n'
self.assertEquals(expected, refactored)
def test_extract_method_bad_range(self):
code = "def a_func():\n pass\na_var = 10\n"
start, end = self._convert_line_range_to_offset(code, 2, 3)
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_extract_method_bad_range2(self):
code = "class AClass(object):\n pass\n"
start, end = self._convert_line_range_to_offset(code, 1, 1)
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_extract_method_containing_return(self):
code = 'def a_func(arg):\n if arg:\n return arg * 2' \
'\n return 1'
start, end = self._convert_line_range_to_offset(code, 2, 4)
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_extract_method_containing_yield(self):
code = "def a_func(arg):\n yield arg * 2\n"
start, end = self._convert_line_range_to_offset(code, 2, 2)
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_extract_method_containing_uncomplete_lines(self):
code = 'a_var = 20\nanother_var = 30\n'
start = code.index('20')
end = code.index('30') + 2
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_extract_method_containing_uncomplete_lines2(self):
code = 'a_var = 20\nanother_var = 30\n'
start = code.index('20')
end = code.index('another') + 5
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
    def test_extract_function_and_argument_as_parameter(self):
code = 'def a_func(arg):\n print(arg)\n'
start, end = self._convert_line_range_to_offset(code, 2, 2)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func(arg):\n new_func(arg)\n\n' \
'def new_func(arg):\n print(arg)\n'
self.assertEquals(expected, refactored)
def test_extract_function_and_end_as_the_start_of_a_line(self):
code = 'print("hey")\nif True:\n pass\n'
start = 0
end = code.index('\n') + 1
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = '\ndef new_func():\n print("hey")\n\n' \
'new_func()\nif True:\n pass\n'
self.assertEquals(expected, refactored)
def test_extract_function_and_indented_blocks(self):
code = 'def a_func(arg):\n if True:\n' \
' if True:\n print(arg)\n'
start, end = self._convert_line_range_to_offset(code, 3, 4)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func(arg):\n ' \
'if True:\n new_func(arg)\n\n' \
'def new_func(arg):\n if True:\n print(arg)\n'
self.assertEquals(expected, refactored)
def test_extract_method_and_multi_line_headers(self):
code = 'def a_func(\n arg):\n print(arg)\n'
start, end = self._convert_line_range_to_offset(code, 3, 3)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func(\n arg):\n new_func(arg)\n\n' \
'def new_func(arg):\n print(arg)\n'
self.assertEquals(expected, refactored)
def test_single_line_extract_function(self):
code = 'a_var = 10 + 20\n'
start = code.index('10')
end = code.index('20') + 2
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = "\ndef new_func():\n " \
"return 10 + 20\n\na_var = new_func()\n"
self.assertEquals(expected, refactored)
def test_single_line_extract_function2(self):
code = 'def a_func():\n a = 10\n b = a * 20\n'
start = code.rindex('a')
end = code.index('20') + 2
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func():\n a = 10\n b = new_func(a)\n' \
'\ndef new_func(a):\n return a * 20\n'
self.assertEquals(expected, refactored)
def test_single_line_extract_method_and_logical_lines(self):
code = 'a_var = 10 +\\\n 20\n'
start = code.index('10')
end = code.index('20') + 2
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = '\ndef new_func():\n ' \
'return 10 + 20\n\na_var = new_func()\n'
self.assertEquals(expected, refactored)
def test_single_line_extract_method_and_logical_lines2(self):
code = 'a_var = (10,\\\n 20)\n'
start = code.index('10') - 1
end = code.index('20') + 3
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = '\ndef new_func():\n' \
' return (10, 20)\n\na_var = new_func()\n'
self.assertEquals(expected, refactored)
def test_single_line_extract_method(self):
code = "class AClass(object):\n\n" \
" def a_func(self):\n a = 10\n b = a * a\n"
start = code.rindex('=') + 2
end = code.rindex('a') + 1
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'class AClass(object):\n\n' \
' def a_func(self):\n' \
' a = 10\n b = self.new_func(a)\n\n' \
' def new_func(self, a):\n return a * a\n'
self.assertEquals(expected, refactored)
def test_single_line_extract_function_if_condition(self):
code = 'if True:\n pass\n'
start = code.index('True')
end = code.index('True') + 4
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = "\ndef new_func():\n return True\n\nif new_func():" \
"\n pass\n"
self.assertEquals(expected, refactored)
def test_unneeded_params(self):
code = 'class A(object):\n ' \
'def a_func(self):\n a_var = 10\n a_var += 2\n'
start = code.rindex('2')
end = code.rindex('2') + 1
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'class A(object):\n' \
' def a_func(self):\n a_var = 10\n' \
' a_var += self.new_func()\n\n' \
' def new_func(self):\n return 2\n'
self.assertEquals(expected, refactored)
def test_breaks_and_continues_inside_loops(self):
code = 'def a_func():\n for i in range(10):\n continue\n'
start = code.index('for')
end = len(code) - 1
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func():\n new_func()\n\n' \
'def new_func():\n' \
' for i in range(10):\n continue\n'
self.assertEquals(expected, refactored)
def test_breaks_and_continues_outside_loops(self):
code = 'def a_func():\n' \
' for i in range(10):\n a = i\n continue\n'
start = code.index('a = i')
end = len(code) - 1
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_variable_writes_followed_by_variable_reads_after_extraction(self):
code = 'def a_func():\n a = 1\n a = 2\n b = a\n'
start = code.index('a = 1')
end = code.index('a = 2') - 1
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func():\n new_func()\n a = 2\n b = a\n\n' \
'def new_func():\n a = 1\n'
self.assertEquals(expected, refactored)
def test_var_writes_followed_by_var_reads_inside_extraction(self):
code = 'def a_func():\n a = 1\n a = 2\n b = a\n'
start = code.index('a = 2')
end = len(code) - 1
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func():\n a = 1\n new_func()\n\n' \
'def new_func():\n a = 2\n b = a\n'
self.assertEquals(expected, refactored)
def test_extract_variable(self):
code = 'a_var = 10 + 20\n'
start = code.index('10')
end = code.index('20') + 2
refactored = self.do_extract_variable(code, start, end, 'new_var')
expected = 'new_var = 10 + 20\na_var = new_var\n'
self.assertEquals(expected, refactored)
def test_extract_variable_multiple_lines(self):
code = 'a = 1\nb = 2\n'
start = code.index('1')
end = code.index('1') + 1
refactored = self.do_extract_variable(code, start, end, 'c')
expected = 'c = 1\na = c\nb = 2\n'
self.assertEquals(expected, refactored)
def test_extract_variable_in_the_middle_of_statements(self):
code = 'a = 1 + 2\n'
start = code.index('1')
end = code.index('1') + 1
refactored = self.do_extract_variable(code, start, end, 'c')
expected = 'c = 1\na = c + 2\n'
self.assertEquals(expected, refactored)
def test_extract_variable_for_a_tuple(self):
code = 'a = 1, 2\n'
start = code.index('1')
end = code.index('2') + 1
refactored = self.do_extract_variable(code, start, end, 'c')
expected = 'c = 1, 2\na = c\n'
self.assertEquals(expected, refactored)
def test_extract_variable_for_a_string(self):
code = 'def a_func():\n a = "hey!"\n'
start = code.index('"')
end = code.rindex('"') + 1
refactored = self.do_extract_variable(code, start, end, 'c')
expected = 'def a_func():\n c = "hey!"\n a = c\n'
self.assertEquals(expected, refactored)
def test_extract_variable_inside_ifs(self):
code = 'if True:\n a = 1 + 2\n'
start = code.index('1')
end = code.rindex('2') + 1
refactored = self.do_extract_variable(code, start, end, 'b')
expected = 'if True:\n b = 1 + 2\n a = b\n'
self.assertEquals(expected, refactored)
def test_extract_variable_inside_ifs_and_logical_lines(self):
code = 'if True:\n a = (3 + \n(1 + 2))\n'
start = code.index('1')
end = code.index('2') + 1
refactored = self.do_extract_variable(code, start, end, 'b')
expected = 'if True:\n b = 1 + 2\n a = (3 + \n(b))\n'
self.assertEquals(expected, refactored)
# TODO: Handle when extracting a subexpression
def xxx_test_extract_variable_for_a_subexpression(self):
code = 'a = 3 + 1 + 2\n'
start = code.index('1')
end = code.index('2') + 1
refactored = self.do_extract_variable(code, start, end, 'b')
expected = 'b = 1 + 2\na = 3 + b\n'
self.assertEquals(expected, refactored)
def test_extract_variable_starting_from_the_start_of_the_line(self):
code = 'a_dict = {1: 1}\na_dict.values().count(1)\n'
start = code.rindex('a_dict')
end = code.index('count') - 1
refactored = self.do_extract_variable(code, start, end, 'values')
expected = 'a_dict = {1: 1}\n' \
'values = a_dict.values()\nvalues.count(1)\n'
self.assertEquals(expected, refactored)
def test_extract_variable_on_the_last_line_of_a_function(self):
code = 'def f():\n a_var = {}\n a_var.keys()\n'
start = code.rindex('a_var')
end = code.index('.keys')
refactored = self.do_extract_variable(code, start, end, 'new_var')
expected = 'def f():\n a_var = {}\n ' \
'new_var = a_var\n new_var.keys()\n'
self.assertEquals(expected, refactored)
def test_extract_variable_on_the_indented_function_statement(self):
code = 'def f():\n if True:\n a_var = 1 + 2\n'
start = code.index('1')
end = code.index('2') + 1
refactored = self.do_extract_variable(code, start, end, 'new_var')
expected = 'def f():\n if True:\n' \
' new_var = 1 + 2\n a_var = new_var\n'
self.assertEquals(expected, refactored)
def test_extract_method_on_the_last_line_of_a_function(self):
code = 'def f():\n a_var = {}\n a_var.keys()\n'
start = code.rindex('a_var')
end = code.index('.keys')
refactored = self.do_extract_method(code, start, end, 'new_f')
expected = 'def f():\n a_var = {}\n new_f(a_var).keys()\n\n' \
'def new_f(a_var):\n return a_var\n'
self.assertEquals(expected, refactored)
def test_raising_exception_when_on_incomplete_variables(self):
code = 'a_var = 10 + 20\n'
start = code.index('10') + 1
end = code.index('20') + 2
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_raising_exception_when_on_incomplete_variables_on_end(self):
code = 'a_var = 10 + 20\n'
start = code.index('10')
end = code.index('20') + 1
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_raising_exception_on_bad_parens(self):
code = 'a_var = (10 + 20) + 30\n'
start = code.index('20')
end = code.index('30') + 2
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_raising_exception_on_bad_operators(self):
code = 'a_var = 10 + 20 + 30\n'
start = code.index('10')
end = code.rindex('+') + 1
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
# FIXME: Extract method should be more intelligent about bad ranges
def xxx_test_raising_exception_on_function_parens(self):
code = 'a = range(10)'
start = code.index('(')
end = code.rindex(')') + 1
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_extract_method_and_extra_blank_lines(self):
code = '\nprint(1)\n'
refactored = self.do_extract_method(code, 0, len(code), 'new_f')
expected = '\n\ndef new_f():\n print(1)\n\nnew_f()\n'
self.assertEquals(expected, refactored)
def test_variable_writes_in_the_same_line_as_variable_read(self):
code = 'a = 1\na = 1 + a\n'
start = code.index('\n') + 1
end = len(code)
refactored = self.do_extract_method(code, start, end, 'new_f',
global_=True)
expected = 'a = 1\n\ndef new_f(a):\n a = 1 + a\n\nnew_f(a)\n'
self.assertEquals(expected, refactored)
def test_variable_writes_in_the_same_line_as_variable_read2(self):
code = 'a = 1\na += 1\n'
start = code.index('\n') + 1
end = len(code)
refactored = self.do_extract_method(code, start, end, 'new_f',
global_=True)
expected = 'a = 1\n\ndef new_f():\n a += 1\n\nnew_f()\n'
self.assertEquals(expected, refactored)
def test_variable_and_similar_expressions(self):
code = 'a = 1\nb = 1\n'
start = code.index('1')
end = start + 1
refactored = self.do_extract_variable(code, start, end,
'one', similar=True)
expected = 'one = 1\na = one\nb = one\n'
self.assertEquals(expected, refactored)
def test_definition_should_appear_before_the_first_use(self):
code = 'a = 1\nb = 1\n'
start = code.rindex('1')
end = start + 1
refactored = self.do_extract_variable(code, start, end,
'one', similar=True)
expected = 'one = 1\na = one\nb = one\n'
self.assertEquals(expected, refactored)
def test_extract_method_and_similar_expressions(self):
code = 'a = 1\nb = 1\n'
start = code.index('1')
end = start + 1
refactored = self.do_extract_method(code, start, end,
'one', similar=True)
expected = '\ndef one():\n return 1\n\na = one()\nb = one()\n'
self.assertEquals(expected, refactored)
def test_simple_extract_method_and_similar_statements(self):
code = 'class AClass(object):\n\n' \
' def func1(self):\n a = 1 + 2\n b = a\n' \
' def func2(self):\n a = 1 + 2\n b = a\n'
start, end = self._convert_line_range_to_offset(code, 4, 4)
refactored = self.do_extract_method(code, start, end,
'new_func', similar=True)
expected = 'class AClass(object):\n\n' \
' def func1(self):\n' \
' a = self.new_func()\n b = a\n\n' \
' def new_func(self):\n' \
' a = 1 + 2\n return a\n' \
' def func2(self):\n' \
' a = self.new_func()\n b = a\n'
self.assertEquals(expected, refactored)
def test_extract_method_and_similar_statements2(self):
code = 'class AClass(object):\n\n' \
' def func1(self, p1):\n a = p1 + 2\n' \
' def func2(self, p2):\n a = p2 + 2\n'
start = code.rindex('p1')
end = code.index('2\n') + 1
refactored = self.do_extract_method(code, start, end,
'new_func', similar=True)
expected = 'class AClass(object):\n\n' \
' def func1(self, p1):\n ' \
'a = self.new_func(p1)\n\n' \
' def new_func(self, p1):\n return p1 + 2\n' \
' def func2(self, p2):\n a = self.new_func(p2)\n'
self.assertEquals(expected, refactored)
    def test_extract_method_and_similar_statements_return_is_different(self):
code = 'class AClass(object):\n\n' \
' def func1(self, p1):\n a = p1 + 2\n' \
' def func2(self, p2):\n self.attr = p2 + 2\n'
start = code.rindex('p1')
end = code.index('2\n') + 1
refactored = self.do_extract_method(code, start, end,
'new_func', similar=True)
expected = 'class AClass(object):\n\n' \
' def func1(self, p1):' \
'\n a = self.new_func(p1)\n\n' \
' def new_func(self, p1):\n return p1 + 2\n' \
' def func2(self, p2):\n' \
' self.attr = self.new_func(p2)\n'
self.assertEquals(expected, refactored)
def test_definition_should_appear_where_it_is_visible(self):
code = 'if True:\n a = 1\nelse:\n b = 1\n'
start = code.rindex('1')
end = start + 1
refactored = self.do_extract_variable(code, start, end,
'one', similar=True)
expected = 'one = 1\nif True:\n a = one\nelse:\n b = one\n'
self.assertEquals(expected, refactored)
def test_extract_variable_and_similar_statements_in_classes(self):
code = 'class AClass(object):\n\n' \
' def func1(self):\n a = 1\n' \
' def func2(self):\n b = 1\n'
start = code.index(' 1') + 1
refactored = self.do_extract_variable(code, start, start + 1,
'one', similar=True)
expected = 'class AClass(object):\n\n' \
' def func1(self):\n one = 1\n a = one\n' \
' def func2(self):\n b = 1\n'
self.assertEquals(expected, refactored)
def test_extract_method_in_staticmethods(self):
code = 'class AClass(object):\n\n' \
' @staticmethod\n def func2():\n b = 1\n'
start = code.index(' 1') + 1
refactored = self.do_extract_method(code, start, start + 1,
'one', similar=True)
expected = 'class AClass(object):\n\n' \
' @staticmethod\n def func2():\n' \
' b = AClass.one()\n\n' \
' @staticmethod\n def one():\n' \
' return 1\n'
self.assertEquals(expected, refactored)
def test_extract_normal_method_with_staticmethods(self):
code = 'class AClass(object):\n\n' \
' @staticmethod\n def func1():\n b = 1\n' \
' def func2(self):\n b = 1\n'
start = code.rindex(' 1') + 1
refactored = self.do_extract_method(code, start, start + 1,
'one', similar=True)
expected = 'class AClass(object):\n\n' \
' @staticmethod\n def func1():\n b = 1\n' \
' def func2(self):\n b = self.one()\n\n' \
' def one(self):\n return 1\n'
self.assertEquals(expected, refactored)
def test_extract_variable_with_no_new_lines_at_the_end(self):
code = 'a_var = 10'
start = code.index('10')
end = start + 2
refactored = self.do_extract_variable(code, start, end, 'new_var')
expected = 'new_var = 10\na_var = new_var'
self.assertEquals(expected, refactored)
def test_extract_method_containing_return_in_functions(self):
code = 'def f(arg):\n return arg\nprint(f(1))\n'
start, end = self._convert_line_range_to_offset(code, 1, 3)
refactored = self.do_extract_method(code, start, end, 'a_func')
expected = '\ndef a_func():\n def f(arg):\n return arg\n' \
' print(f(1))\n\na_func()\n'
self.assertEquals(expected, refactored)
def test_extract_method_and_varying_first_parameter(self):
code = 'class C(object):\n' \
' def f1(self):\n print(str(self))\n' \
' def f2(self):\n print(str(1))\n'
start = code.index('print(') + 6
end = code.index('))\n') + 1
refactored = self.do_extract_method(code, start, end,
'to_str', similar=True)
expected = 'class C(object):\n' \
' def f1(self):\n print(self.to_str())\n\n' \
' def to_str(self):\n return str(self)\n' \
' def f2(self):\n print(str(1))\n'
self.assertEquals(expected, refactored)
def test_extract_method_when_an_attribute_exists_in_function_scope(self):
code = 'class A(object):\n def func(self):\n pass\n' \
'a = A()\n' \
'def f():\n' \
' func = a.func()\n' \
' print func\n'
start, end = self._convert_line_range_to_offset(code, 6, 6)
refactored = self.do_extract_method(code, start, end, 'g')
refactored = refactored[refactored.index('A()') + 4:]
expected = 'def f():\n func = g()\n print func\n\n' \
'def g():\n func = a.func()\n return func\n'
self.assertEquals(expected, refactored)
def test_global_option_for_extract_method(self):
code = 'def a_func():\n print(1)\n'
start, end = self._convert_line_range_to_offset(code, 2, 2)
refactored = self.do_extract_method(code, start, end,
'extracted', global_=True)
expected = 'def a_func():\n extracted()\n\n' \
'def extracted():\n print(1)\n'
self.assertEquals(expected, refactored)
def test_global_extract_method(self):
code = 'class AClass(object):\n\n' \
' def a_func(self):\n print(1)\n'
start, end = self._convert_line_range_to_offset(code, 4, 4)
refactored = self.do_extract_method(code, start, end,
'new_func', global_=True)
expected = 'class AClass(object):\n\n' \
' def a_func(self):\n new_func()\n\n' \
'def new_func():\n print(1)\n'
self.assertEquals(expected, refactored)
    def test_extract_method_with_multiple_methods_global(self):
code = 'class AClass(object):\n' \
' def a_func(self):\n' \
' print(1)\n\n' \
' def another_func(self):\n' \
' pass\n'
start, end = self._convert_line_range_to_offset(code, 3, 3)
refactored = self.do_extract_method(code, start, end,
'new_func', global_=True)
expected = 'class AClass(object):\n' \
' def a_func(self):\n' \
' new_func()\n\n' \
' def another_func(self):\n' \
' pass\n\n' \
'def new_func():\n' \
' print(1)\n'
self.assertEquals(expected, refactored)
    def test_where_to_search_when_extracting_global_names(self):
code = 'def a():\n return 1\ndef b():\n return 1\nb = 1\n'
start = code.index('1')
end = start + 1
refactored = self.do_extract_variable(code, start, end, 'one',
similar=True, global_=True)
expected = 'def a():\n return one\none = 1\n' \
'def b():\n return one\nb = one\n'
self.assertEquals(expected, refactored)
def test_extracting_pieces_with_distinct_temp_names(self):
code = 'a = 1\nprint a\nb = 1\nprint b\n'
start = code.index('a')
end = code.index('\nb')
refactored = self.do_extract_method(code, start, end, 'f',
similar=True, global_=True)
expected = '\ndef f():\n a = 1\n print a\n\nf()\nf()\n'
self.assertEquals(expected, refactored)
def test_extract_methods_in_glob_funcs_should_be_glob(self):
code = 'def f():\n a = 1\ndef g():\n b = 1\n'
start = code.rindex('1')
refactored = self.do_extract_method(code, start, start + 1, 'one',
similar=True, global_=False)
expected = 'def f():\n a = one()\ndef g():\n b = one()\n\n' \
'def one():\n return 1\n'
self.assertEquals(expected, refactored)
def test_extract_methods_in_glob_funcs_should_be_glob_2(self):
code = 'if 1:\n var = 2\n'
start = code.rindex('2')
refactored = self.do_extract_method(code, start, start + 1, 'two',
similar=True, global_=False)
expected = '\ndef two():\n return 2\n\nif 1:\n var = two()\n'
self.assertEquals(expected, refactored)
def test_extract_method_and_try_blocks(self):
code = 'def f():\n try:\n pass\n' \
' except Exception:\n pass\n'
start, end = self._convert_line_range_to_offset(code, 2, 5)
refactored = self.do_extract_method(code, start, end, 'g')
expected = 'def f():\n g()\n\ndef g():\n try:\n pass\n' \
' except Exception:\n pass\n'
self.assertEquals(expected, refactored)
def test_extract_and_not_passing_global_functions(self):
code = 'def next(p):\n return p + 1\nvar = next(1)\n'
start = code.rindex('next')
refactored = self.do_extract_method(code, start, len(code) - 1, 'two')
expected = 'def next(p):\n return p + 1\n' \
'\ndef two():\n return next(1)\n\nvar = two()\n'
self.assertEquals(expected, refactored)
def test_extracting_with_only_one_return(self):
code = 'def f():\n var = 1\n return var\n'
start, end = self._convert_line_range_to_offset(code, 2, 3)
refactored = self.do_extract_method(code, start, end, 'g')
expected = 'def f():\n return g()\n\n' \
'def g():\n var = 1\n return var\n'
self.assertEquals(expected, refactored)
def test_extracting_variable_and_implicit_continuations(self):
code = 's = ("1"\n "2")\n'
start = code.index('"')
end = code.rindex('"') + 1
refactored = self.do_extract_variable(code, start, end, 's2')
expected = 's2 = "1" "2"\ns = (s2)\n'
self.assertEquals(expected, refactored)
def test_extracting_method_and_implicit_continuations(self):
code = 's = ("1"\n "2")\n'
start = code.index('"')
end = code.rindex('"') + 1
refactored = self.do_extract_method(code, start, end, 'f')
expected = '\ndef f():\n return "1" "2"\n\ns = (f())\n'
self.assertEquals(expected, refactored)
def test_passing_conditional_updated_vars_in_extracted(self):
code = 'def f(a):\n' \
' if 0:\n' \
' a = 1\n' \
' print(a)\n'
start, end = self._convert_line_range_to_offset(code, 2, 4)
refactored = self.do_extract_method(code, start, end, 'g')
expected = 'def f(a):\n' \
' g(a)\n\n' \
'def g(a):\n' \
' if 0:\n' \
' a = 1\n' \
' print(a)\n'
self.assertEquals(expected, refactored)
def test_returning_conditional_updated_vars_in_extracted(self):
code = 'def f(a):\n' \
' if 0:\n' \
' a = 1\n' \
' print(a)\n'
start, end = self._convert_line_range_to_offset(code, 2, 3)
refactored = self.do_extract_method(code, start, end, 'g')
expected = 'def f(a):\n' \
' a = g(a)\n' \
' print(a)\n\n' \
'def g(a):\n' \
' if 0:\n' \
' a = 1\n' \
' return a\n'
self.assertEquals(expected, refactored)
def test_extract_method_with_variables_possibly_written_to(self):
code = "def a_func(b):\n" \
" if b > 0:\n" \
" a = 2\n" \
" print a\n"
start, end = self._convert_line_range_to_offset(code, 2, 3)
refactored = self.do_extract_method(code, start, end, 'extracted')
expected = "def a_func(b):\n" \
" a = extracted(b)\n" \
" print a\n\n" \
"def extracted(b):\n" \
" if b > 0:\n" \
" a = 2\n" \
" return a\n"
self.assertEquals(expected, refactored)
if __name__ == '__main__':
unittest.main()
|
[
"edreamleo@gmail.com"
] |
edreamleo@gmail.com
|
044bc7efb85e34003ae56c0d48f464ec535fc949
|
7700fd9502e46b9c742093ac8748c43919a84091
|
/rbac_blog/blog/forms.py
|
aebe7f6c213ef3608de4b1fb267456437a4e718d
|
[] |
no_license
|
Guo-kai-feng/python_project
|
e825bf2e20d79bde3facf6e08a4482a025cfe2d4
|
5a24734ad5d4aa82ac47c5d912d1dc48c32c8f24
|
refs/heads/master
| 2020-07-13T22:00:37.493273
| 2019-08-29T13:07:46
| 2019-08-29T13:07:46
| 205,163,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,190
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from blog import models
from django import forms
class AlterBlogForm(forms.ModelForm):
class Meta:
model = models.Blog
fields = '__all__'
exclude = []
error_messages = {
            'name': {'required': 'this field cannot be empty'},
            'user': {'required': 'this field cannot be empty'},
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
from multiselectfield.forms.fields import MultiSelectFormField
for field_name, field in self.fields.items():
if not isinstance(field, MultiSelectFormField):
field.widget.attrs.update({'class': 'form-control'})
class AlterArticleForm(forms.ModelForm):
class Meta:
model = models.Article
fields = '__all__'
exclude = ['create_at', ]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
from multiselectfield.forms.fields import MultiSelectFormField
for field_name, field in self.fields.items():
if not isinstance(field, MultiSelectFormField):
field.widget.attrs.update({'class': 'form-control'})
|
[
"798136317@qq.com"
] |
798136317@qq.com
|
42860a6c8058042cf8d6eefda4f3cc8887a54477
|
b7ffa11d72642c5b547f4a48307401cbd379cc43
|
/src/cart/urls.py
|
5948162b0be5bb65c606f441ca98da30f69d521e
|
[] |
no_license
|
gummigun/captainconsole-git
|
8d5a27d042c8a1fe4fa3bf7d89d45ce871ab4eac
|
886e9a86052684256a5473495759996894b261ce
|
refs/heads/master
| 2023-08-14T23:00:02.699957
| 2020-05-15T23:23:44
| 2020-05-15T23:23:44
| 259,331,163
| 0
| 0
| null | 2021-09-22T19:02:10
| 2020-04-27T13:32:13
|
Python
|
UTF-8
|
Python
| false
| false
| 515
|
py
|
from django.urls import path, include, re_path
from . import views
urlpatterns = [
# Home page url pattern
path('', views.index, name="cart"),
path('add/<int:id>', views.update_cart, name="update_cart"),
path('remove/<int:id>', views.remove_cart, name="remove_cart"),
path('review/', views.review, name="review"),
path('checkout/', views.checkout, name="checkout"),
path('process/', views.process, name="process")
#re_path(r'^add/[0-9]$', views.update_cart, name="update_cart"),
]
|
[
"gummi.gunnlaugsson@gmail.com"
] |
gummi.gunnlaugsson@gmail.com
|
fba3989988948bd693ff0403cfac404b8e39195b
|
fbf05e2d3eef31367f26af979150f1d8325f6b2f
|
/flask_dmango/query/exceptions.py
|
12742acd9013a856f68e40b5a47283c5465a20f1
|
[] |
no_license
|
jungkoo/flask-dmango
|
0b653dac9b72935315244b9ff74b5fc89c5bb542
|
abecafa611fce10cd34c9b8401df179b163a9424
|
refs/heads/master
| 2021-01-10T09:29:01.651686
| 2016-03-27T08:27:57
| 2016-03-27T08:27:57
| 54,324,383
| 1
| 1
| null | 2016-03-22T04:18:53
| 2016-03-20T15:14:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,152
|
py
|
#-*- coding: utf-8 -*-
class DmangoException(Exception):
def __init__(self, message):
self._message = message
def _get_message(self):
return self._message
def _set_message(self, message):
self._message = message
message = property(_get_message, _set_message)
class NotSupportMethod(DmangoException):
def __init__(self, method):
self.method = method
def __str__(self):
return '"%s" is not a valid method name' % (str(self.method), )
class DmangoValueException(DmangoException):
"""
value 를 관련 문제를 의미한다.
예를 들어 'a' 를 int 로 변경하려고 했을경우라거나... 값이 허용되는게 아니라거나.
"""
def __init__(self, value_name):
self._value_name = value_name
def __str__(self):
return '[DMANGO-100002] value Error. (value name="%s")' % (self._value_name,)
class DmangoParseException(DmangoException):
def __init__(self, errmsg='parse error'):
self.errmsg = errmsg
def __str__(self):
return "[DMANGO-100003] " + self.errmsg
|
[
"deajang@gmail.com"
] |
deajang@gmail.com
|
cd42b43018616531062a56884c91ab6fd2f1ea44
|
cc52dc8b6295c9617ae8df40d0dbe9a062f0d7de
|
/dinerindex.py
|
44a69ab9afc22b4c8821608ed28a80df8bd6729d
|
[] |
no_license
|
mariopedregon/python-diner-bottega
|
f98cbcae3f67145b0a163666eb40ceebf91bdcfe
|
43829fed3cf982925c45c62932b839ccc5e30c22
|
refs/heads/master
| 2020-05-23T06:13:59.391487
| 2019-05-14T16:45:29
| 2019-05-14T16:45:29
| 186,662,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,257
|
py
|
def main():
totalCost = 0
print('''
"Welcome to the Bottega Diner , What can we get you started today!"
''')
name = input("What is your name?")
print("Hello " + name + "!")
print('''
You get one entree and two side choices at regular cost.
''')
print('''
Here is our menu!
''')
mainMenu(totalCost)
def mainMenu(totalCost):
print("1.Steak $13.99")
print("2.Chicken $11.99")
print("3.Ribs $15.99")
selection = int(input("Enter Choice:"))
print("\n")
if selection == 1:
totalCost += Steak(totalCost)
totalCost += sideMenu(totalCost)
receipt = "your total is $" + str(totalCost)
print(receipt)
elif selection == 2:
totalCost += Chicken(totalCost)
totalCost += sideMenu(totalCost)
receipt = "your total is $" + str(totalCost)
print(receipt)
elif selection == 3:
totalCost += Ribs(totalCost)
totalCost += sideMenu(totalCost)
receipt = "your total is $" + str(totalCost)
print(receipt)
else:
print("Invalid choice. enter 1-3")
mainMenu()
def Steak(totalCost):
print("Great choice!")
totalCost += 8
return totalCost
def Chicken(totalCost):
print("Great choice!")
totalCost += 7.5
return totalCost
def Ribs(totalCost):
print("Great Choice")
totalCost += 6
return totalCost
def sideMenu(totalCost):
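    # Show the side menu and return the running total with the chosen side's price added.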
print("1.corn on the cob $10.50")
print("2.house salad $7.50")
print("3.Fries $3")
selection = int(input("Enter Choice:"))
    if selection == 1:
        return corn(totalCost)
    elif selection == 2:
        return house(totalCost)
    elif selection == 3:
        return Drink(totalCost)
    else:
        print("Invalid choice. Enter 1-3.")
        return sideMenu(totalCost)
def corn(totalCost):
print("That'll be $10.50.")
totalCost += 10.5
return totalCost
def house(totalCost):
print("That'll be $7.50")
totalCost += 7.5
return totalCost
def Drink(totalCost):
print("Sweet!")
totalCost += 3
return totalCost
receipt = "your total is $" + str(totalCost)
main()
|
[
"mariopedregon93@gmail.com"
] |
mariopedregon93@gmail.com
|
76958178b7438bb05a58d4bf3edd04bf9ee28403
|
cc212540f928a95fa56f4679e3eb58e2ad329ca5
|
/annpy/training/trainer.py
|
c93d497850a77427e0a1ba0888254a24da4a10e7
|
[
"LicenseRef-scancode-mit-taylor-variant",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
nirvguy/annpy
|
ec05e07316bddd4bc5fbbd3d9e73ec94dc52a4b9
|
ea5f92048173d0ebd1ad134cf626fa623569905e
|
refs/heads/master
| 2018-06-03T06:11:21.911758
| 2018-05-30T16:16:46
| 2018-05-30T16:16:48
| 118,555,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
# See LICENSE file for copyright and license details.
import torch
class Trainer(object):
def __init__(self, learning_rule):
self._learning_rule = learning_rule
self._epoch = 0
self._hooks = []
self._remaining_epochs = 0
@property
def epoch(self):
return self._epoch
@staticmethod
def check_batch(batch):
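        # A valid batch is a tensor shaped (batch_size, ...), i.e. at least two dimensions.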
        if not isinstance(batch, torch.Tensor):
            raise Exception("Batches must be torch.Tensors")
        if len(batch.shape) <= 1:
            raise Exception("Batch shape must have at least two dimensions")
def _notify(self, msg):
for hook in self._hooks:
hook.notify(msg)
def train(self, batchs, epochs=1):
if len(batchs) == 0:
return
for batch in batchs:
self.check_batch(batch)
self._remaining_epochs = epochs
self._notify('pre_training')
for _ in range(epochs):
self._notify('pre_epoch')
for batch in batchs:
self._learning_rule.step(batch)
self._epoch += 1
self._remaining_epochs -= 1
self._notify('post_epoch')
self._notify('post_training')
def remaining_epochs(self):
return self._remaining_epochs
def attach(self, hook):
self._hooks.append(hook)
|
[
"nirvguy@gmail.com"
] |
nirvguy@gmail.com
|
b089edef3519feb7f892bdd66d7ebb57fe321c27
|
d214b72b3ae340d288c683afe356de6846a9b09d
|
/动态规划/最大矩形_85.py
|
d5fa9f35ee7dab90956eab9b4c2c0e9f34d1993c
|
[] |
no_license
|
Xiaoctw/LeetCode1_python
|
540af6402e82b3221dad8648bbdcce44954a9832
|
b2228230c90d7c91b0a40399fa631520c290b61d
|
refs/heads/master
| 2021-08-29T15:02:37.786181
| 2021-08-22T11:12:07
| 2021-08-22T11:12:07
| 168,444,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,071
|
py
|
from typing import *
class Solution:
def maximalRectangle(self, matrix: List[List[str]]) -> int:
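        # num_up[i][j] = number of consecutive '1's ending at row i in column j,
        # i.e. the histogram height above each cell; the answer is the largest
        # rectangle in any row's histogram.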
m, n = len(matrix), len(matrix[0])
num_up = [[0] * n for _ in range(m)]
for i in range(n):
if matrix[0][i] == '1':
num_up[0][i] = 1
for i in range(1, m):
for j in range(n):
if matrix[i][j] == '1':
num_up[i][j] = num_up[i - 1][j] + 1
        ans = 0
        for i in range(m):
            for j in range(n):
                # expand leftwards from column j, tracking the minimal height,
                # so every rectangle whose right edge is column j is considered
                min_h = float('inf')
                for k in range(j, -1, -1):
                    if num_up[i][k] == 0:
                        break
                    min_h = min(min_h, num_up[i][k])
                    ans = max(ans, min_h * (j - k + 1))
        return ans
if __name__ == '__main__':
matrix = [["1", "0", "1", "0", "0"], ["1", "0", "1", "1", "1"], ["1", "1", "1", "1", "1"],
["1", "0", "0", "1", "0"]]
sol=Solution()
print(sol.maximalRectangle(matrix))
|
[
"m18846183092@163.com"
] |
m18846183092@163.com
|
1fab3a455107a4685915923d7fdca0125d651eae
|
f7b47ac437f2c60c99fa004a5f11563cc2340c98
|
/migrations/versions/ba949b44fadf_.py
|
6caacd88df68bf1244959e34581efd7d76baafd4
|
[] |
no_license
|
QYJiua/myblogproject
|
23586970b9b8ccdf7aa4a931adde1a7a4e04a673
|
0a6749306ca74bb2d7d1f876c03e945a259c0909
|
refs/heads/master
| 2023-07-29T22:27:07.373173
| 2021-09-16T14:43:29
| 2021-09-16T14:43:29
| 407,197,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,876
|
py
|
"""empty message
Revision ID: ba949b44fadf
Revises: 7afb21f57fac
Create Date: 2021-08-28 22:06:03.824638
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'ba949b44fadf'
down_revision = '7afb21f57fac'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('article_type',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('type_name', sa.String(length=20), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('comment',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('article_id', sa.Integer(), nullable=True),
sa.Column('comment', sa.String(length=255), nullable=False),
sa.Column('cdatetime', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['article_id'], ['article.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.add_column('article', sa.Column('type_id', sa.Integer(), nullable=True))
op.alter_column('article', 'user_id',
existing_type=mysql.INTEGER(display_width=11),
nullable=True)
op.create_foreign_key(None, 'article', 'article_type', ['type_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'article', type_='foreignkey')
op.alter_column('article', 'user_id',
existing_type=mysql.INTEGER(display_width=11),
nullable=False)
op.drop_column('article', 'type_id')
op.drop_table('comment')
op.drop_table('article_type')
# ### end Alembic commands ###
|
[
"1273884908@qq.com"
] |
1273884908@qq.com
|
a7b5924d50b1d26afe48b186debeead1b5c2ba60
|
5e3768b724a93e834eca6c92f54f45bd45b05106
|
/automate_models.py
|
1da485abbe89f744c99b449feb929897d2d07ede
|
[
"MIT"
] |
permissive
|
BorisBorshevsky/ML-Elections
|
e10bd578e2923ef15112165702280ceca8f0f285
|
26a0b7b184deceb7a1c2727ba8e458d565b19512
|
refs/heads/master
| 2021-05-06T19:08:15.226036
| 2018-01-15T00:57:55
| 2018-01-15T00:57:55
| 112,019,076
| 0
| 1
|
MIT
| 2017-12-04T00:48:08
| 2017-11-25T16:43:39
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,584
|
py
|
import pandas as pd
import numpy as np
from IPython import embed
from sklearn import clone
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.multiclass import OneVsOneClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import Perceptron
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.metrics import confusion_matrix
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier
models = {
"SVC Linear kernel": SVC(kernel='linear'),
"linearSVC OVR": LinearSVC(multi_class='ovr'),
"linearSVC crammer_singer": LinearSVC(multi_class='crammer_singer'),
"One Vs One": OneVsOneClassifier(LinearSVC()),
"Naive Bayes": GaussianNB(),
"Perceptron": Perceptron(max_iter=300),
"LinearDiscriminantAnalysis": LinearDiscriminantAnalysis()
}
def add_parametrized_models():
for splitter in range(2, 20):
models["DecisionTreeClassifier with splitter %d" % splitter] = DecisionTreeClassifier(min_samples_split=splitter,
random_state=0)
for splitter in range(2, 20):
models["RandomForestClassifier with splitter %d" % splitter] = RandomForestClassifier(min_samples_split=splitter,
random_state=0)
for n in range(2, 20):
models["KNeighborsClassifier with n=%d" % n] = KNeighborsClassifier(n_neighbors=n)
def load_prepared_data():
df_train = pd.read_csv('./data/output/processed_train.csv', header=0)
df_test = pd.read_csv('./data/output/processed_test.csv', header=0)
features = list(set(df_train.columns) - {'Vote'})
target = 'Vote'
df_train_X = df_train[features]
df_train_Y = df_train[target]
df_test_X = df_test[features]
df_test_Y = df_test[target]
# labels = {"0":"Blues","1":"Browns","2":"Greens","3":"Greys","4":"Oranges","5":"Pinks","6":"Purples","7":"Reds","8":"Whites","9":"Yellows" }
labels = ["Blues", "Browns", "Greens", "Greys", "Oranges", "Pinks", "Purples", "Reds", "Whites", "Yellows"]
return df_train_X, df_train_Y, df_test_X, df_test_Y, labels
def evaluate_and_get_best(features, target):
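    # Cross-validate (15 folds) every candidate model and return the name of the best one.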
max_model = "linearSVC crammer_singer"
max_score = 0
for k, v in models.iteritems():
scores = cross_val_score(v, features, target, cv=15)
score = np.mean(scores)
print "%s - Score: %f" % (k, score)
if score > max_score:
max_score = score
max_model = k
return max_model
def main():
df_train_X, df_train_Y, df_test_X, df_test_Y, labels = load_prepared_data()
train_val_data = pd.concat([df_train_X])
features = train_val_data.values
target = pd.concat([df_train_Y]).values
add_parametrized_models()
best_model_name = evaluate_and_get_best(features, target)
clf = clone(models[best_model_name])
    clf.fit(features, target)  # fit on the training data, not the test set
print "#######################"
print "Prediction"
print "#######################"
pred = clf.predict(df_test_X)
distribution = np.bincount(pred.astype('int64'))
for index, party in enumerate(distribution):
print "%s, %f, %f" % (labels[index], distribution[index], distribution[index] / float(df_test_Y.size) * 100) + '%'
if __name__ == '__main__':
main()
|
[
"BorisBorshevsky@gmail.com"
] |
BorisBorshevsky@gmail.com
|
1f57a94143af972a289bfc920a65f67f1bd6adf6
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_17171.py
|
41ccf0e53304977e412d274d7efa76fe1482d35e
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35
|
py
|
# Integer to Boolean
return a != b
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
04777f286a93e171d45397ce0a3643795c4b76af
|
9d3cab321f1b940bc7ef0ffbd1c7779d58008e35
|
/hooks.py
|
b2c04ec8ba85f827c9081d4e87b7c411a59aea3d
|
[] |
no_license
|
celsoflores/orbui
|
ae37d2497c1eebc3132097d98a1847950cfd6eed
|
6b8ac794832e4baa6cf5ef8e0d7ba5ed8eda12e6
|
refs/heads/master
| 2022-11-13T03:20:40.098283
| 2016-05-16T22:16:41
| 2016-05-16T22:16:41
| 276,276,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,127
|
py
|
# -*- coding: utf-8 -*-
# Validation hook for relationship
# Author: Softtek - MLCS
# Date: Jun 2014
# Project: Platypus
from cubicweb import ValidationError
from cubicweb.server.hook import Hook, match_rtype
from cubes.orbui.views.autocomplete_edition_view import AutoCompleteEntityRetriever
class Validate_Autocomplete_RulesHook(Hook):
"""
Validate the correct application of the autocomplete rules
"""
__regid__ = 'validateAutocompleteRules'
__select__ = Hook.__select__ & ~match_rtype('created_by', 'owned_by')
events = ('before_add_relation',)
def __call__(self):
#print 'eidfrom: %s, eidto: %s, rtype: %s' % (self.eidfrom, self.eidto, self.rtype)
        #If the relation already exists, the special conditions are not evaluated
srql = 'Any X, Y WHERE X %s Y, X eid %s, Y eid %s' % (self.rtype, self.eidfrom, self.eidto)
if self._cw.execute(srql).rowcount > 0:
return
eidfrom = self._cw.entity_from_eid(self.eidfrom)
eidto = self._cw.entity_from_eid(self.eidto)
#Evaluate the direct relation
target = ''
specialsearch = AutoCompleteEntityRetriever().getSpecialSearch(self._cw, eidfrom, self.rtype, type(eidto).__name__, 'subject')
if specialsearch != ' ':
unrelated = eidfrom.cw_unrelated_rql(self.rtype, type(eidto).__name__, 'subject')
srql = ((unrelated[0] % unrelated[1]) + specialsearch + ', O eid ' + str(self.eidto))
if self._cw.execute(srql).rowcount < 1:
target = ('%(entity)s|%(relation)s%(role)s|%(etype_search)s'
% {'entity': type(eidfrom).__name__,
'relation': self.rtype, 'role': '',
'etype_search': type(eidto).__name__})
helpmsg = self._cw._('Validation error, relation not valid')
if target in AutoCompleteEntityRetriever().HELP_MESSAGES:
helpmsg = self._cw._(AutoCompleteEntityRetriever().HELP_MESSAGES[target])
raise ValidationError(self.eidfrom, {self.rtype: helpmsg})
#Evaluate the reverse relation
target = ''
specialsearch = AutoCompleteEntityRetriever().getSpecialSearch(self._cw, eidto, self.rtype, type(eidfrom).__name__, 'object')
if specialsearch != ' ':
unrelated = eidto.cw_unrelated_rql(self.rtype, type(eidfrom).__name__, 'object')
srql = ((unrelated[0] % unrelated[1]) + specialsearch + ', S eid ' + str(self.eidfrom))
if self._cw.execute(srql).rowcount < 1:
target = ('%(entity)s|%(relation)s%(role)s|%(etype_search)s'
% {'entity': type(eidto).__name__,
'relation': self.rtype, 'role': '_object',
'etype_search': type(eidfrom).__name__})
helpmsg = self._cw._('Validation error, relation not valid')
if target in AutoCompleteEntityRetriever().HELP_MESSAGES:
helpmsg = self._cw._(AutoCompleteEntityRetriever().HELP_MESSAGES[target])
raise ValidationError(self.eidto, {self.rtype: helpmsg})
|
[
"walter.arriaga@softtek.com"
] |
walter.arriaga@softtek.com
|
c96e3d5f4930ba27639b6713431e4463fe902921
|
74e516e50d5f7181d0ef340f0941e8ffc7b20022
|
/1-100/6/sum_square.py
|
872f7bf384605bbeae6bf4eb7891d10f3711eb67
|
[] |
no_license
|
js837/project-euler
|
fb261a8fc2898f4e86bb66ad8c119c961a8178a6
|
8b32fdbdfda13cf7c8881b400c6ce59334749dad
|
refs/heads/master
| 2016-09-05T16:38:09.639874
| 2015-04-21T21:10:17
| 2015-04-21T21:10:17
| 34,351,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
# Sum of squares formula is: n*(n+1)*(2*n+1)/6
n=100
sum_squares=n*(n+1)*(2*n+1)/6
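# Square of sum formula is: (n*(n+1)/2)**2 = n**2*(n+1)**2/4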
square_sum=n**2*(n+1)**2/4
print square_sum-sum_squares
|
[
"stacey.jake@gmail.com"
] |
stacey.jake@gmail.com
|
a519de5be7a9fc0af870c2e10b69c7887da9f26a
|
cf97a7de6fad3c917356a0c7fb75bda1b4e31981
|
/unravel/text/legal/glossary.py
|
e866b388acfda0220997586ac84c01a56dd5d2cf
|
[
"Apache-2.0"
] |
permissive
|
unravel-text/unravel
|
a2614de23e0676d5b7027d2e397ee39a0d9942e4
|
d819b90bfd1e4c0dd3157f43595fdbb38ae82d50
|
refs/heads/master
| 2023-06-26T13:52:33.611158
| 2023-06-17T03:41:41
| 2023-06-17T03:41:41
| 155,546,598
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25
|
py
|
class Glossary:
pass
|
[
"cofiem@gmail.com"
] |
cofiem@gmail.com
|
f841e9e9170838ca8c2972ca807eedb0e4ecd954
|
e905abd9bb7bd7017657d0a0c4d724d16e37044c
|
/.history/article/settings_20210208181317.py
|
5959719e37fa4bb9dcbc2f1420a4a206f030284f
|
[] |
no_license
|
tabdelbari/articles
|
a8b921841f84fb473f5ed1cdcda743863e6bc246
|
f0e1dfdc9e818e43095933139b6379a232647898
|
refs/heads/main
| 2023-03-05T10:21:35.565767
| 2021-02-10T13:35:14
| 2021-02-10T13:35:14
| 325,654,973
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,437
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for article project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
MONGO_URI = 'mongodb://localhost:27017/'
MONGO_DATABASE = 'articles'
BOT_NAME = 'article'
SPIDER_MODULES = ['article.spiders']
NEWSPIDER_MODULE = 'article.spiders'
SPLASH_URL = 'http://localhost:8050'
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'article (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 10
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 1
CONCURRENT_REQUESTS_PER_IP = 1
# Disable cookies (enabled by default)
COOKIES_ENABLED = True
COOKIES_DEBUG = True
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'scrapy_splash.SplashCookiesMiddleware': 723,
'scrapy_splash.SplashMiddleware': 725,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'article.pipelines.MongoPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"abdelbari1996@hotmail.com"
] |
abdelbari1996@hotmail.com
|
30bcc49e425481fed6a6df0a85ca78f5255b3b93
|
200abee8ebb5fa255e594c8d901c8c68eb9c1a9c
|
/venv/03_old/hello_world.py
|
3ef7463bc3ca43192af6add6ec132d91cd3a73f7
|
[] |
no_license
|
Vestenar/PythonProjects
|
f083cbc07df57ea7a560c6b18efed2bb0dc42efb
|
f8fdf9faff013165f8d835b0ccb807f8bef6dac4
|
refs/heads/master
| 2021-07-20T14:14:15.739074
| 2019-03-12T18:05:38
| 2019-03-12T18:05:38
| 163,770,129
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,760
|
py
|
'''def sum(param1, param2):
return param1 + param2 # return result to the function caller
param1 = int(input())
param2 = int(input())
c = sum(param1, param2)
print(c)
nlen=0
def checkPalindrome(inputString):
nlen = len(inputString)
if nlen == 1:
t = True
else:
for i in range(nlen//2):
if inputString[i] == inputString[-i-1]:
t = True
else:
t = False
break
return t
inputString = "sasass"
print(checkPalindrome(inputString))'''
'''def adjacentElementsProduct(inputArray):
test = inputArray[0]*inputArray[1]
for i in range((len(inputArray)-2)):
nmax = inputArray[i+1]*inputArray[i+2]
if test < nmax:
test = nmax
return test
inputArray = [6, 2, 3, 8]
max = 0
max = adjacentElementsProduct(inputArray)
print(max)'''
'''sequence = [1, 3, 2, 1]
count = 0
t = True
t1 = True
t2 = True
narray = list(sequence)
for b in range(2):
for i in range(len(narray)-1):
if narray[i] < narray[i-1]:
narray[i-1:i] = []
count += 1
if count < 2:
t1 = False
count = 0
narray2 = list(sequence)
narray = list(sequence)
for b in range(2):
for i in range(len(narray)-1):
if narray[i] < narray[i-1]:
narray[i:i+1] = []
count += 1
if count < 2:
t1 = False
t = t1 or t2
print(narray)
print(narray2)
print(t1, t2, t)'''
'''t = True
count = 0
for i in range(len(sequence)):
if count > 2:
data = False
break
if i+1 < len(sequence) and sequence[i] >= sequence[i+1]:
count += 1
if i+2 < len(sequence) and sequence[i] >= sequence[i+2]:
count += 1
print(t)
'''
'''matrix = [[1,1,1],
[2,2,2],
[3,3,3]]
price = 0
for i in range(len(matrix)):
for j in range(len(matrix[i])):
if i != 0 and matrix[i-1][j] == 0:
matrix[i][j] = 0
for row in matrix:
for elem in row:
price += elem
print(price)'''
'''inputArray = ["aba", "aa", "ad", "vcd", "aba"]
lenw = 0
out = []
for i in range(len(inputArray)):
if lenw < len(inputArray[i]):
lenw = len(inputArray[i])
for i in range(len(inputArray)):
if len(inputArray[i]) == max(len(s) for s in inputArray):
out.append(inputArray[i])
print(out)'''
'''s1 = "aabzca"
s2 = "adcaaz"
n = 0
for i in s1:
if i in s2:
n +=1
s2 = s2.replace(i, "0", 1)
print(n)'''
'''n = str(int(123610))
mid = len(n)//2
n1 = n[:mid]
n2 = n[mid:]
sum1 = 0
for i in range(len(n1)):
sum1 +=int(n1[i])
for i in range(len(n2)):
sum1 -=int(n2[i])
if sum1 == 0:
out = "Счастливый"
else:
out = "Обычный"
print(out)'''
'''s = 'aaaabbcccaabb'
t = s[0]
count = 0
out = ''
for i in s:
if i == t:
count += 1
else:
out = out + t+str(count)
t = i
count = 1
out = out + t + str(count)
print(t, out)'''
'''a = [23, 54, -1, 43, 1, -1, -1, 77, -1, -1, -1, 3]
print([1, 3, -1, 23, 43, -1, -1, 54, -1, -1, -1, 77])
m = max(a)
for i in range(1, len(a)):
if a[-i] != -1:
a[-i], a[a.index(m)] = a[a.index(m)], a[-i]
m = max(a[:-i])
print(a)
'''
'''s = "The ((quick (brown) (fox) jumps over the lazy) dog)"
count = s.count('(')
op = []
cl = []
id = 0
for ch in s:
if ch == '(':
op.append(id)
id += 1
op = op[::-1]
id = 0
'speed up the bracket search by fixing where the scan starts'
for i in range(count):
for ch in s:
if ch == ')' and id > op[i] and id not in cl:
cl.append(id)
break
id += 1
id = 0
for i in range(count):
sh = s[op[i]+1:cl[i]]
s = s.replace(sh, sh[::-1])
s = s.replace("(", "")
s = s.replace(")", "")
print(s)'''
'''s = "The ((quick (brown) (fox) jumps over the lazy) dog)"
while ')' in s:
j = s.index(')')
i = s.rindex('(', 0, j)
s = s[:i] + s[j-1:i:-1] + s[j+1:]
print(s)'''
'''a = [50]
b = [0,0]
for i in range(len(a)):
b[i%2] += a[i]
print(b)'''
'''
a = ["*****",
"*abc*",
"*ded*",
"*****"]
picture = ["abc", "ded"]
picture.insert(0,"*" * len(picture[0]))
picture.append("*" * len(picture[0]))
for i in range(len(picture)):
test = picture[i]
test = "*" + test + "*"
picture[i] = test
print(picture)'''
'''def areSimilar(a, b):
idx = []
if len(a) != len(b):
return False
for i in range(len(a)):
if a[i] != b[i]:
idx.append(i)
if len(idx) == 0:
return True
if len(idx) != 2:
return False
if a[idx[0]] == b[idx[1]] and a[idx[1]] == b[idx[0]]:
return True
else:
return False
'collect in idx only the positions where the arrays differ; if there are exactly two such pairs, check that the pairs are swappable'
a = [1, 2, 2]
b = [2, 1, 1]
print(areSimilar(a, b))'''
'''def arrayChange(inputArray):
n = 0
for i in range(1, len(inputArray)):
if inputArray[i] <= inputArray[i-1]:
n += inputArray[i - 1] - inputArray[i] + 1
inputArray[i] += inputArray[i-1] - inputArray[i] +1
return n
inputArray = [2, 3, 3, 5, 5, 5, 4, 12, 12, 10, 15]
print(arrayChange(inputArray))
'''
'''a = [int(i) for i in input().split()]
b = []
ans = ''
for i in range(len(a)):
if a.count(a[i]) > 1 and (a[i] not in b):
b.append(a[i])
for i in b:
ans += str(i) + ' '
print(ans)
'''
'''
check whether a string can be rearranged into a palindrome.
count only the characters that occur an odd number of times and collect them in a list
def palindromeRearranging(inputString):
a = []
for i in range(len(inputString)):
if inputString.count(inputString[i]) % 2 != 0:
if inputString[i] != inputString[i-1]:
a.append(inputString.count(inputString[i]))
return len(a) <= 1
task = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaccc'
print(palindromeRearranging(task))
'''
'''MINESWEEPER codesignal
def minesweeper(matrix):
row, col = len(matrix), len(matrix[0])
ans = [[0 for c in range(col)] for r in range(row)]
for i in range(row):
for j in range(col):
if matrix[i][j]:
ans[i][j] = -1
for di in range(-1, 2):
for dj in range(-1, 2):
ai = i + di
aj = j + dj
if 0 <= ai < row and 0 <= aj < col and matrix[ai][aj]:
ans[i][j] += 1
return ans
task = [[True,False,False],[False,True,False],[False,False,False]]
print(minesweeper(task))
'''
'''
def avoidObstacles(inputArray):
jump = 1
a = 0
while a < max(inputArray)//jump:
jump += 1
for i in range(1, max(inputArray)//jump+1):
if jump*i not in inputArray:
a += 1
else:
a = 0
break
return jump
task = [5, 3, 6, 7, 9]
print(avoidObstacles(task))
'''
''' # blur effect for a "photo"
def boxBlur(image):
row, col = len(image), len(image[0]) # row rows, col columns
ans = []
for i in range(1, row-1):
ans.append([])
for j in range(1, col-1):
flsum = 0
for k in range(-1, 2):
for l in range(-1, 2):
flsum += image[i+k][j+l]
ans[i-1].append(int(flsum/9))
return ans
task = [[7, 4, 0, 1], [5, 6, 2, 2], [6, 10, 7, 8], [1, 4, 2, 0]]
print(boxBlur(task))
'''
'''codesignal: check whether a variable name is valid
def variableName(name):
if not name[0].isalpha() and name[0] != '_':
return False
else:
for i in range(1, len(name)):
if not name[i].isalnum() and name[i] != '_':
return False
return True
name = 'var1_'
print(variableName(name))
'''
'''codesignal
def absoluteValuesSumMinimization(a):
# x = a[0]
list = {}
for i in range(len(a)):
sabs = 0
for j in range(len(a)):
sabs += abs(a[j] - a[-(i+1)])
list[sabs] = a[-(i+1)]
print(list)
return list[min(list)]
test = [1, 1, 3, 4]
print(absoluteValuesSumMinimization(test))
'''
''' brute-force over all permutations
def stringsRearrangement(inputArray):
import itertools
    perm = list(itertools.permutations(inputArray, len(inputArray))) # full list of all permutations
    for k in perm: # check each permutation
for i in range(1, len(k)):
a = k[i]
b = k[i-1]
count = 0
for index in range(len(a)):
if a[index] != b[index]:
count += 1
if count != 1:
break
if count == 1:
return True
return False'''
'''#codesignal
#Given array of integers, find the maximal possible sum of some of its k consecutive elements.
def arrayMaxConsecutiveSum(a, k):
    c = m = sum(a[:k]) # initial sum of the first k elements
    for i in range(len(a) - k):
        c = c + a[i + k] - a[i] # slide the window: drop a[i], add a[i + k]
        m = max(c, m) # keep the running maximum in m
return m
test = [1, 3, 2, 4]
k = 3
print(arrayMaxConsecutiveSum(test, k))'''
|
[
"vestenar@gmail.com"
] |
vestenar@gmail.com
|
fde156d69337a167c10dea149b053022dba9878a
|
bd38b6be261e997e1a34694b70f3e9fa22e73c8e
|
/StatMyBallsApi/migrations/0003_goal_goal_date.py
|
a065c4260cca8911bebe0264d213452e776fbc15
|
[] |
no_license
|
Strapontin/StatMyBallsDjango
|
05e73a502a8db8bdeeeef7533a1a3514773261b4
|
8082b2630a2ddf4dded999636c8fd39b0fb65b0a
|
refs/heads/main
| 2023-04-27T17:17:12.815401
| 2021-05-16T19:27:32
| 2021-05-16T19:27:32
| 305,686,523
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
# Generated by Django 3.1.3 on 2020-11-11 10:07
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('StatMyBallsApi', '0002_auto_20201111_1058'),
]
operations = [
migrations.AddField(
model_name='goal',
name='goal_date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='goal_date'),
preserve_default=False,
),
]
|
[
"cedric.hernandez.soto@gmail.com"
] |
cedric.hernandez.soto@gmail.com
|
3cd5a1b7ce865bf7a94113f781c663ed6ae8ebe9
|
21ff624abb58c2af27d209f7b1d1e167244b7536
|
/adminLab/adminLab/settings.py
|
cb4f9842ab89239aec4bb420614f742c7ea339ef
|
[] |
no_license
|
SachekDenisHomePc/DjangoLab2-3
|
95c858bcdcbd6458a5eedd6805245d4217e93e7d
|
e062898f91fbabb98605a4207953c3786e4751bf
|
refs/heads/master
| 2021-05-24T12:18:53.901085
| 2020-04-06T16:41:56
| 2020-04-06T16:41:56
| 253,558,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,314
|
py
|
"""
Django settings for adminLab project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 't1&t_zu8r+y)1)^b%)w*%ypu^i#@1%(7(fa9n51_62qkktjocg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'lab1DB',
'Lab2',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'adminLab.urls'
TEMPLATE_DIR = os.path.join(BASE_DIR, "templates")
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR, ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'adminLab.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
'Lab2Db': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'lab2Db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"sachek.denis@gmail.com"
] |
sachek.denis@gmail.com
|
19907e7cb61cd025d174242e51357e774a777801
|
d257ddf7e6959d0989d76080a8a048e82393657f
|
/002_TemplateMatching/002_template_match_implemented.py
|
112464bcd0690858ab97442b59d77b3d552eca7f
|
[
"MIT"
] |
permissive
|
remichartier/027_selfDrivingCarND_ObjectDetectionExercises
|
d210f37b7baf306dd034c09f62e125b263f8270d
|
ccd853c975d35df5f31e1a445a1a8757b8bd13f5
|
refs/heads/main
| 2023-04-17T08:09:55.465143
| 2021-05-03T07:11:16
| 2021-05-03T07:11:16
| 362,013,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,661
|
py
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
image = mpimg.imread('bbox-example-image.jpg')
#image = mpimg.imread('temp-matching-example-2.jpg')
templist = ['cutout1.jpg', 'cutout2.jpg', 'cutout3.jpg',
'cutout4.jpg', 'cutout5.jpg', 'cutout6.jpg']
# Here is your draw_boxes function from the previous exercise
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
# Make a copy of the image
imcopy = np.copy(img)
# Iterate through the bounding boxes
for bbox in bboxes:
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
# All the 6 methods for comparison in a list
# methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
# 'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
# Define a function that takes an image and a list of templates as inputs
# then searches the image and returns the a list of bounding boxes
# for matched templates
def find_matches(img, template_list):
# Make a copy of the image to draw on
imcopy = np.copy(img)
# Define an empty list to take bbox coords
bbox_list = []
# Iterate through template list
for temp in template_list:
# Read in templates one by one
templ = mpimg.imread(temp)
print(templ.shape[::-1])
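        # templ.shape[::-1] of an (H, W, C) image is (C, W, H), so l is the channel count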
l, w, h = templ.shape[::-1]
# Use cv2.matchTemplate() to search the image
# using whichever of the OpenCV search methods you prefer
#meth = 'cv2.TM_SQDIFF' # --> Not working
meth = 'cv2.TM_CCOEFF' # --> Working
#meth = 'cv2.TM_CCOEFF_NORMED' # --> Working
#meth = 'cv2.TM_CCORR' # --> Not working
#meth = 'cv2.TM_CCORR_NORMED' # --> Working
#meth = 'cv2.TM_SQDIFF' # --> Not working
#meth = 'cv2.TM_SQDIFF_NORMED' # --> Not working
method = eval(meth)
res = cv2.matchTemplate(img,templ,method)
# Use cv2.minMaxLoc() to extract the location of the best match
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# Determine bounding box corners for the match
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
bbox_list.append((top_left,bottom_right))
# Return the list of bounding boxes
return bbox_list
bboxes = find_matches(image, templist)
result = draw_boxes(image, bboxes)
plt.imshow(result)
|
[
"remipr.chartier@gmail.com"
] |
remipr.chartier@gmail.com
|
3f0ba0135eca1c3fa561a27e6ca70bbfd56d36ce
|
dbc35bdcec5f64ef8482c709a28e527c6b66d638
|
/company_admin/urls.py
|
d643efa64d0a8b4422c90ea707d9d82e77bb0054
|
[] |
no_license
|
PkQDark/Dosa
|
2bc001f1ab7c2e15ae2c8fb0f2309185024be590
|
bcead811892b2f0c06e2cb5e03cf3f98a0dc9b7b
|
refs/heads/master
| 2021-01-01T06:56:46.259731
| 2017-09-19T17:29:43
| 2017-09-19T17:29:43
| 97,556,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,057
|
py
|
from django.conf.urls import url
from .views import dosing, \
cistern_add, cistern_list, cistern_edit, cistern_info, \
add_fuel, fuels_list, fuel_info,\
keys, edit_key, \
users, add_local_user, edit_local_user
urlpatterns = [
url(r'^$', dosing, name='dosing'),
url(r'^cisterns/$', cistern_list, name='cistern_list'),
url(r'^cisterns/add-cistern/$', cistern_add, name='cistern_add'),
url(r'^cisterns/edit/(?P<cist_id>\d+)/', cistern_edit, name='cist_edit'),
url(r'^cisterns/(?P<cist_id>\d+)/', cistern_info, name='cist_info'),
url(r'^fuels/$', fuels_list, name='fuels'),
url(r'^fuels/add-fuel/$', add_fuel, name='add_fuel'),
url(r'^fuels/(?P<fuel_id>\d+)/', fuel_info, name='fuel_info'),
url(r'^keys/$', keys, name='keys'),
url(r'^keys/edit/(?P<key_id>\d+)/', edit_key, name='edit_key'),
url(r'^users/$', users, name='users'),
url(r'^users/add-user/$', add_local_user, name='add_user'),
url(r'^users/edit/(?P<user_id>\d+)/', edit_local_user, name='edit_user'),
]
|
[
"blackbirdvlad@gmail.com"
] |
blackbirdvlad@gmail.com
|
ed0d0eca931fce65500d604ab0cbc8aa1dbd612c
|
227a045665ea8c5b1822bed84c38b990a1343770
|
/libs/sms.py
|
f6376bdddca1492090f0f94c2f3fc68aebad1f1a
|
[
"Apache-2.0"
] |
permissive
|
theEndProject/aboutTa
|
f31a1629afdf7fc157e219b2499b47c2c2181e98
|
33a1e391e56e76006ee6ef3d9102efc496251cb6
|
refs/heads/main
| 2023-01-02T01:31:10.635058
| 2020-10-26T09:33:05
| 2020-10-26T09:33:05
| 303,625,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,185
|
py
|
import time
import json
from hashlib import md5
import requests
from aboutTa import config as cfg
def send_sms(phonenum, vcode):
    '''Send an SMS verification code'''
args = {
'appid': cfg.SD_APPID, # APPID
        'to': phonenum,  # phone number
        'project': cfg.SD_PROJECT,  # SMS template ID
'vars': json.dumps({'code': vcode}),
'timestamp': int(time.time()),
'sign_type': cfg.SD_SIGN_TYPE,
}
    # compute the signature over the request parameters
    sorted_args = sorted(args.items())  # sort the parameter items by key
    args_str = '&'.join([f'{key}={value}' for key, value in sorted_args])  # join as key=value pairs
    sign_str = f'{cfg.SD_APPID}{cfg.SD_APPKEY}{args_str}{cfg.SD_APPID}{cfg.SD_APPKEY}'  # string to be signed
    sign_bytes = sign_str.encode('utf8')  # convert to bytes
    signature = md5(sign_bytes).hexdigest()  # md5 digest is the signature
args['signature'] = signature
response = requests.post(cfg.SD_API, data=args)
if response.status_code == 200:
result = response.json()
        print('SMS result:', result)
if result.get('status') == 'success':
return True
return False
|
[
"472437593@qq.com"
] |
472437593@qq.com
|
d8a49d368a82b0008bacdd568c57aa745bde3133
|
d86ed2c37a55b4a3118131a04f9a68dbd3b51a7f
|
/sherpatest/lev3fft-bar.py
|
384437c626b0fbb39addb9d1c5274f6e57b5fd62
|
[] |
no_license
|
hamogu/sherpa-test-data
|
f745cc907c2535a721d46472b33f7281bd6e6711
|
77d9fc563875c59a4acff2960d46180ee7a8ec14
|
refs/heads/master
| 2023-06-18T22:30:44.947033
| 2020-08-03T12:07:13
| 2020-08-03T12:07:13
| 275,202,255
| 0
| 0
| null | 2020-06-26T16:38:19
| 2020-06-26T16:38:19
| null |
UTF-8
|
Python
| false
| false
| 1,244
|
py
|
#!/usr/bin/env python
from sherpa.astro.ui import *
image_file = "acisf07999_000N001_r0035_regevt3_srcimg.fits"
psf_file = "acisf07999_000N001_r0035b_psf3.fits"
reg_file = "ellipse(3145.8947368421,4520.7894736842,37.0615234375,15.3881587982,92.2273254395)"
srcid = 1
load_data(srcid, image_file)
load_psf("psf%i" % srcid, psf_file)
set_psf(srcid, "psf%i" % srcid)
set_coord(srcid, "physical")
notice2d_id(srcid, reg_file)
# Switch to WCS for fitting
set_coord(srcid, "wcs")
# Use Nelder-Mead, C-statistic as fit method, statistic
set_method("neldermead")
set_stat("cstat")
set_source(srcid, 'gauss2d.src + const2d.bkg')
guess(srcid, src)
image_file = "acisf08478_000N001_r0043_regevt3_srcimg.fits"
psf_file = "acisf08478_000N001_r0043b_psf3.fits"
reg_file = "ellipse(3144.5238095238,4518.8095238095,25.2978591919,19.1118583679,42.9872131348)"
srcid = 2
load_data(srcid, image_file)
load_psf("psf%i" % srcid, psf_file)
set_psf(srcid, "psf%i" % srcid)
set_coord(srcid, "physical")
notice2d_id(srcid, reg_file)
# Switch to WCS for fitting
set_coord(srcid, "wcs")
# Use Nelder-Mead, C-statistic as fit method, statistic
set_method("neldermead")
set_stat("cstat")
set_source(srcid, 'gauss2d.src + const2d.bkg')
guess(srcid, src)
fit()
|
[
"olaurino@cfa.harvard.edu"
] |
olaurino@cfa.harvard.edu
|
51b331eccb9809f73598d78540ae4e6af635ee6f
|
f83839dfcbd8bfd5c535f819cf9bb9c303ff3f94
|
/MachineBookExtract/book_tools/characters_tool.py
|
8dce0f703472fc5e254302b519d10a0b74df7fbc
|
[] |
no_license
|
pawo97/MachineBookExtract
|
8cabc4c18a65022c1b61a6bd9e9f47eb60753401
|
33fab023b7b0e48e5fe043f504269d80cb964237
|
refs/heads/main
| 2023-07-25T01:45:43.711512
| 2021-09-06T21:11:32
| 2021-09-06T21:11:32
| 345,401,332
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,519
|
py
|
import traceback
from book_tools.characters_person_rate import characters_person_rate
class characters_tool:
def get_list_non_alpha_numeric(self, words):
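        # Strip non-alphanumeric characters from each word, lowercase it, and dedupe preserving order.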
non_alpha_words = []
for w in words:
# blank
if w != '':
alphanumeric = ""
for character in w:
if character.isalnum():
alphanumeric += character
non_alpha_words.append(alphanumeric.lower())
return list(dict.fromkeys(non_alpha_words))
def get_second_word(self, li):
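        # For multi-word entries keep the second word; single words pass through unchanged.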
li_second_word = []
for i in li:
if ' ' in i:
j = i.split(' ')
if len(j) >= 2:
li_second_word.append(j[1])
else:
li_second_word.append(i)
return li_second_word
def get_words_with_prefix(self, nouns):
prefix_list = []
for s in nouns:
s_final = ''
if (s.startswith('a ') or s.startswith('A ')) and s[2].isupper() and 'CHAPTER' not in s:
s_final = s[2:]
# print("LL", s[2:])
prefix_list.append(s_final)
elif (s.startswith('the ') or s.startswith('The ')) and s[4].isupper() and 'CHAPTER' not in s:
s_final = s[4:]
# print("LL", s[4:])
prefix_list.append(s_final)
return prefix_list
def get_persons_no_duplicates(self, doc):
persons = []
for entity in doc.ents:
if entity.label_ == 'PERSON':
if entity.text[0].isupper():
persons.append(entity.text)
return list(dict.fromkeys(persons))
def get_last_word(self, persons):
new_persons = []
for i in persons:
i = i.replace('\n', ' ')
if ' ' in i:
j = i.split(' ')
if len(j) >= 2:
new_persons.append(j[len(j) - 1])
else:
new_persons.append(i)
return new_persons
def remove_dot_s(self, persons):
new_persons = []
for w in persons:
if w.endswith("’s"):
w = w[0:len(w) - 2]
new_persons.append(w)
return new_persons
def check_spacy_tags(self, nlp, words_selected, persons):
# Create rating list
person_rates = []
for p in persons:
if p != 'the' and p != 'a' and len(p) > 1:
# check spacy tag
doc = nlp(p)
if 'NN' == doc[0].tag_:
person = characters_person_rate()
person.rate = 0
person.word = p
person.tag = doc[0].tag_
person_rates.append(person)
elif 'NNS' == doc[0].tag_:
person = characters_person_rate()
person.rate = 0
person.word = p[0:len(p) - 1]
person.tag = doc[0].tag_
person_rates.append(person)
elif 'NNP' == doc[0].tag_:
person = characters_person_rate()
person.rate = 0
person.word = p
person.tag = doc[0].tag_
person_rates.append(person)
# Count in words
for w in words_selected:
for p in person_rates:
if p.word in w or p.word == w:
p.rate += 1
person_rates.sort(key=lambda x: x.rate, reverse=True)
person_rates = list(dict.fromkeys(person_rates))
return person_rates
def capital_letter_and_not_empty_str_list(self, persons):
del persons[30:]
# capital letter
for i in range(len(persons)):
persons[i] = persons[i].title()
# delete empty strings
final_person = []
for i in range(len(persons)):
if persons[i] != '' and len(persons[i]) > 2:
final_person.append(persons[i])
return final_person
def sum_lists_rates(self, one, two, three):
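        # Merge the three candidate lists into one rating dict, favoring names that appear in more than one list.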
d = {}
for i in one:
d[i.lower()] = 0
for i in two:
if i not in d.keys():
d[i.lower()] = 0
else:
d[i] += 1
for i in three:
if i not in d.keys():
d[i.lower()] = 0
else:
d[i] += 1
d = list(dict(sorted(d.items(), key=lambda item: item[1], reverse=True)).keys())
return d
def get_characters(self, words, doc, nlp):
try:
words_selected = self.get_list_non_alpha_numeric(words)
# ==================================================================== GET BY TAGS
nouns = [chunk.text for chunk in doc.noun_chunks]
a_the_lists = self.get_words_with_prefix(nouns)
second_words_list = self.get_second_word(a_the_lists)
li_not_alpha = self.get_list_non_alpha_numeric(second_words_list)
li_not_alpha_duplicates = list(dict.fromkeys(li_not_alpha))
# ==================================================================== GET BY WORDS
persons = self.get_persons_no_duplicates(doc)
li_not_space = self.get_last_word(persons)
li_dot_s = self.remove_dot_s(li_not_space)
persons_result_list = self.get_list_non_alpha_numeric(li_dot_s)
# ==================================================================== RATING PERSONS
li_persons = list(dict.fromkeys(persons_result_list))
# Create rating list
li_person_rate = self.check_spacy_tags(nlp, words_selected, li_persons)
del li_person_rate[30:]
li_persons = []
for p in li_person_rate:
li_persons.append(str(p.word))
# ==================================================================== SUM RESULTS
persons_result_list = self.capital_letter_and_not_empty_str_list(persons_result_list)
# sum and the biggest values from three lists
d = self.sum_lists_rates(persons_result_list, li_persons, li_not_alpha_duplicates)
# capitalize first letter
final_list = self.capital_letter_and_not_empty_str_list(d)
except Exception as e:
print(traceback.format_exc())
final_list = []
return final_list
|
[
"pa-wo97@o2.pl"
] |
pa-wo97@o2.pl
|
77c55d04b0a750c8b0c0dc571cf5927a6d78e179
|
356f3f1b7caf0ccb20cc830d40821dfb2cbda046
|
/sfit/sfit/doctype/items/items.py
|
c1943c13dec9e21c63e99267eb3e87e7de102726
|
[
"MIT"
] |
permissive
|
vignesharumainayagam/sfit
|
f4b75b9a8b2de08d0eaa4eadbcd3d5e432ffba56
|
a96afbf35b0e1635e44cb5f83d7f86c83abedb8f
|
refs/heads/master
| 2021-09-05T18:22:43.494208
| 2018-01-30T07:23:02
| 2018-01-30T07:23:02
| 104,332,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Valiant Systems and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Items(Document):
pass
|
[
"vigneshwaran@valiantsystems.com"
] |
vigneshwaran@valiantsystems.com
|
9bd93aab5d5388ddaa38608af37c4a2b0e8f8509
|
d46db380847d51cea1966ba514d856f22251019e
|
/app/core/migrations/0001_initial.py
|
0a6c4c2dffc565e27135176e19bd4e4b03b6e1ba
|
[
"MIT"
] |
permissive
|
bilesanmiahmad/recipe-app-api
|
bc3b2004146ed46bbaf427947db63c8215c3230a
|
b292c0212627513bc62eb48cc187bfb6c5fd0aed
|
refs/heads/master
| 2022-12-22T10:08:15.240112
| 2020-10-01T09:48:27
| 2020-10-01T09:48:27
| 294,135,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,715
|
py
|
# Generated by Django 3.1.1 on 2020-10-01 08:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
[
"fbilesanmi@gmail.com"
] |
fbilesanmi@gmail.com
|
75d4242afd465b53edbc623e479a98134dddabf9
|
1735d7a35e9a3bc9b423d6960310b4bb80ca6b42
|
/py3bc/17_functions_03.py
|
deac79e8d38581446c4dc3a10ce91f3d7ee10321
|
[] |
no_license
|
qwertyzhed/python3bootcamp
|
38c0c1f2d354094f90db0fb54c9955c1befe4a56
|
0257ab47155115bf9994e6da77f7daab89d64fc3
|
refs/heads/master
| 2020-03-20T18:39:40.989958
| 2018-06-19T15:54:30
| 2018-06-19T15:54:30
| 137,598,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 68
|
py
|
def yell(word):
return print(word.upper() + '!')
yell('fk you')
|
[
"alex@gmail.com"
] |
alex@gmail.com
|
bc16f890677503af70743dd56dce78a66b1d4d0b
|
726c443d00ca8b43cc2a7559c2ae21fbad3bda10
|
/order/migrations/0008_auto_20191208_1242.py
|
a5bd9f20b96975e7f9a87f15c41c817b9ec1ba97
|
[] |
no_license
|
djleeyuanfang/onlineshop
|
3af8ef9e213ccc3a18f5f61ab20e8c1bfbfdf5b0
|
f3d15614f4104475a98b3d387aee6d2121639c12
|
refs/heads/master
| 2020-11-24T13:37:46.709584
| 2020-08-29T11:36:38
| 2020-08-29T11:36:38
| 228,171,654
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,498
|
py
|
# Generated by Django 3.0 on 2019-12-08 12:42
from django.db import migrations, models
import django.db.models.deletion
import order.models
class Migration(migrations.Migration):
dependencies = [
('order', '0007_auto_20191207_1950'),
]
operations = [
migrations.CreateModel(
name='ImageDir',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('direction', models.CharField(max_length=50, unique=True, verbose_name='分类目录')),
],
),
migrations.RemoveField(
model_name='goodimage',
name='img',
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file_name', models.CharField(max_length=32, verbose_name='上传文件名')),
('img', models.ImageField(upload_to=order.models.dre_path)),
('ImageDir', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='order.ImageDir', verbose_name='目录')),
],
),
migrations.AddField(
model_name='goodimage',
name='image',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='order.Image', verbose_name='图片'),
),
]
|
[
"1025939769@qq.com"
] |
1025939769@qq.com
|
fa09f73820c428ce2463a685f915692ae007b9f1
|
782160673937ccf69c809e1ed3edca6d08bbc171
|
/Chapter5/5-9.py
|
667e06267d49334e52d8fc311668b4d42a1e84dd
|
[] |
no_license
|
ParkEunbin/17.02-Python
|
6549f4fd10fe366f100082dd3834135aef584562
|
8e3831d8a911263ddd7b9f83bb26bcc79e34efb0
|
refs/heads/master
| 2021-06-18T08:03:07.099654
| 2021-01-20T05:38:10
| 2021-01-20T05:38:10
| 158,062,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
# fruit price calculation
fruit={"apple":1500,"banana":1300,"melon":2000}
for a,b in fruit.items():
print(a,b)
total=0
for c in fruit.keys():
print("%s의 갯수:"%c, end=" ")
num=int(input())
total+=(fruit[c]*num)
print(total,"원")
|
[
"noreply@github.com"
] |
ParkEunbin.noreply@github.com
|
2cd28a19444e56e138b55bd4f48633e1c6bb8a0f
|
085551650c697038bdfaebe4778e9741d3f1431a
|
/dashboard/urls.py
|
a596cb731368ad6c8cb3e4c609ca4d104d86ba18
|
[] |
no_license
|
AlonsoCN/chat-school-project
|
8029f011645c7043c27fd6583532e6dbc4ad063f
|
946b19352406804fd363582be56cd58dc426d149
|
refs/heads/master
| 2020-04-05T23:15:44.616122
| 2016-06-07T18:46:18
| 2016-06-07T18:46:18
| 60,637,883
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
from django.conf.urls import patterns, url
urlpatterns = patterns('dashboard.views',
url(r'^$', 'dashboard_view', name='dashboard'),
)
|
[
"luis.alonso.cn@gmail.com"
] |
luis.alonso.cn@gmail.com
|
c42d909697d0db5a72ae51a3c5d635841a1787f8
|
a8fca7b6bc1f0eeaba12b682a81d880dc71cc929
|
/FlaskEndpoint/tests/system/test_home.py
|
38225c4925d80136cac8cbc7e3a04b5a0ac7ca4e
|
[] |
no_license
|
sineczek/Automated-Software-Testing-with-Python
|
cb74d8714ad5b2ec9a6ffc013a400f0181f8095b
|
2e7c4ff4bb5acfd53afb43a4bfa7191eb58a899c
|
refs/heads/main
| 2023-04-14T08:15:53.917614
| 2021-04-24T17:18:23
| 2021-04-24T17:18:23
| 345,342,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
from tests.system.base_test import BaseTest
import json
class TestHome(BaseTest):
def test_home(self):
with self.app() as c:
resp = c.get('/')
self.assertEqual(
resp.status_code, 200
)
self.assertEqual(
                json.loads(resp.get_data()),  # loads reads the string, then converts it to JSON, i.e. a dict
{'message': 'Hello, world!'}
)
|
[
"michalzaitz@gmail.com"
] |
michalzaitz@gmail.com
|
8f885274db507628a34e8f8f094526a25c935972
|
cc9d1aeb8aefe3d4f86c94b4279a64e70bf5fd80
|
/setup.py
|
be0365371238e8e2c7a86eb0bd4aa3c81f749446
|
[
"MIT"
] |
permissive
|
sdelquin/sendgrify
|
a520a2da7d6c6d7c4707c325f6d67523e53803eb
|
fe8ee1d0efd0c8d8034d1c57cfc07672f77d7e8e
|
refs/heads/main
| 2023-06-11T15:49:27.284693
| 2023-05-28T12:54:34
| 2023-05-28T12:54:34
| 342,843,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
# read the contents of your README file
from pathlib import Path
from setuptools import setup
this_directory = Path(__file__).parent
long_description = (this_directory / 'README.md').read_text()
REQUIREMENTS = (
'sendgrid==5.3.0',
'markdown',
)
setup(
name='sendgrify',
version='2.0.3',
url='https://github.com/sdelquin/sendgrify.git',
author='Sergio Delgado Quintero',
author_email='sdelquin@gmail.com',
description='SendGrid for Humans',
license='MIT',
packages=['sendgrify'],
install_requires=REQUIREMENTS,
long_description=long_description,
long_description_content_type='text/markdown',
)
|
[
"sdelquin@gmail.com"
] |
sdelquin@gmail.com
|
4080d41a60b85ff5500efacfc8fa63c51b33899f
|
2d1ffb862ec65116f88b0986e4f36d36110cbfe5
|
/app/views.py
|
ced21fb3eae0537fbf78312e2c9f3eb801e59a90
|
[] |
no_license
|
stkc282/wedding
|
c38afc7861119b8cf4490fa35007841d58e161c7
|
1799b72820787a59d0d5b7edf7748b1ab7af9a98
|
refs/heads/master
| 2021-06-18T04:15:20.293547
| 2019-08-19T10:17:13
| 2019-08-19T10:17:13
| 202,826,952
| 0
| 0
| null | 2021-06-10T21:52:12
| 2019-08-17T02:48:38
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,288
|
py
|
# from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django_filters.views import FilterView
from .filters import ItemFilter
from .forms import ItemForm
from .models import Item
from django.shortcuts import render
# # Create your views here.
# # search list view
class ItemFilterView(FilterView):
model = Item
#
    # # default ordering: newest first
# queryset = Item.objects.all().order_by('-created_at')
#
    # # django-filter configuration
# filterset_class = ItemFilter
# strict = False
#
    # # number of items per page
# paginate_by = 10
#
    # # save the search conditions in the session
# def get(self, request, **kwargs):
# if request.GET:
# request.session['query'] = request.GET
# else:
# request.GET = request.GET.copy()
# if 'query' in request.session.keys():
# for key in request.session['query'].keys():
# request.GET[key] = request.session['query'][key]
#
# return super().get(request, **kwargs)
# # search list view (login required)
# class ItemFilterView(LoginRequiredMixin, FilterView):
# model = Item
#
# # default ordering: newest first
# queryset = Item.objects.all().order_by('-created_at')
#
# # django-filter configuration
# filterset_class = ItemFilter
# strict = False
#
# # number of items per page
# paginate_by = 10
#
# # save the search conditions in the session
# def get(self, request, **kwargs):
# if request.GET:
# request.session['query'] = request.GET
# else:
# request.GET = request.GET.copy()
# if 'query' in request.session.keys():
# for key in request.session['query'].keys():
# request.GET[key] = request.session['query'][key]
#
# return super().get(request, **kwargs)
#
# detail view
class ItemDetailView(DetailView):
model = Item
# # detail view (login required)
# class ItemDetailView(LoginRequiredMixin, DetailView):
# model = Item
# create view
class ItemCreateView(CreateView):
model = Item
form_class = ItemForm
success_url = reverse_lazy('thanks')
# update view
class ItemUpdateView(UpdateView):
model = Item
form_class = ItemForm
success_url = reverse_lazy('index')
# delete view
class ItemDeleteView(DeleteView):
model = Item
success_url = reverse_lazy('index')
def invitation(request):
# post = get_object_or_404(Post, pk=pk )
return render(request, 'app/invitation.html', {})
def thanks(request):
return render(request, 'app/thanks.html', {})
def access(request):
return render(request, 'app/access.html', {})
# def create(request):
# if request.method == 'POST':
# form_class = ItemForm(request.POST)
# if form_class.is_valid():
# model = form_class.save(commit=False)
# model.save()
# return redirect('index', pk=form_class.pk)
# else:
# form_class = ItemForm
# return render(request, 'app/thanks.html', {'form': form_class})
|
[
"you@example.com"
] |
you@example.com
|
39d7269798832e93cc7391c6516b8df87b50ca36
|
59c0669a38c4178f2f5cf8f9dca7553849c286a2
|
/MyPro/pythonScript/QRCodeDetect/Invoice/hough_tansform_bad.py
|
437f292bb460649c54b3fb981f99722309b81288
|
[] |
no_license
|
AUGUSTRUSH8/ImageProcess
|
f33ceaabaac67436df47fd1e1f115a8f44a6f556
|
46fc85b61dab52c3876dfacb4dfd22c962dc13bf
|
refs/heads/master
| 2023-04-27T21:39:36.044320
| 2022-07-04T14:59:35
| 2022-07-04T14:59:35
| 174,789,186
| 31
| 17
| null | 2022-07-06T20:07:14
| 2019-03-10T07:01:13
|
Java
|
UTF-8
|
Python
| false
| false
| 4,007
|
py
|
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from matplotlib import pyplot as plt
import math
def rotate_about_center2(src, radian, scale=1.):
    # argument: angle in radians
w = src.shape[1]
h = src.shape[0]
angle = radian * 180 / np.pi
# now calculate new image width and height
nw = (abs(np.sin(radian)*h) + abs(np.cos(radian)*w))*scale
nh = (abs(np.cos(radian)*h) + abs(np.sin(radian)*w))*scale
# ask OpenCV for the rotation matrix
rot_mat = cv2.getRotationMatrix2D((nw*0.5, nh*0.5), angle, scale)
# calculate the move from the old center to the new center combined
# with the rotation
rot_move = np.dot(rot_mat, np.array([(nw-w)*0.5, (nh-h)*0.5,0]))
# the move only affects the translation, so update the translation
# part of the transform
rot_mat[0,2] += rot_move[0]
rot_mat[1,2] += rot_move[1]
return cv2.warpAffine(src, rot_mat, (int(math.ceil(nw)), int(math.ceil(nh))), flags=cv2.INTER_LANCZOS4)
def get_group(arr):
    # group the angles into four radian intervals and return only the non-empty groups
radian_45 = np.pi/4
radian_90 = np.pi/2
radian_135 = radian_45 * 3
radian_180 = np.pi
ret_arr = [[],[],[],[]]
for i in range(len(arr)):
if arr[i] < radian_45:
ret_arr[0].append(arr[i])
elif arr[i] < radian_90:
ret_arr[1].append(arr[i])
elif arr[i] < radian_135:
ret_arr[2].append(arr[i])
else:
ret_arr[3].append(arr[i])
while [] in ret_arr:
ret_arr.remove([])
#print ret_arr
return ret_arr
def get_min_var_avg(arr):
    # group by radian interval; return the mean angle of the group with the smallest variance
group_arr = get_group(arr)
print(group_arr)
cv2.waitKey(0)
var_arr = []
if len(group_arr) <= 1:
var_arr.append(np.var(group_arr[0]))
print(var_arr)
cv2.waitKey(0)
else:
for i in range(len(group_arr)):
var_arr.append(np.var(group_arr[i]))
print(var_arr)
min_var = 10000
min_i = 0
for i in range(len(var_arr)):
if var_arr[i] < min_var:
min_var = var_arr[i]
min_i = i
#print min_var, i
avg = np.mean(group_arr[min_i])
return avg
def get_rotate_radian(radian, reverse = False):
    # map the detected Hough angle to the rotation (in radians) to apply
radian_45 = np.pi/4
radian_90 = np.pi/2
radian_135 = radian_45 * 3
radian_180 = np.pi
ret_radian = 0
if radian < radian_45:
ret_radian = radian
elif radian < radian_90:
ret_radian = radian - radian_90
elif radian < radian_135:
ret_radian = radian - radian_90
else:
ret_radian = radian - radian_180
if reverse:
ret_radian += radian_90
print(ret_radian)
return ret_radian
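# e.g. get_rotate_radian(np.pi/3) returns np.pi/3 - np.pi/2 = -np.pi/6, i.e. a ~30 degree
# counter-rotation; reverse=True adds a further 90 degrees for portrait frames (h > w).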
def rotate():
image = cv2.imread("test3.jpg", 0)
print(image.shape)
    # Gaussian blur
    blur = cv2.GaussianBlur(image, (7, 7), 0)  # kernel size tuned by hand (empirical)
cv2.imshow('image',blur)
cv2.waitKey(0)
    # Canny edge detection
canny = cv2.Canny(blur, 20, 150, 3)
cv2.imshow("canny",canny)
    lines = cv2.HoughLines(canny, 1, np.pi/180, 200)  # threshold tuned by hand (empirical)
    # average the detected line angles
l = len(lines[0])
print(l)
theta_arr = [lines[0][i][1] for i in range(l)]
print(theta_arr)
cv2.waitKey(0)
rotate_theta = get_min_var_avg(theta_arr)
print(rotate_theta)
#print lines
'''for line in lines[0]:
rho = line[0]
theta = line[1]
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
cv2.line(image, (int(x0 - 1000*b), int(y0 + 1000*a)), (int(x0 + 1000*b), int(y0 - 1000*a)), (0,255,0), 2)
#cv2.imshow('image',image)
#cv2.waitKey(0)'''
    img2 = rotate_about_center2(image, get_rotate_radian(rotate_theta, image.shape[0] > image.shape[1]))  # height > width
plt.imshow(img2)
plt.show()
if __name__ == '__main__':
rotate()
|
[
"l"
] |
l
|
f879c61a6efd8aa975d90fa1e48070580dd1f2ae
|
8081310a546b0bd93abebbac5066c81b21c38482
|
/utils/helper.py
|
6aaa4e42e5327833afeea950df2eb77bfb12884c
|
[] |
no_license
|
Xiang-Deng-DL/GFKD
|
dd9130169a59216aed63e9fc22baabf758f15add
|
e68e4d6777af526f84ef1efab0a261cb8f5ac968
|
refs/heads/main
| 2023-07-06T18:56:45.224307
| 2021-08-13T17:37:55
| 2021-08-13T17:37:55
| 366,174,979
| 24
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,505
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 13 01:33:09 2020
@author: xiangdeng
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import copy
from model.GIN import GIN_dict
from model.GCN import GCN_dict
from Temp.dataset import GINDataset
from utils.GIN.full_loader import GraphFullDataLoader
from Temp.stru_dataset import STRDataset
from utils.GIN.data_loader import GraphDataLoader, collate
import os
def adjust_learning_rate(epoch, learning_rate, optimizer, model):
"""Sets the learning rate to the initial LR decayed by decay rate every steep step"""
if model=='gin':
step = int(epoch/700)
new_lr = learning_rate * (0.1 ** step)
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
elif model=='gcn':
step = int(epoch/700)
new_lr = learning_rate * (0.1 ** step)
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
def nernulli_sample(params):
strus = []
for parm in params:
pro = torch.sigmoid(parm)
stru = (pro>=0.5).type(torch.int).cuda() #or torch.bernoulli(pro)
strus +=[stru]
return strus
def norm_loss(n, params, feature_paras, targets, criterion, self_loop, degree_as_label, net,
loss_r_feature_layers, bn_reg_scale, batch_size, gpu, onehot, onehot_cof):
strus = nernulli_sample(params)
if onehot:
graphfeatures = []
for fp in feature_paras:
fea = torch.softmax(fp,1)
graphfeatures += [fea]
else:
graphfeatures = feature_paras
dataset = STRDataset(strus, graphfeatures, targets, self_loop, degree_as_label)
train_loader = GraphFullDataLoader(dataset, batch_size=batch_size, device=gpu).train_loader()
for graphs, labels in train_loader:
labels = labels.cuda()
features = graphs.ndata['attr'].cuda()
outputs = net(graphs, features)
loss1 = criterion(outputs, labels)
loss_distr = sum([mod.r_feature for mod in loss_r_feature_layers])
loss = loss1 + bn_reg_scale*loss_distr
#print('start sample second')
for i in range(n-1):
strus = nernulli_sample(params)
dataset = STRDataset(strus, graphfeatures, targets, self_loop, degree_as_label)
train_loader = GraphFullDataLoader(dataset, batch_size=batch_size, device=gpu).train_loader()
for graphs, labels in train_loader:
labels = labels.cuda()
features = graphs.ndata['attr'].cuda()
outputs = net(graphs, features)
loss1 = criterion(outputs, labels)
loss_distr = sum([mod.r_feature for mod in loss_r_feature_layers])
curloss = loss1+bn_reg_scale*loss_distr
loss+=curloss
loss = loss/n
if onehot:
allfeatures = torch.cat(graphfeatures, dim=0)
b = allfeatures * torch.log(allfeatures)
h = -1.0 * b.sum()/len(allfeatures)
loss = loss + onehot_cof*h
return loss, strus
def generate_b(param):
num=len(param)
first =[]
second=[]
noise=[]
for i in range(num):
temparam=param[i]
noise_shape=temparam.shape
u_noise = torch.rand(size=noise_shape).cuda()
P1 = torch.sigmoid(-temparam)
E1 = (u_noise>P1).type(torch.int).cuda()
P2 = 1 - P1
E2 = (u_noise<P2).type(torch.int).cuda()
first+=[E1]
second+=[E2]
noise+=[u_noise]
return first, second, noise
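# Note: generate_b draws one shared uniform-noise tensor per parameter and the two
# antithetic Bernoulli samples (E1, E2) that bernulli_fastgrad below evaluates to
# form its two-sided gradient estimate for the structure logits.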
def bernulli_fastgrad(params, feature_paras, targets, criterion_stru, self_loop, degree_as_label, net, batch_size, gpu,
onehot, loss_r_feature_layers, bn_reg_scale):
first, second, noise = generate_b(params)
if onehot:
graphfeatures = []
for fp in feature_paras:
fea = torch.softmax(fp,1)
graphfeatures += [fea]
else:
graphfeatures = feature_paras
grads = []
dataset1 = STRDataset(first, graphfeatures, targets, self_loop, degree_as_label)
train_loader1 = GraphFullDataLoader(dataset1, batch_size=batch_size, device=gpu).train_loader()
for graphs1, labels1 in train_loader1:
labels1 = labels1.cuda()
features1 = graphs1.ndata['attr'].cuda()
outputs1 = net(graphs1, features1)
loss_ce1 = criterion_stru(outputs1, labels1)
loss_distr1 = sum([mod.r_feature for mod in loss_r_feature_layers])*bn_reg_scale
dataset2 = STRDataset(second, graphfeatures, targets, self_loop, degree_as_label)
train_loader2 = GraphFullDataLoader(dataset2, batch_size=batch_size, device=gpu).train_loader()
for graphs2, labels2 in train_loader2:
labels2 = labels2.cuda()
features2 = graphs2.ndata['attr'].cuda()
outputs2 = net(graphs2, features2)
loss_ce2 = criterion_stru(outputs2, labels2)
loss_distr2 = sum([mod.r_feature for mod in loss_r_feature_layers])*bn_reg_scale
for i in range( len(noise) ):
grad = (loss_ce1[i]-loss_ce2[i] + loss_distr1-loss_distr2 )*(noise[i] - 0.5)
grads+=[grad]
return grads
def task_data(args):
# step 0: setting for gpu
if args.gpu >= 0:
torch.cuda.set_device(args.gpu)
# step 1: prepare dataset
dataset = GINDataset(args.dataset, args.self_loop, args.degree_as_label)
print(dataset.dim_nfeats)
# step 2: prepare data_loader
_, valid_loader = GraphDataLoader(
dataset, batch_size=32, device=args.gpu,
collate_fn=collate, seed=args.dataseed, shuffle=True,
split_name=args.split_name).train_valid_loader()
return dataset, valid_loader
def task_model(args, dataset):
# step 1: prepare model
assert args.tmodel in ['GIN', 'GCN']
assert args.smodel in ['GIN', 'GCN']
if args.tmodel == 'GIN':
modelt = GIN_dict[args.modelt](dataset)
elif args.tmodel == 'GCN':
modelt = GCN_dict[args.modelt](dataset)
else:
        raise ValueError('Not supporting such model!')
if args.smodel == 'GIN':
models = GIN_dict[args.models](dataset)
elif args.smodel == 'GCN':
models = GCN_dict[args.models](dataset)
else:
        raise ValueError('Not supporting such model!')
modelt = modelt.cuda()
models = models.cuda()
return modelt, models
def evaluate(model, dataloader, loss_fcn):
model.eval()
total = 0
total_loss = 0
total_correct = 0
with torch.no_grad():
for data in dataloader:
graphs, labels = data
feat = graphs.ndata['attr'].cuda()
labels = labels.cuda()
total += len(labels)
outputs = model(graphs, feat)
_, predicted = torch.max(outputs.data, 1)
total_correct += (predicted == labels.data).sum().item()
loss = loss_fcn(outputs, labels)
total_loss += loss * len(labels)
loss, acc = 1.0 * total_loss / total, 1.0 * total_correct / total
return loss, acc
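# Example (hypothetical wiring): validate the student model on the held-out split
#   criterion = torch.nn.CrossEntropyLoss()
#   val_loss, val_acc = evaluate(models, valid_loader, criterion)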
def generate_graphs(sturectures, features, targets, path, daseed, trial, modelname, bat_num, total_num):
graph_num = len(sturectures)
filep = path+modelname+'fake_mutag'+ str(daseed)+'_'+str(trial)+ '.txt'
if bat_num ==0:
open(filep, 'w').close()
with open(filep,'a') as f:
if bat_num==0:
tnum = str(total_num)
f.write(tnum)
f.write('\n')
for i in range(graph_num):
# node num and label
feas = features[i]
feas = torch.argmax(feas, 1)
feas = feas.to('cpu').numpy()
stru = sturectures[i]
node_number, label = stru.shape[0], targets[i]
label = str(label)
content = str(node_number)+' '+label
#content = content.replace('/n', ' ')
f.write(content)
f.write('\n')
#
for j in range(node_number):
cur_row = stru[j]
neig = ((cur_row == 1).nonzero())
neig = neig[neig!=j]
num = len(neig)
neig = neig.to('cpu').numpy()
'''if num>7:
neig = list(neig)
num = 7
neig = np.array(random.sample(neig, 7))'''
if num>0:
neig=str(neig)[1:-1]
else:
neig = str(neig)
#node_label = random.sample(range(0, 7), 1)[0]
node_label = feas[j]
node_inf = str(node_label)+' '+str(num)+' '+neig
node_inf = node_inf.replace('\n', ' ').replace('\r', ' ')
f.write(node_inf)
f.write('\n')
def create_folder(directory):
# from https://stackoverflow.com/a/273227
if not os.path.exists(directory):
os.makedirs(directory)
|
[
"noreply@github.com"
] |
Xiang-Deng-DL.noreply@github.com
|
0660e025dd554b5703ef032f89a0902e86d1a771
|
7dad4550554888a865334df8023970378e17ae56
|
/Longitud.py
|
e1fbc88390d8150834b146b0e08cdf5e05066f06
|
[] |
no_license
|
JRLV14/Pensamiento_Computacional
|
8ba79875bfed8d67e76e3d24847c55d61f47e149
|
e2af455f1f7ae8922c414617c3b75ada40c7bc4f
|
refs/heads/master
| 2023-06-09T22:52:54.986837
| 2021-07-02T17:43:09
| 2021-07-02T17:43:09
| 381,734,397
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
def run():
    Nombre = input("Escribe tu nombre: ")
    Nombre = Nombre.replace(" ", "")
    letras = len(Nombre)
    print("Hola " + Nombre + " Tu nombre tiene " + str(letras) + " letras")
if __name__ == '__main__':
run()
|
[
"jrlv4070@gmail.com"
] |
jrlv4070@gmail.com
|
0085fd62735222af905666b50be03358a0c3f7ec
|
1b52887970b2ed95e73b950862a050b58fa7269d
|
/network/core_net.py
|
c17213c039d4648e74e04ed41b5518b5566d0c86
|
[] |
no_license
|
FreescaleFlyaway/lizard
|
1516ff009f08a742ad835134f4278202a9714355
|
3db9d49cb45ff13f295f77fa592467cf793611c9
|
refs/heads/master
| 2020-05-19T10:12:13.315447
| 2018-03-26T08:36:43
| 2018-03-26T08:36:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,906
|
py
|
# $File: core_net.py
# $Author: Harvey Chang
import tensorflow as tf
import numpy as np
def actor_net(obs_ph, act_dim, suppress_ratio=1.0):
with tf.variable_scope('actor'):
obs_dim = obs_ph.shape.as_list()[-1] # the last dim of shape
hid1_size = obs_dim * 10
hid3_size = act_dim * 10
hid2_size = int(np.sqrt(hid1_size * hid3_size))
# hidden net:
out = tf.layers.dense(obs_ph, hid1_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=suppress_ratio*np.sqrt(1/obs_dim)), name="h1")
out = tf.layers.dense(out, hid2_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=suppress_ratio*np.sqrt(1/hid1_size)), name="h2")
out = tf.layers.dense(out, hid3_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=suppress_ratio*np.sqrt(1/hid2_size)), name="h3")
means = tf.layers.dense(out, act_dim, tf.tanh, kernel_initializer=tf.random_normal_initializer(
stddev=suppress_ratio*np.sqrt(1 / hid3_size)), name='means')
# variance:
log_vars = tf.get_variable('logvars', [act_dim], tf.float32,
tf.random_normal_initializer(mean=-2, stddev=1.0/act_dim))
sigma_init = tf.variables_initializer([log_vars], 'sigma_initializer')
sigma = tf.exp(log_vars)
return means, sigma, sigma_init
def critic_net(obs_ph, suppress_ratio=1.0):
with tf.variable_scope('critic'):
obs_dim = obs_ph.shape.as_list()[-1]
hid1_size = obs_dim * 10
hid3_size = 10
hid2_size = int(np.sqrt(hid1_size * hid3_size))
out = tf.layers.dense(obs_ph, hid1_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=suppress_ratio * np.sqrt(1 / obs_dim)), name="h1")
out = tf.layers.dense(out, hid2_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=suppress_ratio * np.sqrt(1 / hid1_size)), name="h2")
out = tf.layers.dense(out, hid3_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=suppress_ratio * np.sqrt(1 / hid2_size)), name="h3")
out = tf.layers.dense(out, 1,
kernel_initializer=tf.random_normal_initializer(
stddev=suppress_ratio * np.sqrt(1 / hid3_size)), name='output')
out = tf.squeeze(out)
return out
def activate_net(obs_ph):
with tf.variable_scope('activate'):
obs_dim = obs_ph.shape.as_list()[-1]
hid1_size = obs_dim * 10
hid3_size = 10
hid2_size = int(np.sqrt(hid1_size * hid3_size))
out = tf.layers.dense(obs_ph, hid1_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / obs_dim)), name="h1")
out = tf.layers.dense(out, hid2_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / hid1_size)), name="h2")
out = tf.layers.dense(out, hid3_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / hid2_size)), name="h3")
out = tf.layers.dense(out, 1,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / hid3_size)), name='output')
out = tf.squeeze(out)
return out
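# Minimal usage sketch (dimensions are illustrative; TF1-style graph mode):
#   obs_ph = tf.placeholder(tf.float32, shape=(None, 11), name='obs')
#   means, sigma, sigma_init = actor_net(obs_ph, act_dim=3)  # Gaussian policy head
#   value = critic_net(obs_ph)                               # state-value estimate
#   with tf.Session() as sess:
#       sess.run([tf.global_variables_initializer(), sigma_init])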
|
[
"zhengwxthu@163.com"
] |
zhengwxthu@163.com
|
14a4fe4b25d073188b3eb9b8c1910bf1d861c999
|
97e06c68fe4ddb3a93ab665b6c8ae3d835eb484b
|
/catch/baidubaike_catch_data.py
|
64196fb01683723e6f1448a6e0a58b0462a3d1d7
|
[] |
no_license
|
baby-H/MachineStudy
|
2b4545ff9af00f9121210fc94469db1f60ad259a
|
4bfb3f9cc13ebbfbf6652d94697d87fd12b47179
|
refs/heads/master
| 2020-04-28T03:25:04.244351
| 2019-05-25T07:44:05
| 2019-05-25T07:44:05
| 174,936,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,519
|
py
|
# -*- coding: utf-8 -*-
# Author: Hu Ying Jie ( huyingjie2123@163.com )
from bs4 import BeautifulSoup
import urllib
from urllib import request
import re
import os
def get_html(url):
return urllib.request.urlopen(url, timeout=20).read().decode('utf-8')
temp_str = ''
n = 0
f = open(r'test.txt', 'r', encoding='utf8')
fp = open(r'word_0_out.txt', 'w+', encoding='utf8')
for line in f:
    if len(temp_str) > 400000:
        fp.close()
        n += 1  # note: '++n' is a no-op in Python (double unary plus), so increment explicitly
        path = os.path.join('word_' + str(n) + '_out.txt')
        fp = open(path, 'w+', encoding='utf8')
        temp_str = ''  # reset the rollover buffer for the new output file
        fp.write(line)
line = line[:-1]
print(line)
try:
url0 = "https://baike.baidu.com/item/"
url = url0 + urllib.parse.quote(str(line))
html = get_html(url)
soup = BeautifulSoup(html, 'html.parser')
if str(soup.title) == '<title>百度百科——全球最大中文百科全书</title>':
print('404')
continue
for text in soup.find_all('div', class_="para"):
for div_tag in text.find_all('div', class_="description"):
div_tag.decompose()
if text.span:
text.span.decompose()
new_str = "".join(text.get_text().split())
new_str = re.sub(r'\[[\d]*\]', '', new_str)
new_str = re.sub(r'\[[\d]*-[\d]\]', '', new_str)
temp_str = temp_str + new_str
fp.write(new_str)
print()
fp.write(u"\n")
except:
print('error')
continue
fp.close()
f.close()
|
[
"huyj@tongc-soft.com"
] |
huyj@tongc-soft.com
|
e69735542275999d2049a87b2ac118f4185c1903
|
5abdea0be9021f13909c38b09a68bde2d153b210
|
/src/imbalance_strategies.py
|
da7a33632423a1ac5fad4d93f683776a55ae6493
|
[] |
no_license
|
NWPU-IST/hbrPredictor
|
cdcccf0f900d6135f8bab355a71f9b8bc2f2c981
|
a67ca29f7191f816e8bc388449059984e1d86e81
|
refs/heads/master
| 2023-03-19T04:50:15.458017
| 2021-03-09T15:15:44
| 2021-03-09T15:15:44
| 326,311,570
| 9
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,948
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'Junzheng Chen'
import random
import numpy as np
from sklearn.neighbors import NearestNeighbors
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import SMOTENC
from imblearn.over_sampling import ADASYN
from imblearn.over_sampling import BorderlineSMOTE
from imblearn.under_sampling import RandomUnderSampler
from imblearn.under_sampling import NearMiss
from imblearn.under_sampling import EditedNearestNeighbours
from imblearn.under_sampling import CondensedNearestNeighbour
from imblearn.combine import SMOTEENN
from imblearn.combine import SMOTETomek
class Smote:
def __init__(self, samples, N=10, k=5):
self.n_samples, self.n_attrs = samples.shape
self.N = N
self.k = k
self.samples = samples
self.newindex = 0
# self.synthetic=np.zeros((self.n_samples*N,self.n_attrs))
def over_sampling(self):
N = self.N
print(self.n_attrs)
self.synthetic = np.zeros((self.n_samples * N, self.n_attrs))
neighbors = NearestNeighbors(n_neighbors=self.k).fit(self.samples)
print('neighbors', neighbors)
for i in range(len(self.samples)):
nnarray = neighbors.kneighbors(self.samples[i].reshape(1, -1), return_distance=False)[0]
self._populate(N, i, nnarray)
return self.synthetic
    # for each minority-class sample, choose among its k nearest neighbors and generate N synthetic samples
def _populate(self, N, i, nnarray):
for j in range(N):
nn = random.randint(0, self.k - 1)
dif = self.samples[nnarray[nn]] - self.samples[i]
gap = random.random()
self.synthetic[self.newindex] = self.samples[i] + gap * dif
self.newindex += 1
def get_smote_result(data_list, label, N):
length = len(data_list)
postive_data = []
for i in range(0, length):
if label[i] == 1:
postive_data.append(data_list[i])
data_array = np.array(postive_data)
    smote = Smote(data_array, N, 5)
    return smote.over_sampling()
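# Example (synthetic minority data; shapes are illustrative):
#   X_min = np.random.rand(20, 5)                          # 20 minority samples, 5 features
#   new_samples = Smote(X_min, N=10, k=5).over_sampling()  # -> (200, 5) synthetic array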
# Combination of over-and under-sampling methods
def get_cbs_smoteenn(data_list, label):
smo = SMOTEENN(random_state=42)
X_smo, y_smo = smo.fit_resample(data_list, label)
return X_smo, y_smo
def get_cbs_smotetomek(data_list, label):
smo = SMOTETomek(random_state=42)
X_smo, y_smo = smo.fit_resample(data_list, label)
return X_smo, y_smo
# Under sampling
def get_uds_rdm(data_list, label):
rdm = RandomUnderSampler()
X_rdm, y_rdm = rdm.fit_resample(data_list, label)
return X_rdm, y_rdm
def get_uds_nm(data_list, label):
nm = NearMiss()
X_nm, y_nm = nm.fit_resample(data_list, label)
return X_nm, y_nm
def get_uds_enn(data_list, label):
    enn = EditedNearestNeighbours()
    X_res, y_res = enn.fit_resample(data_list, label)
    return X_res, y_res
def get_uds_CNN(data_list, label):
    cnn = CondensedNearestNeighbour(random_state=42)
    X_res, y_res = cnn.fit_resample(data_list, label)
    return X_res, y_res
# Over sampling
def get_ovs_smote_standard(data_list, label):
smo = SMOTE(random_state=42)
    X_smo, y_smo = smo.fit_resample(data_list, label)  # fit_sample was removed in newer imblearn releases
return X_smo, y_smo
def get_ovs_adasyn(data_list, label):
smo = ADASYN(random_state=42)
X_smo, y_smo = smo.fit_resample(data_list, label)
return X_smo, y_smo
def get_ovs_smotenc(data_list, label):
smo = SMOTENC(random_state=42, categorical_features=[18, 19])
X_smo, y_smo = smo.fit_resample(data_list, label)
return X_smo, y_smo
def get_ovs_BorderlineSMOTE(data_list, label):
bd_smt = BorderlineSMOTE()
X_smo, y_smo = bd_smt.fit_resample(data_list, label)
return X_smo, y_smo
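# All wrappers above share the same call pattern, e.g. (hypothetical X, y arrays):
#   X_res, y_res = get_cbs_smoteenn(X, y)   # resampled features and labels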
def get_ovs_smote_borderline_1(clf, data, label, m, s, k=5):
label_local = label[:]
clf.fit(data, label_local)
data_list = data.tolist()
data_list = data_list[:]
length = len(data_list)
T = np.array(data_list)
n_samples, n_attrs = T.shape
# get p list
P = []
for i in range(0, length):
if label_local[i] == 1:
P.append(i)
n_samples = len(P)
# calc m for all the positive sample
neighbors = NearestNeighbors(n_neighbors=k).fit(T)
synthetic = np.zeros((n_samples * m, n_attrs))
newindex = 0
for i in range(len(P)):
nnarray = neighbors.kneighbors(T[P[i]].reshape(1, -1), return_distance=False)[0]
for j in range(m):
nn = random.randint(0, k - 1)
dif = T[nnarray[nn]] - T[P[i]]
gap = random.random()
synthetic[newindex] = T[P[i]] + gap * dif
newindex += 1
pred = []
danger = []
noise = []
for i in range(0, n_samples * m):
pred.append(clf.predict(synthetic[i].reshape(1, -1)))
for i in range(0, len(pred)):
if i % 5 != 0:
continue
count = 0
for j in range(0, 5):
if i + j >= len(pred) - 1:
continue
if pred[i + j] == 0:
count += 1
if count == 5:
noise.append(P[int(i / 5)])
elif count > 2:
danger.append(P[int(i / 5)])
n_samples_danger = len(danger)
# calc m for all the positive sample
danger_list = []
for i in danger:
danger_list.append(T[i])
if not danger_list:
result = []
result.append(data_list)
result.append(label)
return result
neighbors = NearestNeighbors(n_neighbors=k).fit(danger_list)
synthetic_danger = np.zeros((n_samples_danger * s, n_attrs), dtype=float)
newindex_danger = 0
for i in range(len(danger)):
if 5 > len(danger):
result = []
result.append(data_list)
result.append(label)
return result
nnarray = neighbors.kneighbors(T[danger[i]].reshape(1, -1), return_distance=False)[0]
for j in range(m):
nn = random.randint(0, k - 1)
dif = T[nnarray[nn]] - T[danger[i]]
gap = random.random()
synthetic_danger[newindex_danger] = T[danger[i]] + gap * dif
newindex_danger += 1
synthetic_danger_list = synthetic_danger.tolist()
noise.reverse()
    # remove the noise samples (indices were reversed above, so deletion by index stays valid)
    for i in range(0, len(noise)):
del data_list[noise[i]]
del label_local[noise[i]]
    # insert the synthetic positive samples at random positions
random_list = []
for i in range(0, len(synthetic_danger_list)):
random_list.append(int(random.random() * len(data_list)))
for i in range(0, len(random_list)):
data_list.insert(random_list[i], synthetic_danger_list[i])
label_local.insert(random_list[i], 1)
result = []
result.append(data_list)
result.append(label_local)
return result
|
[
"noreply@github.com"
] |
NWPU-IST.noreply@github.com
|
342d4a56a9680e0518979d48af56e27109bc1403
|
56c89d49b0b5fd567783f056637a312d81b187bd
|
/lab 3/a5.py
|
10240b377d5d4806ada151f46c5f9b54e8e4067c
|
[] |
no_license
|
vedeninvv/Algorithms
|
acdfb16721437a81d8b0c5abd5b5185abf45254d
|
8e4e0bbc9ebf872f44ebbe709b6045f453e42aee
|
refs/heads/master
| 2022-12-19T06:08:35.259425
| 2020-09-28T18:31:35
| 2020-09-28T18:31:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
fin = open("radixsort.in")
fout = open("radixsort.out", "w")
first_line = fin.readline().split()  # avoid shadowing the built-in input()
n = int(first_line[0])
m = int(first_line[1])
k = int(first_line[2])
strings = []
for line in fin:
strings.append(line[0: len(line) - 1])
i = min(k, m)
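# LSD radix sort over the last min(k, m) character positions; the 57 buckets
# cover ASCII codes 65..121 ('A'..'y'), hence the ord(...) - 65 index below.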
while i > 0:
buf = [[] for j in range(57)]
for j in range(n):
buf[ord(strings[j][m - 1]) - 65].append(strings[j])
kol = 0
for j in range(57):
for el in buf[j]:
strings[kol] = el
kol += 1
i -= 1
m -= 1
for i in strings:
print(i, file=fout)
|
[
"vedeninvv@mail.ru"
] |
vedeninvv@mail.ru
|
2bd8d6cbcaa1f087d3413725ed8af20316077c61
|
a20db420b58321756676ddf41a2833f0283c6f66
|
/src/Chrysalis.py
|
9b2b01b931d3063fbaa23bb87d37f3ec12972c9c
|
[] |
no_license
|
CPSibo/Chrysalis
|
5a3194cfb0be8c24543ffb51dd52643afea9c2b1
|
a2cfaaf4aeb4ad7adb48f1229ba291a9af6dc263
|
refs/heads/master
| 2020-04-15T07:54:00.249188
| 2019-10-01T01:12:06
| 2019-10-01T01:12:06
| 164,506,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,298
|
py
|
# region Imports
import os
import subprocess
import pathlib
import sys
import argparse
import json
from dotenv import load_dotenv
from Utilities.Arguments import args
from Utilities.Logger import Logger
from Subscription import Subscription
from Repositories import REPOSITORIES
from Destinations import DESTINATIONS
# endregion
class Chrysalis:
"""
The entry point for Chrysalis.
Attributes:
        subscriptions (list): Decoded subscription settings.
"""
# region Attributes
subscriptions = []
# endregion
# region Constructors
def __init__(self):
load_dotenv()
self.load_subscriptions()
# endregion
# region Functions
def load_subscriptions(self):
"""
Reads in subscriptions.json and decodes all the settings
into Subscription objects.
"""
with open('src/subscriptions.json', 'r') as myfile:
subscription_encoded=myfile.read()
subscriptions_decoded = json.loads(subscription_encoded)
self.subscriptions = []
for sub in subscriptions_decoded:
self.subscriptions.append(Subscription(dict_config = sub))
def process_subscription(self, subscription: Subscription):
"""
Runs youtube-dl and the post-processing for the given subscription.
Parameters:
subscription (Subscription): The subscription to process.
"""
if not subscription.enabled:
return
Logger.log(r'Chrysalis', r'Processing "{}"...'.format(subscription.name))
self.setup_staging_directory(subscription)
if subscription.logging and subscription.logging.path:
pathlib.Path(subscription.logging.path).parent.mkdir(parents=True, exist_ok=True)
command = self.construct_command(subscription)
subprocess.run(command, shell=True)
def setup_staging_directory(self, subscription: Subscription) -> str:
"""
Constructs and creates the staging directory for the given subscription.
Parameters:
subscription (Subscription): The subscription to process.
Returns:
str: The path to the staging directory.
"""
pathlib.Path(subscription.staging_directory).mkdir(parents=True, exist_ok=True)
return subscription.staging_directory
def construct_command(self, subscription: Subscription) -> str:
"""
Builds the youtube-dl command for the given subscription.
Args:
subscription (Subscription): The subscription to process.
Returns:
str: The youtube-dl command with all desired arguments.
"""
command = r'youtube-dl'
# Add the youtube-dl config path.
if subscription.youtubedl_config.config:
config_path = os.path.join(os.getenv('youtubedl_config_directory'), subscription.youtubedl_config.config)
command += r' --config-location "{}"'.format(config_path)
# Add the metadata-from-title pattern.
if subscription.youtubedl_config.metadata_format:
command += r' --metadata-from-title "{}"'.format(subscription.youtubedl_config.metadata_format)
# Add the output pattern.
if subscription.youtubedl_config.output_format:
output_format = subscription.staging_directory + '/staging_area/' + subscription.youtubedl_config.output_format
command += r' -o "{}"'.format(output_format)
# Add the path to the video ID archive.
if subscription.youtubedl_config.archive:
archive_path = os.path.join(subscription.staging_directory, subscription.youtubedl_config.archive)
command += r' --download-archive "{}"'.format(archive_path)
# Add any extra arguments this sub has.
if subscription.youtubedl_config.extra_commands:
command += " " + subscription.youtubedl_config.extra_commands
# Add the subscription URL.
command += r' "{}"'.format(subscription.url)
# Construct the post-processing call back into
# Chrysalis to be run after each successful download.
if subscription.post_processing:
command += ' --exec \'"{}" "{}" --postprocess {{}} --subscription "{}"\''.format(
sys.executable,
__file__,
subscription.name
)
# Construct the stdout redirect to the log file.
if subscription.logging.path:
command += r' {} "{}"'.format(
'>>' if subscription.logging.append == True else '>',
subscription.logging.path
)
Logger.log(r'Chrysalis', r'Command to be run: [{}]'.format(command))
return command
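    # Example of a constructed command (paths, names and URL are hypothetical):
    #   youtube-dl --config-location "/cfg/base.conf" -o "/staging/sub/staging_area/%(title)s.%(ext)s"
    #     --download-archive "/staging/sub/archive.txt" "https://example.com/channel" >> "/logs/sub.log"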
def postprocess(self, file: str, subscription: Subscription) -> str:
"""
Runs the post-processing for the given youtube-dl output file.
Args:
file (str): Absolute path to the youtube-dl output file.
subscription (Subscription): The settings to process the file under.
Returns:
str: The absolute path to the folder where all the files were moved.
"""
from PostProcessor import PostProcessor
        Logger.log(r'Chrysalis', r'Starting PostProcessor for {}'.format(file), 1)
postprocessor = PostProcessor(
file = file,
settings = subscription
)
postprocessor.run()
Logger.tabs -= 1
def run(self) -> int:
"""
Entry point for the Chrysalis process.
Returns:
int: Status code.
"""
        if args.postprocess is not None:
            subs = [item for item in self.subscriptions if item.name == args.subscription]
            subscription = subs[0] if subs else None
            if not subscription:
                return -1
            self.postprocess(args.postprocess, subscription)
        else:
            for subscription in self.subscriptions:
                self.process_subscription(subscription)
        return 0  # status code promised by the docstring
# endregion
Chrysalis().run()
|
[
"cpsibo@gmail.com"
] |
cpsibo@gmail.com
|
bb6e52fee441903389167e2b4292125b69cdb8b8
|
ce3964c7195de67e07818b08a43286f7ec9fec3e
|
/dl_poly/get_pvt.py
|
6fd5f7613ff6286470a47abe111c368b60d57ff7
|
[] |
no_license
|
zhuligs/physics
|
82b601c856f12817c0cfedb17394b7b6ce6b843c
|
7cbac1be7904612fd65b66b34edef453aac77973
|
refs/heads/master
| 2021-05-28T07:39:19.822692
| 2013-06-05T04:53:08
| 2013-06-05T04:53:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,852
|
py
|
#!/usr/bin/env python
# Try retrieving P,V,T, etc. from the STATIS file, may be easier than from OUTPUT...
import os, sys, commands
def readlines(FILE,n):
'''Read n lines from FILE'''
for i in range(n):
FILE.readline()
try:
s = open('STATIS','r')
header1 = s.readline()
header2 = s.readline()
c = open('CONTROL','r')
lines = c.readlines()
for line in lines:
if len(line.split()) == 2:
var, value = line.split()
if var == 'steps':
steps = int(value)
elif var == 'stats':
stats = int(value)
c.close()
except:
print 'Could not open STATIS and CONTROL files successfully--stopping'
sys.exit(0)
# Total energy is row 1 value 1
# Temp is row 1, value 2
# Pres is row 6, value 2
# Vol is row 4, value 4
nblocks = int(steps)/int(stats)
out = open('pvt.dat','w')
out.write('# --Data extracted from STATIS file--\n')
out.write('#tstep\tpres (GPa)\tvol (ang^3)\ttemp (K)\tetot (eV)\t\tpot (eV)\n')
for i in range(nblocks):
tstep, t, elements = s.readline().split()
row1 = s.readline().split()
Etot = str( float(row1[0]) * 1.036426865E-4 ) # convert unit to eV
T = row1[1]
s.readline()
s.readline()
V = s.readline().split()[3]
s.readline()
P = str( float(s.readline().split()[1]) * 0.016605402 ) # convert atm unit to GPa
# Every line has 5 values, each line read is 5 elements gone
leftover = int(elements) - 5*6
if leftover % 5 == 0:
extra_lines = leftover/5
else:
extra_lines = leftover/5 + 1
readlines(s,extra_lines)
# Calculate Etot - 3*k_b*T
k_b = 8.617343E-5 # Boltzmann's const in eV/K
pot = str( float(Etot) - 3*k_b*float(T) )
out.write(tstep+'\t'+P+' \t'+V+'\t'+T+'\t'+Etot+'\t'+pot+'\n')
s.close()
out.close()
|
[
"boates@gmail.com"
] |
boates@gmail.com
|
bc643726dc086d01106b5695c1317266b5900390
|
f269b417034e397139adf2802514165b0eb26f7c
|
/Python/food_choice_assay/food_choice.py
|
cea19764b5e3f648af452c4bde1d79a5a3d5567d
|
[] |
no_license
|
saulmoore1/PhD_Project
|
2d333f7fdbd8b2b1932007e7cc6e05b3108ed325
|
a235bf8700e4b5a311fc1dfd79c474c5467e9c7a
|
refs/heads/master
| 2023-08-03T17:51:17.262188
| 2023-07-26T12:35:37
| 2023-07-26T12:35:37
| 158,314,469
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,255
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
SCRIPT: FOOD CHOICE
A script written to analyse the food choice assay videos and Tierpsy-generated
feature summary data. It calculates, plots and saves results for worm food preference
(for each video separately).
@author: sm5911
@date: 21/03/2019
"""
#%% Imports
import os, time
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import patches as mpatches
# Custom imports
from time_series.time_series_helper import plot_timeseries_phenix
from visualisation.plotting_helper import hexcolours, plot_pie
from food_choice_assay.food_choice_helper import foodchoice, summarystats
from _deprecated.find import change_path_phenix
#%% Globals
PROJECT_ROOT_DIR = '/Volumes/behavgenom$/Saul/FoodChoiceAssay/' # Project working directory
DATA_DIR = PROJECT_ROOT_DIR.replace('Saul', 'Priota/Data') # Location of features files
# Plot parameters
fps = 25 # frames per second
smooth_window = fps*60*2 # 2-minute moving average window for time-series plot smoothing
OpticalDensity600 = 1.8 # E. coli average OD600
NEW = True # Conduct analysis on new videos only?
#%% Preamble
# Read metadata
fullMetaData = pd.read_csv(os.path.join(PROJECT_ROOT_DIR, "fullmetadata.csv"), header=0, index_col=0)
if NEW:
fullMetaData = fullMetaData[fullMetaData['worm number']==10]
n_files = len(fullMetaData['filename'])
if NEW:
print("%d NEW video file entries found in metadata." % n_files)
else:
print("%d video file entries found in metadata." % n_files)
# Extract assay information
pretreatments = list(np.unique(fullMetaData['Prefed_on']))
assaychoices = list(np.unique(fullMetaData['Food_Combination']))
treatments = list(np.unique([assay.split('/') for assay in assaychoices]))
treatments.insert(len(treatments),"None") # treatments = [OP50, HB101, None]
concentrations = list(np.unique(fullMetaData['Food_Conc']))
# Plot parameters
colours = hexcolours(len(treatments)) # Create a dictionary of colours for each treatment (for plotting)
colour_dict = {key: value for (key, value) in zip(treatments, colours)}
#%% CALCULATE MEAN NUMBER OF WORMS ON/OFF FOOD IN EACH FRAME (FOR EACH VIDEO SEPARATELY)
# - PROPORTION of total worms in each frame
errorlog = 'ErrorLog_FoodChoice.txt'
FAIL = []
tic = time.time()
for i, maskedfilepath in enumerate(fullMetaData['filename']):
toc = time.time()
# Extract file information
file_info = fullMetaData.iloc[i,:]
date = file_info['date(YEARMODA)']
conc = file_info['Food_Conc']
assaychoice = file_info['Food_Combination']
prefed = file_info['Prefed_on']
print("\nProcessing file: %d/%d\n%s\nAssay: %s\nConc: %.3f\nPrefed: %s" % (i + 1,\
len(fullMetaData['filename']), maskedfilepath, assaychoice, conc, prefed))
try:
# Specify file paths
onfoodpath = change_path_phenix(maskedfilepath, returnpath='onfood')
foodchoicepath = change_path_phenix(maskedfilepath, returnpath='foodchoice')
# Read on/off food results
onfood_df = pd.read_csv(onfoodpath, header=0, index_col=0)
# Calculate mean + count number of worms on/off food in each frame
# NB: Store proportions, along with total nworms, ie. mean (worms per frame) and later calculate mean (per frame across videos)
choice_df = foodchoice(onfood_df, mean=True, tellme=True)
# Save food choice results
directory = os.path.dirname(foodchoicepath)
if not os.path.exists(directory):
os.makedirs(directory)
choice_df.to_csv(foodchoicepath)
print("Food choice results saved. \n(Time taken: %d seconds)\n" % (time.time() - toc))
except:
FAIL.append(maskedfilepath)
print("ERROR! Failed to calculate food preference in file:\n %s\n" % maskedfilepath)
print("Complete!\n(Total time taken: %d seconds.)\n" % (time.time() - tic))
# If errors, save error log to file
if FAIL:
fid = open(os.path.join(PROJECT_ROOT_DIR, errorlog), 'w')
print(FAIL, file=fid)
fid.close()
#%% FOOD CHOICE SUMMARY STATS + PIE/BOX PLOTS (FOR EACH VIDEO SEPARATELY)
# - Calculate summary statistics for mean proportion worms feeding in each video
# - Plot and save box plots + pie charts of mean proportion of worms on food
# =============================================================================
# # NB: Cannot pre-allocate full results dataframe to store food choice mean
# # proportion feeding per frame across all videos due to file size = 23GB
# colnames = ['filename','worm_number','Food_Conc','Food_Combination','Prefed_on',\
# 'Acclim_time_s','frame_number','Food','Mean']
# results_df = pd.DataFrame(columns=colnames)
# =============================================================================
tic = time.time()
for i, maskedfilepath in enumerate(fullMetaData['filename']):
# Extract file information
file_info = fullMetaData.iloc[i,:]
date = file_info['date(YEARMODA)']
conc = file_info['Food_Conc']
assaychoice = file_info['Food_Combination']
prefed = file_info['Prefed_on']
print("\nProcessing file: %d/%d\n%s\nAssay: %s\nConc: %.3f\nPrefed: %s" % (i + 1,\
len(fullMetaData['filename']), maskedfilepath, assaychoice, conc, prefed))
# Specify file paths
foodchoicepath = change_path_phenix(maskedfilepath, returnpath='foodchoice')
statspath = change_path_phenix(maskedfilepath, returnpath='summary')
pieplotpath = change_path_phenix(maskedfilepath, returnpath='plots', figname='PiePlot.eps')
boxplotpath = change_path_phenix(maskedfilepath, returnpath='plots', figname='BoxPlot.eps')
try:
# READ FOOD CHOICE RESULTS (csv)
choice_df = pd.read_csv(foodchoicepath, header=0, index_col=0)
# SUMMARY STATISTICS
feeding_stats = summarystats(choice_df)
# Save summary stats
feeding_stats.to_csv(statspath) # Save to CSV
# Define plot labels + colours
colnames = list(choice_df.columns)
labels = [lab.split('_')[0] for lab in colnames]
colours = [colour_dict[treatment] for treatment in labels]
# Specify seaborn colour palette
RGBAcolours = sns.color_palette(colours)
palette = {key: val for key, val in zip(colnames, RGBAcolours)}
# sns.palplot(sns.color_palette(values))
# PIE CHARTS - mean proportion on food
df_pie = feeding_stats.loc['mean']
df_pie.index = df_pie.index.get_level_values(0)
df_pie = df_pie.loc[df_pie!=0] # Remove any empty rows
plt.close("all")
fig = plot_pie(df_pie, rm_empty=False, show=True, labels=df_pie.index,\
colors=colours, textprops={'fontsize': 15}, startangle=90,\
wedgeprops={'edgecolor': 'k', 'linewidth': 1,\
'linestyle': 'solid', 'antialiased': True})
# Save pie charts
directory = os.path.dirname(pieplotpath)
if not os.path.exists(directory):
os.makedirs(directory)
plt.tight_layout()
plt.savefig(pieplotpath, dpi=300)
# Convert to long format
choice_df['frame_number'] = choice_df.index
choice_df_long = choice_df.melt(id_vars='frame_number', value_vars=choice_df.columns[:-1],\
var_name='Food', value_name='Mean')
# BOX PLOTS (Seaborn) - Mean proportion of worms on each food
plt.close("all")
fig, ax = plt.subplots(figsize=(9,7))
ax = sns.boxplot(x='Food', y='Mean', hue='Food', data=choice_df_long, palette=palette, dodge=False)
# NB: Could also produce violinplots, but why not swarmplots? Too many points?
# ax = sns.violinplot(x='Food', y='Mean', hue='Food', data=choice_df_long, palette=palette, dodge=False)
ax.set_ylim(-0.1,1.1)
ax.set_xlim(-1,len(treatments)+0.25)
ax.set_xlabel("Food",fontsize=20)
ax.set_ylabel("Mean Proportion Feeding",fontsize=20)
ax.xaxis.labelpad = 15; ax.yaxis.labelpad = 15
ax.tick_params(labelsize=13, pad=5)
fig.tight_layout(rect=[0.02, 0.07, 0.95, 0.95])
plt.text(0.03, 0.93, "{0} worms".format(file_info['worm number']), transform=ax.transAxes, fontsize=20)
plt.text(len(treatments)+0.25, -0.35, "Prefed on: {0}".format(prefed),\
horizontalalignment='right', fontsize=25)
plt.legend(loc="upper right", borderaxespad=0.4, frameon=False, fontsize=15)
plt.show(); plt.pause(0.0001)
# Save box plots
plt.tight_layout()
plt.savefig(boxplotpath, format='eps', dpi=300)
print("Plots saved.\n")
# =============================================================================
# # Append file info
# choice_df_long['filename'] = maskedfilepath
# choice_df_long['worm_number'] = file_info['worm number']
# choice_df_long['Food_Conc'] = conc
# choice_df_long['Food_Combination'] = assaychoice
# choice_df_long['Prefed_on'] = prefed
# choice_df_long['Acclim_time_s'] = file_info['Acclim_time_s']
#
# # Append to full results dataframe
# results_df = results_df.append(choice_df_long[colnames])
# =============================================================================
except:
print("Error processing file:\n%s" % maskedfilepath)
continue
print("Done.\n(Time taken: %d seconds.)" % (time.time() - tic))
# =============================================================================
# size = sys.getsizeof(results_df)
# # File size is too big! Not a good idea to save as full results file
# =============================================================================
#%% Time-series plots of proportion feeding through time (FOR EACH VIDEO SEPARATELY)
tic = time.time()
for i, maskedfilepath in enumerate(fullMetaData['filename']):
toc = time.time()
# Extract file information
file_info = fullMetaData.iloc[i,:]
conc = file_info['Food_Conc']
assaychoice = file_info['Food_Combination']
prefed = file_info['Prefed_on']
print("\nProcessing file: %d/%d\n%s\nAssay: %s\nConc: %.3f\nPrefed: %s" % (i + 1,\
len(fullMetaData['filename']), maskedfilepath, assaychoice, conc, prefed))
# Specify file paths
onfoodpath = change_path_phenix(maskedfilepath, returnpath='onfood')
foodchoicepath = change_path_phenix(maskedfilepath, returnpath='foodchoice')
plotpath = change_path_phenix(maskedfilepath, returnpath='plots', figname='FoodChoiceTS.png') # Path to save time series plots
onfood_df = pd.read_csv(onfoodpath, header=0, index_col=0)
# READ FOOD CHOICE RESULTS
# df = pd.read_csv(foodchoicepath, header=0, index_col=0)
df = foodchoice(onfood_df, mean=True, std=True, tellme=True)
# Shift plot to include acclimation time prior to assay recording (ie. t(0) = pick time)
acclim = int(file_info['Acclim_time_s'] * fps)
df.index = df.index + acclim
    # Calculate mean + standard deviation per frame across videos
colnames = list(df.columns.levels[0])
# Remove erroneous frames where on/off food does not sum to 1
frames_to_rm = np.where(np.sum([df[x]['mean'] for x in colnames], axis=0).round(decimals=5)!=1)[0]
assert frames_to_rm.size == 0,\
"{:d} frames found in which feeding proportions do not sum to 1.".format(len(frames_to_rm))
# PLOT TIME-SERIES ON/OFF FOOD (count)
plt.close("all")
fig = plot_timeseries_phenix(df=df, colour_dict=colour_dict, window=smooth_window,\
acclimtime=acclim, annotate=True, legend=True, ls='-')
# SAVE TIME SERIES PLOTS
directory = os.path.dirname(plotpath)
if not os.path.exists(directory):
os.makedirs(directory)
plt.tight_layout()
plt.savefig(plotpath, format='png', dpi=300)
print("Time series plots saved.\n(Time taken: %d seconds.)\n" % (time.time() - toc))
print("Complete!\n(Total time taken: %d seconds.)\n" % (time.time() - tic))
#%% FIGURE 1 - Box plots of food choice (Grouped by treatment combination: prefed on (HB101/OP50), food combination (control/choice), and concentration (0.125,0.25,0.5,1))
# - Subset results by grouping files by assay type (control/choice experiment) and by food concentration
tic = time.time()
# Group files in metadata by prefed, assaychoice and concentration treatment combinations
groupedMetaData = fullMetaData.groupby(['Prefed_on','Food_Combination','Food_Conc'])
# For each prefood-assaychoice-concentration treatment combination
for p, prefood in enumerate(pretreatments):
# Initialise plot for prefed group (12 subplots - 3 food combinations, 4 concentrations)
plt.close("all")
fig, axs = plt.subplots(nrows=len(assaychoices), ncols=len(concentrations),\
figsize=(14,10), sharey=True)
for a, assay in enumerate(assaychoices):
for c, conc in enumerate(concentrations):
try:
# Get prefood-assaychoice-concentration group
df_conc = groupedMetaData.get_group((prefood,assay,conc))
# Get group info
info = df_conc.iloc[0,:]
colnames = info['Food_Combination'].split('/')
if colnames[0] == colnames[1]:
colnames = ["{}_{}".format(food, f + 1) for f, food in enumerate(colnames)]
colnames.insert(len(colnames), "None")
# Pre-allocate dataframe for boxplots
df = pd.DataFrame(index=range(df_conc.shape[0]), columns=colnames)
# If single file, read full food choice data (mean proportion feeding)
if df_conc.shape[0] == 1:
foodchoicepath = change_path_phenix(info['filename'], returnpath='foodchoice')
df = pd.read_csv(foodchoicepath, header=0, index_col=0)
# Read summary stats for mean proportion feeding in each video
elif df_conc.shape[0] > 1:
for i in range(df_conc.shape[0]):
info = df_conc.iloc[i]
# Read in food choice summary stats (mean proportion feeding)
statspath = change_path_phenix(info['filename'], returnpath='summary')
df.iloc[i] = pd.read_csv(statspath, header=0, index_col=0).loc['mean']
# =============================================================================
# # Read food choice data for each file and compile into df for plotting
# df = pd.DataFrame()
# for row in range(df_conc.shape[0]):
# info = df_conc.iloc[row,:]
# foodchoicepath = changepath(info['filename'], returnpath='foodchoice')
# tmp_df = pd.read_csv(foodchoicepath, header=0, index_col=0)
# if df.empty:
# df = tmp_df
# else:
# df = df.append(tmp_df, sort=True)
# =============================================================================
# Plot labels/colours
labels = [lab.split('_')[0] for lab in colnames]
colours = [colour_dict[treatment] for treatment in labels]
# Seaborn colour palette
RGBAcolours = sns.color_palette(colours)
palette = {key: val for key, val in zip(colnames, RGBAcolours)}
# sns.palplot(sns.color_palette(values))
# Convert to long format
df['videoID'] = df.index
df_long = df.melt(id_vars='videoID', value_vars=df.columns[:-1],\
var_name='Food', value_name='Mean')
df_long['Mean'] = df_long['Mean'].astype(float)
# =============================================================================
# # Convert to long format
# df['frame_number'] = df.index
# df_long = df.melt(id_vars='frame_number', value_vars=df.columns[:-1],\
# var_name='Food', value_name='Mean')
# =============================================================================
# Plot Seaborn boxplots
sns.boxplot(data=df_long, x='Food', y='Mean', hue='Food', ax=axs[a,c], palette=palette, dodge=False)
axs[a,c].get_legend().set_visible(False)
axs[a,c].set_ylabel('')
axs[a,c].set_xlabel('')
xlabs = axs[a,c].get_xticklabels()
xlabs = [lab.get_text().split('_')[0] for lab in xlabs[:]]
axs[a,c].set_xticklabels(labels=xlabs, fontsize=12)
axs[a,c].set_ylim(-0.05, 1.05)
axs[a,c].set_xlim(-0.75,len(np.unique(df_long['Food']))-0.25)
axs[a,c].text(0.81, 0.9, ("n={0}".format(df_conc.shape[0])),\
transform=axs[a,c].transAxes, fontsize=12)
if a == 0:
axs[a,c].text(0.5, 1.1, "$OD_{{{}}}={}$".format(600, conc*OpticalDensity600),\
horizontalalignment='center', fontsize=18,\
transform=axs[a,c].transAxes)
if c == 0:
axs[a,c].set_ylabel("{0}".format(assay), labelpad=15, fontsize=18)
if a == 1:
axs[a,c].text(-2.75, 0.5, "Mean Proportion Feeding",\
fontsize=25, rotation=90, horizontalalignment='center',\
verticalalignment='center')
except Exception as e:
print("No videos found for concentration: %s\n(Assay: %s, Prefed on: %s)\n" % (e, assay, prefood))
axs[a,c].axis('off')
axs[a,c].text(0.81, 0.9, "n=0", fontsize=12, transform=axs[a,c].transAxes)
if a == 0:
axs[a,c].text(0.5, 1.1, "$OD_{{{}}}={}$".format(600, conc*OpticalDensity600),\
horizontalalignment='center', fontsize=18,\
transform=axs[a,c].transAxes)
if c == 0:
axs[a,c].set_ylabel("{0}".format(assay), labelpad=15, fontsize=18)
if a == 1:
axs[a,c].text(-3.2, 0.5, "Mean Proportion Feeding",\
fontsize=25, rotation=90, horizontalalignment='center',\
verticalalignment='center')
# plt.text(3, -0.7, "Prefed on: {0}".format(prefood), horizontalalignment='center', fontsize=25)
patches = []
for i, (key, value) in enumerate(colour_dict.items()):
patch = mpatches.Patch(color=value, label=key)
patches.append(patch)
fig.legend(handles=patches, labels=list(colour_dict.keys()), loc="upper right", borderaxespad=0.4,\
frameon=False, fontsize=15)
fig.tight_layout(rect=[0.07, 0.02, 0.88, 0.95])
fig.subplots_adjust(hspace=0.2, wspace=0.1)
plt.show(); plt.pause(2)
# Save figure 1
fig_name = "FoodChoiceBox_prefed" + prefood + ".eps"
figure_out = os.path.join(PROJECT_ROOT_DIR, "Results", "Plots", fig_name)
plt.savefig(figure_out, format='eps', dpi=300)
print("Complete!\n(Time taken: %d seconds)" % (time.time() - tic))
#%% FIGURE 2
# - OPTIONAL: Plot as fraction of a constant total?
#%% FIGURE 3 - Time series plots of food choice by concentration and by assay type (GROUPED BY ASSAY/CONC)
# Plot time series plots - proportion on-food through time
tic = time.time()
# Group files in metadata by prefed, assaychoice and concentration treatment combinations
groupedMetaData = fullMetaData.groupby(['Prefed_on','Food_Combination','Food_Conc'])
# For each prefood-assaychoice-concentration treatment combination
for p, prefood in enumerate(pretreatments):
# Initialise plot for prefed group
plt.close("all")
xmax = 180000
fig, axs = plt.subplots(nrows=len(assaychoices), ncols=len(concentrations),\
figsize=(16,7), sharex=True) # 12 subplots (3 food combinations, 4 food concentrations)
for a, assay in enumerate(assaychoices):
for c, conc in enumerate(concentrations):
try:
# Get prefood-assaychoice-concentration group
df_conc = groupedMetaData.get_group((prefood,assay,conc))
# Get acclim time
info = df_conc.iloc[0,:]
acclim = int(info['Acclim_time_s'] * fps)
# If single file, read food choice data (mean proportion feeding)
if df_conc.shape[0] == 1:
foodchoicepath = change_path_phenix(info['filename'], returnpath='foodchoice')
df = pd.read_csv(foodchoicepath, header=0, index_col=0)
# Shift df indices to account for acclimation (t0 = pick time)
acclim = int(info['Acclim_time_s'] * fps)
df.index = df.index + acclim
# If multiple files, read food choice data for each file and compile into df for plotting
elif df_conc.shape[0] > 1:
df = pd.DataFrame()
for row in range(df_conc.shape[0]):
info = df_conc.iloc[row,:]
foodchoicepath = change_path_phenix(info['filename'], returnpath='foodchoice')
tmp_df = pd.read_csv(foodchoicepath, header=0, index_col=0)
# Shift df indices to account for acclimation (t0 = pick time)
acclim = int(info['Acclim_time_s'] * fps)
tmp_df.index = tmp_df.index + acclim
if df.empty:
df = tmp_df
else:
df = df.append(tmp_df, sort=True)
                # Calculate mean + standard deviation per frame across videos
colnames = list(df.columns)
df['frame'] = df.index
fundict = {x:['mean','std'] for x in colnames}
df_plot = df.groupby('frame').agg(fundict)
# Remove erroneous frames where on/off food does not sum to 1
frames_to_rm = np.where(np.sum([df_plot[x]['mean'] for x in colnames], axis=0).round(decimals=5)!=1)[0]
assert frames_to_rm.size == 0,\
"{:d} frames found in which feeding proportions do not sum to 1.".format(len(frames_to_rm))
# Time series plots
plot_timeseries_phenix(df_plot, colour_dict, window=smooth_window,
legend=False, annotate=False, acclimtime=acclim, ax=axs[a,c])
# Add number of replicates (videos) for each treatment combination
axs[a,c].text(0.79, 0.9, ("n={0}".format(df_conc.shape[0])),\
transform=axs[a,c].transAxes, fontsize=13)
# Set axis limits
if max(df_plot.index) > xmax:
xmax = max(df_plot.index)
axs[a,c].set_xlim(0, np.round(xmax,-5))
axs[a,c].set_ylim(-0.05, 1.05)
# Set column labels on first row
if a == 0:
axs[a,c].text(0.5, 1.15, "$OD_{{{}}}={}$".format(600, conc*OpticalDensity600),\
horizontalalignment='center', fontsize=18,\
transform=axs[a,c].transAxes)
# Set main y axis label + ticks along first column of plots
if c == 0:
yticks = list(np.round(np.linspace(0,1,num=6,endpoint=True),decimals=1))
axs[a,c].set_yticks(yticks)
axs[a,c].set_yticklabels(yticks)
axs[a,c].set_ylabel("{0}".format(assay), labelpad=15, fontsize=15)
if a == 1:
axs[a,c].text(-np.round(xmax,-5)/2, 0.5, "Mean Proportion Feeding",\
fontsize=22, rotation=90, horizontalalignment='center',\
verticalalignment='center')
else:
axs[a,c].set_yticklabels([])
# Set main x axis label + ticks along final row of plots
if a == len(assaychoices) - 1:
xticklabels = ["0", "30", "60", "90", "120"]
xticks = [int(int(lab)*fps*60) for lab in xticklabels]
axs[a,c].set_xticks(xticks)
axs[a,c].set_xticklabels(xticklabels)
if c == 1:
axs[a,c].set_xlabel("Time (minutes)", labelpad=25, fontsize=20, horizontalalignment='left')
else:
axs[a,c].set_xticklabels([])
except Exception as e:
# Empty plots
print("No videos found for concentration: %s\n(Assay: %s, Prefed on: %s)\n" % (e, assay, prefood))
# Add number of replicates (videos) for each treatment combination
axs[a,c].text(0.79, 0.9, "n=0", fontsize=13, transform=axs[a,c].transAxes)
# Set column labels on first row
if a == 0:
axs[a,c].text(0.5, 1.15, ("conc={0}".format(conc)),\
horizontalalignment='center', fontsize=18,\
transform=axs[a,c].transAxes)
axs[a,c].axis('off')
# Add 'prefed on' to multiplot
# plt.text(max(df_plot.index), -0.7, "Prefed on: {0}".format(prefood), horizontalalignment='right', fontsize=30)
# Add legend
patches = []
for key, value in colour_dict.items():
patch = mpatches.Patch(color=value, label=key)
patches.append(patch)
fig.legend(handles=patches, labels=treatments, loc="upper right", borderaxespad=0.4,\
frameon=False, fontsize=15)
# Tight-layout + adjustments
fig.tight_layout(rect=[0.07, 0.02, 0.9, 0.93])
fig.subplots_adjust(hspace=0.1, wspace=0.1)
plt.show(); plt.pause(1)
# Save figure 3
fig_name = "FoodChoiceTS_prefed" + prefood + ".png"
figure_out = os.path.join(PROJECT_ROOT_DIR, "Results", "Plots", fig_name)
    plt.savefig(figure_out, format='png', dpi=300)  # 'saveFormat' is not a valid savefig kwarg
print("Complete!\n(Time taken: %d seconds)" % (time.time() - tic))
|
[
"saulmoore1@bitbucket.org"
] |
saulmoore1@bitbucket.org
|
b536af523c5e69ea18ffeaf04df2fc80b986dd1f
|
a1aea2567fd4176fdcdf66250e933b32d1d3db27
|
/vdW_force.py
|
1587ff12264830cc0ea3cc4fa7f4c66e98e01689
|
[] |
no_license
|
toftul/fiber_binding_advanced
|
c4df1a7b60d392745ac3eb8e4424659750ccafa4
|
5fc737de0ce6f4d4253932044f6f1ef8b4e11e0d
|
refs/heads/master
| 2021-05-15T15:02:11.976213
| 2018-05-25T11:39:00
| 2018-05-25T11:39:00
| 107,274,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,400
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 12 12:06:10 2018
@author: ivan
"""
import numpy as np
from scipy.integrate import quad
import matplotlib.pyplot as plt
def integrand_drho(H, A, y, phi):
def f(rho):
C1 = H + A + y + rho * np.cos(phi)
return(rho * C1 / (C1 * C1 + rho*rho*np.sin(phi)*np.sin(phi))**3.5)
return(quad(f, 0, A)[0])
def integrand_dphi(H, A, y):
f = lambda phi: integrand_drho(H, A, y, phi)
return(quad(f, 0, np.pi)[0])
def FvdW(H, A, a, A123):
int_y = lambda y: (2 - y) * y * integrand_dphi(H, A, y)
f = -15/4 * quad(int_y, 0, 2)[0]
return(A123/a * f)
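# FvdW evaluates, with lengths scaled by the particle radius a (A = R/a, H = gap/a),
#   F = (A123/a) * (-15/4) * Int_0^2 dy (2-y) y Int_0^pi dphi Int_0^A drho
#         rho*C1 / (C1^2 + rho^2 sin^2(phi))^(7/2),  with C1 = H + A + y + rho*cos(phi),
# which is read off directly from the integrands above.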
# %%
rp = 120e-9 # [m] particle radius
rfMM = 495e-9 # [m] fiber radius
rfSM = 130e-9 # [m] fiber radius
# A = R / a
ASM = rfSM / rp
AMM = rfMM / rp
A123 = 10e-20 # [J] approx value for the Hamaker constant
# gap between particle and fiber surface
gapSpace = np.logspace(np.log10(rp/50), np.log10(5*rp), 30) # [m]
FSpaceSM = np.zeros(len(gapSpace))
FSpaceMM = np.zeros(len(gapSpace))
for i, gap in enumerate(gapSpace):
print(i/len(gapSpace))
FSpaceSM[i] = FvdW(gap/rp, ASM, rp, A123)
FSpaceMM[i] = FvdW(gap/rp, AMM, rp, A123)
# %%
plt.rcParams.update({'font.size': 14})
plt.figure(figsize=(8,5))
#plt.title('Attractive vdW force')
plt.semilogy(gapSpace/rp, np.abs(FSpaceSM)*1e12, 'k-', label='SM')
plt.semilogy(gapSpace/rp, np.abs(FSpaceMM)*1e12, 'k--', label='MM')
plt.xlabel('Relative gap width, $g/R_p$')
plt.ylabel('vdW attractive force $|F^{vdW}_r|$, pN')
#plt.xlim((1e-2, 1e1))
#plt.xlim((np.min(gapSpace/rp), np.max(gapSpace/rp)))
plt.ylim((0.1*np.min(np.abs(FSpaceSM)*1e12), 10*np.max(np.abs(FSpaceSM)*1e12)))
plt.legend()
#plt.grid(True,which="both",ls="-", alpha=0.4)
#plt.savefig('results/vdw.pdf')
plt.show()
# %%
### sanity check
# Compare with
# 'The van der Waals Interaction between a Spherical Particle and a Cylinder'
#A = 1
#
#Hspace = np.logspace(-2, 2, num=10)
#FSpace = np.zeros(len(Hspace))
#for i, H in enumerate(Hspace):
# print(i)
# FSpace[i] = FvdW(H, A, 1, 1)
# %%
# plot
#plt.figure(figsize=(8,5))
#plt.rcParams.update({'font.size': 16})
#plt.loglog(Hspace, -FSpace, '-k')
#plt.xlabel('D, nm')
#plt.ylabel('$F^{wdW}_r$, pN')
#plt.grid()
#plt.show()
# STATUS: CHECKED!
|
[
"toftul.ivan@gmail.com"
] |
toftul.ivan@gmail.com
|
9cfcec30b699e1871fb85a7b2b0c628a6a1052da
|
2502facbf895f3e27b3ce77b223452171ab1d532
|
/utilities/customLogger.py
|
be6c5325a9ac366defb8ba9f630e35b0df58ac5b
|
[] |
no_license
|
Kavya1709/nopCommerce
|
538dac6e9f62dd8fa716a77bde4c119347d49c8d
|
024c149a2bf5caa493c82929feb6497fe22eda8a
|
refs/heads/master
| 2022-12-17T19:49:41.865873
| 2020-09-11T13:38:55
| 2020-09-11T13:38:55
| 294,701,209
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 956
|
py
|
import logging
class LogGen:
@staticmethod
def loggen():
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logging.basicConfig(filename='.\\Logs\\automation.log', format='%(asctime)s: %(levelname)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
return logger
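    # Typical use in a test module (hypothetical call site):
    #   logger = LogGen.loggen()
    #   logger.info("test started")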
# @staticmethod
# def log_gen():
#
# LOG_FILENAME = '.\\Logs\\automation.log'
#
# # Set up a specific logger with our desired output level
# my_logger = logging.getLogger('nopCommerce')
# my_logger.setLevel(logging.INFO)
#
# # Add the log message handler to the logger
# handler = logging.handlers.RotatingFileHandler(
# LOG_FILENAME, maxBytes=20, backupCount=5)
#
# my_logger.addHandler(handler)
#
# return my_logger
|
[
"kavyaprakash17@gmail.com"
] |
kavyaprakash17@gmail.com
|
6000fbee425fac253a92896202dbe378a0ccf7ea
|
e50994cf741d5221080cc5c4d7a5e53e43d58b36
|
/20190119/testcase2/test3.py
|
5c1a53666c46fa65064b98a62bfa812c154edbd9
|
[
"Apache-2.0"
] |
permissive
|
sly1314sly/selenium_basic
|
5fff7f9aa11d95d892ebfe013007bc5aaba2ea84
|
53bc2bf4d8a81bcd71f7fe5910cbc34ecfc6869a
|
refs/heads/master
| 2020-04-13T08:49:01.812269
| 2020-01-10T09:29:10
| 2020-01-10T09:29:10
| 163,092,788
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
import unittest
class TestStringMethods3(unittest.TestCase):
def test_upper3(self):
self.assertEqual('foo'.upper(), 'FOO')
def test_isupper3(self):
self.assertTrue('FOO'.isupper())
self.assertFalse('Foo'.isupper())
def test_split3(self):
s = 'hello world'
self.assertEqual(s.split(), ['hello', 'world'])
# check that s.split fails when the separator is not a string
with self.assertRaises(TypeError):
s.split(2)
if __name__ == '__main__':
unittest.main()
|
[
"928446761@qq.com"
] |
928446761@qq.com
|
edc58be22d8da0892938e5e2414d522b0016bddc
|
9b9ae524386d16396a8c69d671d1e588ac1fb7f9
|
/week13/maoyan/maoyan/items.py
|
25ca9521abf8bf9253dec44bfa2263b81e6b7382
|
[] |
no_license
|
WeiZhixiong/Python006-006
|
d4d15977ac3f44566123cb029ae2015e2d24f133
|
6263b78f211c66332c27949bacadd28f6f19ffdb
|
refs/heads/main
| 2023-04-15T10:35:42.376722
| 2021-04-29T05:25:50
| 2021-04-29T05:25:50
| 322,471,510
| 0
| 0
| null | 2020-12-18T02:52:30
| 2020-12-18T02:52:30
| null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class MaoYanItem(scrapy.Item):
movie_name = scrapy.Field()
movie_type = scrapy.Field()
show_time = scrapy.Field()
|
[
"zhixiong.wei@qq.com"
] |
zhixiong.wei@qq.com
|
df268ec9f4a7b257c6ca9892b85d4be9155b5b5c
|
27824a7e18764de82ad0a80025d1390094abfe65
|
/introduction/start.py
|
63a1e1009de6a0feceda5b9d04e8a08b2cf91b94
|
[] |
no_license
|
oliviergimenez/python-training
|
aec501740130edc39b78dd87976384732e5fb7df
|
3d0f8f19d1d3dff9d372950dbb95a5f043d60f97
|
refs/heads/master
| 2023-01-28T21:06:46.182978
| 2020-12-08T07:40:34
| 2020-12-08T07:40:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,081
|
py
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Getting started
#
# ## Python Install
#
# ### Anaconda
#
# It is strongly advised to install Python by using [Anaconda](https://www.anaconda.com/):
#
# - Ready to go Python, with the main libraries installed (Numpy, Scipy, Matplotlib)
# - Possibility to create multiple environments with different versions of Python and packages ([conda](https://conda.io/en/latest/)).
#
# In practice:
#
# - Download the distribution corresponding to your system (cf. [Download](https://www.anaconda.com/distribution/#download-section))
# - Install it in a place where you have read and write access.
#
#
#
#
# ## Running Python
#
# ### Python console
#
# To run Python in normal mode, type in a terminal:
#
# ```
# python
# ```
#
# <img src="figs/console.png" width="70%">
#
# ### Interactive Python console
#
# To run Python in interactive mode, type in a terminal:
#
# ```
# ipython
# ```
#
# <img src="figs/ipython.png" width="70%">
#
# ### Spyder (IDE)
#
# To run the Python IDE, type in a terminal:
#
# ```
# spyder &
# ```
#
# <img src="figs/spyder.png" width="70%">
#
#
# ### Jupyter Notebook
#
# To run the Jupyter Notebook, type in a terminal:
#
# ```
# jupyter notebook &
# ```
#
# <img src="figs/notebook.png" width="70%">
# ## Running scripts
#
# Open a text editor and type in:
#
# ```
# import sys
#
# # my first program (comment)
# print('hello ', sys.argv)
# ```
#
# Save as ```hello.py```
#
# ### Running using python
#
# From the terminal type:
#
# ```
# python hello.py arg1 arg2 arg3
# ```
#
# You should see:
#
# ```
# hello ['hello.py', 'arg1', 'arg2', 'arg3']
# ```
#
# <div class='alert alert-info'>
# <strong>Note: </strong>The <i>sys.argv</i> statement returns the list of arguments, with the 1st element being the name of the script.
# </div>
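#
# A quick illustration (assuming the same `hello.py` invocation as above):
#
# ```
# >>> sys.argv
# ['hello.py', 'arg1', 'arg2', 'arg3']
# >>> sys.argv[1:]  # the user-supplied arguments only
# ['arg1', 'arg2', 'arg3']
# ```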
#
#
# ### Running using ipython
#
# Open `ipython` from the terminal, then type:
#
# ```
# run hello.py arg1 arg2 arg3
# ```
#
# To check the environment, type `whos`. You should see:
#
# ```
# In [2]: whos
# Variable Type Data/Info
# ------------------------------
# sys module <module 'sys' (built-in)>
# ```
#
# ### Running from Spyder
#
# Open `spyder`, open the file and click on the **Run -> Configuration per file** menu. Add arguments to the program as follows:
#
# <img src="figs/args_spyder.png" width="40%">
#
# Then, click on the **Run file** button to run all the program or the **Run selection** button to run the current line
#
# <br>
# <figure>
# <center>
# <img src="figs/run_file.png" width="50" text-align=center>
# <figcaption text-align=center><i>Run file button</i></figcaption>
# </figure>
#
# <br>
# <figure>
# <center>
# <img src="figs/run_sel.png" width="50">
# <figcaption text-align=center><i>Run selection button</i></figcaption>
# </figure>
|
[
"nicolas.barrier@ird.fr"
] |
nicolas.barrier@ird.fr
|
07b351aef518fd66e7d562465c2c742ce426dfb8
|
f73d1fcf5ab749a703881971310518762c823713
|
/BarrettDylanRockPaperScissors/RPS.py
|
8824f750bb8b3436f96c8a008a04bf22e446583f
|
[] |
no_license
|
DylanBarrett/IT1040-Mizzou
|
42f3766158b1c30178f8004303062ea06b7026f8
|
64c7e07af83d45c79974e469d4225adbf145ae08
|
refs/heads/master
| 2020-03-26T15:33:24.407276
| 2018-08-16T23:52:42
| 2018-08-16T23:52:42
| 145,050,316
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,172
|
py
|
import random
import pickle
class GameStatus():
def __init__(self, name):
self.tie = 0
self.playerWon = 0
self.pcWon = 0
self.name = name
def get_round(self):
return self.tie + self.playerWon + self.pcWon + 1
# Displays program information, starts main play loop
def main():
print("Welcome to a game of Rock, Paper, Scissors!")
print("What would you like to choose?")
print("")
game_status = welcomemenu()
while True:
play(game_status)
endGameSelect(game_status)
def welcomemenu():
while True:
print("[1]: Start New Game")
print("[2]: Load Game")
print("[3]: Quit")
print("")
menuselect = int(input("Enter your choice: "))
if int(menuselect) in [1, 2, 3]:
break
else:
print("Wrong choice. select again.")
if menuselect == 1:
name = input("What is your name?: ")
print(("Hello %s.") % name)
print("Let's play!")
game_status = GameStatus(name)
elif menuselect == 2:
while True:
name = input("What is your name?: ")
try:
                user_file = open('%s.rsp' % name, 'rb')
except IOError:
print(("Sorry there is no game found with name %s") % name)
continue
break
print(("Welcome back %s.") % name)
print("Let's play!")
game_status = pickle.load(user_file)
displayScoreBoard(game_status)
user_file.close()
elif menuselect == 3:
print("Bye!!!")
exit()
return
return game_status
def play(game_status):
playerChoice = int(playerMenu())
pcChoice = pcGenerate()
outcome = evaluateGame(playerChoice, pcChoice)
updateScoreBoard(outcome, game_status)
def playerMenu():
print("Select a choice: \n [1]: Rock \n [2]: Paper \n [3]: Scissors\n")
menuSelect = int(input("What will it be? "))
while not validateInput(menuSelect):
invalidChoice(menuSelect)
        menuSelect = int(input("Enter a correct value: "))
return menuSelect
def validateInput(menuSelection):
if menuSelection in [1, 2, 3]:
return True
else:
return False
def pcGenerate():
pcChoice = random.randint(1,3)
return pcChoice
# Calculate ties,wins,lose
def evaluateGame(playerChoice, pcChoice):
rsp = ['rock', 'paper', 'scissors']
win_statement = ['Rock breaks scissors', 'Paper covers rock', 'Scissors cut paper']
win_status = (playerChoice - pcChoice) % 3
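    # (player - pc) mod 3 encodes the outcome: 0 = tie, 1 = player wins, 2 = computer wins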
print(("You have chosen %s") % rsp[playerChoice - 1])
what_to_say =(("Computer has chose %s") % rsp[pcChoice - 1])
if win_status == 0:
what_to_say +=(" as Well. TIE!")
elif win_status == 1:
what_to_say +=((". %s. You WIN!") % win_statement[playerChoice - 1])
else:
what_to_say +=((". %s. You LOSE!") % win_statement[pcChoice - 1])
print(what_to_say)
return win_status
# Update track of ties, player wins, and computer wins
def updateScoreBoard(outcome, game_status):
if outcome == 0:
game_status.tie += 1
elif outcome == 1:
game_status.playerWon += 1
else:
game_status.pcWon += 1
# If user input is invalid, let them know.
def invalidChoice(menuSelect):
print(menuSelect,("is not a valid option. Please select 1-3"))
# Print the scores before terminating the program.
def displayScoreBoard(game_status):
print("")
print("Statistics:")
print(("Ties: %d") % game_status.tie)
print(("Player Wins: %d") % game_status.playerWon)
print(("Computer Wins: %d") % game_status.pcWon)
if game_status.pcWon > 0:
print(("Win/Loss Ratio: %f") % (float(game_status.playerWon) / game_status.pcWon))
else:
print("Win/Loss Ratio: Always Win.")
print(("Rounds: %d") % game_status.get_round())
def endGameSelect(game_status):
print("")
print("[1]: Play again")
print("[2]: Show Statistics")
print("[3]: Save Game")
print("[4]: Quit")
print("")
while True:
menuselect = int(input("Enter your choice: "))
if menuselect in [1, 2, 3, 4]:
break
else:
print("Wrong input.")
if menuselect == 2:
displayScoreBoard(game_status)
endGameSelect(game_status)
    elif menuselect == 3:
        try:
            f = open("%s.rsp" % game_status.name, 'wb')
            pickle.dump(game_status, f)
            f.close()
            print("Your game is saved successfully.")
        except Exception:
            print("error saving, make sure file is valid")
        endGameSelect(game_status)
elif menuselect == 4:
print("Bye!!!")
exit()
main()
|
[
"noreply@github.com"
] |
DylanBarrett.noreply@github.com
|
e396119de92c2a9d0442f560d6abcdd894436e17
|
484f111548e9d7192a5748eb202c08802484d747
|
/fw/flash.py
|
8361fc57a27f60367e21952493f6068dcb8a037a
|
[
"Apache-2.0"
] |
permissive
|
cmcmurrough/moteus
|
dafb2e5224409aaf1d57b66f58965d298845678d
|
6780967ec40ad7f1ab76cdbd7021f2d07b739efe
|
refs/heads/main
| 2023-07-11T10:29:58.645291
| 2021-08-13T13:38:32
| 2021-08-13T13:38:32
| 396,627,837
| 2
| 0
|
Apache-2.0
| 2021-08-16T05:07:08
| 2021-08-16T05:07:07
| null |
UTF-8
|
Python
| false
| false
| 2,193
|
py
|
#!/usr/bin/python3
# Copyright 2021 Josh Pieper, jjp@pobox.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
import subprocess
import sys
import tempfile
BINPREFIX = '' if platform.machine().startswith('arm') else 'arm-none-eabi-'
OBJCOPY = BINPREFIX + 'objcopy'
OPENOCD = 'openocd -f interface/stlink.cfg -f target/stm32g4x.cfg '
def main():
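    # Extract three raw binary images from the ELF files (interrupt vector
    # table at 0x08000000, CAN bootloader at 0x0800c000, main firmware at
    # 0x08010000) and program each one with OpenOCD.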
tmpdir = tempfile.TemporaryDirectory()
moteus_elffile = (
sys.argv[1]
if len(sys.argv) > 1 else
'bazel-out/stm32g4-opt/bin/fw/moteus.elf')
bootloader_elffile = (
sys.argv[2]
if len(sys.argv) > 2 else
'bazel-out/stm32g4-opt/bin/fw/can_bootloader.elf')
subprocess.check_call(
f'{OBJCOPY} -Obinary ' +
f'-j .isr_vector ' +
f'{moteus_elffile} {tmpdir.name}/out.08000000.bin',
shell=True)
subprocess.check_call(
f'{OBJCOPY} -Obinary ' +
f'-j .text -j .ARM.extab -j .ARM.exidx -j .data -j .bss ' +
f'{bootloader_elffile} {tmpdir.name}/out.0800c000.bin',
shell=True)
subprocess.check_call(
f'{OBJCOPY} -Obinary ' +
f'-j .text -j .ARM.extab -j .ARM.exidx -j .data -j .ccmram -j .bss ' +
f'{moteus_elffile} {tmpdir.name}/out.08010000.bin',
shell=True)
subprocess.check_call(
f'{OPENOCD} -c "init" ' +
f'-c "reset_config none separate; ' +
f' program {tmpdir.name}/out.08000000.bin verify 0x8000000; ' +
f' program {tmpdir.name}/out.0800c000.bin verify 0x800c000; ' +
f' program {tmpdir.name}/out.08010000.bin verify ' +
f' reset exit 0x08010000"',
shell=True)
if __name__ == '__main__':
main()
|
[
"jjp@pobox.com"
] |
jjp@pobox.com
|
df68a0ed14ee3d9143270beb9c86e524ff1c717e
|
907d9d6ab8a2bb296a704e9338badde755e7d9f3
|
/testSpn.py
|
1e0679dff3917a51a722731b42278161d6d16da2
|
[] |
no_license
|
nisargap/spn-crypto
|
ddd6082629677fd21879e9feecac51b7bef73363
|
f072264123c0058b5e89356689e11b37723b9d8f
|
refs/heads/master
| 2021-01-10T05:43:45.970971
| 2015-10-05T19:31:30
| 2015-10-05T19:31:30
| 43,708,513
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,541
|
py
|
"""
Author: Nisarga Patel
Document: testSpn.py
Description: This is a test file for the Substitution Permutation
Network module created to encrypt binary plaintext strings. The
exact key for this test is defined at the top of the SPNencrypt
module. This test file was created in order to look for cipher
text attacks in this specific Substitution Permutation Network.
"""
import SPNencrypt
def main():
plain = "0010011010110111"
SPNencrypt.spn(plain)
plain = "1111111111111111"
SPNencrypt.spn(plain)
plain = "0000000000000000"
SPNencrypt.spn(plain)
plain = "1111111111111110"
SPNencrypt.spn(plain)
plain = "1110111111011110"
SPNencrypt.spn(plain)
plain = "1110101011011110"
SPNencrypt.spn(plain)
plain = "1110101000011110"
SPNencrypt.spn(plain)
plain = "0101010101010101"
SPNencrypt.spn(plain)
plain = "0101010101010101"
SPNencrypt.spn(plain)
plain = "0101000000000101"
SPNencrypt.spn(plain)
plain = "0101000010100101"
SPNencrypt.spn(plain)
plain = "1110111111110101"
SPNencrypt.spn(plain)
plain = "0000000100001000"
SPNencrypt.spn(plain)
plain = "0001000100001001"
SPNencrypt.spn(plain)
plain = "0110111001110111"
SPNencrypt.spn(plain)
plain = "1111011111011101"
SPNencrypt.spn(plain)
main()
|
[
"patelnisarga1@gmail.com"
] |
patelnisarga1@gmail.com
|
fbb7c0b773c663b598397c813719054f055a6897
|
1dacbf90eeb384455ab84a8cf63d16e2c9680a90
|
/lib/python2.7/site-packages/openpyxl/worksheet/pivot.py
|
b1905be6298ea1c57f774cae821fbc482b8bf25b
|
[
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] |
permissive
|
wangyum/Anaconda
|
ac7229b21815dd92b0bd1c8b7ec4e85c013b8994
|
2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6
|
refs/heads/master
| 2022-10-21T15:14:23.464126
| 2022-10-05T12:10:31
| 2022-10-05T12:10:31
| 76,526,728
| 11
| 10
|
Apache-2.0
| 2022-10-05T12:10:32
| 2016-12-15T05:26:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,984
|
py
|
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from openpyxl.descriptors import (
Bool,
Integer,
String,
Set,
NoneSet,
)
from openpyxl.descriptors.serialisable import Serialisable
class PivotSelection(Serialisable):
pane = Set(values=("bottomRight", "topRight", "bottomLeft", "topLeft"))
showHeader = Bool()
label = Bool()
data = Bool()
extendable = Bool()
count = Integer()
axis = String(allow_none=True)
dimension = Integer()
start = Integer()
min = Integer()
max = Integer()
activeRow = Integer()
activeCol = Integer()
previousRow = Integer()
previousCol = Integer()
click = Integer()
def __init__(self,
pane=None,
showHeader=None,
label=None,
data=None,
extendable=None,
count=None,
axis=None,
dimension=None,
start=None,
min=None,
max=None,
activeRow=None,
activeCol=None,
previousRow=None,
previousCol=None,
click=None):
self.pane = pane
self.showHeader = showHeader
self.label = label
self.data = data
self.extendable = extendable
self.count = count
self.axis = axis
self.dimension = dimension
self.start = start
self.min = min
self.max = max
self.activeRow = activeRow
self.activeCol = activeCol
self.previousRow = previousRow
self.previousCol = previousCol
self.click = click
class PivotArea(Serialisable):
field = Integer(allow_none=True)
type = NoneSet(values=("normal", "data", "all", "origin", "button", "topEnd"))
dataOnly = Bool()
labelOnly = Bool()
grandRow = Bool()
grandCol = Bool()
cacheIndex = Bool()
outline = Bool()
offset = String()
collapsedLevelsAreSubtotals = Bool()
axis = String(allow_none=True)
fieldPosition = Integer(allow_none=True)
def __init__(self,
field=None,
type=None,
dataOnly=None,
labelOnly=None,
grandRow=None,
grandCol=None,
cacheIndex=None,
outline=None,
offset=None,
collapsedLevelsAreSubtotals=None,
axis=None,
fieldPosition=None):
self.field = field
self.type = type
self.dataOnly = dataOnly
self.labelOnly = labelOnly
self.grandRow = grandRow
self.grandCol = grandCol
self.cacheIndex = cacheIndex
self.outline = outline
self.offset = offset
self.collapsedLevelsAreSubtotals = collapsedLevelsAreSubtotals
self.axis = axis
self.fieldPosition = fieldPosition
class PivotAreaReferences(Serialisable):
count = Integer()
def __init__(self, count=None):
        self.count = count
class PivotAreaReference(Serialisable):
field = Integer(allow_none=True)
count = Integer()
selected = Bool()
byPosition = Bool()
relative = Bool()
defaultSubtotal = Bool()
sumSubtotal = Bool()
countASubtotal = Bool()
avgSubtotal = Bool()
maxSubtotal = Bool()
minSubtotal = Bool()
productSubtotal = Bool()
countSubtotal = Bool()
stdDevSubtotal = Bool()
stdDevPSubtotal = Bool()
varSubtotal = Bool()
varPSubtotal = Bool()
def __init__(self,
field=None,
count=None,
selected=None,
byPosition=None,
relative=None,
defaultSubtotal=None,
sumSubtotal=None,
countASubtotal=None,
avgSubtotal=None,
maxSubtotal=None,
minSubtotal=None,
productSubtotal=None,
countSubtotal=None,
stdDevSubtotal=None,
stdDevPSubtotal=None,
varSubtotal=None,
varPSubtotal=None):
self.field = field
self.count = count
self.selected = selected
self.byPosition = byPosition
self.relative = relative
self.defaultSubtotal = defaultSubtotal
self.sumSubtotal = sumSubtotal
self.countASubtotal = countASubtotal
self.avgSubtotal = avgSubtotal
self.maxSubtotal = maxSubtotal
self.minSubtotal = minSubtotal
self.productSubtotal = productSubtotal
self.countSubtotal = countSubtotal
self.stdDevSubtotal = stdDevSubtotal
self.stdDevPSubtotal = stdDevPSubtotal
self.varSubtotal = varSubtotal
self.varPSubtotal = varPSubtotal
class Index(Serialisable):
v = Integer()
def __init__(self, v=None):
self.v = v
|
[
"wgyumg@mgail.com"
] |
wgyumg@mgail.com
|
40b5a7f814ed68cbc12969cb867747a1687e0e1b
|
ac1e60fd4bb3b7cc04e413ae394836abad8947b1
|
/email_verification_api/wsgi.py
|
e60483842d64ef833b28dfd12be0cfe5d6bf9eba
|
[] |
no_license
|
Taycode/email-verification-api
|
9c48642f34671232c388a7c763541f02ff9ae614
|
f3abe35a010d5b2d3d2c269fa728eb40f26630a0
|
refs/heads/master
| 2020-08-04T11:00:29.103892
| 2019-10-01T14:49:14
| 2019-10-01T14:49:14
| 212,114,710
| 0
| 0
| null | 2019-10-01T14:16:59
| 2019-10-01T14:16:58
| null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
"""
WSGI config for email_verification_api project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'email_verification_api.settings')
application = get_wsgi_application()
|
[
"tay2druh@gmail.com"
] |
tay2druh@gmail.com
|
575ec3fde6902f2199dd97ec7bcc0c58ef03bab3
|
ec0b547830e10e11de13f6d5e375f2ee746c64ff
|
/pos_retail/wizards/__init__.py
|
8dfb1563dff3e3ae399cce0e45dc71a6969c3c6c
|
[] |
no_license
|
babarlhr/third-party-apps
|
cc4a83df48e00d35c3fd7bbd06ed9ef738f1ba99
|
f882e65b9873a937aa7f62171bcefb8b4982366b
|
refs/heads/13.0
| 2023-04-02T14:32:37.980953
| 2021-04-10T19:58:07
| 2021-04-10T19:58:07
| 584,900,514
| 1
| 0
| null | 2023-01-03T20:06:22
| 2023-01-03T20:06:22
| null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
from . import StockImmediateTransfer
from . import RemovePosOrder
from . import PosRemoteSession
from . import PosMakePayment
from . import CashBoxOut
|
[
"lucaslsoto95@gmail.com"
] |
lucaslsoto95@gmail.com
|
84c1041757c13e9f14ac643dc4bfae4d661fad21
|
04a17e0f43befad7be3518a99eadbf5123e31dd3
|
/semana5/MyHolidays/myholidays/holidays.py
|
b49f9254358e14edfca59fffdb88133641e5f8d4
|
[] |
no_license
|
grcopia/NappAcademy
|
587b49263e704b56d804567ec1efadc1c72bf0c0
|
64f3732c3ca4337c4811bee3736be61b6169985a
|
refs/heads/master
| 2023-04-23T04:47:45.789203
| 2021-04-17T18:33:32
| 2021-04-17T18:33:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,427
|
py
|
from datetime import date
from datetime import datetime
from dateutil.parser import parse
class MyCalendar:
def __init__(self, *args):
self.datas = []
self.check_holiday()
for item in args:
if isinstance(item, date):
self.datas.append(item)
elif type(item) == str:
if len(str(item).split('/')) > 2:
if int(str(item).split('/')[1]) > 12 or len(str(item).split('/')[0]) > 2:
continue
self.datas.append(parse(item).date())
else:
pass
# self.datas = [parse(str(item)) for item in args if type(item) not in types and len(str(item).split('/')) > 2]
def check_holiday(self, *args):
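        # NB: only the first date in *args is actually evaluated; returns
        # True for weekend dates, False for weekdays or invalid strings.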
check_day = []
for data in args:
print(data)
if len(str(data).split('/')[0]) > 2:
check_day.append('invalido')
continue
if isinstance(data, date):
check_day.append(data)
elif type(data) == str:
if int(str(data).split('/')[1]) > 12 or len(str(data)) <= 5:
check_day.append('invalido')
else:
check_day.append(parse(data))
else:
pass
for day in check_day:
print(day)
if day == 'invalido':
return False
if day.weekday() == 6 or day.weekday() == 5:
return True
else:
return False
def add_holiday(self, *args):
for item in args:
if type(item) == str:
if int(str(item).split('/')[1]) > 12 or len(str(item).split('/')[0]) >= 3:
continue
if parse(str(item)).date() in self.datas:
pass
elif isinstance(item, date):
self.datas.append(item)
elif type(item) == str:
self.datas.append(parse(item))
if __name__ == '__main__':
dt1 = '15/15/2021'
dt2 = '120/3/2021'
dt3 = '15/03/2021'
dt4 = '15/05'
dt5 = '24/24/2021'
objeto = MyCalendar(dt1, dt2)
assert objeto.check_holiday(dt1) is False
assert objeto.check_holiday(dt2) is False
assert objeto.check_holiday(dt3) is False
assert objeto.check_holiday(dt4) is False
assert objeto.check_holiday(dt5) is False
|
[
"vagnerpelais@gmail.com"
] |
vagnerpelais@gmail.com
|
4d641b7b452b7e43378724205d8c5690b44cd11a
|
5b9c50baaa3182868c9f4a744a7361abe422a510
|
/tests/test_base.py
|
f7f5133f7951074f1287e3257df0b73b129805e8
|
[
"MIT"
] |
permissive
|
jasontangxf/geometer
|
3307889c087a1f498d58b5ae6bbf1b037119ca46
|
931df0aff6c680ad13a6c5989f2a89c276370c5e
|
refs/heads/master
| 2023-01-06T17:39:41.837342
| 2020-11-07T15:42:10
| 2020-11-07T15:42:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,251
|
py
|
import numpy as np
from geometer.base import TensorDiagram, Tensor, TensorCollection, LeviCivitaTensor, KroneckerDelta
class TestTensor:
def test_arithmetic(self):
a = Tensor(2, 3)
b = Tensor(5, 4)
# vector operations
assert a + b == Tensor(7, 7)
assert a - b == Tensor(-3, -1)
assert -a == Tensor(-2, -3)
# scalar operations
assert a + 6 == Tensor(8, 9)
assert a - 6 == Tensor(-4, -3)
assert a * 6 == Tensor(12, 18)
assert a / 6 == Tensor(1/3, 0.5)
def test_transpose(self):
a = Tensor([[1, 2],
[3, 4]], covariant=[0])
assert a.transpose() == Tensor([[1, 3], [2, 4]])
assert a.T._covariant_indices == {1}
assert a.T.T == a
def test_getitem(self):
a = Tensor([[1, 2],
[3, 4]], covariant=[0])
assert a[0, 1] == 2
assert a[None, 1] == [[3, 4]]
assert a[None, 1].tensor_shape == (0, 1)
assert a[::-1, 0] == [3, 1]
assert a[::-1, 0].tensor_shape == (1, 0)
def test_dtype(self):
a = Tensor(2, 3, dtype=np.float32)
assert a.dtype == np.float32
a = Tensor(2, 3, dtype=np.complex64)
assert a.dtype == np.complex64
class TestTensorCollection:
def test_init(self):
# empty list
a = TensorCollection([])
assert len(a) == 0
# numpy array
a = TensorCollection(np.ones((1, 2, 3)))
assert len(a) == 1
assert a.size == 2
# nested list of numbers
a = TensorCollection([[1, 2], [3, 4]])
assert len(a) == 2
assert a.size == 2
# nested tuple of numbers
a = TensorCollection(((1, 2), (3, 4)))
assert len(a) == 2
assert a.size == 2
# nested list of Tensor objects
a = TensorCollection([[Tensor(1, 2, 3), Tensor(3, 4, 5)]])
assert a.shape == (1, 2, 3)
assert len(a) == 1
assert a.size == 2
# object with __array__ function
class A:
def __array__(self):
return np.array([Tensor(1, 2), Tensor(3, 4)])
a = TensorCollection(A())
assert len(a) == 2
assert a.size == 2
def test_flat(self):
a = [Tensor([[1, 2], [3, 4]]), Tensor([[5, 6], [7, 8]])]
b = TensorCollection([a], tensor_rank=2)
assert list(b.flat) == a
def test_getitem(self):
a = Tensor([[1, 2],
[3, 4]])
b = Tensor([[5, 6],
[7, 8]])
c = TensorCollection([a, b])
assert c[0] == a
assert c[1] == b
assert list(c) == [a, b]
assert c[:, 1] == TensorCollection([Tensor([3, 4]), Tensor([7, 8])])
assert c[:, 0, 0] == [1, 5]
class TestTensorDiagram:
def test_add_edge(self):
a = Tensor([1, 0, 0, 0])
b = Tensor([[42, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], covariant=False)
diagram = TensorDiagram((a, b))
assert diagram.calculate() == Tensor([42, 0, 0, 0])
diagram.add_edge(a.copy(), b)
assert diagram.calculate() == 42
def test_tensor_product(self):
e1 = Tensor(1, 0)
e2 = Tensor(0, 1)
a = Tensor([0, 1],
[1, 0], covariant=[0])
b = Tensor([1, 0],
[0, 1], covariant=[0])
m = a.tensor_product(b)
e = e1.tensor_product(e2)
assert TensorDiagram((e, m), (e, m)).calculate() == (a * e1).tensor_product(b * e2)
d = TensorDiagram()
d.add_node(a)
d.add_node(b)
assert d.calculate() == a.tensor_product(b)
def test_epsilon_delta_rule(self):
e1 = LeviCivitaTensor(3, True)
e2 = LeviCivitaTensor(3, False)
d = KroneckerDelta(3)
d2 = d.tensor_product(d)
d1 = d2.transpose((0, 1))
diagram = TensorDiagram((e1, e2.transpose()))
assert diagram.calculate() == d1 - d2
def test_kronecker_delta(self):
d = KroneckerDelta(4, 3)
assert d.array.shape == (4,)*6
assert d.array[0, 1, 2, 0, 1, 2] == 1
assert d.array[0, 2, 1, 0, 1, 2] == -1
|
[
"jan.rv@t-online.de"
] |
jan.rv@t-online.de
|
1c10e66617f17ae42de12803b880e514daca1829
|
e4e44097320d056f3768eb3a53f28f4c19cdc7ce
|
/findSubstring.py
|
cb2251aa8f1dbb867af20c7a065e334f694cbfda
|
[] |
no_license
|
amisyy/leetcode
|
0640e009c02956778f402eb89b74c98c36882d44
|
ba8ab343a246aa3eead75a23dc69b5a76680d290
|
refs/heads/master
| 2021-06-03T06:27:38.216035
| 2020-11-08T06:59:40
| 2020-11-08T06:59:40
| 103,757,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,116
|
py
|
class Solution(object):
def findSubstring(self, s, words):
"""
:type s: str
:type words: List[str]
:rtype: List[int]
"""
word_length = len(words[0])
word_num = len(words)
length = len(s)
words_dict = {}
for i in words:
if i not in words_dict:
words_dict[i] = 1
else:
words_dict[i]+=1
res = []
for i in range(length -word_length*word_num +1 ):
dict_curr={}
j=0
while j < word_num:
word = s[i+j*word_length:i+(j+1)*word_length]
if word not in words_dict:
break
elif word not in dict_curr:
dict_curr[word] = 1
else:
dict_curr[word] +=1
if dict_curr[word] > words_dict[word]:
break
j +=1
if j==word_num:
res.append(i)
return res
u = Solution()
print(u.findSubstring("barfoothefoobarman",["foo","bar"]))
|
[
"amisyy@sina.cn"
] |
amisyy@sina.cn
|
4c10f5dbe66a1ecd6b2cb0e0d1cb6a3481ac2ca0
|
1b94c7cfd66804fe8d40b5def35e4b9b18d69ba2
|
/old_py2/controllers/apiai_controller.py
|
dfff3930d0c210a7d0d4eb8c2af95d15d9d7e374
|
[
"MIT"
] |
permissive
|
the-blue-alliance/the-blue-alliance
|
3dc210a9611ce9b240907ffd420f78040318dcdc
|
6d42f3cdb2f785d192f2871419e58aaae3445029
|
refs/heads/py3
| 2023-08-22T21:02:36.398100
| 2023-08-22T19:14:01
| 2023-08-22T19:14:01
| 888,427
| 344
| 263
|
MIT
| 2023-09-14T18:35:20
| 2010-09-04T20:34:11
|
HTML
|
UTF-8
|
Python
| false
| false
| 635
|
py
|
import json
from base_controller import LoggedInHandler
from helpers.apiai_helper import APIAIHelper
from models.sitevar import Sitevar
class APIAIHandler(LoggedInHandler):
def __init__(self, *args, **kw):
super(APIAIHandler, self).__init__(*args, **kw)
def post(self):
if self.request.headers.get('X-TBA-APIAI-Auth') != Sitevar.get_by_id('apiai.secrets').contents['key']:
return
request = json.loads(self.request.body)
self.response.headers['content-type'] = 'application/json; charset="utf-8"'
self.response.out.write(json.dumps(APIAIHelper.process_request(request)))
|
[
"noreply@github.com"
] |
the-blue-alliance.noreply@github.com
|
08273d87152e339e41af2407ff4bbad8cc28e79c
|
f2b91692a434ee79ff5d68ed3111d60d90315f00
|
/src/command_modules/azure-cli-servicebus/azure/cli/command_modules/servicebus/_validators.py
|
6a4509e9f662b17fe8494f89fce3441aa9719205
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
cal5barton/azure-cli
|
f883bc7d481b163d4c4af1fa154a990182e5de80
|
6ebc6f810f32b8fce30a360633a70fcfdea15e7b
|
refs/heads/dev
| 2023-05-24T18:12:36.151238
| 2018-07-12T16:16:29
| 2018-07-12T16:16:29
| 140,749,210
| 0
| 0
|
MIT
| 2023-05-15T18:58:31
| 2018-07-12T18:13:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,322
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=unused-variable
import re
from datetime import timedelta
from isodate import parse_duration
from knack.util import CLIError
# PARAMETER VALIDATORS
# Type ISO 8061 duration
iso8601pattern = re.compile("^P(?!$)(\\d+Y)?(\\d+M)?(\\d+W)?(\\d+D)?(T(?=\\d)(\\d+H)?(\\d+M)?(\\d+.)?(\\d+S)?)?$")
timedeltapattern = re.compile("^\\d+:\\d+:\\d+$")
def _validate_lock_duration(namespace):
if namespace.lock_duration:
if iso8601pattern.match(namespace.lock_duration):
if parse_duration(namespace.lock_duration) > timedelta(days=0, minutes=6, seconds=0):
raise CLIError(
'--lock-duration Value Error : {0} value, The maximum value for LockDuration is 5 minutes; the default value is 1 minute.'.format(
namespace.lock_duration))
elif timedeltapattern.match(namespace.lock_duration):
            day, minute, seconds = namespace.lock_duration.split(":")
            if int(day) > 0 or int(minute) > 6:
raise CLIError(
'--lock-duration Value Error : {0} value, The maximum value for LockDuration is 5 minutes; the default value is 1 minute.'.format(
namespace.lock_duration))
else:
raise CLIError('--lock-duration Value Error : {0} value is not in ISO 8601 timespan / duration format. e.g.'
' PT10M for duration of 10 min or 00:10:00 for duration of 10 min'.format(namespace.lock_duration))
def _validate_default_message_time_to_live(namespace):
if namespace.default_message_time_to_live:
if not iso8601pattern.match(namespace.default_message_time_to_live) and not timedeltapattern.match(namespace.default_message_time_to_live):
raise CLIError('--default-message-time-to-live Value Error : {0} value is not in ISO 8601 timespan / duration format. e.g. PT10M for duration of 10 min or 00:10:00 for duration of 10 min'.format(namespace.default_message_time_to_live))
def _validate_duplicate_detection_history_time_window(namespace):
if namespace.duplicate_detection_history_time_window:
if iso8601pattern.match(namespace.duplicate_detection_history_time_window):
pass
elif timedeltapattern.match(namespace.duplicate_detection_history_time_window):
pass
else:
raise CLIError('--duplicate-detection-history-time-window Value Error : {0} value is not in ISO 8601 timespan / duration format. e.g. PT10M for duration of 10 min or 00:10:00 for duration of 10 min'.format(namespace.duplicate_detection_history_time_window))
def _validate_auto_delete_on_idle(namespace):
if namespace.auto_delete_on_idle:
if iso8601pattern.match(namespace.auto_delete_on_idle):
pass
elif timedeltapattern.match(namespace.auto_delete_on_idle):
pass
else:
raise CLIError('--auto-delete-on-idle Value Error : {0} value is not in ISO 8601 timespan / duration format. e.g. PT10M for duration of 10 min or 00:10:00 for duration of 10 min'.format(namespace.auto_delete_on_idle))
def validate_partner_namespace(cmd, namespace):
from azure.cli.core.commands.client_factory import get_subscription_id
from msrestazure.tools import is_valid_resource_id, resource_id
if namespace.partner_namespace:
if not is_valid_resource_id(namespace.partner_namespace):
namespace.partner_namespace = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.ServiceBus',
type='namespaces',
name=namespace.partner_namespace)
def validate_premiumsku_capacity(namespace):
if namespace.sku and namespace.sku != 'Premium' and namespace.capacity:
raise CLIError('--capacity - This property is only applicable to namespaces of Premium SKU')
|
[
"tjprescott@users.noreply.github.com"
] |
tjprescott@users.noreply.github.com
|
00cbe7556cb10b9d98cdd70f69f8f9392fef4b9a
|
325512acc574dffa50d3d7d9645391e2928df127
|
/main.py
|
137f8ee5729044c1f7a1566aec02584d702c168f
|
[] |
no_license
|
sunca7/ff
|
8366d4d6d7c7f22201ae9c324960a16dea045437
|
a6f43f55dc7ed1602908f58c3710cf0b1846b8d9
|
refs/heads/master
| 2022-12-01T02:57:12.815321
| 2020-08-04T14:35:41
| 2020-08-04T14:35:41
| 285,006,260
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,190
|
py
|
import os
import requests
from flask import Flask, render_template, request
from bs4 import BeautifulSoup
from save import save_to_file
LIMIT = 20
db = {}
level_four_db = [{'company': '삼성전자', 'company_url': '/item/main.nhn?code=005930'}, {'company': 'NAVER', 'company_url': '/item/main.nhn?code=035420'}, {'company': '셀트리온', 'company_url': '/item/main.nhn?code=068270'}, {'company': '현대모비스', 'company_url': '/item/main.nhn?code=012330'}, {'company': '엔씨소프트', 'company_url': '/item/main.nhn?code=036570'}, {'company': 'POSCO', 'company_url': '/item/main.nhn?code=005490'}, {'company': 'KB금융', 'company_url': '/item/main.nhn?code=105560'}, {'company': '신한지주', 'company_url': '/item/main.nhn?code=055550'}, {'company': 'KT&G', 'company_url': '/item/main.nhn?code=033780'}, {'company': '넷마블', 'company_url': '/item/main.nhn?code=251270'}, {'company': '삼성생명', 'company_url': '/item/main.nhn?code=032830'}, {'company': '하나금융지주', 'company_url': '/item/main.nhn?code=086790'}, {'company': '삼성화재', 'company_url': '/item/main.nhn?code=000810'}, {'company': '고려아연', 'company_url': '/item/main.nhn?code=010130'}, {'company': '우리금융지주', 'company_url': '/item/main.nhn?code=316140'}, {'company': '기업은행', 'company_url': '/item/main.nhn?code=024110'}, {'company': '미래에셋대우', 'company_url': '/item/main.nhn?code=006800'}, {'company': '포스코케미칼', 'company_url': '/item/main.nhn?code=003670'}, {'company': '현대글로비스', 'company_url': '/item/main.nhn?code=086280'}, {'company': '현대건설', 'company_url': '/item/main.nhn?code=000720'}, {'company': '유한양행', 'company_url': '/item/main.nhn?code=000100'}, {'company': 'DB손해보험', 'company_url': '/item/main.nhn?code=005830'}, {'company': '삼성카드', 'company_url': '/item/main.nhn?code=029780'}, {'company': '삼성증권', 'company_url': '/item/main.nhn?code=016360'}, {'company': 'NH투자증권', 'company_url': '/item/main.nhn?code=005940'}, {'company': '일진머티리얼즈', 'company_url': '/item/main.nhn?code=020150'}, {'company': '농심', 'company_url': '/item/main.nhn?code=004370'}, {'company': '메리츠증권', 'company_url': '/item/main.nhn?code=008560'}, {'company': '현대해상', 'company_url': '/item/main.nhn?code=001450'}, {'company': '제일기획', 'company_url': '/item/main.nhn?code=030000'}, {'company': '동서', 'company_url': '/item/main.nhn?code=026960'}, {'company': 'LS ELECTRIC', 'company_url': '/item/main.nhn?code=010120'}, {'company': 'BNK금융지주', 'company_url': '/item/main.nhn?code=138930'}, {'company': '한올바이오파마', 'company_url': '/item/main.nhn?code=009420'}, {'company': '종근당', 'company_url': '/item/main.nhn?code=185750'}, {'company': 'HDC현대산업개발', 'company_url': '/item/main.nhn?code=294870'}, {'company': 'DB하이텍', 'company_url': '/item/main.nhn?code=000990'}, {'company': '한전KPS', 'company_url': '/item/main.nhn?code=051600'}, {'company': '영원무역', 'company_url': '/item/main.nhn?code=111770'}, {'company': '한국테크놀로지그룹', 'company_url': '/item/main.nhn?code=000240'}, {'company': '이노션', 'company_url': '/item/main.nhn?code=214320'}, {'company': '영풍', 'company_url': '/item/main.nhn?code=000670'}, {'company': '쿠쿠홈시스', 'company_url': '/item/main.nhn?code=284740'}, {'company': '보령제약', 'company_url': '/item/main.nhn?code=003850'}, {'company': '휴켐스', 'company_url': '/item/main.nhn?code=069260'}, {'company': '빙그레', 'company_url': '/item/main.nhn?code=005180'}, {'company': '락앤락', 'company_url': '/item/main.nhn?code=115390'}, {'company': '쿠쿠홀딩스', 'company_url': '/item/main.nhn?code=192400'}, {'company': '세방전지', 'company_url': '/item/main.nhn?code=004490'}]
company_list_url = f"https://finance.naver.com/sise/entryJongmok.nhn?&page="
company_info_base_url = "https://finance.naver.com"
kosdaq_list_url = "https://finance.naver.com/sise/sise_market_sum.nhn?sosok=1&page="
app = Flask("ff")
# os.system('cls' if os.name=='nt' else 'clear')
def request_company_list():
company_list = []
# for nb in range(1,21):
for nb in range(1,21):
request_company_list = requests.get(f"{company_list_url}{nb}")
soup = BeautifulSoup(request_company_list.text, "html.parser")
company_table_list = soup.find_all("td", {"class":"ctg"})
for info in company_table_list:
company = info.find('a').text
company_url = info.find('a')["href"]
company_list.append({"company": company, "company_url": company_url})
return company_list
def extract_indiv_table(company):
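    # Fetch the company's detail page and return the rows of its IFRS
    # financial-summary table, used by the level checks below.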
request_company_info = requests.get(f"{company_info_base_url}{company.get('company_url')}")
soup = BeautifulSoup(request_company_info.text, "html.parser")
info_table = soup.find("table", {"class": "tb_type1_ifrs"})
info_table_row = info_table.find_all("tr")
return info_table_row
def level_one_extract(company):
info_table_row = extract_indiv_table(company)
net_income_ten = info_table_row[5].find_all("td")
for net_income in net_income_ten:
if "-" in net_income.text.strip():
return -1
else:
result = 1
return result
def level_one_company(company_list):
print("request level one")
level_one = []
# for company in company_list:
# for company in company_list[:1]:
for company in company_list:
one = level_one_extract(company)
if one == 1:
level_one.append(company)
print("level 1 len: ", len(level_one))
return level_one
def four_extract_company(company):
info_table_row = extract_indiv_table(company)
debt_ratio_ten = info_table_row[9].find_all("td")
for debt_ratio in debt_ratio_ten:
nbr = debt_ratio.text.strip().split('.')[0].replace(',','')
if nbr != '':
if int(nbr) > 100:
return -4
else:
result = 4
return result
def four_second_extract_company(company):
info_table_row = extract_indiv_table(company)
checking_ratio_ten = info_table_row[10].find_all("td")
for checking_ratio in checking_ratio_ten:
nbr = checking_ratio.text.strip().split('.')[0].replace(',','')
if nbr != '':
if int(nbr) < 100:
return -4
else:
result = 4
return result
def level_four_company(company_list):
print("request level four")
level_four = []
for company in company_list:
four = four_extract_company(company)
four = four_second_extract_company(company)
if four == 4:
level_four.append(company)
print("level 4 len: ", len(level_four))
return level_four
def six_extract_company(company):
pass
def request_kosdaq():
kosdaq_list = []
for nb in range(1,5):
print(f"{nb}")
request_kosdaq_list = requests.get(f"{kosdaq_list_url}{nb}")
soup = BeautifulSoup(request_kosdaq_list.text, "html.parser")
kosdaq_table_list = soup.find_all("tr")
for info in kosdaq_table_list[7:-1]:
if info.find("a"):
company = info.find('a').text
company_url = info.find('a')["href"]
kosdaq_list.append({"company": company, "company_url": company_url})
else :
continue
print(kosdaq_list)
return kosdaq_list
def ff_program(company_list):
level_one = level_one_company(company_list)
level_four = level_four_company(level_one)
return level_four
# @app.route("/")
# def kospi():
# print("Level four len", len(level_four_db))
# print(level_four_db)
# save_to_file(level_four_db)
# return render_template("index.html", level_four=level_four_db)
# def kosdaq():
# kosdaq_list = request_kosdaq()
# return kosdaq_list
@app.route("/")
def financial_freedom():
# kospi_list = request_company_list()
kosdaq_list = request_kosdaq()
kosdaq_level_four = ff_program(kosdaq_list)
print(kosdaq_level_four)
save_to_file(kosdaq_level_four)
# app.run(host="0.0.0.0")
financial_freedom()
# if "company" not in db:
# print("request company")
# db["company"] = request_company_list()
# company_list = db["company"]
# if "one" not in db:
# print("request level one")
# db["one"] = level_one_company(company_list)
# level_one = db["one"]
# print("Level one len ", len(level_one))
#
|
[
"kosah302@gmail.com"
] |
kosah302@gmail.com
|
9ec5875503577bf114e6521a6174ca229c968b95
|
c1e0874f55d05ee990ed2d637c2910701b32d246
|
/soft_uni_OOP/Defining Classes/lab/scope_mess_3.py
|
03d81f4774c92bdc435a7583da245e72d79f8461
|
[] |
no_license
|
borislavstoychev/Soft_Uni
|
5d047bef402c50215e0abc825476326889ffd0be
|
ccc0b2fb18f8ad6809b475eb20e82a9e4eb4b0b0
|
refs/heads/master
| 2023-05-11T12:27:08.672058
| 2021-05-28T18:00:10
| 2021-05-28T18:00:10
| 277,556,731
| 3
| 2
| null | 2021-02-11T19:57:37
| 2020-07-06T13:58:23
|
Python
|
UTF-8
|
Python
| false
| false
| 311
|
py
|
x = "global"
def outer():
x = "local"
def inner():
nonlocal x
x = "nonlocal"
print("inner:", x)
def change_global():
global x
x = "global: changed!"
print("outer:", x)
inner()
print("outer:", x)
change_global()
print(x)
outer()
print(x)
|
[
"stoy4ew@gmail.com"
] |
stoy4ew@gmail.com
|
8ff2bd1fe3bcae9eaddea886a981234c02e1ccc7
|
8faa47c6b03940bc382a654fcc5ac99babd99d4e
|
/auth.py
|
9cbb74114c71c49a43a0eb158b05868fef3f1d79
|
[] |
no_license
|
TobiasGrosch/sprint-velocity-planning
|
7262038cf2359f0d55834c9333386be1342f947c
|
e575416fcc3bb70ebe9d1c0702c1d2a6c4606bdd
|
refs/heads/master
| 2023-04-19T07:22:35.989809
| 2021-04-25T06:47:53
| 2021-04-25T06:47:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,050
|
py
|
import json
from flask import request, _request_ctx_stack, abort
from functools import wraps
from jose import jwt
from urllib.request import urlopen
AUTH0_DOMAIN = 'fsnd-groscht.eu.auth0.com'
ALGORITHMS = ['RS256']
API_AUDIENCE = 'sprint_vel'
## AuthError Exception
'''
AuthError Exception
A standardized way to communicate auth failure modes
'''
class AuthError(Exception):
def __init__(self, error, status_code):
self.error = error
self.status_code = status_code
## Auth Header
def get_token_auth_header():
"""Obtains the Access Token from the Authorization Header
"""
auth = request.headers.get('Authorization', None)
if not auth:
raise AuthError({
'code': 'authorization_header_missing',
'description': 'Authorization header is expected.'
}, 401)
parts = auth.split()
if parts[0].lower() != 'bearer':
raise AuthError({
'code': 'invalid_header',
'description': 'Authorization header must start with "Bearer".'
}, 401)
elif len(parts) == 1:
raise AuthError({
'code': 'invalid_header',
'description': 'Token not found.'
}, 401)
elif len(parts) > 2:
raise AuthError({
'code': 'invalid_header',
'description': 'Authorization header must be bearer token.'
}, 401)
token = parts[1]
return token
def check_permissions(permission, payload):
if 'permissions' not in payload:
raise AuthError({
'code': 'invalid_claims',
'description': 'Permissions not included in JWT.'
}, 400)
if permission not in payload['permissions']:
raise AuthError({
'code': 'unauthorized',
'description': 'Permission not found.'
}, 401)
return True
def verify_decode_jwt(token):
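    # Fetch Auth0's published JWKS, pick the signing key whose kid matches
    # the token header, then verify the RS256 signature plus the audience
    # and issuer claims.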
jsonurl = urlopen(f'https://{AUTH0_DOMAIN}/.well-known/jwks.json')
jwks = json.loads(jsonurl.read())
unverified_header = jwt.get_unverified_header(token)
rsa_key = {}
if 'kid' not in unverified_header:
raise AuthError({
'code': 'invalid_header',
'description': 'Authorization malformed.'
}, 401)
for key in jwks['keys']:
if key['kid'] == unverified_header['kid']:
rsa_key = {
'kty': key['kty'],
'kid': key['kid'],
'use': key['use'],
'n': key['n'],
'e': key['e']
}
if rsa_key:
try:
payload = jwt.decode(
token,
rsa_key,
algorithms=ALGORITHMS,
audience=API_AUDIENCE,
issuer='https://' + AUTH0_DOMAIN + '/'
)
return payload
except jwt.ExpiredSignatureError:
raise AuthError({
'code': 'token_expired',
'description': 'Token expired.'
}, 401)
except jwt.JWTClaimsError:
raise AuthError({
'code': 'invalid_claims',
'description': 'Incorrect claims. Please, check the audience and issuer.'
}, 401)
except Exception:
raise AuthError({
'code': 'invalid_header',
'description': 'Unable to parse authentication token.'
}, 400)
raise AuthError({
'code': 'invalid_header',
'description': 'Unable to find the appropriate key.'
}, 400)
def requires_auth(permission=''):
def requires_auth_decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
token = get_token_auth_header()
try:
payload = verify_decode_jwt(token)
            except Exception:
abort(401)
check_permissions(permission, payload)
return f(payload, *args, **kwargs)
return wrapper
return requires_auth_decorator
|
[
"ubuntu@ip-10-101-1-61.eu-central-1.compute.internal"
] |
ubuntu@ip-10-101-1-61.eu-central-1.compute.internal
|
2e93f6d19a1051930f841ffd22fefbc664870f58
|
3c2dd7932308cf47ca9910d963affa5a67beb97f
|
/model/callback.py
|
c2177b884353fae0e4f5603bcba99584c3450724
|
[] |
no_license
|
HaroldLiuJ/CGSum
|
cb65dc65a7c300f32412bf7f3fbfea4c07d2680c
|
ef372b0b126553d531fedb53f0a1a72b36a82b63
|
refs/heads/main
| 2023-06-06T10:17:54.917198
| 2021-06-25T14:09:23
| 2021-06-25T14:09:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,289
|
py
|
import os
import sys
import time
import numpy as np
import torch
from data_util.logging import logger
from data_util.utils import calc_running_avg_loss
from fastNLP.core.callback import Callback, EarlyStopError
class TrainCallback(Callback):
def __init__(self, config, patience=10, quit_all=True):
super().__init__()
self.config = config
self.patience = patience
self.wait = 0
self.running_avg_loss = 0
self.loss_update_every = []
if type(quit_all) != bool:
raise ValueError("In KeyBoardInterrupt, quit_all arguemnt must be a bool.")
self.quit_all = quit_all
def on_epoch_begin(self):
self.epoch_start_time = time.time()
if self.epoch == self.config.coverage_at:
self.config.is_coverage = True
if self.config.is_coverage:
self.trainer.do_valid = True
else:
self.trainer.do_valid = False
def on_backward_begin(self, loss):
self.loss_update_every.append(loss.item())
if isinstance(loss, tuple) and not np.isfinite(loss[0].item()):
logger.error("train Loss is not finite. Stopping.")
logger.info(loss[0].item())
for name, param in self.model.named_parameters():
if param.requires_grad:
logger.info(name)
logger.info(param.grad.data.sum())
raise Exception("train Loss is not finite. Stopping.")
if self.step % self.update_every == 0:
assert len(self.loss_update_every) == self.update_every
loss_batch = sum(self.loss_update_every)
self.loss_update_every = []
# report the loss
if self.step < 10 or self.step % 1000 == 0:
logger.info("|epoch: %d step: %d log_loss: %.4f |"
% (self.epoch, self.step / self.update_every, loss_batch))
self.running_avg_loss = calc_running_avg_loss(loss_batch, self.running_avg_loss,
self.step / self.update_every)
def on_backward_end(self):
if self.config.max_grad_norm:
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
def on_epoch_end(self):
logger.info(
' | end of epoch {:3d} | time: {:5.2f}s | '.format(self.epoch, (time.time() - self.epoch_start_time)))
def on_valid_begin(self):
self.valid_start_time = time.time()
def on_valid_end(self, eval_result, metric_key, optimizer, is_better_eval):
logger.info(
' | end of valid {:3d} | time: {:5.2f}s | '.format(self.epoch, (time.time() - self.valid_start_time)))
# save the better checkpoint
if is_better_eval:
logger.info("got better results on dev, save checkpoint.. ")
model_save_path = os.path.join(self.config.model_path,
f'CGSum_{self.config.setting}_{self.config.n_hop}hopNbrs.pt')
checkpoint = {"state_dict": self.model.state_dict(), "config": self.model.config.__dict__}
torch.save(checkpoint, model_save_path)
# early stop
if not is_better_eval:
if self.wait == self.patience:
raise EarlyStopError("Early stopping raised.")
else:
self.wait += 1
else:
self.wait = 0
def on_exception(self, exception):
if isinstance(exception, KeyboardInterrupt):
if self.quit_all is True:
sys.exit(0)
else:
pass
else:
raise exception
class LRDecayCallback(Callback):
def __init__(self, parameters, decay_rate=1e-3, steps=100):
super().__init__()
self.paras = parameters
self.decay_rate = decay_rate
self.steps = steps
def on_step_end(self):
if self.step % self.update_every == 0:
step = self.step // self.update_every
if step % self.steps == 0:
for para in self.paras:
para['lr'] = para['lr'] * (1 - self.decay_rate)
|
[
"cxan@ChenxindeMacBook-Pro.local"
] |
cxan@ChenxindeMacBook-Pro.local
|
490df8c8807c725fdf915ccba2ff1496bd0ac937
|
60cb975f3e0251c73c457271bce8a7b2036e422b
|
/studysrc/mytest/websppider/transtest.py
|
23c308fa3b1c83bba1c6cd379e0c29e746a2f19d
|
[] |
no_license
|
49257620/reboot
|
0a2341f23bc1a6f3ae47b59f772919228c623544
|
86b348228d1a25d78c45b0e9022d7c773544373b
|
refs/heads/master
| 2018-11-17T19:19:58.969710
| 2018-09-25T03:15:57
| 2018-09-25T03:15:57
| 125,727,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
# encoding: utf-8
# Author: LW
import urllib.request
import urllib.parse
import time
import random
import hashlib
content = 'what fuck'
url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'
data = {}
'''
1523493384696
1523493371204
351ac046404e1bbcb9442615f964a96d
cb2731255a15489013919b3788953bdc
'''
u = 'fanyideskweb'
d = content
f = str(int(time.time()*1000) + random.randint(1,10))
c = 'ebSeFb%=XZ%T[KZ)c(sy!'
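# request signature: md5 of client id + query text + salt + secret constant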
sign = hashlib.md5((u + d + f + c).encode('utf-8')).hexdigest()
print(f)
print(sign)
data['i'] = content
data['from'] = 'AUTO'
data['to'] = 'AUTO'
data['smartresult'] = 'dict'
data['client'] = 'fanyideskweb'
data['salt'] = f
data['sign'] = sign
data['doctype'] = 'json'
data['version'] = '2.1'
data['keyfrom'] = 'fanyi.web'
data['action'] = 'FY_BY_CLICKBUTTION'
data['typoResult'] = 'false'
data = urllib.parse.urlencode(data).encode('utf-8')
response = urllib.request.urlopen(url, data)
html = response.read().decode('utf-8')
print(html)
|
[
"49257620@qq.com"
] |
49257620@qq.com
|
8b37209b33d201b789d2658845aa87843ef7a8e0
|
db144fdc9a1948cce066bed20912c32e1a18a8aa
|
/accounts/views.py
|
49c0aa25bf7c13a2faa3ed61bf4acc3c6a75f458
|
[] |
no_license
|
masato932/django-blog3
|
cd01101cbffdbaa33d2cb9bf696e5a5cdf8cd6fa
|
769068ba356cf8e0cc0bbde76e82e116e58b8bab
|
refs/heads/main
| 2023-05-13T20:14:43.706480
| 2021-06-05T14:03:13
| 2021-06-05T14:03:13
| 365,480,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
from django.shortcuts import render, redirect
from allauth.account import views
class LoginView(views.LoginView):
template_name = 'accounts/login.html'
class LogoutView(views.LogoutView):
template_name = 'accounts/logout.html'
def post(self, *args, **kwargs):
if self.request.user.is_authenticated:
self.logout()
return redirect('/')
class SignupView(views.SignupView):
template_name = 'accounts/signup.html'
# Create your views here.
|
[
"masatowada66@gmail.com"
] |
masatowada66@gmail.com
|
9ed2c8e05d8b011321f54977f2d63a7de27935b1
|
e93117371d0c5c6b3eb0b177fc4b2acc8f9524de
|
/TestFiles/test_cp.py
|
8d5a4bfb1545f4f8b2be1b6b96f89b41a25a789c
|
[] |
no_license
|
marthinwurer/TournamentRecorder
|
94dfc8af28455e3844628616cef45d1e3a3fdd6c
|
87f04c4caa68d12935e339d253525e3364ab8f68
|
refs/heads/master
| 2021-03-24T09:11:44.460711
| 2016-11-29T04:47:44
| 2016-11-29T04:47:44
| 67,151,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 998
|
py
|
"""
This file tests the create player (cp) method of the API
Author: TangentTally
"""
import unittest
import sys
sys.path.insert(0, "../src")
import tr_api
class TestAp(unittest.TestCase):
def setUp(self):
tr_api.createPlayer(1, 'Evan')
self.topdict = tr_api.listPlayers()
self.topdict2 = tr_api.createPlayer(1, "Fail2")
def tearDown(self):
tr_api.createPlayer(2, 'Ben')
tr_api.createPlayer(3, 'Will')
tr_api.createPlayer(4, 'Jon')
def test_add_player(self):
self.assertEqual(len(self.topdict.get('rows')), 1)
def test_new_player_id(self):
self.assertEqual(self.topdict.get('rows')[0].get('id'), 1)
def test_new_player_name(self):
self.assertEqual(self.topdict.get('rows')[0].get('name'), 'Evan')
def test_fail_cp_same_id(self):
self.assertFalse(self.topdict2.get('outcome'))
def test_fail_cp_error_message(self):
self.assertEqual(self.topdict2.get('reason'), 'DCI Exists')
|
[
"evanditto9@gmail.com"
] |
evanditto9@gmail.com
|
69f95154a6b1698708090146d7eafde38aea3f17
|
955781aa0539cb4b61c189ad86d61f332c60f307
|
/project/rmethods_rec_train.py
|
bfe1b663e861095bfec55ea328a660409faea4a1
|
[
"Apache-2.0"
] |
permissive
|
LARC-CMU-SMU/coleridge-rich-context-larc
|
26039a436555fb0ab0d38f4af442f389f32da5ca
|
a0b6cba59b843bbaf98cdcb5e661b1c524a79db0
|
refs/heads/master
| 2020-04-08T18:45:30.960621
| 2019-06-15T10:02:11
| 2019-06-15T10:02:11
| 159,623,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,811
|
py
|
import argparse
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.utils import shuffle
from sklearn.externals import joblib
from rcc_conf import RAND_SEED
from rcc_utils import json_from_file
class RMethodContextExtractor(BaseEstimator, TransformerMixin):
"""Extract title and contents features.
"""
def fit(self, x, y=None):
return self
def transform(self, data):
# construct object dtype array with two columns
# first column = 'title' and second column = 'contents'
features = np.empty(shape=(len(data), 2), dtype=object)
for i, d in enumerate(data):
features[i, 0] = d['title']
features[i, 1] = d['contents']
return features
def _generate_train_model(data_train, labels_train,
output_file):
print('Training research method model for recommendation...')
features = ColumnTransformer(
[
('title', TfidfVectorizer(ngram_range=(1, 2)), 0),
('contents', TfidfVectorizer(ngram_range=(1, 1)), 1),
],
transformer_weights={
'title': 1.0,
'contents': 1.0
}
)
sgd = SGDClassifier(loss='log', penalty='l2',
alpha=1e-4,
random_state=RAND_SEED,
max_iter=2000, tol=1e-3)
pipeline = Pipeline([
('feature_set_extractor', RMethodContextExtractor()),
('union', features),
('clf', sgd)])
pipeline.fit(data_train, labels_train)
joblib.dump(pipeline, output_file, compress='zlib')
print('Model file {} saved.'.format(output_file))
def main(args):
rmethod_ctx_train = json_from_file(args.input)
rmethod_ctx_train = [d for d in rmethod_ctx_train
if d['title'] is not None]
labels_train = [d['method'] for d in rmethod_ctx_train]
rmethod_ctx_train, labels_train = shuffle(rmethod_ctx_train, labels_train,
random_state=RAND_SEED)
_generate_train_model(rmethod_ctx_train, labels_train, args.output)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train classifier model for \
research method recommendation.')
parser.add_argument('--input', type=str, required=True,
help='Filename of input dataset to train the models.')
parser.add_argument('--output', type=str, required=True,
help='Filename of model output')
args = parser.parse_args()
main(args)
|
[
"philipskokoh@gmail.com"
] |
philipskokoh@gmail.com
|
8c8a966e406d530b9ab30c7abb9645b76d1a5898
|
bf833d3048f1eabc3e47101412ac77d14a43f332
|
/src/augment_me.py
|
e546249219c040609b22161b7fa6182efc20e5ce
|
[] |
no_license
|
DavidDavidsonDK/Person-Detection
|
c6c177ab0ddd42320741796edc372e4a5f42d11a
|
718d16e8cd1f5b4e876951365c75ee88d752c308
|
refs/heads/master
| 2020-04-11T22:34:42.747205
| 2018-12-29T07:15:33
| 2018-12-29T07:15:33
| 162,140,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,962
|
py
|
import numpy as np
import cv2
from ano_parser import PascalVocWriter,PascalVocReader
from plot import draw_rect
from PIL import Image
import matplotlib.pyplot as plt
import math
from tqdm import tqdm
from rotation import Rotation
from random_translation import Translate
from random_scale import Scale
from horizontal_flip import HorizontalFlip
import os
class Augmentation(object):
def __init__(self,rotation=True,translate=True,scale=True,horizontal_flip=True,**kwargs):
self.augmentators = {}
if rotation:
ag = kwargs['ag'] if 'ag' in kwargs else 30
self.rotation = Rotation(ag)
self.augmentators['rotation'] = self.rotation
if translate:
tr = kwargs['tr'] if 'tr' in kwargs else 0.2
self.translate = Translate(tr)
self.augmentators['translate'] = self.translate
if scale:
sc_lower = kwargs['sc_lower'] if 'sc_lower' in kwargs else 0.2
sc_upper = kwargs['sc_upper'] if 'sc_upper' in kwargs else 0.2
self.scale = Scale((sc_lower,sc_upper))
self.augmentators['scale'] = self.scale
if horizontal_flip:
self.horizontal_flip = HorizontalFlip()
self.augmentators['horizontal_flip'] = self.horizontal_flip
def __augment(self,im_path,im_name,ano_reader,dest_path):
im = np.array(Image.open(im_path+im_name+'.jpg'), dtype=np.uint8)
boxes = np.array([[shape[1][0][0],shape[1][0][1], shape[1][1][0],shape[1][1][1]] for shape in ano_reader.getShapes()])
d_flags = [int(shape[4]) for shape in ano_reader.getShapes()]
i = 0
for key,transformer in self.augmentators.items():
name_like = dest_path + im_name + '_'+key
ano_writer = PascalVocWriter('augmented', im_name + '_'+key+'.jpg', im.shape,localImgPath=name_like+'.jpg')
a_im,a_boxes = transformer(im,boxes.astype(np.float64))
for box in a_boxes:
ano_writer.addBndBox(math.ceil(box[0]),math.ceil(box[1]),math.ceil(box[2]),math.ceil(box[3]),'person',d_flags[i]) # the last argument is the 'difficult' flag
i+=1
ano_writer.save(name_like+'.xml')
cv2.imwrite(name_like+'.jpg',a_im)
i = 0
def augment(self, file_path='../data/raw/citycams_1/', dest_path='../data/augmented/'):
for img_or_xml in tqdm(os.listdir(file_path)):
if img_or_xml.endswith('.jpg'):
main_part_of_name = img_or_xml.split('.')[0]
ano_reader = PascalVocReader(file_path+main_part_of_name+'.xml')
self.__augment(file_path,main_part_of_name,ano_reader,dest_path)
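# Note: for every input image this writes one augmented copy per enabled
# transform into dest_path, e.g. (illustrative name) img001_rotation.jpg
# together with a matching img001_rotation.xml Pascal VOC annotation.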
if __name__ == '__main__':
print('Augmentation start...')
aug = Augmentation(rotation=True, translate=True, scale=True, horizontal_flip=True,ag = 20,tr=0.2,sc_lower=0.2,sc_upper=0.6)
aug.augment()
print('Augmentation finished')
|
[
"d.kprogrammer0.0@gmail.com"
] |
d.kprogrammer0.0@gmail.com
|
e5be18291d8f73243a4c9f25f3a48e3ce073b814
|
a7e40fc92f2681beef1c41afb0e348a910d0dc06
|
/misc_tool/fixfix.py
|
07be6bea5d3f88e9f20eff4f8610623fda11e076
|
[] |
no_license
|
yacheng1127/YW-Python
|
ae0e7480859e21c6aaab20a166561c818387ec44
|
e88cfd202c7449a23dd8bb8d659bbdc0df1a1ff9
|
refs/heads/package
| 2020-12-24T17:26:23.905392
| 2013-09-26T13:41:49
| 2013-09-26T13:41:49
| 7,425,035
| 0
| 1
| null | 2013-05-14T13:04:40
| 2013-01-03T15:36:42
|
Python
|
UTF-8
|
Python
| false
| false
| 22,076
|
py
|
################################################################################################
## fixfix: post-processes drifter tracks
## xiuling.wu
## August 9, 2011
##
## Jim Manning modifications
## October 2011
## March 2012
## Nov 2012 trouble with ginput causing segfault
## STEPS ARE AS FOLLOWS:
# - load all the ascii data file
# - for each drifter it conducts 4 basic steps
# - eliminate repeat times
# - calculate forward and backward differences (velocity) and eliminate bad points
# - writes meta data to log file
# - check for drogue loss
# - clean plots of track (pth_id_final.png) and velocity (uv_id_final.png)
# - generates oracle ready ascii file
##################################################################################################
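## Example of one line of the oracle-ready output (values are illustrative;
## the format follows the fido.write() calls at the bottom of this script):
##   110410712 15-May-2012:13:30  42.123456 -70.654321 135.562500 12.5  10.0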
import sys
sys.path.append("/home3/ocn/jmanning/py/jmanning/whython6/")
import csv
from conversions import ll2uv #To convert from longitude/latitude to unit vectors
import numpy as np
import matplotlib as mpl
import matplotlib.mlab as ml
#part1
#import scipy
#from scipy import *
#import pylab
from pylab import *
import matplotlib.pyplot as plt
#import basemap
from matplotlib.dates import num2date,date2num, DateFormatter
import math
### HARD CODE ################################################################################
critfactor=8 # multiple of the mean velocity above which points are discarded
minla=20;maxla=48;minlo=-150;maxlo=-20 # delimiting data to make it easier
bathy=True # set to "True" if isobaths are wanted
fid=open("/home3/ocn/jmanning/drift/drift.log","a")#permanent log file that is appended to../home3/ocn/jmanning/drift/drift.log
# load /data5/jmanning/drift/massdmf/withtemp_2009.dat ##ids with temperature
#list(set(line.strip() for line in open('X:/drift/bowdoin/2009/withtemp_2009.dat'))) # ids with temperature
#fileformat='bowdoin'; # format of temperature data (sometimes "emolt") if using minilog/dat files
#wt='withtemp_2009'
direcin="/net/nwebserver/drifter/" # directory where the final plots are stored
#direcout='/net/data5/jmanning/drift/umassb/2012' #'/data5/jmanning/drift/css/2011'
#fn='drift_umb_2012_1.dat'
direcout='/net/data5/jmanning/drift/kocik/2012' #'/data5/jmanning/drift/css/2011'
fn='drift_cfcc_2012_1.dat'
fid.write('\n'+'#'*40+' below is '+str(fn)+' log '+'#'*40+'\n')
depcont=[-10] # depth contour in final plot (apparently not used in 5/2012)
year=int(2012)
strattle=0 # this will eventually flag to "1" if yeardays change by more than 365
### END HARDCODES ################################################
#raw data loaded
idss,yeardays_all,lat_all,lon_all,day_all,hours_all,minutes_all,depths,temps=[],[],[],[],[],[],[],[],[]
csv_reader=csv.reader(open(direcin+fn,"r"))
for line in (x for x in csv_reader if x[0][0] !='%'): # skip comment lines (those starting with '%')
# print float(line[0].split()[8])
if float(line[0].split()[8])<maxla and float(line[0].split()[8])>minla and float(line[0].split()[7])>minlo and float(line[0].split()[7])<maxlo:
idss.append(line[0].split()[0])
yeardays_all.append(line[0].split()[6])
lat_all.append(line[0].split()[8])
lon_all.append(line[0].split()[7])
day_all.append(line[0].split()[3])
hours_all.append(line[0].split()[4])
minutes_all.append(line[0].split()[5])
depths.append(line[0].split()[9])
temps.append(line[0].split()[10])
# get ids
id=list(set(idss))
# convert string to float
yeardays_all=[float(i)+1 for i in yeardays_all] # python's num2date() is one day behind matlab's, so add 1 here
lat_all=[float(i) for i in lat_all]
lon_all=[float(i) for i in lon_all]
#days_all=[float(i) for i in days_all]
#ids=[float(i) for i in ids]
days_all,ids=[],[]
for i in range(len(day_all)):
days_all.append(int(float(day_all[i])))
for i in range(len(id)):
ids.append(int(float(id[i])))
fido=open(direcout+'/prep_for_oracle_'+fn[6:],'w')
ids=np.sort(ids)
#ids=[110410712]#,11041073]
for k in range(len(ids)): #where "ids" is a list of distinct ids and int
# latitude, longitude, time
strattle=0
lat,lon,time,yeardays,depth,temp=[],[],[],[],[],[]
for i in range(len(idss)):
if int(float(idss[i]))==ids[k]:
lat.append(lat_all[i])
lon.append(lon_all[i])
if (i>1)&(strattle==0):# here's where we account for a new year
if yeardays_all[i]-yeardays_all[i-1]<-200:
year=year+1
print 'incremented year to '+str(year)
strattle=1
yeardays.append(yeardays_all[i])
#time.append(date2num(num2date(yeardays_all[i]).replace(year=year).replace(day=days_all[i])))
time.append(date2num(num2date(yeardays_all[i]).replace(year=year)))
depth.append(depths[i])
temp.append(temps[i])
#print time
print "there are ", len(lat), " fixes for id =",ids[k]
print "Note: Check to see if any of these already exist in database before loading"
# STEP 1a: check to make sure time is monotonic
#if len(find(diff(time)<=0))>0:
# plot(time)
# show()
#raw_input('Trouble with time not increasing press return to continue')
# close()
# STEP 1b: check for repeat times
###### while time[i]==time[i-1], get the del_same_time_index ########
del_same_time_index=[]
for i in range(1,len(time)):
if int(time[i-1])==int(time[i]) and num2date(time[i-1]).hour== num2date(time[i]).hour and num2date(time[i-1]).minute== num2date(time[i]).minute:
del_same_time_index.append(i)
del_same_time_index.reverse()
if del_same_time_index==[]:
print "there is no same time."
else:
print str(len(del_same_time_index))+' duplicate-time points deleted'
index=range(len(time))
for i in del_same_time_index:
del lat[i],lon[i],time[i],yeardays[i],depth[i],temp[i]
# STEP 2a:
############ calculate forward and backward velocities of the raw data ##########################
forward_u,forward_v,forward_spd,jdn=ll2uv(time,lat,lon)# was yeardays but now uses "time" as of 3/29/2012
backward_u,backward_v,backward_spd,backward_jdn=ll2uv(time[::-1],lat[::-1],lon[::-1])
## calculate resultants
id_fs=list(np.where(np.array(forward_spd)<500)[0])
id_bs=list(np.where(np.array(backward_spd)<500)[0])
idc=[val for val in id_fs if val in id_bs]
jdraw,spdraw=[],[]
for i in idc:
jdraw.append(jdn[i])
spdraw.append(forward_spd[i])
########### plot the velocities ###################################################
## def plot_speed(time,speed):
## #fig=plt.figure()
## ax = fig.add_subplot(111) #to divide the fig into some area and (line row chose)
## plt.title('Drifter #'+str(ids[k]))
## jd=[]
## for i in time:
## jd.append(i)
## plt.plot(jd,speed,'b-')
#locator = mpl.dates.AutoDateLocator()
#ax.xaxis.set_major_locator(locator)
## if len(jd)<100:
## else:
## monthsFmt = DateFormatter('%b/%d')
## ax.set_ylabel('cm/s')
#ax.xaxis.set_major_formatter(monthsFmt)
## ax.set_xlabel(str(year),fontsize=17)
## plt.grid()
## fig=plt.figure()
## plot_speed(jdraw,spdraw)## plot speed
## plt.show()
## plt.close()
#######################################################################################
# calculate a reasonable criteria for this drifter
crit=np.mean([abs(i) for i in forward_spd])*critfactor
print "Velocity criteria set to ", str(critfactor),' times the mean or ',str(crit),' cm/s'
# check for drifter going aground (ie very low velocity)
idlow=list(np.where(np.array(spdraw)<float(np.mean([abs(i) for i in forward_spd]))/100)[0])
# if idlow is not empty, add the comments in fid file
if idlow != []:
for i in range(len(idlow)):
print 'WARNING: Drifter ',str(ids[k]),' may be hung up on gear or aground on ',str(idlow[i]),' where velocity is < 1% of the mean'
#fid.write(str(ids[k]).rjust(10)+' apparently hung-up on '+str(idlow[i])+'\n')
idlow_print0=str(sorted(idlow))
idlow_print1=idlow_print0.replace(', ',' ')
tempochunk0=str(ids[k]).rjust(10)+' apparently hung-up on '+str(idlow_print1)+'\n'#'from'+str(idlow[0])+'to'+str(idlow[-1])+'\n'
else:
tempochunk0='There were no hung-up points\n'
#### find bad velocities where criteria was just calculated
idbadf=list(np.where(abs(np.array(forward_spd))>crit)[0])
idbadb=list(np.where(abs(np.array(backward_spd))>crit)[0])
#if it is the 2nd time/point in the bad forward velocity (fu) that caused the problem
# then the 2nd time/point associated with the bad backward velocity should match
timeb=time[::-1] # backwards time vector
badtime=list(set([time[i+1] for i in idbadf]).intersection(set([timeb[i+1] for i in idbadb])))
print "%10.3f percent bad velocities deleted according to velocity criteria" % float(len(badtime)/float(len(lat))*100.)
index_badtime=[]# find near the badtime points
for i in badtime:
index_badtime.append(int(np.interp(i,time,range(len(time)))))
if index_badtime != []:
index_badtime.reverse()
for i in index_badtime:
index_near_badtimes=[]
if i-5<0:
ra=range(0,i+5)
elif i+5>len(lat):
ra=range(i-5,len(lat)-1)
else:
ra=range(i-5,i+5)
for m in ra:
index_near_badtimes.append(m)
plot_badtime=list(set(index_near_badtimes))
#plot the bad time data and near the bad time data's points
#plt.plot([lon[l] for l in plot_badtime],[lat[l] for l in plot_badtime],marker='.',)
#plt.plot([lon[l] for l in index_badtime],[lat[l] for l in index_badtime],marker='o',markerfacecolor='r',linestyle='None')
fig=plt.figure()
plt.plot([lon[l] for l in plot_badtime],[lat[l] for l in plot_badtime],marker='.',)
plt.plot(lon[i],lat[i],marker='o',markerfacecolor='r',linestyle='None')
plt.show()
#plt.close()
del_or_not=raw_input('Delete? (y/n or 1 for the end point): ')
if del_or_not=='y':
del time[i],lat[i],lon[i],yeardays[i],depth[i],temp[i]
elif del_or_not=='1':
plt.plot(lon[i-1],lat[i-1],marker='o',markerfacecolor='r',linestyle='None')
plt.show()
raw_input('How is that? press return')
del time[i-1],lat[i-1],lon[i-1],yeardays[i-1],depth[i-1],temp[i-1]
plt.close()
plt.close()
# STEP 3:
# delete values flagged bad by the objective criteria
#index_badtime.reverse()
#for i in index_badtime:
# if lat[i]!=999:
# del time[i],lat[i],lon[i],yeardays[i],depth[i],temp[i]
#print str(float(len(badtime))/len(time)*100),'% edited due to bad velocities > criteria'
idgood=len(lat)
###############################################################################################
# Step 4a:
# calculate forward velocities of the automatically editted data
fu,fv,spd1,jd1=ll2uv(time,lat,lon)
fig=plt.figure()
#plot_speed(jd1,spd1)
plt.plot(jd1,spd1)
plt.plot(jd1,spd1,marker="o",markerfacecolor="r",linestyle='None')
plt.show()
print 'click on any obviously bad points and then press the enter key.'
badpoints=ginput(n=0)
print badpoints#,timeout=10)
#badpoints=ginput(0,timeout=10,mouse_stop=3)
#badpoints=[]
#close()
# Step 4b:
# eliminate those points clicked as bads
# find badpoints index in yeardays
index_badpoints=[]
badpoints_num=len(badpoints)
for i in range(len(badpoints)):
index_badpoints.append(int(np.interp(badpoints[i][0],jd1,range(len(jd1)))))
print index_badpoints
index_badpoints=list(set(index_badpoints))
print "%10.2f percent bad velocities deleted according to manual clicks on velocity" % float(float(badpoints_num)/len(lat)*100.)
for i in sorted(index_badpoints)[::-1]:
del time[i], lat[i], lon[i], yeardays[i],depth[i],temp[i]
#plt.close()
plot_again=raw_input("Do you want to replot the figure after deleting the bad points? (y/n)")
#plot_again='y'
if plot_again=="y" or plot_again=="Y" or plot_again=="yes":
#plt.close('all')
fig=plt.figure()
fu2,fv2,spd2,jd2=ll2uv(time,lat,lon)
#plot_speed(jd2,spd2)
plt.plot(jd2,spd2,'mo-',markersize=5)#marker='o',markerfacecolor="r",linestyle='None')
plt.show()
#plt.close()
#print 'pausing 10 seconds'
#sleep(10)
# if there are a list of bad points, click the first point and the last point, then delete between them
del_between=raw_input('Do you want to delete all the points between two points? input "N" or "Y"' )
#del_between='N'
if del_between=="N" or del_between=="n":
print "You have choosen NOT to delete all the points between two points."
if del_between=="Y" or del_between=="y" :
print "Please click the first bad point and the last bad point to choose the range of the bad points"
between_badpoints=ginput(n=0)
print between_badpoints#,timeout=0)#,mouse_stop=2)
index_between_badpoints=[]
for i in range(len(between_badpoints)):
index_between_badpoints.append(int(np.interp(between_badpoints[i][0],jd2,range(len(jd2)))))
print index_between_badpoints
index_betweens=[]
for i in range(sorted(index_between_badpoints)[0],sorted(index_between_badpoints)[1]+1):
index_betweens.append(i)
for i in index_betweens[::-1]:
del lat[i],lon[i],time[i],yeardays[i],depth[i],temp[i]
del_between_badpoints=sorted(index_between_badpoints)[1]-sorted(index_between_badpoints)[0]+1
badpoints_num=len(badpoints)+del_between_badpoints
print "%10.2f percent editted due to bad velocities from manual clicks between two points" % float(float(badpoints_num)/len(time)*100.)
if ids[k]==1174306915 or ids[k]==1174306911:
del time[-1], lat[-1], lon[-1], yeardays[-1],depth[-1],temp[-1]
fig=plt.figure()
fu3,fv3,spd3,jd3=ll2uv(time,lat,lon)
#plot_speed(jd3,spd3)
plt.plot(jd3,spd3,'bo-')
plt.show()
#step 5a:
#manually delete points based on track
##############################################################################################################
fig=plt.figure()
#plt.figure(2)
#plt.plot(lon,lat,'k-')
plt.plot(lon,lat,'ro-')
plt.show()
print 'click on any obviously bad points on the track and then press the enter key.'
bad=ginput(n=0)
print bad
badplotpts=[] #index of points that are found to be near the same index of x & y
if len(bad)>0:
for kbad in range(len(bad)):
idxbad=ml.find(abs(lon-bad[kbad][0])==min(abs(lon-bad[kbad][0])))
idybad=ml.find(abs(lat-bad[kbad][1])==min(abs(lat-bad[kbad][1])))
print idxbad,idybad
if idxbad==idybad:
print lat[int(idxbad)],lon[int(idxbad)],' is bad'
badplotpts.append(int(idxbad))
for kk in range(len(badplotpts)):
plt.plot(lon[badplotpts[kk]],lat[badplotpts[kk]],'bo')
# #thismanager = get_current_fig_manager()
# #thismanager.window.SetPosition((1000,500))
plt.show()
for i in sorted(badplotpts)[::-1]:
del time[i], lat[i], lon[i], yeardays[i],depth[i],temp[i]
#fig=plt.figure()
plt.plot(lon,lat,'yo-')
plt.show()
raw_input(str(len(badplotpts))+' deleted from manual click on track. Press return to continue')
#plt.close()
# write to log file if some data was edited
if badpoints_num>0:
tempochunk1=(str(ids[k]).rjust(10)+' '+ str(crit).rjust(10)+' '+ str(badpoints_num).rjust(10)+' '+
str(idgood).rjust(10)+" "+str(math.floor(time[-1]-time[0])).rjust(10)+" manually edited uv plot\n")
else:
tempochunk1='There were no bad points deleted manually.\n'
if len(badtime)>0:
tempochunk2=(str(ids[k]).rjust(10)+' '+ str(crit).rjust(10)+' '+ str(len(badtime)).rjust(10)+' '+
str(idgood).rjust(10)+" "+str(math.floor(time[-1]-time[0])).rjust(10)+" objectively edited\n")
else:
tempochunk2='There were no bad velocities deleted by the velocity criteria.\n'
if len(badplotpts)>0:
tempochunk3=(str(ids[k]).rjust(10)+' '+ str(crit).rjust(10)+' '+ str(len(badplotpts)).rjust(10)+' '+
str(idgood).rjust(10)+" "+str(math.floor(time[-1]-time[0])).rjust(10)+" manually edited track points\n")
else:
tempochunk3='There were no track points deleted manually.\n'
# clean velocities w/out bad points
[u2,v2,spd2,jd2]=ll2uv(time,lat,lon) #was "yeardays" until 3/2012 to deal with strattling new year
#plot time, lat,lon
fig=plt.figure(figsize=(10,8))
ax = fig.add_subplot(111)
print 'calling basemap_usgs '
#print min(lat),min(lon),max(lat),max(lon)
#if max(lon)-min(lon)>4.0:
# basemap.basemap(lat,lon)#,(float(min(max(lat)-min(lat),max(lon)-min(lon)))+1.0)/5*4)
#else:
# basemap.basemap_detail(lat,lon,bathy, False,float(min(max(lat)-min(lat),max(lon)-min(lon)))/5*4)
#basemap.basemap_usgs(lat,lon,False)#,depcont)
print 'past basemap'
ax.plot(lon,lat,marker=".",markerfacecolor='r',markersize=10)
points_num=10
ax.set_ylabel('latitude')
ax.set_xlabel("longitude")
#ax.set_xlim(min(lon)-(max(lon)-min(lon))/10.,max(lon)+(max(lon)-min(lon))/10.)
#ax.set_ylim(min(lat)-(max(lat)-min(lat))/10.,max(lat)+(max(lat)-min(lat))/10.)
ax.set_xlim(min(lon),max(lon))
ax.set_ylim(min(lat),max(lat))
plt.title('Drifter #'+str(ids[k]),fontsize=16)
# fig.autofmt_xdate() #display the time "lean"
ax.xaxis.set_label_coords(0.5, -0.1)#set the position of the xtick labels
ax.yaxis.set_label_coords(-0.08, 0.4)
# the last points, annotate "end"
self_annotate1=ax.annotate("End", xy=(lon[-1], lat[-1]),xycoords='data', xytext=(8, 11),
textcoords='offset points',arrowprops=dict(arrowstyle="->"))
self_annotate2=ax.annotate("Start", xy=(lon[0], lat[0]),xycoords='data', xytext=(8, 11),
textcoords='offset points',arrowprops=dict(arrowstyle="->"))
if time[-1]-time[0]<=2:
if len(time)<5: skip=1
else: skip=int(float(len(time))/5)
for i in range(0,len(time),skip):
self_annotate3=ax.annotate(num2date(time[i]).replace(tzinfo=None).strftime('%d-%b %H:%M'), xy=(lon[i], lat[i]),
xycoords='data', xytext=(8, 11), textcoords='offset points',arrowprops=dict(arrowstyle="->"))
# self_annotate3.draggable()
elif (time[-1]-time[0]>2.0)&(time[-1]-time[0]<20.0):
for i in range(1,len(time)):
if num2date(time[i-1]).day != num2date(time[i]).day:
self_annotate4=ax.annotate(num2date(time[i]).replace(tzinfo=None).strftime('%d-%b'), xy=(lon[i], lat[i]),
xycoords='data', xytext=(8, 11), textcoords='offset points',arrowprops=dict(arrowstyle="->"))
else: # place approximately 10 labels
for i in range(1,len(time),int(len(time)/10.)):
#if num2date(time[i-1]).day<>num2date(time[i]).day:
self_annotate4=ax.annotate(num2date(time[i]).replace(tzinfo=None).strftime('%d-%b'), xy=(lon[i], lat[i]),
xycoords='data', xytext=(8, 11), textcoords='offset points',arrowprops=dict(arrowstyle="->")) # self_annotate4.draggable() #drag the text if you want
thismanager = plt.get_current_fig_manager()
thismanager.window.SetPosition((1000, 0))
plt.show()
plt.savefig(direcin+'pth_'+str(ids[k])+'_final'+".ps")
plt.savefig(direcin+'pth_'+str(ids[k])+'_final'+".png")
raw_input('press return to close final track window')
plt.close()
# plt.show()
#plot u & v
fig=plt.figure()
ax1 = fig.add_subplot(111)
plt.plot(jdn,forward_u,"r",label='raw eastward')
plt.plot(jdn, forward_v,"b",label='raw northward')
plt.plot(jd2,u2,"m",linewidth=2,label='final eastward')
plt.plot(jd2,v2,"g",linewidth=2,label='final northward')
leg=plt.legend()
# leg.draggable()
locator = mpl.dates.AutoDateLocator()
ax1.xaxis.set_major_locator(locator)
if len(jdn)<100:
monthsFmt = DateFormatter('%b/%d %H:')
else:
monthsFmt = DateFormatter('%b/%d')
ax1.xaxis.set_major_formatter(monthsFmt)
ax1.set_xlabel(str(year))
ax1.set_ylabel('cm/s (where 50 ~ 1 knot)')
fig.autofmt_xdate() #display the time "lean"
plt.title('Drifter '+str(ids[k])+' cleaned',fontsize=16)
plt.savefig(direcin+'uv_'+str(ids[k])+'_final'+'.ps')
plt.savefig(direcin+'uv_'+str(ids[k])+'_final'+'.png')
plt.show()
raw_input('press return to close uv window')
# close()
# write out id,date,lat,lon,yrday0_gmt,temp, and depth_i
depth=[float(i) for i in depth]
for i in range(len(time)):
fido.write(str(ids[k]).rjust(10)+ " " +num2date(time[i]).replace(tzinfo=None).strftime('%d-%b-%Y:%H:%M')+" ")
fido.write(("%10.6f") %(lat[i]))
fido.write(" ")
fido.write(("%10.6f") %(lon[i]))
fido.write(" ")
fido.write(("%10.6f") %(yeardays[i]-1))
fido.write(" ")
fido.write(temp[i]+ " ")
fido.write(("%5.1f") %(depth[i]))
fido.write('\n')
if k != len(ids)-1:
raw_input("Press Enter to process next drifter")
whetherlog=raw_input('Do you want to keep this log?')
if whetherlog=="Y" or whetherlog=="y" :
fid.write(tempochunk0)
fid.write(tempochunk1)
fid.write(tempochunk2)
fid.write(tempochunk3)
print 'log has been saved.'
fido.close()
fid.close()
|
[
"james.manning@noaa.gov"
] |
james.manning@noaa.gov
|
9c85a3150d50dce18e37c4fd3faae85c74370fc8
|
32b628faa8b8ca8d11d8837cc495c0013f58b71a
|
/scripts/matrix2matrix.py
|
db91bd6468fb706939d9b97cc2c5810de2e084d0
|
[
"BSD-2-Clause"
] |
permissive
|
jaquol/cgat
|
40b81617625ae9f0ba352caf38c2afd6a13c58f6
|
d26fab0dff2192d4accc128d2895e668254d7b65
|
refs/heads/master
| 2021-01-12T22:33:46.186451
| 2016-01-15T16:56:43
| 2016-01-15T16:56:43
| 49,868,597
| 1
| 0
| null | 2016-01-18T10:10:24
| 2016-01-18T10:10:24
| null |
UTF-8
|
Python
| false
| false
| 17,852
|
py
|
'''
matrix2matrix.py - operate on matrices
======================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
This script reads a matrix from stdin, applies one or more methods to it, and writes the result to stdout. Supported matrix formats are:
* full: full matrix with row and column headers (unless --no-headers is given)
* sparse: sparse matrix
* phylip: phylip formatted matrix, but using tabs as separators and long names.
Methods:
sort-rows
sort rows by order in --rows-tsv-file
sort-columns
sort columns by order in --columns-tsv-file
mask-rows
set rows matching ids in --rows-tsv-file to --value
mask-columns
set columns matching ids in --columns-tsv-file to --value
mask-rows-and-columns
set rows and columns matching ids in --columns-tsv-file to --value (and)
Usage
-----
Example::
python matrix2matrix.py --help
Type::
python matrix2matrix.py --help
for command line help.
Command line options
--------------------
'''
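# Example invocations (the input/output file names here are hypothetical):
#   python matrix2matrix.py --method=transpose < matrix.tsv > transposed.tsv
#   python matrix2matrix.py --method=mask-rows --rows-tsv-file=rows.txt --value=0 < matrix.tsv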
import sys
import math
import StringIO
import numpy
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
import CGAT.CorrespondenceAnalysis as CorrespondenceAnalysis
import CGAT.MatlabTools as MatlabTools
import scipy
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id: matrix2matrix.py 2782 2009-09-10 11:40:29Z andreas $")
parser.add_option("-m", "--method", dest="methods", type="choice", action="append",
choices=("normalize-by-min-diagonal", "normalize-by-column",
"log", "ln", "negzero2value",
"set-diagonal",
"subtract-matrix", "mix-matrix", "normalize-by-matrix",
"normalize-by-column-max", "normalize-by-row-max",
"normalize-by-column-min", "normalize-by-row-min",
"normalize-by-column-median", "normalize-by-row-median",
"normalize-by-column-mean", "normalize-by-row-mean",
"normalize-by-column-total", "normalize-by-row-total",
"correspondence-analysis",
"normalize-by-value",
"add-value",
"sort-rows", "sort-columns",
"transpose",
"upper-bound", "lower-bound",
"subtract-first-col", "multiply-by-value", "divide-by-value",
"mask-rows", "mask-columns", "mask-rows-and-columns",
"symmetrize-mean", "symmetrize-max", "symmetrize-min",
),
help="""method to use [default=%default]""" )
parser.add_option("-s", "--scale", dest="scale", type="float",
help="factor to scale matrix by [default=%default].")
parser.add_option("-f", "--format", dest="format", type="string",
help="output number format [default=%default].")
parser.add_option("--rows-tsv-file", dest="filename_rows", type="string",
help="filename with rows to mask [default=%default].")
parser.add_option("--columns-tsv-file", dest="filename_columns", type="string",
help="filename with columns to mask [default=%default].")
parser.add_option("-p", "--parameters", dest="parameters", type="string",
help="Parameters for various functions.")
parser.add_option("-t", "--header-names", dest="headers", action="store_true",
help="matrix has row/column headers.")
parser.add_option("--no-headers", dest="headers", action="store_false",
help="matrix has no row/column headers.")
parser.add_option("-a", "--value", dest="value", type="float",
help="value to use for various algorithms.")
parser.add_option("-i", "--input-format", dest="input_format", type="choice",
choices=("full", "sparse", "phylip"),
help="""input format for matrix.""" )
parser.add_option("-o", "--output-format", dest="output_format", type="choice",
choices=("full", "sparse", "phylip"),
help="""output format for matrix.""" )
parser.add_option("--missing-value", dest="missing", type="float",
help="value to use for missing values. If not set, missing values will cause the script to fail [default=%default].")
parser.set_defaults(
methods=[],
scale=1.0,
headers=True,
format="%6.4f",
output_format="full",
input_format="full",
value=0.0,
parameters="",
write_separators=True,
filename_rows=None,
filename_columns=None,
missing=None,
)
(options, args) = E.Start(parser)
options.parameters = options.parameters.split(",")
lines = filter(lambda x: x[0] != "#", sys.stdin.readlines())
if len(lines) == 0:
raise IOError("no input")
chunks = filter(lambda x: lines[x][0] == ">", range(len(lines)))
if not chunks:
options.write_separators = False
chunks = [-1]
chunks.append(len(lines))
if options.filename_rows:
row_names, n = IOTools.ReadList(open(options.filename_rows, "r"))
if options.filename_columns:
column_names, n = IOTools.ReadList(open(options.filename_columns, "r"))
for chunk in range(len(chunks) - 1):
try:
raw_matrix, row_headers, col_headers = MatlabTools.readMatrix(StringIO.StringIO("".join(lines[chunks[chunk] + 1:chunks[chunk + 1]])),
format=options.input_format,
headers=options.headers,
missing=options.missing)
except ValueError, msg:
E.warn("matrix could not be read: %s" % msg)
continue
nrows, ncols = raw_matrix.shape
E.debug("read matrix: %i x %i, %i row titles, %i colum titles" %
(nrows, ncols, len(row_headers), len(col_headers)))
parameter = 0
for method in options.methods:
matrix = numpy.reshape(numpy.array(raw_matrix), raw_matrix.shape)
if method in ("normalize-by-matrix", "subtract-matrix", "mix-matrix", "add-matrix"):
other_matrix, other_row_headers, other_col_headers = MatlabTools.ReadMatrix(open(options.parameters[parameter], "r"),
headers=options.headers)
other_nrows, other_ncols = other_matrix.shape
if options.loglevel >= 2:
options.stdlog.write("# read second matrix from %s: %i x %i, %i row titles, %i colum titles.\n" %
(options.parameters[parameter],
other_nrows, other_ncols, len(other_row_headers), len(other_col_headers)))
parameter += 1
elif method == "normalize-by-min-diagonal":
for x in range(nrows):
for y in range(ncols):
m = min(raw_matrix[x, x], raw_matrix[y, y])
if m > 0:
matrix[x, y] = raw_matrix[x, y] / m
elif method == "normalize-by-column":
if nrows != ncols:
raise "only supported for symmeric matrices."
for x in range(nrows):
for y in range(ncols):
if raw_matrix[y, y] > 0:
matrix[x, y] = raw_matrix[x, y] / raw_matrix[y, y]
elif method == "normalize-by-value":
matrix = raw_matrix / float(options.parameters[parameter])
parameter += 1
elif method == "normalize-by-row":
if nrows != ncols:
raise "only supported for symmeric matrices."
for x in range(nrows):
for y in range(ncols):
if raw_matrix[y, y] > 0:
matrix[x, y] = raw_matrix[x, y] / raw_matrix[x, x]
elif method == "subtract-first-col":
for x in range(nrows):
for y in range(ncols):
matrix[x, y] -= raw_matrix[x, 0]
elif method.startswith("normalize-by-column"):
if method.endswith("max"):
f = max
elif method.endswith("min"):
f = min
elif method.endswith("median"):
f = scipy.median
elif method.endswith("mean"):
f = scipy.mean
elif method.endswith("total"):
f = sum
for y in range(ncols):
m = f(matrix[:, y])
if m != 0:
for x in range(nrows):
matrix[x, y] = matrix[x, y] / m
elif method.startswith("normalize-by-row"):
if method.endswith("max"):
f = max
elif method.endswith("min"):
f = min
elif method.endswith("median"):
f = scipy.median
elif method.endswith("mean"):
f = scipy.mean
elif method.endswith("total"):
f = sum
for x in range(nrows):
m = f(matrix[x, :])
if m != 0:
for y in range(ncols):
matrix[x, y] = raw_matrix[x, y] / m
elif method == "negzero2value":
# set zero/negative values to a value
for x in range(nrows):
for y in range(ncols):
if matrix[x, y] <= 0:
matrix[x, y] = options.value
elif method == "minmax":
# order each symmetric pair so that [x, y] holds the smaller and [y, x] the larger value
for x in range(nrows):
for y in range(ncols):
matrix[x, y], matrix[y, x] = \
min(matrix[x, y], matrix[y, x]), \
max(matrix[x, y], matrix[y, x])
elif method == "log":
# apply log to all values.
for x in range(nrows):
for y in range(ncols):
if matrix[x, y] > 0:
matrix[x, y] = math.log10(matrix[x, y])
elif method == "ln":
for x in range(nrows):
for y in range(ncols):
if matrix[x, y] > 0:
matrix[x, y] = math.log(matrix[x, y])
elif method == "transpose":
matrix = numpy.transpose(matrix)
row_headers, col_headers = col_headers, row_headers
nrows, ncols = ncols, nrows
elif method == "mul":
matrix = numpy.dot(matrix, numpy.transpose(matrix))
col_headers = row_headers
elif method == "multiply-by-value":
matrix *= options.value
elif method == "divide-by-value":
matrix /= options.value
elif method == "add-value":
matrix += options.value
elif method == "angle":
# write angles between col vectors
v1 = numpy.sqrt(numpy.sum(numpy.power(matrix, 2), 0))
matrix = numpy.dot(numpy.transpose(matrix), matrix)
row_headers = col_headers
nrows = ncols
for x in range(nrows):
for y in range(ncols):
matrix[x, y] /= v1[x] * v1[y]
elif method == "euclid":
# convert to euclidean distance matrix
matrix = numpy.zeros((ncols, ncols), numpy.float)
for c1 in range(0, ncols - 1):
for c2 in range(c1 + 1, ncols):
for r in range(0, nrows):
d = raw_matrix[r][c1] - raw_matrix[r][c2]
matrix[c1, c2] += (d * d)
matrix[c2, c1] = matrix[c1, c2]
matrix = numpy.sqrt(matrix)
row_headers = col_headers
nrows = ncols
elif method.startswith("symmetrize"):
f = method.split("-")[1]
if f == "max":
f = max
elif f == "min":
f = min
elif f == "mean":
f = lambda x, y: float(x + y) / 2
if nrows != ncols:
raise ValueError(
"symmetrize only available for symmetric matrices")
if row_headers != col_headers:
raise ValueError(
"symmetrize not available for permuted matrices")
for x in range(nrows):
for y in range(ncols):
matrix[x, y] = matrix[y, x] = f(
matrix[x, y], matrix[y, x])
elif method == "sub":
matrix = options.value - matrix
elif method in ("lower-bound", "upper-bound"):
boundary = float(options.parameters[parameter])
new_value = float(options.parameters[parameter + 1])
parameter += 2
if method == "upper-bound":
for x in range(nrows):
for y in range(ncols):
if matrix[x, y] > boundary:
matrix[x, y] = new_value
else:
for x in range(nrows):
for y in range(ncols):
if matrix[x, y] < boundary:
matrix[x, y] = new_value
elif method == "subtract-matrix":
matrix = matrix - other_matrix
elif method == "add-matrix":
matrix = matrix + other_matrix
elif method == "normalize-by-matrix":
# set 0s to 1 in the other matrix
for x in range(nrows):
for y in range(ncols):
if other_matrix[x, y] == 0:
other_matrix[x, y] = 1.0
matrix = matrix / other_matrix
elif method == "mix-matrix":
for x in range(len(other_row_headers) - 1):
for y in range(x + 1, len(other_col_headers)):
matrix[x, y] = other_matrix[x, y]
elif method == "set-diagonal":
value = float(options.parameters[parameter])
for x in range(min(nrows, ncols)):
matrix[x, x] = value
parameter += 1
elif method == "transpose":
matrix = numpy.transpose(raw_matrix)
row_headers, col_headers = col_headers, row_headers
elif method == "correspondence-analysis":
row_indices, col_indices = CorrespondenceAnalysis.GetIndices(
raw_matrix)
map_row_new2old = numpy.argsort(row_indices)
map_col_new2old = numpy.argsort(col_indices)
matrix, row_headers, col_headers = CorrespondenceAnalysis.GetPermutatedMatrix(raw_matrix,
map_row_new2old,
map_col_new2old,
row_headers=row_headers,
col_headers=col_headers)
elif method == "mask-rows":
r = set(row_names)
for x in range(len(row_headers)):
if row_headers[x] in r:
matrix[x, :] = options.value
elif method == "mask-columns":
r = set(column_names)
for x in range(len(col_headers)):
if col_headers[x] in r:
matrix[:, x] = options.value
elif method == "mask-rows-and-columns":
r = set(row_names)
c = set(column_names)
for x in range(len(row_headers)):
for y in range(len(col_headers)):
if row_headers[x] in r and col_headers[y] in c:
matrix[x, y] = options.value
raw_matrix = numpy.reshape(numpy.array(matrix), matrix.shape)
else:
# for simple re-formatting jobs
matrix = raw_matrix
if options.write_separators:
options.stdout.write(lines[chunks[chunk]])
MatlabTools.writeMatrix(sys.stdout, matrix,
value_format=options.format,
format=options.output_format,
row_headers=row_headers,
col_headers=col_headers)
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
[
"andreas.heger@gmail.com"
] |
andreas.heger@gmail.com
|
b62893ee1712e3ddf4365071e6596e2d820ac5dc
|
cf57cd3355471f035ca429302742b4eb4baf1214
|
/Comparações/SHI-TOMASI/SHI-TOMASI_sift.py
|
7a91ba9cb2b4ae56f47b6d8069c64cbee54c797b
|
[] |
no_license
|
RobotColony-UEFS/feature-match
|
c56d78230d86948e5612a9645c71a0647eb94604
|
ac421989aa1ee3893243122a0cf041b30e038a28
|
refs/heads/master
| 2022-11-27T15:31:20.570505
| 2020-08-04T19:24:17
| 2020-08-04T19:24:17
| 285,063,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,365
|
py
|
#coding: utf-8
import cv2
import numpy as np
import mysql.connector
import math
mydb = mysql.connector.connect(
host="localhost",
user="descritores",
passwd="12345678",
database="referencias"
)
def desvio (vetResult):
# Population standard deviation
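# i.e. sigma = sqrt( sum((x - mean)**2) / N ) over all N values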
soma = float(sum(vetResult))
media = soma/len(vetResult)
res = 0
for valor in vetResult:
res += ((valor - media)**2)
desvio = (math.sqrt(res/len(vetResult)))
return (media, desvio)
vet_matches = []
vet_corretos = []
img11 = cv2.imread("../../imgReferencia/img00.jpg", 0)
altura = img11.shape[0]
largura = img11.shape[1]
img1 = cv2.resize(img11, (int(largura*0.4), int(altura*0.4)))
corners11 = cv2.goodFeaturesToTrack(img1, 100, 0.01, 10)
corners1 = np.int0(corners11)
kp1 = cv2.KeyPoint_convert(corners1)
sift = cv2.xfeatures2d.SIFT_create()
bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
kp1, des1 = sift.compute(img1, kp1)
quantidadeImagens = 1
while(quantidadeImagens<=13):
acertos = 0
img22 = cv2.imread("../../imgTeste/img"+str(quantidadeImagens)+".jpg", 0)
altura2 = img22.shape[0]
largura2 = img22.shape[1]
img2 = cv2.resize(img22, (int(largura2*0.4), int(altura2*0.4)))
corners22 = cv2.goodFeaturesToTrack(img2, 100, 0.01, 10)
corners2 = np.int0(corners22)
kp2 = cv2.KeyPoint_convert(corners2)
kp2, des2 = sift.compute(img2, kp2)
mat = bf.match(des1,des2)
mat = sorted(mat, key = lambda x:x.distance)
matches = mat[0:150]
with open("../../imgTeste/img"+str(quantidadeImagens)+".txt",'r') as f:
texto=f.readlines()
posicao_x= np.float_(texto[0:4])
posicao_y = np.float_(texto[4:8])
min_x = float(min(posicao_x))
max_x = float(max(posicao_x))
min_y = float(min(posicao_y))
max_y = float(max(posicao_y))
if len(matches)>10:
src_pts = np.float32([ kp1[m.queryIdx].pt for m in matches ]).reshape(-1,1,2)
dst_pts = np.float32([ kp2[m.trainIdx].pt for m in matches ]).reshape(-1,1,2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
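# M maps reference-image points onto the test image; mask marks the RANSAC
# inliers (reprojection error below the 5.0-pixel threshold above)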
h,w = img1.shape
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(pts,M)
img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA)
for pos in dst_pts:
if((pos[0][0]>(min_x) and pos[0][0]<(max_x)) and (pos[0][1]>(min_y) and pos[0][1]<(max_y))):
acertos+=1
img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:],None,flags=2)
cv2.imwrite("../resultados/shiTomasi-sift/img"+str(quantidadeImagens)+".jpg", img3)
vet_matches.append(len(matches))
vet_corretos.append(acertos)
mycursor = mydb.cursor()
sql = "INSERT INTO shiTomasi_sift(Nome, Matches, Correto, ImgReferente) VALUES (%s, %s, %s, %s)"
valor = ("ShiTomasi-Sift"+str(quantidadeImagens), len(matches), acertos, "img"+str(quantidadeImagens)+".jpg")
mycursor.execute(sql, valor)
mydb.commit()
print(len(matches), acertos)
quantidadeImagens+=1
media_matches, desvio_matches = desvio(vet_matches)
media_corretos, desvio_corretos = desvio(vet_corretos)
porcentagem = (media_corretos/media_matches)*100
sql2 = "INSERT INTO medias_desvios(Nome, MediaMatches, DesvioMatches, MediaCorretos, DesvioCorretos, Porcentagem) VALUES (%s, %s, %s, %s, %s, %s)"
valor2 = ("shiTomasi_sift", media_matches, desvio_matches, media_corretos, desvio_corretos, porcentagem)
mycursor.execute(sql2, valor2)
mydb.commit()
|
[
"samuelreboucas07@hotmail.com"
] |
samuelreboucas07@hotmail.com
|
c7323c619a500829099442ad1c4249689bb1dc1e
|
ca292e954d548c62f0c4604dc46cb9faac914a2f
|
/max_of_two.py
|
b87410a3cb5d90d5d170799f4f46230a547ce7f4
|
[] |
no_license
|
rawgni/empireofcode
|
e73ee032266e85bb062ad03e34f8c37bf69072c3
|
8f00029ddc38759c77ac6308fe65ae07c44960fc
|
refs/heads/master
| 2021-06-08T22:50:44.828587
| 2016-11-29T13:59:00
| 2016-11-29T13:59:00
| 71,658,210
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38
|
py
|
def my_max(a, b):
return max(a,b)
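# e.g. my_max(3, 7) == 7 and my_max(-1.5, -2) == -1.5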
|
[
"ingwar.wirjawan@gmail.com"
] |
ingwar.wirjawan@gmail.com
|
b17874da1534c1635ec7a910c3ce1d32eda7ff50
|
5252efd0922ea5be93dfc63db6de282184505346
|
/ds/tests/test_linked_list.py
|
72dc77493dcc768bc3624b55b39b9e4ab6554da2
|
[] |
no_license
|
faddy/ds-with-python
|
157b35a5f22107f6dfba7604ed3ca87d33df6c5e
|
6fba0eeb4552fa03fcbfb2f84ce747a2dc2c3e79
|
refs/heads/master
| 2016-09-11T05:02:18.879067
| 2013-08-18T21:47:46
| 2013-08-18T21:47:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,453
|
py
|
import unittest
from data_structures.linked_lists import Node
from data_structures.linked_lists import UnorderedList
class TestNode(unittest.TestCase):
def setUp(self):
self.input_list = [0, -4, 10, None, 'one']
def test_object_creation(self):
node_list = [Node(item) for item in self.input_list]
for node in node_list:
self.assertIsNotNone(node)
def test_data_and_next(self):
node_list = [Node(item) for item in self.input_list]
for node, inp in zip(node_list, self.input_list):
self.assertEqual(node.get_data(), inp)
self.assertIsNone(node.get_next())
def test_setting_next(self):
node = Node([])
node.set_next(Node(3))
self.assertEqual(node.get_next().get_data(), 3)
class TestLinkedListCreation(unittest.TestCase):
def test_list_creation(self):
llist = UnorderedList()
self.assertIsNotNone(llist)
class TestLinkedList(unittest.TestCase):
def setUp(self):
self.llist = UnorderedList()
self.input_list = [0, -4, 10, None, 'one']
def populate_list(self):
for item in self.input_list:
self.llist.append(item)
def node_spitter(self):
node = self.llist.head
while node:
yield node
node = node.get_next()
def test_list_head(self):
self.assertIsNone(self.llist.head)
self.assertTrue(self.llist.is_empty())
def test_add_and_length(self):
self.populate_list()
self.assertEqual(self.llist.length(), len(self.input_list))
for i, j in zip(self.node_spitter(), self.input_list):
self.assertEqual(i.get_data(), j)
def test_index(self):
self.populate_list()
self.assertEqual(self.llist.index(0), 0)
self.assertEqual(self.llist.index(-4), 1)
self.assertEqual(self.llist.index(None), 3)
self.assertEqual(self.llist.index('one'), 4)
def test_return_last_node(self):
self.assertIsNone(self.llist._return_last_node())
self.populate_list()
self.assertEqual(self.llist._return_last_node().get_data(), self.input_list[-1])
def test_insert(self):
self.populate_list()
self.assertRaises(ValueError, self.llist.insert, -1, 5)
self.assertRaises(ValueError, self.llist.insert, len(self.input_list)+2, 5)
self.llist.insert(0, 'zeroth')
result = [n.get_data() for n in self.llist.get_node_generator()]
self.assertEqual(self.llist.head.get_data(), 'zeroth')
self.assertEqual(result, ['zeroth', 0, -4, 10, None, 'one'])
self.llist.insert(1, 'first')
result = [n.get_data() for n in self.llist.get_node_generator()]
self.assertEqual(result, ['zeroth', 'first', 0, -4, 10, None, 'one'])
self.llist.insert(6, 'sixth')
result = [n.get_data() for n in self.llist.get_node_generator()]
self.assertEqual(result, ['zeroth', 'first', 0, -4, 10, None, 'sixth', 'one'])
self.llist.insert(8, 'last')
result = [n.get_data() for n in self.llist.get_node_generator()]
self.assertEqual(result, ['zeroth', 'first', 0, -4, 10, None, 'sixth', 'one', 'last'])
def test_pop(self):
self.assertRaises(Exception, self.llist.pop)
self.populate_list()
result = []
while not self.llist.is_empty():
item = self.llist.pop()
result.append(item)
self.assertEqual(result, list(reversed(self.input_list)))
def test_search(self):
self.populate_list()
self.assertTrue(self.llist.search(10))
self.assertTrue(self.llist.search(None))
self.assertFalse(self.llist.search(123))
def test_remove(self):
self.populate_list()
self.llist.remove(10)
result = [n.get_data() for n in self.llist.get_node_generator()]
self.input_list.remove(10)
self.assertEqual(result, self.input_list)
self.llist.remove(None)
result = [n.get_data() for n in self.llist.get_node_generator()]
self.input_list.remove(None)
self.assertEqual(result, self.input_list)
self.llist.remove(0)
result = [x.get_data() for x in self.llist.get_node_generator()]
self.input_list.remove(0)
self.assertEqual(result, self.input_list)
if __name__ == '__main__':
unittest.main()
|
[
"fahadghanidgp@gmail.com"
] |
fahadghanidgp@gmail.com
|
d716a64d25d8ed53904876bd54c1a98a7b88deb5
|
9dab41a71bf19a9ad17ee3e9f77c0f58aebd1d6d
|
/python/uline/uline/uline/handlers/app/distributor/balance/distributorBalanceList.py
|
4116d637e99da40fb08daa5c8fdc82a1bdbb023b
|
[] |
no_license
|
apollowesley/Demo
|
f0ef8ec6c4ceb0aec76771da8dd9a62fb579eac8
|
471c4af95d3a7222d6933afc571a8e52e8fe4aee
|
refs/heads/master
| 2021-02-15T04:01:51.590697
| 2018-01-29T01:44:29
| 2018-01-29T01:44:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,367
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import tornado.web
import tornado.gen
from uline.handlers.baseHandlers import DtAdminHandler
from .form import DistributorBalanceList
from uline.public.constants import TO_PAY, PAY_CHANNEL
from datetime import timedelta, datetime
from uline.public.permit import check_permission
class DistributorBalanceListHandler(DtAdminHandler):
@tornado.web.authenticated
@check_permission
def prepare(self):
form = DistributorBalanceList(self)
if not form.validate():
self.redirect('/dist/balance/dt/list')
return
self.dt_daily_balance_no = form.ddb_no.data
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
data = self.do_execute()
self.render('distributor/balance/distributorBalanceList.html', data=data)
def do_execute(self):
dt_id = self.current_user
query = """select
to_char(ddbi.pay_start_time, 'YYYY-MM-DD HH24:MI:SS'),
to_char(ddbi.need_pay_time,'YYYY-MM-DD'),
ddbi.rcvAcctName,
ddbi.channel,
ddbi.rcvacctno,
ddbi.rcvBankName,
ddbi.tranAmt,
ddbi.pay_status,
ddbi.failure_details
from dt_daily_balance_info as ddbi
inner join dt_balance db on db.dt_id = ddbi.dt_id
where ddbi.dt_id=%(dt_id)s
and ddbi.dt_daily_balance_no=%(dt_daily_balance_no)s;"""
ret = self.db.selectSQL(query, {'dt_daily_balance_no': self.dt_daily_balance_no, 'dt_id': dt_id})
fields = ['create_at', 'need_pay_time', 'rcvAcctName', 'channel', 'balance_account', 'rcvBankName',
'tranAmt', 'pay_status', 'failure_details']
dt_info = dict(zip(fields, ret))
dt_info['tranAmt'], dt_info['pay_status'], dt_info['channel'] = dt_info['tranAmt'] / 100, \
TO_PAY[str(dt_info['pay_status'])], \
PAY_CHANNEL[str(dt_info['channel'])],
dt_info['need_pay_time'] = datetime.strptime(dt_info['need_pay_time'], '%Y-%m-%d') - timedelta(days=1)
dt_info['need_pay_time'] = datetime.strftime(dt_info['need_pay_time'], '%Y-%m-%d')
# TODO: missing data table for transfer (划付) status details
return dt_info
|
[
"36821277@qq.com"
] |
36821277@qq.com
|
6f4fda92cc404753602829b7b45f67c7d15c83ed
|
d3cc5966bccf06dd733bc6b51a0c4d9d8fc1baec
|
/ingest/ingest-equipment.py
|
0c39157df34390570655eea8020c053643c2272b
|
[] |
no_license
|
zhongh/Additive-Manufacturing-Processing-Ontology
|
0229464dfe9e474b1e2b50abefe48a6436cb4569
|
a729aef5db049de13718c9b4de56ad93f7ec7985
|
refs/heads/master
| 2020-03-23T15:18:22.076341
| 2016-11-30T21:11:26
| 2016-11-30T21:11:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,639
|
py
|
__author__ = 'congrui_li'
from SPARQLWrapper import SPARQLWrapper, JSON
from rdflib import Namespace, RDF
import json
import requests
import multiprocessing
from itertools import chain
import functools
import argparse
from Maybe import *
import collections
def load_file(filepath):
with open(filepath) as _file:
return _file.read().replace('\n', " ")
AMPO = Namespace("https://tw.rpi.edu/web/project/ampo#")
SIO = Namespace("http://semanticscience.org/ontology/sio.owl#")
FOAF = Namespace("http://xmlns.com/foaf/0.1/")
PROV = Namespace("http://www.w3.org/ns/prov#")
QUDT = Namespace("http://data.qudt.org/qudt/owl/1.0.0/qudt.owl#")
VITRO = Namespace("http://vitro.mannlib.cornell.edu/ns/vitro/0.7#")
BIBO = Namespace("http://purl.org/ontology/bibo/")
VCARD = Namespace("http://www.w3.org/2006/vcard/ns#")
VIVO = Namespace('http://vivoweb.org/ontology/core#')
get_equipment_query = load_file("queries/listEquip.rq")
describe_equipment_query = load_file("queries/describeEquip.rq")
# standard filters
non_empty_str = lambda s: True if s else False
has_label = lambda o: True if o.label() else False
def get_metadata(id):
return {"index": {"_index": "ampo", "_type": "equipment", "_id": id}}
def select(endpoint, query):
sparql = SPARQLWrapper(endpoint)
sparql.setQuery(query)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
return results["results"]["bindings"]
def describe(endpoint, query):
sparql = SPARQLWrapper(endpoint)
sparql.setQuery(query)
try:
return sparql.query().convert()
except RuntimeWarning:
pass
def has_type(resource, type):
for rtype in resource.objects(RDF.type):
if str(rtype.identifier) == str(type):
return True
return False
def get_equipment(endpoint):
r = select(endpoint, get_equipment_query)
return [rs["equipment"]["value"] for rs in r]
def describe_equipment(endpoint, equipment):
q = describe_equipment_query.replace("?equipment", "<" + equipment + ">")
return describe(endpoint, q)
def get_most_specific_type(equipment):
return Maybe.of(equipment).stream() \
.flatmap(lambda p: p.objects(VITRO.mostSpecificType)) \
.map(lambda t: t.label()) \
.filter(non_empty_str) \
.one().value
def get_processes(equipment):
return Maybe.of(equipment).stream() \
.flatmap(lambda p: p.objects(AMPO.isParticipantIn)) \
.filter(has_label) \
.map(lambda r: {"uri": str(r.identifier), "name": str(r.label())}).list()
def get_larger_equip(equipment):
return Maybe.of(equipment).stream() \
.flatmap(lambda p: p.objects(AMPO.isPartOf)) \
.filter(has_label) \
.map(lambda r: {"uri": str(r.identifier), "name": str(r.label())}).list()
def get_smaller_equip(equipment):
return Maybe.of(equipment).stream() \
.flatmap(lambda p: p.objects(AMPO.hasPart)) \
.filter(has_label) \
.map(lambda r: {"uri": str(r.identifier), "name": str(r.label())}).list()
def get_inputs(equipment):
return Maybe.of(equipment).stream() \
.flatmap(lambda p: p.objects(AMPO.hasInput)) \
.filter(has_label) \
.map(lambda r: {"uri": str(r.identifier), "name": str(r.label())}).list()
def get_attrs(equipment):
return Maybe.of(equipment).stream() \
.flatmap(lambda p: p.objects(AMPO.hasAttribute)) \
.filter(has_label) \
.map(lambda r: {"uri": str(r.identifier), "name": str(r.label())}).list()
def create_equipment_doc(equipment, endpoint):
graph = describe_equipment(endpoint=endpoint, equipment=equipment)
equ = graph.resource(equipment)
try:
name = equ.label()
except AttributeError:
print("missing name:", equipment)
return {}
doc = {"uri": equipment, "name": name}
most_specific_type = get_most_specific_type(equ)
if most_specific_type:
doc.update({"mostSpecificType": most_specific_type})
processes = get_processes(equ)
if processes:
new_processes = sorted(processes, key=lambda k: k['name'])
doc.update({"process": new_processes[0]})
larger_equip = get_larger_equip(equ)
if larger_equip:
doc.update({"largerEquip": larger_equip})
smaller_equip = get_smaller_equip(equ)
if smaller_equip:
doc.update({"smallerEquip": smaller_equip})
inputs = get_inputs(equ)
if inputs:
doc.update({"input": inputs})
attrs = get_attrs(equ)
if attrs:
doc.update({"attr": attrs})
return doc
def process_equipment(equipment, endpoint):
equ = create_equipment_doc(equipment=equipment, endpoint=endpoint)
es_id = equ["uri"]
return [json.dumps(get_metadata(es_id)), json.dumps(equ)]
def publish(bulk, endpoint, rebuild, mapping):
# if configured to rebuild the index,
# delete and then re-create the "ampo" index (via PUT request)
index_url = endpoint + "/ampo"
if rebuild:
requests.delete(index_url)
r = requests.put(index_url)
if r.status_code != requests.codes.ok:
print(r.url, r.status_code)
r.raise_for_status()
# push current equipment document mapping
mapping_url = endpoint + "/ampo/equipment/_mapping"
with open(mapping) as mapping_file:
r = requests.put(mapping_url, data=mapping_file)
if r.status_code != requests.codes.ok:
# new mapping may be incompatible with previous
# delete current mapping and re-push
requests.delete(mapping_url)
r = requests.put(mapping_url, data=mapping_file)
if r.status_code != requests.codes.ok:
print(r.url, r.status_code)
r.raise_for_status()
# bulk import new equipment documents
bulk_import_url = endpoint + "/_bulk"
r = requests.post(bulk_import_url, data=bulk)
if r.status_code != requests.codes.ok:
print(r.url, r.status_code)
r.raise_for_status()
def generate(threads, sparql):
pool = multiprocessing.Pool(threads)
params = [(equipment, sparql) for equipment in get_equipment(endpoint=sparql)]
return list(chain.from_iterable(pool.starmap(process_equipment, params)))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--threads', default=8, help='number of threads to use (default = 8)')
parser.add_argument('--es', default="http://localhost:9200", help="elasticsearch service URL")
parser.add_argument('--publish', default=False, action="store_true", help="publish to elasticsearch?")
parser.add_argument('--rebuild', default=False, action="store_true", help="rebuild elasticsearch index?")
parser.add_argument('--mapping', default="mappings/equipment.json", help="equipment elasticsearch mapping document")
parser.add_argument('--sparql', default='https://dofamp.tw.rpi.edu/fuseki/ampo/query', help='sparql endpoint')
parser.add_argument('out', metavar='OUT', help='elasticsearch bulk ingest file')
args = parser.parse_args()
# generate bulk import document for equipment
records = generate(threads=int(args.threads), sparql=args.sparql)
# save generated bulk import file so it can be backed up or reviewed if there are publish errors
with open(args.out, "w") as bulk_file:
bulk_file.write('\n'.join(records)+'\n')
# publish the results to elasticsearch if "--publish" was specified on the command line
if args.publish:
bulk_str = '\n'.join(records)+'\n'
publish(bulk=bulk_str, endpoint=args.es, rebuild=args.rebuild, mapping=args.mapping)
|
[
"lic10@rpi.edu"
] |
lic10@rpi.edu
|
48056c7a32622758cfb3818b9273a4f1de5b1921
|
0475b7d5791114c913e0ccc432ea7893fcd5182d
|
/webServer/webServer.py
|
f7b6750739694524227ed9c31fa897f53dcbaa67
|
[] |
no_license
|
jonzhaocn/python_projects
|
5c7717f5e81248e99f3252bba94c81d0cf3b6d5f
|
22d171aebd46590661c0ea3fc20a3fa5bef8bafd
|
refs/heads/master
| 2021-09-13T00:25:45.990594
| 2018-04-23T07:20:06
| 2018-04-23T07:20:06
| 87,550,515
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,828
|
py
|
import socket
import os
import logging
import subprocess
# A simple hand-written web server implementing GET and POST
class WebServer(object):
def __init__(self):
self.HOST = ''
self.PORT = 80
self.root_dir = 'd:/root_dir' # root directory of the served files
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # set address family and socket type
self.server.bind((self.HOST, self.PORT)) # bind the port
self.server.listen()
self.allowed_readable_text_file_types = ['.html', '.htm', '.txt', '.js', '.css'] # text file types allowed to be served
self.allowed_readable_img_file_types = ['.jpg', '.gif', '.png', '.jpeg']
self.allowed_readable_file = self.allowed_readable_text_file_types + self.allowed_readable_img_file_types
self.allow_show_dir = True # whether to show a directory listing when there is no index.html
self.HTTP_version = 'HTTP/1.x'
logging.basicConfig(level=logging.DEBUG, # configure logging
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='mylog.log',
filemode='w')
server_Logging = logging.StreamHandler()
server_Logging.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s: %(message)s')
server_Logging.setFormatter(formatter)
logging.getLogger('').addHandler(server_Logging)
def serve_forever(self): # main request-serving loop
while True:
client, address = self.server.accept() # accept a connection
request = client.recv(1024).decode()
if request is None or len(request) == 0:
continue
else:
request_list = request.split(' ')
method = request_list[0] # request method (GET or POST)
file_src = request_list[1] # requested file path
content = None
path = self.root_dir + file_src
logging.info(str(address) + ':' + method + ' ' + file_src)
if method == 'GET':
if os.path.exists(path): # the path exists
if os.path.isdir(path): # the path is a directory
if self.allow_show_dir: # directory listing is allowed
content = self.read_index_file(path)
if content is None:
content = self.display_dir_structure(path)
else:
content = self.get_head(200, '.html') + content
else: # directory listing is not allowed
content = self.read_index_file(path) # check whether the directory contains an index.html
if content is None:
content = self.get_head(403, '.html') + self.create_info_html("Forbidden")
else:
content = self.get_head(200, '.html') + content
elif os.path.isfile(path): # the path is a file
file_type = self.get_filnameExt(path)
if file_type in self.allowed_readable_file: # the file type may be read
content = self.get_head(200, '.html') + self.read_file(path)
else:
content = self.get_head(403, '.html') + self.create_info_html("Forbidden")
else: # the path does not exist
content = self.get_head(404, '.html')+self.create_info_html("file not found")
client.sendall(content)
client.close()
if method == 'POST': # POST request
# new_process = subprocess.Popen('')
content = None
if os.path.exists(path): # the form-handler script exists
form = request.split('\r\n')[-1] # the form body is the last line of the request
form_list = form.split('&') # multiple form fields are separated by '&'
submit_args = ''
for item in form_list:
submit_args = submit_args + item + ";" # collect the form fields
# python post.py firstname=1;lastname=2
args = ['python', path, submit_args]
try:
result = subprocess.check_output(args, shell=True) # run the handler script
except subprocess.CalledProcessError as e:
result = self.create_info_html('error')
content = self.get_head(200, '.html') + result
else: # the form-handler script does not exist
content = self.get_head(404, '.html') + self.create_info_html('file not found')
client.sendall(content)
client.close()
def display_dir_structure(self, path): # render a directory listing for the given path
dir_structure = '''
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=mbcs">
<title>Directory listing for {path}</title>
</head>
<body>
<h1>Directory listing for {path}</h1>
<hr>
<ul>
'''
for file in os.listdir(path):
dir_structure += '<li><a href=\"'+file+'\">'+file+'</a></li>'
dir_structure += '''
</ul>
<hr>
</body>
</html>'''
index = len(self.root_dir)
path = path[index:]
dir_structure = dir_structure.format(path=path).encode()
dir_structure = self.get_head(200, '.html')+dir_structure
return dir_structure
    def get_head(self, status_code, file_type):  # build the response status line and headers
        status_code_dict = {
            100: 'Continue', 101: 'Switching Protocols', 102: 'Processing', 200: 'OK',
            400: 'Bad Request', 401: 'Unauthorized', 402: 'Payment Required', 403: 'Forbidden',
            404: 'Not Found'
        }
        # header lines are terminated by CRLF per the HTTP specification
        content = self.HTTP_version + ' ' + str(status_code) + ' ' + status_code_dict[status_code] + '\r\n'
        if file_type in self.allowed_readable_text_file_types:
            content += 'Content-Type: text/' + file_type.split('.')[-1] + '\r\n' + '\r\n'
        elif file_type in self.allowed_readable_img_file_types:
            content += 'Content-Type: image/' + file_type.split('.')[-1] + '\r\n' + '\r\n'
        return content.encode()
    def read_file(self, path):  # read the given file and return its bytes
        file = open(path, 'rb')
        content = file.read()
        file.close()
        return content

    def read_index_file(self, path):  # look for index.html in the given directory and return its content
        for file in os.listdir(path):
            parts = file.split('.')  # avoid shadowing the built-in list()
            if len(parts) == 2 and parts[0].upper() == 'INDEX' and parts[1] == 'html':
                return self.read_file(path + '/' + file)
        return None
    def create_info_html(self, info):  # build a small HTML page carrying the given message
content = '''
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=mbcs">
<title>{info}</title>
</head>
<body>
<h1>{info}</h1>
</body>
</html>
'''.format(info=info).encode()
return content
    def get_filnameExt(self, filename):  # return the file's extension
        (filepath, tempfilename) = os.path.split(filename)
        (shotname, extension) = os.path.splitext(tempfilename)
        return extension
if __name__ == '__main__':
server = WebServer()
server.serve_forever()
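# A quick manual smoke test (hypothetical URLs -- substitute the actual values
# of self.HOST and self.PORT, which are defined earlier in this file):
#   curl -v http://HOST:PORT/index.html
#   curl -d "firstname=1&lastname=2" http://HOST:PORT/post.py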
|
[
"1044264932@qq.com"
] |
1044264932@qq.com
|
690ebfa7539e477c986cc44439b64ed513a1e44b
|
6356b828b1209c409be87783107ad2e96f7fc0e4
|
/data/main.py
|
aff69002d470a47b481ce58543aede4cf670b59e
|
[
"MIT"
] |
permissive
|
vkazei/deeplogs
|
01e4d1eedbb220b921a2ccd7a2b015b684006086
|
4f6f853ce608a59e9d4b1a3160eb6b0035f333c0
|
refs/heads/master
| 2021-07-09T20:01:48.121545
| 2020-09-15T16:14:15
| 2020-09-15T16:14:15
| 194,487,355
| 33
| 18
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 37,572
|
py
|
#%% [markdown]
# # Smart velocity analysis : mapping raw data to velocity logs
#%%
#(c) Vladimir Kazei, Oleg Ovcharenko; KAUST 2020
# cell with imports
import importlib
import multiprocessing
import os
import sys
import time
import pickle
import threading
import random
# learning
import keras
# madagascar API
import m8r as sf
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import seaborn
import tensorflow as tf
# images
from IPython import get_ipython
from keras import backend as K
from keras.utils import multi_gpu_model
from keras.callbacks import (EarlyStopping, ModelCheckpoint, ReduceLROnPlateau,
TensorBoard)
from keras.layers import (AveragePooling2D, BatchNormalization, Conv2D, Dense, Lambda,
Dropout, Flatten, MaxPool2D, Reshape, GaussianNoise, GaussianDropout)
from keras.models import load_model
from numpy.random import randint, seed
from scipy import ndimage
from skimage.transform import resize
from skimage.util import view_as_windows
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
#import styler
from myutils import (cd, cmd, const,
elastic_transform, plt_nb_T, toc, aug_flip, upsample,
merge_dict, np_to_rsf, rsf_to_np, nrms,
tf_random_flip_channels)
from myutils import const as c
from generate_data import (generate_model, show_model_generation,
alpha_deform, sigma_deform,
generate_all_data, generate_rsf_data)
seed()
# set up matplotlib
matplotlib.rc('image', cmap='RdBu_r')
seaborn.set_context('paper', font_scale=5)
CUDA_VISIBLE_DEVICES = "0"
os.environ["CUDA_VISIBLE_DEVICES"]=CUDA_VISIBLE_DEVICES
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# Madagascar binaries will be stored in DATAPATH (RAM on Linux recommended)
cmd("mkdir /dev/shm/RSFTMP")
cmd("chmod 777 /dev/shm/RSFTMP")
os.environ["DATAPATH"]="/dev/shm/RSFTMP/"
# execution flags
generate_rsf_data_flag = True
retrain_flag = False #(sys.argv[1] == "--retrain")
print(f"retrain_flag = {retrain_flag}")
print(type(retrain_flag))
tic_total = time.time()
#%% [markdown]
# ## Introduction
#
# ### Why?
# FWI provides high-resolution models, yet it is computationally expensive and can fail when low frequencies are missing.
#
# Velocity analysis, on the other hand, is computationally cheap, but limited by assumptions on the background medium.
#
# ### Goal
# Combine advantages of both methods with deep learning
#
# ### Solution
# We will train a deep convolutional neural network to perform velocity analysis in inhomogeneous media
#%% [markdown]
# We estimate velocity $v(x_{CMP}, z)$ from the pressure field
# $p_{obs}(x_{CMP}-\varepsilon : x_{CMP}+\varepsilon,\ 0:h_{max},\ f)$, where
# $x_{CMP}$ is the central midpoint and
# $p_{obs}$ is the observed pressure.
#
# $\varepsilon = 0$ in this first part of the application => a single CMP as input
#%% [markdown]
# ## Method
#
# 0) generate a model set
# 1) generate seismic data set
# 2) build neural network
# 3) train neural network
# 4) test it on a model that it has not seen
#%% [markdown]
# ## Model generation
#
# we utilize a common deep learning image augmentation technique -- elastic transform
#%%
show_model_generation()
#%% [markdown]
# ## Gaussian fields to generate a coordinate shift for laterally smooth models
#
#
#
# ### Large correlation radius in horizontal direction -- to keep it almost horizontally layered
#
# ### Small correlation radius in vertical direction -- to make it represent different layering scenarios
#
# ### Same parameters but different fields for horizontal and vertical components
#
# ### Large vertical shifts and small horizontal -- to keep it laterally slowly varying
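#%%
# A minimal sketch (an assumption -- the repository's actual implementation
# lives in generate_data.py / myutils.elastic_transform and is not shown here)
# of the anisotropic Gaussian random field described above: strong smoothing
# along x keeps layers nearly horizontal, weak smoothing along z varies the
# layering; the resulting field serves as a vertical coordinate shift.
def sketch_shift_field(nz=100, nx=400, sigma_z=2.0, sigma_x=50.0, amplitude=20.0):
    field = np.random.randn(nz, nx)  # iid Gaussian noise
    field = ndimage.gaussian_filter(field, sigma=(sigma_z, sigma_x))  # anisotropic correlation
    return amplitude * field / (np.abs(field).max() + 1e-12)  # scale to roughly +-amplitude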
#%% [markdown]
# ## Modeling data with constant offset on GPU with Madagascar
#%%
# Setting up parameters
_vel = generate_model()
N = np.shape(_vel)
dt = c.dt
dx = c.dx
T_max = c.T_max
nt = c.nt
print(f"number of time steps = {nt}")
# check stability
print(f"you chose dt = {dt}, dt < {dx/np.max(_vel):.4f} should be chosen for stability \n")
# force stability
assert dt < dx/np.max(_vel)
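# the check above is the CFL-type stability criterion for explicit
# time-stepping: dt must stay below dx/v_max (up to a scheme-dependent constant)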
# a Ricker wavelet's spectrum is roughly bounded by 3*f_dominant,
# so the principally acceptable sampling rate would be
central_freq = c.central_freq
print(f"dt from Nyquist criterion is {1/(2*3*central_freq)}")
print(f"dt chosen for CNN is {c.jdt*dt}, which is {(1/(3*central_freq))/(c.jdt*dt)} samples per cycle")
#%% [markdown]
# ## Read data into numpy and check that the number of logs is the same as number of shots
#%%
nCMP=21
def read_rsf_XT(shots_rsf='shots_cmp_full.rsf', logs_rsf='logs_full.rsf', j_log_z=c.jlogz):
X = rsf_to_np(shots_rsf)
# single model exception
if X.ndim == 3:
X = np.expand_dims(X, axis=0)
    # fold the offset-flipped copy of each gather into the original and keep a
    # single half of the offsets (the two halves are approximately redundant by symmetry)
    X_f = np.flip(X, axis=2)
    X = np.maximum(np.abs(X), np.abs(X_f)) * np.sign(X + X_f)
    X = X[:, :, :(np.shape(X)[2] + 1) // 2, :]
T = rsf_to_np(logs_rsf)
# single model exception
if T.ndim == 2:
T = np.expand_dims(T, axis=0)
# decimate logs in vertical direction --2 times by default
T = resize(T, (*T.shape[0:2], np.shape(T)[2] // j_log_z))
T_size = np.shape(T)
print(T_size)
# ensure that the number of logs is equal to the number of CMPs
assert (X.shape[0:2] == T.shape[0:2])
return X, T
#%%
while not os.path.exists('new_data_ready'):
time.sleep(1)
print("waiting for new data, run python generate_data.py if you didn't", end="\r")
cmd("rm new_data_ready")
#%%
X, T = read_rsf_XT(shots_rsf='shots_cmp_full.rsf', logs_rsf='logs_full.rsf')
T_multi = view_as_windows(T, (1, nCMP, T.shape[2])).squeeze().reshape((-1, nCMP, T.shape[2]))[:,nCMP//2,:].squeeze()
# create scaler for the outputs
T_scaler = StandardScaler().fit(T_multi)
scale = np.copy(T_scaler.scale_)
mean = np.copy(T_scaler.mean_)
np.save("scale", scale)
np.save("mean", mean)
#%%
# neutralize the scaler: the output normalization is instead baked into the
# network's final Lambda layer (see create_model)
T_scaler.scale_[:] = 1
T_scaler.mean_[:] = 0
# X has the format (model, CMP, offset, time)
plt_nb_T(X[1,:10, -1,:200], title="Common offset (600 m) gather", dx=c.dx*c.jgx*2, dz=1e3*dt*c.jdt,
origin_in_middle=True, ylabel="Time(s)", fname="../latex/Fig/X_short_offset", vmin=-1e-4, vmax=1e-4)
plt_nb_T(T[1,:10,:100], title="Model", dx=c.dx*c.jgx*2, dz=c.dx*c.jlogz,
origin_in_middle=True, ylabel="Time(s)", fname="../latex/Fig/X_short_offset")
#X=X[:,:-3,:,:]
#%%
def prepare_XT(X,T, T_scaler=T_scaler, gen_plots=False):
nCMP = 21
    # sliding windows of nCMP neighboring CMP gathers form the network inputs
    X_multi = view_as_windows(X, (1, nCMP, X.shape[2], X.shape[3])).squeeze().reshape((-1, nCMP, X.shape[2], X.shape[3]))
    # move the CMP axis to the last (channel) position
    X_multi = np.swapaxes(X_multi, 1, 3)
    X_multi = np.swapaxes(X_multi, 1, 2)
    # the target is the velocity log under the central CMP of each window
    T_multi = view_as_windows(T, (1, nCMP, T.shape[2])).squeeze().reshape((-1, nCMP, T.shape[2]))[:, nCMP//2, :].squeeze()
X_scaled_multi = X_multi
T_scaled_multi = T_scaler.transform(T_multi)
# extract central CMPs for singleCMP network
X_scaled = X_scaled_multi[:,:,:,nCMP//2:nCMP//2+1]
T_scaled = T_scaled_multi
#%%
if gen_plots:
plt_nb_T(T_multi, dx=c.jgx*c.dx, dz=c.jlogz*c.dx, fname="../latex/Fig/T_multi")
plt_nb_T(1e3*T_scaled, dx=c.jgx*dx, dz=c.jlogz*c.dx, fname="../latex/Fig/T_scaled")
#%%
# show single training sample
sample_reveal = nCMP
plt_nb_T(1e3*np.concatenate((np.squeeze(X_scaled_multi[sample_reveal,:,:,-1]), np.flipud(np.squeeze(X_scaled_multi[sample_reveal,:,:,0]))), axis=0),
title="CMP first | CMP last", dx=200, dz=1e3*dt*c.jdt,
origin_in_middle=True, ylabel="Time(s)", fname="../latex/Fig/X_scaled", cbar_label = "")
print(np.shape(1e3*T_scaled[sample_reveal-(nCMP+1)//2:sample_reveal+(nCMP-1)//2:nCMP]))
plt_nb_T(1e3*T_multi[sample_reveal-(nCMP-1)//2:sample_reveal+(nCMP-1)//2,:],
dx=100, dz=c.dx*c.jlogz,
title="scaled velocity logs")
return X_scaled_multi, T_scaled_multi
X_scaled_multi, T_scaled_multi = prepare_XT(X,T, T_scaler=T_scaler, gen_plots=False)
#%% plot single input into the network
plt_nb_T(1e3*np.reshape(X[0, :21, :, :], (21*X.shape[2], X.shape[3])), vmin=-0.1, vmax=0.1, figsize=(48,12), no_labels=True, cbar=True, fname="../latex/Fig/input_multi")
#%% [markdown]
# # CNN construction single CMP -> log under the CMP
# 1D total variation regularization for the output (currently disabled: weight 0)
def tv_loss(y_true, y_pred):
    # b, h, w, c = img.shape.as_list()
    a = K.abs(y_pred[:, :-1] - y_pred[:, 1:])
    tv = 0.0 * K.mean(a, axis=-1)  # TV weight set to zero, so the loss reduces to MSE
    total = tv + K.mean(K.square(y_pred - y_true), axis=-1)
    return total
def random_channel_flip(x):
    # augmentation: randomly reverse the CMP (channel) order, at training time only
    print(x.shape)
    return K.in_train_phase(tf_random_flip_channels(x), x)
def R2(y_true, y_pred):
SS_res = K.sum(K.square(y_true-y_pred))
SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
return (1 - SS_res/(SS_tot + K.epsilon()))
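# R2 above is the coefficient of determination, 1 - SS_res/SS_tot, computed
# batch-wise on the scaled logs; K.epsilon() guards against division by zero.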
def create_model(inp_shape, out_shape, jlogz=c.jlogz):
model = keras.models.Sequential()
activation = 'elu'
padding = 'same'
kernel_size = (3, 11)
model.add(Lambda(random_channel_flip, input_shape=inp_shape, output_shape=inp_shape))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=32, kernel_size=kernel_size, activation=activation, padding=padding, input_shape=inp_shape))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=32, kernel_size=kernel_size, strides=(2,2), activation=activation, padding=padding))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=64, kernel_size=kernel_size, activation=activation, padding=padding))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=64, kernel_size=kernel_size, strides=(2,2), activation=activation, padding=padding))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=128, kernel_size=kernel_size, activation=activation, padding=padding))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=128, kernel_size=kernel_size, strides=(2,2), activation=activation, padding=padding))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=64, kernel_size=kernel_size, activation=activation, padding=padding))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=64, kernel_size=kernel_size, activation=activation, padding=padding))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=32, kernel_size=kernel_size, activation=activation, padding=padding))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=32, kernel_size=kernel_size, activation=activation, padding=padding))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=16, kernel_size=kernel_size, activation=activation, padding=padding))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=16, kernel_size=kernel_size, activation=activation, padding=padding))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=8, kernel_size=kernel_size, activation=activation, padding=padding))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=8, kernel_size=kernel_size, activation=activation, padding=padding))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=4, kernel_size=kernel_size, activation=activation, padding=padding))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=4, kernel_size=kernel_size, activation=activation, padding=padding))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=2, kernel_size=kernel_size, activation=activation, padding=padding))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=2, kernel_size=kernel_size, activation=activation, padding=padding))
model.add(BatchNormalization())
model.add(GaussianNoise(0.1))
model.add(Conv2D(filters=1, kernel_size=(3, 15), activation='linear', padding="valid"))
model.add(Flatten())
    # undo the output normalization inside the network: y = x * scale + mean
    # (note the calls .squeeze() -- passing the unbound method would fail)
    model.add(Lambda(lambda x: K.tf.add(K.tf.multiply(x, K.variable(scale.squeeze())),
                                        K.variable(mean.squeeze()))))
    return model
#%%
class threadsafe_iter:
"""Takes an iterator/generator and makes it thread-safe by
serializing call to the `next` method of given iterator/generator.
"""
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
def __iter__(self):
return self
def __next__(self):
with self.lock:
return self.it.__next__()
def threadsafe_generator(f):
"""A decorator that takes a generator function and makes it thread-safe.
"""
def g(*a, **kw):
return threadsafe_iter(f(*a, **kw))
return g
@threadsafe_generator
def batch_generator(X, T, T_scaler=T_scaler, batch_size = None):
batch=[]
print("generator restarted !!!!!!!!!!!!!!!!!!!!!!!!!!! waiting for new data")
while not os.path.exists("new_data_ready"):
time.sleep(1)
while True:
# it might be a good idea to shuffle your data before each epoch
# for iData in range(40):
# print(f"loading NEW DATA {iData}")
# X_rsf, T_rsf = read_rsf_XT(shots_rsf=f'/data/ibex_data/fullCMP_{iData}/shots_cmp_full.hh',
# logs_rsf=f'/data/ibex_data/fullCMP_{iData}/logs_cmp_full.hh')
# X, T = prepare_XT(X_rsf, T_rsf, T_scaler)
# indices = np.arange(len(X))
# np.random.shuffle(indices)
# for i in indices:
# # if os.path.exists("new_data_ready"):
# # break
# batch.append(i)
# if len(batch)==batch_size:
# yield X[batch], T[batch]
# batch=[]
if os.path.exists("new_data_ready"):
cmd("rm new_data_ready")
X_rsf, T_rsf = read_rsf_XT(shots_rsf='shots_cmp_full.rsf', logs_rsf='logs_full.rsf')
#cmd("ssh glogin.ibex.kaust.edu.sa 'rm ~/log_estimation/data/new_data_ready'")
X, T = prepare_XT(X_rsf, T_rsf, T_scaler)
print("new data loaded")
else:
print("reusing the old data")
indices = np.arange(len(X))
np.random.shuffle(indices)
print("indices reshuffled")
for i in indices:
if os.path.exists("new_data_ready"):
break
batch.append(i)
if len(batch)==batch_size:
yield X[batch], T[batch]
batch=[]
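# Design note: the generator above hot-swaps its training set. It polls for a
# "new_data_ready" flag file dropped by generate_data.py; when the flag
# appears it reloads shots/logs from disk, otherwise it keeps reshuffling and
# serving the data it already holds.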
#%%
# Init callbacks
def train_model(prefix="multi", X_scaled=X_scaled_multi, T_scaled=T_scaled_multi, weights=None):
cmd("rm new_data_ready")
#cmd("ssh 10.109.66.7 'rm ~/log_estimation/data/new_data_ready'")
lr_start = 0.001
    if weights is not None:
        lr_start = 1e-5
net = create_model(np.shape(X_scaled)[1:], np.shape(T_scaled)[1:])
net.compile(loss=tv_loss,
optimizer=keras.optimizers.Nadam(lr_start),
metrics=[R2])
#net.summary()
    if weights is not None:
        net.load_weights(weights)
early_stopping = EarlyStopping(monitor='val_loss', patience=21)
model_checkpoint = ModelCheckpoint("trained_net",
monitor='val_loss',
save_best_only=True,
verbose=1,
period=5)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
patience=7, min_lr=1e-5, verbose=1)
    # validation reuses the current data snapshot; the generator keeps
    # streaming freshly generated models for training
    X_valid = X_scaled
    T_valid = T_scaled
    print(f"X validation data size = {np.shape(X_valid)}")
# TRAINING
batch_size = 64
    # we flip every batch, so going through the whole data set needs twice as many batches
steps_per_epoch = len(X_scaled)//batch_size
print(f"Batch size = {batch_size}, batches per epoch = {steps_per_epoch}")
history = net.fit_generator(batch_generator(X_scaled, T_scaled, batch_size=batch_size),
validation_data=(X_valid, T_valid),
epochs=200,
verbose=2,
shuffle=True,
max_queue_size=200,
workers=10,
use_multiprocessing=False,
steps_per_epoch = steps_per_epoch,
callbacks=[model_checkpoint,
reduce_lr,
early_stopping])
print("Optimization Finished!")
return net, history
def load_history(fname_history):
with open(fname_history,'rb') as f:
return pickle.load(f)
def save_history(history, fname_history):
with open(fname_history,'wb') as f:
pickle.dump(history, f)
def train_ensemble(prefix, X_scaled, T_scaled):
valid_best=1e100
net_dict = {}
history_dict = {}
for iNet in range(5):
if retrain_flag:
weights = f"{prefix}_weights.h5"
history_prev = load_history(f"history_{prefix}")
else:
weights = None
net, history = train_model(prefix=prefix, X_scaled=X_scaled, T_scaled=T_scaled, weights=weights)
cur_val_loss = np.min(history.history['val_loss'])
print(cur_val_loss)
if cur_val_loss < valid_best:
valid_best = cur_val_loss
net_best = net
history_best = history.history
net_dict[f"{iNet}"] = net
history_dict[f"{iNet}"] = history.history
if retrain_flag:
history_best = merge_dict(history_prev, history_best)
net_best.save_weights(f"{prefix}_weights.h5")
save_history(history_best, f"history_{prefix}")
return net_dict, history_dict, net_best, history_best
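# Ensemble note: train_ensemble trains five networks from scratch and keeps the
# one with the best validation loss for checkpointing; at test time
# (test_on_model) predictions are averaged over all members and over the
# CMP-flipped inputs.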
#singleCMP_net_dict, singleCMP_net_best, history_best = train_ensemble("singleCMP", X_scaled, T_scaled)
#cmd("rm new_data_ready")
multiCMP_net_dict, history_dict, multiCMP_net_best, history_best = train_ensemble("multiCMP", X_scaled_multi, T_scaled_multi)
# stop generator
cmd("touch training_finished")
#%% quick workarounds ("kostyli") for testing
history_best = load_history("history_multiCMP")
prefix = "multiCMP"
plt.figure(figsize=(16,9))
r2_arr = np.zeros((len(history_dict), 2))
for iNet in history_dict.keys():
r2_arr[int(iNet),0] = history_dict[iNet]['R2'][-1]
r2_arr[int(iNet),1] = history_dict[iNet]['val_R2'][-1]
print(f"netN={iNet}, R2={history_dict[iNet]['R2'][-1]},{history_dict[iNet]['val_R2'][-1]}")
plt.plot(history_dict[iNet]['R2'][:],'b--')
plt.plot(history_dict[iNet]['val_R2'][:],'r')
print(f"Average R2={np.mean(r2_arr, 0)}")
plt.plot(history_best['R2'][:],'b--', label='Training R2', linewidth=3)
plt.plot(history_best['val_R2'][:],'r', label='Validation R2', linewidth=3)
plt.xlabel("epoch")
plt.legend()
plt.savefig(f"../latex/Fig/{prefix}_R2", bbox_inches='tight')
plt.grid(True,which="both",ls="-")
plt.show(block=False)
plt.pause(1)
plt.close()
plt.figure(figsize=(16,9))
for iNet in history_dict.keys():
print(iNet)
plt.plot(history_dict[iNet]['loss'][:],'b--')
plt.plot(history_dict[iNet]['val_loss'][:],'r')
plt.semilogy(history_best['loss'][:],'b--', label='Training loss', linewidth=3)
plt.semilogy(history_best['val_loss'][:],'r', label='Validation loss', linewidth=3)
plt.xlabel("epoch")
plt.legend()
plt.savefig(f"../latex/Fig/{prefix}_loss", bbox_inches='tight')
plt.grid(True,which="both",ls="-")
plt.show(block=False)
plt.pause(1)
plt.close()
# #%%
# multiCMP_net_dict={}
# net_best = create_model(np.shape(X_scaled_multi)[1:], np.shape(T_scaled_multi)[1:])
# net_best.summary()
# net_best.compile(loss=tv_loss,
# optimizer=keras.optimizers.Nadam(1e-6),
# metrics=[R2])
# net_best.load_weights("multiCMP_weights.h5")
# multiCMP_net_dict["0"] = net_best
#%% [markdown]
# # We trained the neural net, it fits the training and validation data...
#
# ## How well does it fit?
#
# ## Does it fit stretched marmousi itself?
#
# ## Could we learn more from models like this?
#
# ## Does it work on something different?
#
# ## When does it break?!
#%% [markdown]
# # Testing
#%% uncomment for loading initial weights
# singleCMP_net_dict={}
# net = create_model(np.shape(X_scaled)[1:], np.shape(T_scaled)[1:])
# net.summary()
# net.load_weights("singleCMP_weights.h5")
# singleCMP_net_dict["0"] = net
# multiCMP_net_dict={}
# netM = create_model(np.shape(X_scaled_multi)[1:], np.shape(T_scaled_multi)[1:])
# netM.summary()
# netM.load_weights("multiCMP_weights.h5")
# multiCMP_net_dict["0"] = netM
def test_on_model(folder="marmvel1D",
net_dict=None,
prefix="singleCMP",
model_filename=None,
distort_flag=False,
stretch_X=None,
nCMP_max=nCMP,
generate_rsf_data_flag=True,
jlogz=c.jlogz,
jgx=c.jgx, sxbeg=c.sxbeg, gxbeg=c.gxbeg):
    if model_filename is None:
model_filename=f"{folder}.hh"
fig_path = f"../latex/Fig/test_{prefix}_{folder}"
# expand model
model_output="vel_test.rsf"
print(model_output)
vel_test = generate_model(model_input=model_filename,
model_output=model_output,
stretch_X=stretch_X,
random_state_number=c.random_state_number,
distort_flag=distort_flag,
crop_flag=False,
test_flag=True)
# model data
if generate_rsf_data_flag:
cmd(f"mkdir {folder}")
cmd(f"cp {model_output} {folder}/{model_output}")
# check stability
print(f"you chose dt = {dt}, dt < {dx/np.max(vel_test):.4f} should be chosen for stability \n")
# force stability
assert dt < dx/np.max(vel_test)
generate_rsf_data(model_name=f"{folder}/vel_test.rsf",
shots_out=f"{folder}/shots_cmp_test.rsf",
logs_out=f"{folder}/logs_test.rsf")
# read data
X_test, T_test = read_rsf_XT(shots_rsf=f"{folder}/shots_cmp_test.rsf",
logs_rsf=f"{folder}/logs_test.rsf")
nCMP = int(net_dict["0"].input.shape[3])
# X_scaled, T_test = make_multi_CMP_inputs(X_scaled, T_test, nCMP_max)
X_scaled, T_scaled = prepare_XT(X_test, T_test)
T_test = T_scaler.inverse_transform(T_scaled)
sample_reveal = nCMP_max+1
plt_nb_T(1e3*np.concatenate((np.squeeze(X_scaled[sample_reveal,:,:,-1]), np.flipud(np.squeeze(X_scaled[sample_reveal,:,:,0]))), axis=0),
title="CMP first | CMP last", dx=200, dz=1e3*dt*c.jdt,
vmin=-0.1, vmax=0.1,
origin_in_middle=True, ylabel="Time(s)", fname=f"{fig_path}_X_scaled", cbar_label = "")
if nCMP == 1:
X_scaled = X_scaled[:,:,:,nCMP_max//2:nCMP_max//2+1]
# predict with all networks and save average
T_pred_total = np.zeros_like(net_dict["0"].predict(X_scaled))
T_pred_dict = np.zeros((2*len(net_dict), T_pred_total.shape[0], T_pred_total.shape[1]))
iNet=0
for net in net_dict.values():
T_pred_tmp = net.predict(X_scaled)
T_pred_tmp = T_scaler.inverse_transform(T_pred_tmp)
T_pred_dict[iNet,:,:] = T_pred_tmp
T_pred_tmp = net.predict(np.flip(X_scaled, axis=3))
T_pred_tmp = T_scaler.inverse_transform(T_pred_tmp)
T_pred_dict[iNet+1,:,:] = T_pred_tmp
iNet += 2
T_pred = np.mean(T_pred_dict, axis=0)
# interpolation for display
ups_plot = c.ups_plot
T_pred = upsample(T_pred, ups_plot)
T_test = upsample(T_test, ups_plot)
np_to_rsf(T_pred, f"{folder}/logs_pred.rsf", d1=25, d2=25)
np_to_rsf(T_test, f"{folder}/logs_test_m.rsf", d1=25, d2=25)
variance = np.var(T_pred_dict, axis=0)
plt_nb_T(upsample(np.sqrt(variance), ups_plot), title="Standard deviation",
dx=jgx*dx/ups_plot, dz=jlogz*dx/ups_plot,
fname=f"{fig_path}_inverted_std_dev",
vmin=0, vmax=1, figsize=(16,6))
plt_nb_T(T_pred-T_test, title="Pred-True",
dx=jgx*dx, dz=jlogz*dx,
fname=f"{fig_path}_inverted_error",
vmin=-1, vmax=1)
plt_nb_T(T_pred, title=f"DL, R2 = {r2_score(T_test.flatten(), T_pred.flatten()):.2f}, NRMS={nrms(T_pred, T_test):.1f}%",
dx=jgx*dx/ups_plot, dz=jgx*dx/ups_plot,
vmin=np.min(1e-3*T_test),
vmax=np.max(1e-3*T_test),
fname=f"{fig_path}_inverted", figsize=(16,6))
plt_nb_T(T_test,
dx=jgx*dx/ups_plot, dz=jgx*dx/ups_plot,
vmin=np.min(1e-3*T_test),
vmax=np.max(1e-3*T_test),
fname=f"{fig_path}_true",
title=f"True model",
figsize=(16,6))
#%%
def run_all_tests(net_dict=None, prefix="single", generate_rsf_data_flag=False):
# Marmousi-based tests
test_on_model("marmvel1D", net_dict=net_dict, prefix=prefix, stretch_X=10, generate_rsf_data_flag=generate_rsf_data_flag)
test_on_model("marmvel", net_dict=net_dict, prefix=prefix, stretch_X=1, generate_rsf_data_flag=generate_rsf_data_flag)
test_on_model("marm2", net_dict=net_dict, prefix=prefix, stretch_X=1, generate_rsf_data_flag=generate_rsf_data_flag)
# Overthrust-based tests
test_on_model("overthrust1D", net_dict=net_dict, prefix=prefix, stretch_X=2, generate_rsf_data_flag=generate_rsf_data_flag)
# cmd("sfadd < overthrust3D_orig.hh add=-1 | sfclip2 lower=1.5 --out=stdout > overthrust3D.hh")
# cmd("sfwindow < overthrust3D_orig.hh n3=120 f1=400 n1=1 | sftransp | sfadd scale=1000 | sfput d1=25 d2=25 --out=stdout > overthrust_test_2D_1.hh")
test_on_model("overthrust_test_2D_1", net_dict=net_dict, prefix=prefix, stretch_X=1, generate_rsf_data_flag=generate_rsf_data_flag)
# cmd("sfwindow < overthrust3D.hh n3=120 f2=400 n2=1 | sftransp | sfadd scale=1000 | sfput d1=25 d2=25 --out=stdout > overthrust_test_2D_2.hh")
test_on_model("overthrust_test_2D_2", net_dict=net_dict, prefix=prefix, stretch_X=1, generate_rsf_data_flag=generate_rsf_data_flag)
# SEAM I based tests
test_on_model("seam100", net_dict=net_dict, prefix=prefix, stretch_X=2, generate_rsf_data_flag=generate_rsf_data_flag)
# cmd("sfwindow < SEAM_I_3D_20m.hh f3=100 n3=151 f1=1400 | sftransp memsize=100000 plane=13 | sfwindow f3=20 n3=1 f2=500 n2=1000 | sfput o1=0 o2=0 --out=stdout > seam_i_sediments.hh")
test_on_model("seam_i_sediments", net_dict=net_dict, prefix=prefix, stretch_X=1, generate_rsf_data_flag=generate_rsf_data_flag)
test_on_model("seam_karst", net_dict=net_dict, prefix=prefix, stretch_X=1, generate_rsf_data_flag=generate_rsf_data_flag)
#cmd("sfwindow < SEAM_I_3D_20m.hh f3=10 n3=151 | sftransp memsize=100000 plane=23 | sftransp memsize=100000 plane=12 | sfwindow f3=1455 n3=1 --out=stdout > seam_i_salt.hh")
test_on_model("seam_i_salt", net_dict=net_dict, prefix=prefix, stretch_X=2, generate_rsf_data_flag=generate_rsf_data_flag)
test_on_model("seam_arid", net_dict=net_dict, prefix=prefix, stretch_X=1, generate_rsf_data_flag=generate_rsf_data_flag)
#run_all_tests(net_dict=singleCMP_net_dict, prefix="singleCMP", generate_rsf_data_flag=True)
run_all_tests(net_dict=multiCMP_net_dict, prefix="multiCMP", generate_rsf_data_flag=True)
print(f"Total execution time is {toc(tic_total)}")
#%% PLOT FWI RESULTS
# for folder in ["marm2",
# "seam_i_sediments",
# "seam100",
# "overthrust"]:
for folder in ["overthrust"]:
with cd(f"fwi_{folder}"):
cmd("scons -j 4")
fwi1 = rsf_to_np("fwi2.rsf")
fwi2 = rsf_to_np("fwi_shi.rsf")
velo = rsf_to_np("vel.rsf")
velsm = rsf_to_np("smvel.rsf")
R2o = r2_score(velo.flatten(), fwi2.flatten())
fwi2 = resize(fwi2, (fwi2.shape[0], 120))
fwi1 = resize(fwi1, (fwi2.shape[0], 120))
plt_nb_T(fwi2, title=f"DL+MSFWI, R2={R2o:.2f}, NRMS={nrms(velo,fwi2):.1f}%", fname=f"../../latex/Fig/msfwi_{folder}", dx=25, dz=25, figsize=(32,6), vmin=1.5, vmax=4.5)
plt_nb_T(velsm,
title=f"DL, R2={r2_score(velo.flatten(),velsm.flatten()):.2f}, NRMS={nrms(velo,velsm):.1f}%",
fname=f"../../latex/Fig/dl_{folder}",
dx=25, dz=25, figsize=(16,6), vmin=1.5, vmax=4.5)
plt_nb_T(velo,
title=f"True model",
fname=f"../../latex/Fig/true_{folder}",
dx=25, dz=25, figsize=(16,6), vmin=1.5, vmax=4.5)
def plot_logs(log_x):
plt.figure(figsize=(11,18))
depth = 0.025*np.array(range(120))
plt.plot( 1e-3*velsm[log_x,:], depth, 'r', label="DL", linewidth=6)
plt.plot(1e-3*fwi1[log_x,:], depth, 'b--', label="DL+FWI", linewidth=6)
plt.plot(1e-3*fwi2[log_x,:], depth, 'bo', label="+MSFWI", markersize=15)
plt.plot( 1e-3*velo[log_x,:], depth, 'black', label="True", linewidth=8, alpha=0.6)
plt.ylabel("Depth (km)")
plt.xlabel("Velocity (km/s)")
plt.xlim((1.5, 4.5))
plt.yticks([0,1,2,3])
plt.title(f"Log at {int(0.025*log_x)} km")
plt.gca().invert_yaxis()
plt.legend()
plt.axis("tight")
plt.savefig(f"../latex/Fig/log_{int(0.025*log_x)}")
plot_logs(240)
plot_logs(400)
plot_logs(480)
# %%
|
[
"vkazei@gmail.com"
] |
vkazei@gmail.com
|
59c2e6f44d3b4e5943e33fae3055bca489248b1f
|
0d261e74c5cfcc7631cf17e5e5ea67ac146c9929
|
/1-Neural_Networks_and_Deep_Learning/vectorization.py
|
7a583c48d8b1449c4511c9e9c14c5de5001332b3
|
[] |
no_license
|
zoro16/deep-learning
|
61c4b2d85867fc9761badf1286a29dcc1360130c
|
4130b3591bb04cff5771e7cca0e122ab897afff0
|
refs/heads/master
| 2021-09-10T12:19:14.013642
| 2018-03-26T06:11:30
| 2018-03-26T06:11:30
| 113,404,656
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
import numpy as np
import time

a = np.random.rand(1000000)
b = np.random.rand(1000000)

started = time.time()
c = np.dot(a, b)  # vectorized dot product: one optimized library call
ended = time.time()
print("Vectorized version: {} ms".format(str(1000*(ended-started))))

c = 0
started = time.time()
for i in range(1000000):  # the same computation as an explicit Python loop
    c += a[i] * b[i]
ended = time.time()
print("For loop version: {} ms".format(str(1000*(ended-started))))
|
[
"mohamed.saleh16@gmail.com"
] |
mohamed.saleh16@gmail.com
|