Dataset schema (one row per source file):
blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 3 to 281)
content_id: string (length 40)
detected_licenses: list (length 0 to 57)
license_type: string (2 classes)
repo_name: string (length 6 to 116)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (313 classes)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (18.2k to 668M, nullable)
star_events_count: int64 (0 to 102k)
fork_events_count: int64 (0 to 38.2k)
gha_license_id: string (17 classes)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (107 classes)
src_encoding: string (20 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (4 to 6.02M)
extension: string (78 classes)
content: string (length 2 to 6.02M)
authors: list (length 1)
author: string (length 0 to 175)
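A minimal sketch (not part of the dataset dump) of how a single row maps onto the schema above; the field values are copied from the first row below, and the filter is only an illustrative example.

# Illustrative only: one row represented as a plain Python dict keyed by the schema above.
row = {
    "repo_name": "texastribune/scuole",
    "path": "/scuole/districts/management/commands/bootstrapdistricts_v2.py",
    "license_type": "permissive",
    "language": "Python",
    "length_bytes": 2884,
    "is_vendor": False,
    "is_generated": False,
    "content": "...",  # full file text, elided here
}

def keep(row):
    # Example filter: non-vendored, non-generated Python files under 10 kB.
    return (
        row["language"] == "Python"
        and not row["is_vendor"]
        and not row["is_generated"]
        and row["length_bytes"] < 10_000
    )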
blob_id: 86308bfcffce3e71e197ca2c8c3120a75ad06334 | directory_id: 4dda601cb02b404bc0ae25f984825641ddb135fe | path: /scuole/districts/management/commands/bootstrapdistricts_v2.py | content_id: 46af7c74d2b94e19d1157f0518a10620f2fe82f8
detected_licenses: ["MIT"] | license_type: permissive | repo_name: texastribune/scuole | snapshot_id: d89e49d6bf42d6476a8b2e5a4ebe6380c28e9f60 | revision_id: 155444e313313ba484d98d73d94d34e9b8f57fbe | branch_name: refs/heads/master
visit_date: 2023-06-28T02:52:40.037200 | revision_date: 2023-05-22T21:51:15 | committer_date: 2023-05-22T21:51:15 | github_id: 35,112,798 | star_events_count: 1 | fork_events_count: 0
gha_license_id: MIT | gha_event_created_at: 2023-06-12T20:04:49 | gha_created_at: 2015-05-05T17:03:23 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,884 | extension: py | content:
from csv import DictReader
from json import dumps, load
from os.path import join
from django.conf import settings
from django.contrib.gis.geos import GEOSGeometry, MultiPolygon
from django.core.management.base import BaseCommand, CommandError
from django.utils.text import slugify
from scuole.counties.models import County
from scuole.districts.models import District
from scuole.regions.models import Region
class Command(BaseCommand):
help = "Bootstraps District models using TEA data."
def add_arguments(self, parser):
parser.add_argument("year", nargs="?", type=str, default=None)
def handle(self, *args, **options):
self.year = options.get("year")
if not self.year:
raise CommandError("A year is required.")
entities_file = join(
settings.DATA_FOLDER, f"tapr/{self.year}/district/entities.csv"
)
with open(entities_file) as infile:
districts = [row for row in DictReader(infile)]
districts_geojson_file = join(
settings.DATA_FOLDER, "tapr/reference/district/shapes/districts.geojson"
)
shape_data = {}
with open(districts_geojson_file) as infile:
geo_data = load(infile)
features = geo_data.get("features")
for feature in features:
properties = feature.get("properties")
tea_id = properties.get("DISTRICT_C")
shape_data[tea_id] = feature.get("geometry")
self.shape_data = shape_data
for district in districts:
self.create_district(district)
def create_district(self, data):
district_id = str(data.get("DISTRICT")).zfill(6)
district_name = data.get("DISTNAME_CLEAN")
county_state_code = data.get("COUNTY").zfill(3)
region_id = str(data.get("REGION")).zfill(2)
self.stdout.write(f"Creating {district_name} ({district_id})")
county = County.objects.get(state_code=county_state_code)
region = Region.objects.get(region_id=region_id)
is_charter = data["DFLCHART"] == "Y"
if district_id in self.shape_data:
geometry = GEOSGeometry(dumps(self.shape_data.get(district_id)))
# checks to see if the geometry is a MultiPolygon
if geometry.geom_typeid == 3:
geometry = MultiPolygon(geometry)
else:
geometry = None
self.stderr.write(f"No shape data for {district_name}")
instance, _ = District.objects.update_or_create(
tea_id=district_id,
defaults={
"name": district_name,
"slug": slugify(district_name, allow_unicode=True),
"charter": is_charter,
"region": region,
"county": county,
"shape": geometry,
},
)
authors: ["rdmurphy@users.noreply.github.com"] | author: rdmurphy@users.noreply.github.com
blob_id: 233ca755a7a06a8bb3b16af8bdf0e8b4b9d705fd | directory_id: cdb92a30ff6e0fd12a782d91af915f4975c8bdce | path: /video.py | content_id: 21b1947d5d8b30efce3fd68aa308b346cd6a4d2a
detected_licenses: [] | license_type: no_license | repo_name: Ravenking7675/Screen-Sharing-using-PubSub- | snapshot_id: 0295b1b8f1bbbe2031bdb074be255e207a9b9f15 | revision_id: d3aba9cfe4bdc7db5b6d0fe960945f322cf42753 | branch_name: refs/heads/master
visit_date: 2022-12-06T18:02:54.371487 | revision_date: 2020-09-04T08:26:21 | committer_date: 2020-09-04T08:26:21 | github_id: 292,794,352 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 57 | extension: py | content:
from capture import capture_image
print(capture_image())
authors: ["superavinash2000gmail.com"] | author: superavinash2000gmail.com
blob_id: 1d630546bb190a52d8d5854bcb6b460288e43877 | directory_id: ffc224baf2393bc6a5d0c24eebd5da689bbfafe1 | path: /ass2ex2.py | content_id: 87ebbb883645591dc553aa7c7d4a7f8531b2351d
detected_licenses: [] | license_type: no_license | repo_name: Ranjeth83/assignment- | snapshot_id: dd84f4df9abe7a7b61f48626eab4c096516237d0 | revision_id: c1dc2cf8b03128b4a93a1b41ff69083f297f486b | branch_name: refs/heads/master
visit_date: 2022-11-06T09:31:22.711445 | revision_date: 2020-06-22T02:13:58 | committer_date: 2020-06-22T02:13:58 | github_id: 273,515,018 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 42 | extension: py | content:
val = input("enter your name:")
print(val)
authors: ["noreply@github.com"] | author: noreply@github.com
blob_id: e606b3c1a271a2e229c69bc0923948d3028a7129 | directory_id: 41a515a85c8116ad95eedf8e6f75825eef044d09 | path: /app.py | content_id: 4ce1afbc9182266a06af5ab3d7cae746fb85c1f3
detected_licenses: [] | license_type: no_license | repo_name: ionagamed/ds-lab-09 | snapshot_id: ffa3fe2dc70ec9e000ad9ecc8f65b5b6e852a726 | revision_id: 219259a2647a4a11631705572d6905a15b1fee72 | branch_name: refs/heads/master
visit_date: 2020-08-30T19:16:32.501191 | revision_date: 2019-10-30T07:23:53 | committer_date: 2019-10-30T07:23:53 | github_id: 218,466,786 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 681 | extension: py | content:
from flask import Flask, request, render_template
from pymongo import MongoClient
client_url = ",".join(
f"mongodb-replicaset-{i}.mongodb-replicaset"
for i in range(3)
)
client = MongoClient(client_url, 27017)
db = client.chat.messages
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def index():
if request.method == "POST":
doc = {
"username": request.form["username"],
"message": request.form["message"],
}
db.insert_one(doc)
messages = reversed(list(db.find()))
return render_template("index.html", messages=messages)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000)
authors: ["ionagamed@gmail.com"] | author: ionagamed@gmail.com
blob_id: 4565b9d512665043f48b45c2190f63f7c94d3f14 | directory_id: 3b059132c1ef89671416fbf1d2b91b5709f24321 | path: /singly_linked.py | content_id: b78d6f888ac15a58e2fd3ff4116db24779633ba5
detected_licenses: [] | license_type: no_license | repo_name: Philipwallen/Data-Structures | snapshot_id: 0eeae94b9322d4c0a012df0871e187f4498ec86e | revision_id: 7c6b9c42aec1002a97f52b079e599ac57a36a2dc | branch_name: refs/heads/master
visit_date: 2020-12-03T00:07:20.851363 | revision_date: 2017-07-07T14:59:05 | committer_date: 2017-07-07T14:59:05 | github_id: 95,990,013 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 151 | extension: py | content:
class Node(object):
def __init__(self, value):
self.value = value
self.nextnode = None
'''
Here we have a singly linked list class.
'''
authors: ["philipwallen1@gmail.com"] | author: philipwallen1@gmail.com
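The row above stores only a node definition; a minimal sketch (illustrative, not part of the dataset row) of the singly linked list that its closing docstring mentions could look like this:

# Illustrative sketch only: a singly linked list built on a node like the one in the file above.
class Node(object):
    def __init__(self, value):
        self.value = value
        self.nextnode = None

class SinglyLinkedList(object):
    def __init__(self):
        self.head = None

    def push_front(self, value):
        # Insert a new node at the head of the list.
        node = Node(value)
        node.nextnode = self.head
        self.head = node

    def to_list(self):
        # Walk the chain of nextnode pointers and collect the values.
        values, current = [], self.head
        while current is not None:
            values.append(current.value)
            current = current.nextnode
        return values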
blob_id: 050ede100f804daccbc9e5d1be042cf7b8a52937 | directory_id: 32e0e3ad8bf23aa2c3672d5a803069e80c1d33e1 | path: /commonsrc/Log.py | content_id: fb2019f580468d59829cecb3f1db2bb3795ccb03
detected_licenses: [] | license_type: no_license | repo_name: abao0713/interfaceTest2 | snapshot_id: d0c5ba0718c7b4b50f6ce327b641567d00209ad0 | revision_id: 854a31f3b9c6ea75e8a9d457dac7f6f21009e676 | branch_name: refs/heads/master
visit_date: 2020-03-18T09:09:39.153793 | revision_date: 2019-05-22T01:07:14 | committer_date: 2019-05-22T01:07:14 | github_id: 134,547,795 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,691 | extension: py | content:
import os
import readConfig as readConfig
import logging
from datetime import datetime
import threading
localReadConfig = readConfig.ReadConfig()
class Log:
def __init__(self):
global logPath, resultPath, proDir
proDir = readConfig.proDir
resultPath = os.path.join(proDir, "result")
if not os.path.exists(resultPath):
os.mkdir(resultPath)
logPath = os.path.join(resultPath, str(datetime.now().strftime("%Y%m%d%H%M%S")))
if not os.path.exists(logPath):
os.mkdir(logPath)
self.logger = logging.getLogger()
self.logger.setLevel(logging.INFO)
# defined handler
handler = logging.FileHandler(os.path.join(logPath, "output.log"))
# defined formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
self.logger.addHandler(handler)
def get_logger(self):
"""
get logger
:return:
"""
return self.logger
def build_start_line(self, case_no):
"""
write start line
:return:
"""
self.logger.info("--------" + case_no + " START--------")
def build_end_line(self, case_no):
"""
write end line
:return:
"""
self.logger.info("--------" + case_no + " END--------")
def build_case_line(self, case_name, msg):
"""
write test case line
:param case_name:
:param code:
:param msg:
:return:
"""
self.logger.info(case_name+"----msg:"+msg)
def get_report_path(self):
"""
get report file path
:return:
"""
report_path = os.path.join(logPath, "report.html")
return report_path
def get_result_path(self):
"""
get test result path
:return:
"""
return logPath
def write_result(self, result):
"""
:param result:
:return:
"""
result_path = os.path.join(logPath, "report.txt")
fb = open(result_path, "wb")
try:
fb.write(result)
except FileNotFoundError as ex:
self.logger.error(str(ex))
class MyLog:
log = None
mutex = threading.Lock()
def __init__(self):
pass
@staticmethod
def get_log():
if MyLog.log is None:
MyLog.mutex.acquire()
MyLog.log = Log()
MyLog.mutex.release()
return MyLog.log
if __name__ == "__main__":
log = MyLog.get_log()
logger = log.get_logger()
logger.debug("test debug")
logger.info("test info")
authors: ["13686821736@163.com"] | author: 13686821736@163.com
blob_id: 0041805ec3ee73af537e19673e3b48a1208a44fa | directory_id: 78604a99d488889e2932ed713b209d19178f2c95 | path: /src/models/pieces/strategies/move_strategy.py | content_id: 4aa8cc683e5fef4b330c6acbbba96a628e1bdf27
detected_licenses: [] | license_type: no_license | repo_name: AdnanZahid/Chess_Neural_Networks_With_Python_Chess | snapshot_id: 8173706cf97fcf818dc0049054a81659d3346274 | revision_id: c4d760bed1951bda24c84ed0681b9ca3820550e1 | branch_name: refs/heads/master
visit_date: 2021-01-24T04:19:59.626876 | revision_date: 2018-02-26T07:37:30 | committer_date: 2018-02-26T07:37:30 | github_id: 122,931,577 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 730 | extension: py | content:
# This class manages mobility and list of moves for a piece
class MoveStrategy:
def __init__(self,piece,color,directionsList,board):
self.piece = piece
self.color = color
self.directionsList = directionsList
self.board = board
def getMobility(self,position):
return len(self.generateAllMoves())
def generateAllMoves(self):
possibleMovesToSquaresList = []
if self.color == self.board.currentTurnColor:
for direction in self.directionsList:
possibleMovesToSquaresList.extend(self.generateMove(self.piece.position,direction))
return possibleMovesToSquaresList
authors: ["adnaan.zaahid@gmail.com"] | author: adnaan.zaahid@gmail.com
blob_id: c02607f0370710a5f23cb44e48f0bff1b7cdeaaf | directory_id: fc74436ab3e11ba2978ebb81600dcd3799a74d1f | path: /lstm_based_seq_2_seq/alad_seq2seq.py | content_id: fa019e23d362ca3df50f06ee529e7350ed98120f
detected_licenses: [] | license_type: no_license | repo_name: vrahul1997/nlp_from_scratch | snapshot_id: f119d8736edbf81474d4916eba6d87de8d73a936 | revision_id: 08984d7a3900419995df0bd0e99ab4794a7f54dc | branch_name: refs/heads/main
visit_date: 2023-02-16T22:04:26.678253 | revision_date: 2021-01-07T13:04:38 | committer_date: 2021-01-07T13:04:38 | github_id: 314,092,477 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,947 | extension: py | content:
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.datasets import Multi30k
from torchtext.data import Field, BucketIterator
import numpy as np
import spacy
import random
from torch.utils.tensorboard import SummaryWriter # to print to tensorboard
from utils import translate_sentence, bleu, save_checkpoint, load_checkpoint
spacy_ger = spacy.load("de")
spacy_eng = spacy.load("en")
def tokenize_ger(text):
return [tok.text for tok in spacy_ger.tokenizer(text)]
def tokenize_eng(text):
return [tok.text for tok in spacy_eng.tokenizer(text)]
german = Field(tokenize=tokenize_ger, lower=True, init_token="<sos>", eos_token="<eos>")
english = Field(
tokenize=tokenize_eng, lower=True, init_token="<sos>", eos_token="<eos>"
)
train_data, valid_data, test_data = Multi30k.splits(
exts=(".de", ".en"), fields=(german, english)
)
german.build_vocab(train_data, max_size=10000, min_freq=2)
english.build_vocab(train_data, max_size=10000, min_freq=2)
class Encoder(nn.Module):
def __init__(self, input_size, embedding_size, hidden_size, num_layers, p):
super(Encoder, self).__init__()
self.dropout = nn.Dropout(p)
self.hidden_size = hidden_size
self.num_layers = num_layers
self.embedding = nn.Embedding(input_size, embedding_size)
self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p)
def forward(self, x):
# x shape: (seq_length, N) where N is batch size
embedding = self.dropout(self.embedding(x))
# embedding shape: (seq_length, N, embedding_size)
outputs, (hidden, cell) = self.rnn(embedding)
# outputs shape: (seq_length, N, hidden_size)
return hidden, cell
class Decoder(nn.Module):
def __init__(
self, input_size, embedding_size, hidden_size, output_size, num_layers, p
):
super(Decoder, self).__init__()
self.dropout = nn.Dropout(p)
self.hidden_size = hidden_size
self.num_layers = num_layers
self.embedding = nn.Embedding(input_size, embedding_size)
self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p)
self.fc = nn.Linear(hidden_size, output_size)
def forward(self, x, hidden, cell):
# x shape: (N) where N is for batch size, we want it to be (1, N), seq_length
# is 1 here because we are sending in a single word and not a sentence
x = x.unsqueeze(0)
embedding = self.dropout(self.embedding(x))
# embedding shape: (1, N, embedding_size)
outputs, (hidden, cell) = self.rnn(embedding, (hidden, cell))
# outputs shape: (1, N, hidden_size)
predictions = self.fc(outputs)
# predictions shape: (1, N, length_target_vocabulary) to send it to
# loss function we want it to be (N, length_target_vocabulary) so we're
# just gonna remove the first dim
predictions = predictions.squeeze(0)
return predictions, hidden, cell
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder):
super(Seq2Seq, self).__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self, source, target, teacher_force_ratio=0.5):
batch_size = source.shape[1]
target_len = target.shape[0]
target_vocab_size = len(english.vocab)
outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(device)
hidden, cell = self.encoder(source)
# Grab the first input to the Decoder which will be <SOS> token
x = target[0]
for t in range(1, target_len):
# Use previous hidden, cell as context from encoder at start
output, hidden, cell = self.decoder(x, hidden, cell)
# Store next output prediction
outputs[t] = output
# Get the best word the Decoder predicted (index in the vocabulary)
best_guess = output.argmax(1)
# With probability of teacher_force_ratio we take the actual next word
# otherwise we take the word that the Decoder predicted it to be.
# Teacher Forcing is used so that the model gets used to seeing
# similar inputs at training and testing time, if teacher forcing is 1
# then inputs at test time might be completely different than what the
# network is used to. This was a long comment.
x = target[t] if random.random() < teacher_force_ratio else best_guess
return outputs
### We're ready to define everything we need for training our Seq2Seq model ###
# Training hyperparameters
num_epochs = 100
learning_rate = 0.001
batch_size = 64
# Model hyperparameters
load_model = False
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
input_size_encoder = len(german.vocab)
input_size_decoder = len(english.vocab)
output_size = len(english.vocab)
encoder_embedding_size = 300
decoder_embedding_size = 300
hidden_size = 1024 # Needs to be the same for both RNN's
num_layers = 2
enc_dropout = 0.5
dec_dropout = 0.5
# Tensorboard to get nice loss plot
writer = SummaryWriter(f"runs/loss_plot")
step = 0
train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size=batch_size,
sort_within_batch=True,
sort_key=lambda x: len(x.src),
device=device,
)
encoder_net = Encoder(
input_size_encoder, encoder_embedding_size, hidden_size, num_layers, enc_dropout
).to(device)
decoder_net = Decoder(
input_size_decoder,
decoder_embedding_size,
hidden_size,
output_size,
num_layers,
dec_dropout,
).to(device)
model = Seq2Seq(encoder_net, decoder_net).to(device)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
pad_idx = english.vocab.stoi["<pad>"]
criterion = nn.CrossEntropyLoss(ignore_index=pad_idx)
if load_model:
load_checkpoint(torch.load("my_checkpoint.pth.tar"), model, optimizer)
sentence = "ein boot mit mehreren männern darauf wird von einem großen pferdegespann ans ufer gezogen."
for epoch in range(num_epochs):
print(f"[Epoch {epoch} / {num_epochs}]")
checkpoint = {"state_dict": model.state_dict(), "optimizer": optimizer.state_dict()}
save_checkpoint(checkpoint, filename="alad_my_checkpoint.pth.tar")
model.eval()
translated_sentence = translate_sentence(
model, sentence, german, english, device, max_length=50
)
print(f"Translated example sentence: \n {translated_sentence}")
model.train()
for batch_idx, batch in enumerate(train_iterator):
# Get input and targets and get to cuda
inp_data = batch.src.to(device)
target = batch.trg.to(device)
# Forward prop
output = model(inp_data, target)
# Output is of shape (trg_len, batch_size, output_dim) but Cross Entropy Loss
# doesn't take input in that form. For example if we have MNIST we want to have
# output to be: (N, 10) and targets just (N). Here we can view it in a similar
# way that we have output_words * batch_size that we want to send in into
# our cost function, so we need to do some reshaping. While we're at it,
# let's also remove the start token.
output = output[1:].reshape(-1, output.shape[2])
target = target[1:].reshape(-1)
optimizer.zero_grad()
loss = criterion(output, target)
# Back prop
loss.backward()
# Clip to avoid exploding gradient issues, makes sure grads are
# within a healthy range
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
# Gradient descent step
optimizer.step()
# Plot to tensorboard
writer.add_scalar("Training loss", loss, global_step=step)
step += 1
score = bleu(test_data[1:100], model, german, english, device)
print(f"Bleu score {score*100:.2f}")
authors: ["vijayvenu1997@gmail.com"] | author: vijayvenu1997@gmail.com
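A minimal sketch (illustrative, not part of the file above) of the reshape that the training loop applies before CrossEntropyLoss: the <sos> time step is dropped and the remaining (trg_len - 1, batch_size, vocab) predictions are flattened so each row lines up with one target token; the toy dimensions below are assumptions.

import torch
import torch.nn as nn

trg_len, batch_size, vocab_size = 5, 2, 7                        # assumed toy sizes
output = torch.randn(trg_len, batch_size, vocab_size)            # decoder predictions
target = torch.randint(0, vocab_size, (trg_len, batch_size))     # gold token indices

output_flat = output[1:].reshape(-1, output.shape[2])            # ((trg_len - 1) * batch, vocab)
target_flat = target[1:].reshape(-1)                             # ((trg_len - 1) * batch,)

loss = nn.CrossEntropyLoss()(output_flat, target_flat)
print(loss.item())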
blob_id: c5a0ff62b99e765cf2248885b7536589d03a6b90 | directory_id: 13f6d3ff4764956d61ebb5ca8ad55927e2fea919 | path: /session1/list1.py | content_id: 81f398f7ba7f504c34d9d0da0abe33ab9c7f5198
detected_licenses: [] | license_type: no_license | repo_name: Kit-Data-Science-Telecom-2021-2022/Kit-Data_Marie-Elisabeth-Campo | snapshot_id: 73390bf073ee68843f34ca3e354142ba6c9397bf | revision_id: 522cf35558401557aa74263d09ecc0d6ab3c55fb | branch_name: refs/heads/main
visit_date: 2023-09-05T22:35:18.090965 | revision_date: 2021-11-07T23:24:07 | committer_date: 2021-11-07T23:24:07 | github_id: 409,204,002 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,105 | extension: py | content:
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
return len([w for w in words if len(w)>=2 and w[0]==w[-1]])
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
o = list(w for w in words if w[0] != 'x')
o = sorted(o)
x = list(w for w in words if w[0] == 'x')
x = sorted(x)
return x + o
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element form each tuple.
def sort_last(tuples):
l = sorted(list(t[::-1] for t in tuples))
l1 = list(t[::-1] for t in l)
return l1
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print ('%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))
# Calls the above functions with interesting inputs.
def main():
print ('match_ends')
test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
print()
print ('front_x')
test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
print()
print ('sort_last')
test(sort_last([(1, 3), (3, 2), (2, 1)]),
[(2, 1), (3, 2), (1, 3)])
test(sort_last([(2, 3), (1, 2), (3, 1)]),
[(3, 1), (1, 2), (2, 3)])
test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
[(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
main()
authors: ["marinette@gmx.fr"] | author: marinette@gmx.fr
blob_id: c1d95549cd754be59496169e8ee446c75643f18f | directory_id: 62dd39e19d2b839d8e01f6d2d6b0d22bc348be77 | path: /test_PokerScoring.py | content_id: fad8fd4d48b9370a068e0d9f6f2e175b647a3093
detected_licenses: [] | license_type: no_license | repo_name: MattMorris1996/PythonPoker | snapshot_id: 6c6cc64f39c138dd2c4c73e5bf793f2b0f6cca33 | revision_id: dd174f0019c618f4754d3630cd5f5bd66048d639 | branch_name: refs/heads/master
visit_date: 2020-12-04T14:46:18.741751 | revision_date: 2020-09-01T23:23:42 | committer_date: 2020-09-01T23:23:42 | github_id: 231,806,220 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,916 | extension: py | content:
import unittest
import PokerScoring
import CardDeck
import random
class TestPokerHands(unittest.TestCase):
def setUp(self):
# suit values
diamonds = 0
hearts = 1
spade = 2
clubs = 3
# duplicates setup
self.multiples = [CardDeck.Card(diamonds, 4),CardDeck.Card(hearts, 4),CardDeck.Card(spade, 4),CardDeck.Card(clubs, 4)]
# full house setup
self.doubles = [[CardDeck.Card(diamonds, 4), CardDeck.Card(spade, 4)],[CardDeck.Card(hearts, 1), CardDeck.Card(clubs, 1)]]
self.doubles_same = [[CardDeck.Card(diamonds, 4), CardDeck.Card(spade, 4)],[CardDeck.Card(hearts, 0), CardDeck.Card(clubs, 0)]]
self.only_triples = [[CardDeck.Card(diamonds, 0), CardDeck.Card(spade, 0),CardDeck.Card(hearts, 0)]]
self.straight_test = []
# straight setup
for i in range(7):
self.straight_test.append(CardDeck.Card(clubs, i))
self.flush_test = []
# flush setup
for i in range(7):
self.flush_test.append(CardDeck.Card(hearts, random.randint(0, 13)))
# straight flush setup
self.straights = []
self.flushes = []
straight = []
flush = []
# generate straight flush
for i in range(5):
straight.append(CardDeck.Card(hearts, i))
for i in range(5):
flush.append(CardDeck.Card(hearts, i))
self.flushes.append(flush)
self.straights.append(straight)
pass
def test_duplicates(self):
dupl = PokerScoring.duplicates(self.multiples)
self.assertEqual(3, len(dupl))
def test_full_house(self):
# test doubles and triples with unique values
full_house = PokerScoring.full_house(self.only_triples, self.doubles)
self.assertEqual(2, len(full_house))
for hands in full_house:
self.assertEqual(5, len(hands))
# test doubles and triples where values aren't unique
full_house = PokerScoring.full_house(self.only_triples, self.doubles_same)
self.assertEqual(1, len(full_house))
for hands in full_house:
self.assertEqual(5, len(hands))
def test_two_pair(self):
two_pair = PokerScoring.two_pair(self.doubles)
self.assertEqual(2, len(two_pair))
def test_straights(self):
straights = PokerScoring.connectivity(self.straight_test)
self.assertEqual(3, len(straights))
for straight in straights:
self.assertEqual(5, len(straight))
def test_flushes(self):
flushes = PokerScoring.same_suit(self.flush_test)
self.assertEqual(3, len(flushes))
for flush in flushes:
self.assertEqual(5, len(flush))
def test_straight_flush(self):
straight_flushes = PokerScoring.connected_flushes(self.flushes, self.straights)
self.assertEqual(1, len(straight_flushes))
authors: ["matthew.m1996@gmail.com"] | author: matthew.m1996@gmail.com
blob_id: a3999bae68200d01e0d1c7cfcc0ba9cd188bd945 | directory_id: 9d8bd40159f119cea1c2c3fd86743c5bc1d9907c | path: /examples/spatial_filter.py | content_id: 45583d7527bc32d44ff66ff560834c812c30a7f8
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | repo_name: mihaieduard/Diffraction-Simulations--Angular-Spectrum-Method | snapshot_id: 70757b315d06de40c7914184b4015e53a5f3dd1f | revision_id: 4ec7abcc1a726c5e7b65d05455cab2467fdca9cf | branch_name: refs/heads/main
visit_date: 2023-03-23T16:46:52.097425 | revision_date: 2021-03-19T18:36:12 | committer_date: 2021-03-19T18:36:12 | github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 515 | extension: py | content:
from diffractsim import MonochromaticField, nm, mm, cm
F = MonochromaticField(
wavelength=632.8 * nm, extent_x=25. * mm, extent_y=25. * mm, Nx=2000, Ny=2000,power = 0.1
)
F.add_gaussian_beam(0.7*mm)
F.add_spatial_noise(noise_radius = 2.2*mm, f_mean = 1/(0.2*mm), f_size = 1/(0.5*mm), A = 0.2, N= 50)
F.add_lens(f = 50*cm)
F.propagate(50*cm)
F.add_circular_slit( 0, 0, 0.28*mm)
F.propagate(50*cm)
F.add_lens(f = 50*cm)
F.propagate(30*cm)
rgb = F.get_colors()
F.plot(rgb, xlim=[-2.5,2.5], ylim=[-2.5,2.5])
authors: ["rafael.fuente.herrezuelo@gmail.com"] | author: rafael.fuente.herrezuelo@gmail.com
blob_id: bb45d0593e0bed3aa6c10277e34775a2e6257981 | directory_id: 4a216a1d968cb3f3ed78f24def35773ed7c04459 | path: /main.py | content_id: c17f90b508427d9ff451d5ab5497d96d8f8826fd
detected_licenses: [] | license_type: no_license | repo_name: luizgdias/kafka_producer_topic_consumer | snapshot_id: a43d15b40aed048271e37b64f24af3adf2fe47e2 | revision_id: b4484ece16443423e7e1f2dfe16b5084e98f2abf | branch_name: refs/heads/master
visit_date: 2020-06-28T15:44:06.080711 | revision_date: 2019-08-02T18:09:21 | committer_date: 2019-08-02T18:09:21 | github_id: 200,271,917 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,230 | extension: py | content:
# -*- Coding: UTF-8 -*-
#coding: utf-8
#########################################################
# author: Luiz Gustavo Dias
# date : 07/23/2019
#########################################################
# At First time is necessary to run in terminal:
# $ docker run -d --name zookeeper jplock/zookeeper:3.4.6
# $ docker run -d --name kafka --link zookeeper:zookeeper ches/kafka
# $ export ZK_IP=$(docker inspect --format "{{ .NetworkSettings.IPAddress }}" zookeeper)
# $ export KAFKA_IP=$(docker inspect --format "{{ .NetworkSettings.IPAddress }}" kafka)
# $ docker run --rm ches/kafka kafka-topics.sh --create --topic test --replication-factor 1 --partitions 1 --zookeeper $ZK_IP:2181
# Created topic "test".
#########################################################
# Description: The script lists all files in the ./Files directory in a txt file,
# then a Kafka producer is created; the producer reads the file
# and sends all the file names to a Kafka consumer that uses the
# same Kafka topic.
# docker run --rm --interactive ches/kafka kafka-console-producer.sh --broker-list 172.17.0.3:9092 --topic test
# docker run --rm ches/kafka kafka-console-consumer.sh --topic test --from-beginning --zookeeper 172.17.0.2:2181
#########################################################
from kafka import KafkaConsumer
from kafka import KafkaProducer
from json import loads
import os, sys, subprocess, shlex
import json
from json import dumps
from time import sleep
def buffering():
os.system("touch buffer-list-files.json")
buffer_list_files = open("buffer-list-files.json").readlines()
print(buffer_list_files)
buffer_list_files2 = open("buffer-list-files.json", "a")
for root, dirs, files in os.walk("./Files", topdown=False):
for name in files:
json_lista = '{"file_path":"'+os.path.join(root,name)+'", "submited":" "}\n'
if json_lista in buffer_list_files:
print("O arquivo <"+name+"> já está bo buffer!")
else:
print("O arquivo <"+name+"> não está no buffer....\nPreparando para inserir o arquivo <"+name+"> no buffer...")
#print(os.path.join(root,name))
buffer_list_files2.write('{"file_path":"'+os.path.join(root,name)+'", "submited":" "}\n')
print("Arquivo <"+name+"> inserido no buffer.")
buffer_list_files2.close()
def connection():
x = "docker start zookeeper kafka"
process = subprocess.Popen(x, stdout=subprocess.PIPE, shell=True)
process.communicate()
def sendToTopic():
# os.system('docker stop zookeeper kafka')
# os.system('docker rm zookeeper kafka')
# os.system('docker run -d --name zookeeper jplock/zookeeper:3.4.6')
# os.system('docker run -d --name kafka --link zookeeper:zookeeper ches/kafka')
# os.system('export KAFKA_IP=$(docker inspect --format "{{ .NetworkSettings.IPAddress }}" kafka)')
# os.system('echo $KAFKA_IP')
x = "docker start zookeeper kafka"
process = subprocess.Popen(x, stdout=subprocess.PIPE, shell=True)
process.communicate()
producer = KafkaProducer(bootstrap_servers=['172.17.0.3:9092'], api_version=(0,10,1),
value_serializer=lambda x:
dumps(x).encode('utf-8'))
for e in range(10):
data = {'id': e,'x1': '1', 'y1': '1','x2': '2', 'y2': '2','page': '3', 'type': '3', 'path': '/out'}
producer.send('test', value=data)
print("Producer to topic: "+str(e))
sleep(1)
#os.system('docker stop zookeeper kafka')
def getKafkaMessages(topicName):
#os.system('docker run --rm ches/kafka kafka-console-consumer.sh --topic testTopic --from-beginning --zookeeper 172.17.0.2:2181')
# x = "docker start zookeeper kafka"
# process = subprocess.Popen('export ZK_IP=$(docker inspect --format \'{{ .NetworkSettings.IPAddress }}\' zookeeper) && echo $ZK_IP', stdout=subprocess.PIPE, shell=True)
# zookeeper_ip = process.communicate()[0]
# zookeeper_ip = (str(zookeeper_ip, 'UTF-8')).strip('\n')
# print(zookeeper_ip)
os.system('docker run --rm ches/kafka kafka-console-consumer.sh --topic image-detection-topic --from-beginning --zookeeper 192.168.1.112:2181')
# process.communicate()
#buffering()
def getKafkaMessagesV2(topic, kafka_ip):
## Collect Messages from Bus
consumer = KafkaConsumer(topic, auto_offset_reset='earliest',
bootstrap_servers=[kafka_ip],
api_version=(0, 10, 1))
consumer.subscribe([topic])
print('after consumer')
print(consumer)
for msg in consumer:
print('inside for')
print(msg[6])
#sendToTopic()
#getKafkaMessages('image-detection-topic')
getKafkaMessagesV2('image-detection-topic', '10.100.14.107:9092')
#getKafkaMessagesV2('test', '172.17.0.3:9092')
#bin/kafka-console-consumer --zookeeper localhost:2181 --topic kafkatest --from-beginning
#bin/kafka-console-consumer --zookeeper localhost:2181 /kafka --topic kafkatest --from-beginning
#kafka/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic my-topic --from-beginning
authors: ["gusttavodiias@gmail.com"] | author: gusttavodiias@gmail.com
blob_id: 05fc046d63ad0da119f177a76e959f80d9d8f37b | directory_id: d184d1fc998a300feee2d716d97209b9fbc78468 | path: /probability.py | content_id: dbeb07713ae4103f2e739fabfa5eb51dd35d80c9
detected_licenses: [] | license_type: no_license | repo_name: MickeyKen/plot_node_master_thesis | snapshot_id: df196d7a037b1960c1ee95268a1ae3b1e8f24148 | revision_id: 5182ea79cb8cfbc6bead60d97eda9307f7e53c10 | branch_name: refs/heads/master
visit_date: 2023-02-16T21:17:49.284973 | revision_date: 2021-01-19T09:19:40 | committer_date: 2021-01-19T09:19:40 | github_id: 330,574,321 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,316 | extension: py | content:
#!/usr/bin/python
import matplotlib.pyplot as plt
path = 'data/param_UD-v95_output.txt'
isServiceCount = True
ACTOR_NUM = 3
AVERAGE_NUM = 100
LIMIT = 5000
if __name__ == '__main__':
collision = [[] for j in range(ACTOR_NUM)]
average_collision = []
success = [[] for j in range(ACTOR_NUM)]
average_success = []
no_action = [[] for j in range(ACTOR_NUM)]
average_no_action = []
eps = []
average_eps = []
epsilons = [[] for j in range(ACTOR_NUM)]
flag = 0
count = 0
fig = plt.figure(figsize=(8.27,3.9), dpi=100)
plt.ion()
plt.xlabel('Episode')
# plt.ylabel('P')
plt.grid()
cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
with open(path) as f:
for s_line in f:
eps_num = int(s_line.split(',')[0])
actor_num = int(s_line.split(',')[1])
step = int(s_line.split(',')[3])
reward = float(s_line.split(',')[5])
if step < 150 and reward < -200:
collision[actor_num].append(1.0)
success[actor_num].append(0.0)
no_action[actor_num].append(0.0)
elif step < 150 and reward > 0:
collision[actor_num].append(0.0)
success[actor_num].append(1.0)
no_action[actor_num].append(0.0)
else:
collision[actor_num].append(0.0)
success[actor_num].append(0.0)
no_action[actor_num].append(1.0)
collision_sum = 0.0
success_sum = 0.0
no_action_sum = 0.0
average_collision_sum = 0.0
average_success_sum = 0.0
average_no_action_sum = 0.0
count = 1
for index in range(min(len(v) for v in collision)):
collision_sum = 0.0
success_sum = 0.0
no_action_sum = 0.0
if index <= LIMIT:
for n in range(ACTOR_NUM):
collision_sum += collision[n][index]
success_sum += success[n][index]
no_action_sum += no_action[n][index]
average_collision_sum += collision_sum / float(ACTOR_NUM)
average_success_sum += success_sum / float(ACTOR_NUM)
average_no_action_sum += no_action_sum / float(ACTOR_NUM)
if index % AVERAGE_NUM == 0 and index > 0:
average_eps.append(count*AVERAGE_NUM)
average_collision.append(average_collision_sum / float(AVERAGE_NUM))
average_success.append(average_success_sum / float(AVERAGE_NUM))
average_no_action.append(average_no_action_sum / float(AVERAGE_NUM))
average_collision_sum = 0.0
average_success_sum = 0.0
average_no_action_sum = 0.0
count += 1
eps.append(index + 1)
plt.plot(average_eps, average_success, color='#e41a1c', label="success")
plt.plot(average_eps, average_collision, color='#00529a', label="collision")
plt.plot(average_eps, average_no_action, color='#3FBF00', label="past 150 steps")
plt.legend( loc='upper left', borderaxespad=1)
plt.draw()
fig.savefig("result_multi_probability.png")
plt.pause(0)
authors: ["mickey333ff@gmail.com"] | author: mickey333ff@gmail.com
blob_id: d2d9b448996e36b3842f5c9574cb580f9715777c | directory_id: 34b6995f57f5f599e93df6f3e3841bbb9616b85f | path: /gym_electric_motor/envs/gym_dcm/dcm_base_env.py | content_id: 4d5cae80381ab8a7d50999601947b03aaae29c65
detected_licenses: ["MIT"] | license_type: permissive | repo_name: zizai/gym-electric-motor | snapshot_id: ec86584315b4abf91f5d3ee7978272e00085728e | revision_id: 48a0232edf3474e441453126df0f52dc391aed11 | branch_name: refs/heads/master
visit_date: 2020-11-24T20:39:37.934074 | revision_date: 2019-12-20T10:01:53 | committer_date: 2019-12-20T10:01:53 | github_id: 228,333,802 | star_events_count: 0 | fork_events_count: 0
gha_license_id: MIT | gha_event_created_at: 2019-12-16T08:02:48 | gha_created_at: 2019-12-16T08:02:47 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 33,923 | extension: py | content:
import gym
from scipy.integrate import ode
import numpy as np
import json
from .models import dcmotor_model, converter_models, load_models
from ..dashboard import MotorDashboard
from ..utils import EulerSolver
class _DCMBaseEnv(gym.Env):
"""
**Description:**
An abstract environment for common functions of the DC motors
**Observation:**
Specified by the concrete motor. It is always a concatenation of the state variables, voltages, torque
and next reference values.
**Actions:**
Depending on the converter type the action space may be discrete or continuous
Type: Discrete(2 / 3 / 4)
Num Action: Depend on the converter
1Q Converter: (only positive voltage and positive current)
- 0: transistor block
- 1: positive DC-link voltage applied
2Q Converter: (only positive voltage and both current directions)
- 0: both transistors blocking
- 1: positive DC-link voltage applied
- 2: 0V applied
4Q Converter (both voltage and current directions)
- 0: short circuit with upper transistors, 0V applied
- 1: positive DC-link voltage
- 2: negative DC-link voltage
- 3: short circuit with lower transistors, 0V applied
Type: Box()
Defines the duty cycle for the transistors.\n
[0, 1]: 1Q and 2Q\n
[-1, 1]: 4Q
For an externally excited motor it is a two dimensional box from [-1, 1] or [0, 1]
**Reward:**
The reward is the cumulative squared error (se) or the cumulative absolute error (ae) between the
current value and the current reference of the state variables. Both are also available in a shifted
form with an added one such that the reward is positive. More details are given below.
The variables are normalised by their maximal values and weighted by the reward_weights.
**Starting State:**
All observations are assigned a random value.
**Episode Termination**:
An episode terminates, when all the steps in the reference have been simulated
or a limit has been violated.
**Attributes:**
+----------------------------+----------------------------------------------------------+
| **Name** | **Description** |
+============================+==========================================================+
| **state_vars** | Names of all the quantities that can be observed |
+----------------------------+----------------------------------------------------------+
| **state_var_positions** | Inverse dict of the state vars. Mapping of key to index. |
+----------------------------+----------------------------------------------------------+
| **limits** | Maximum allowed values of the state variables |
+----------------------------+----------------------------------------------------------+
| **reward_weights** | Ratio of the weight of the state variable for the reward |
+----------------------------+----------------------------------------------------------+
| **on_dashboard** | Flag indicating if the state var is shown on dashboard |
+----------------------------+----------------------------------------------------------+
| **noise_levels** | Percentage of the noise power to the signal power |
+----------------------------+----------------------------------------------------------+
| **zero_refs** | State variables that get a fixed zero reference |
+----------------------------+----------------------------------------------------------+
"""
OMEGA_IDX = 0
MOTOR_IDX = None
# region Properties
@property
def tau(self):
"""
Returns:
the step size of the environment Default: 1e-5 for discrete / 1e-4 for continuous action space
"""
return self._tau
@property
def episode_length(self):
"""
Returns:
The length of the current episode
"""
return self._episode_length
@episode_length.setter
def episode_length(self, episode_length):
"""
Set the length of the episode in the environment. Must be larger than the prediction horizon.
"""
self._episode_length = max(self._prediction_horizon + 1, episode_length)
@property
def k(self):
"""
Returns:
The current step in the running episode
"""
return self._k
@property
def limit_observer(self):
return self._limit_observer
@property
def safety_margin(self):
return self._safety_margin
@property
def prediction_horizon(self):
return self._prediction_horizon
@property
def motor_parameter(self):
"""
Returns:
motor parameter with calculated limits
"""
params = self.motor_model.motor_parameter
params['safety_margin'] = self.safety_margin
params['episode_length'] = self._episode_length
params['prediction_horizon'] = self._prediction_horizon
params['tau'] = self._tau
params['limits'] = self._limits.tolist()
return params
@property
def _reward(self):
return self._reward_function
# endregion
def __init__(self, motor_type, state_vars, zero_refs, converter_type, tau, episode_length=10000, load_parameter=None,
motor_parameter=None, reward_weight=(('omega', 1.0),), on_dashboard=('omega',), integrator='euler',
nsteps=1, prediction_horizon=0, interlocking_time=0.0, noise_levels=0.0, reward_fct='swsae',
limit_observer='off', safety_margin=1.3, gamma=0.9, dead_time=True):
"""
Basic setting of all the common motor parameters.
Args:
motor_type: Can be 'dc-series', 'dc-shunt', 'dc-extex' or 'dc-permex'. Set by the child classes.
state_vars: State variables of the DC motor. Set by the child classes.
zero_refs: State variables that get zero references. (E.g. to punish high control power)
motor_parameter: A dict of motor parameters that differ from the default ones. \n
For details look into the dc_motor model.
load_parameter: A dict of load parameters that differ from the default ones. \n
For details look into the load model.
converter_type: The specific converter type.'{disc/cont}-{1Q/2Q/4Q}'. For details look into converter
tau: The step size or sampling time of the environment.
episode_length: The episode length of the environment
reward_weight: Iterable of key/value pairs that specifies how the rewards in the environment
are weighted.
E.g. ::
(('omega', 0.9),('u', 0.1))
on_dashboard: Iterable that specifies the variables on the dashboard.
E.g.::
['omega','u']
integrator: Select which integrator to choose from 'euler', 'dopri5'
nsteps: Maximum allowed number of steps for the integrator.
prediction_horizon: The length of future reference points that are shown to the agents
interlocking_time: interlocking time of the converter
noise_levels: Noise levels of the state variables in percentage of the signal power.
reward_fct: Select the reward function between: (Each one normalised to [0,1] or [-1,0]) \n
'swae': Absolute Error between references and state variables [-1,0] \n
'swse': Squared Error between references and state variables [-1,0]\n
'swsae': Shifted absolute error / 1 + swae [0,1] \n
'swsse': Shifted squared error / 1 + swse [0,1] \n
limit_observer: Select the limit observing function. \n
'off': No limits are observed. Episode goes on. \n
'no_punish': Limits are observed, no punishment term for violation. This function should be used with
shifted reward functions. \n
'const_punish': Limits are observed. Punishment in the form of -1 / (1-gamma) to punish the agent with
the maximum negative reward for the further steps. This function should be used with non shifted reward
functions.
safety_margin: Ratio between maximal and nominal power of the motor parameters.
gamma: Parameter for the punishment of a limit violation. Should equal agents gamma parameter.
"""
self._gamma = gamma
self._safety_margin = safety_margin
self._reward_function, self.reward_range = self._reward_functions(reward_fct)
self._limit_observer = self._limit_observers(limit_observer)
self._tau = tau
self._episode_length = episode_length
self.state_vars = np.array(state_vars)
#: dict(int): Inverse state vars. Dictionary to map state names to positions in the state arrays
self._state_var_positions = {}
for ind, val in enumerate(state_vars):
self._state_var_positions[val] = ind
self._prediction_horizon = max(0, prediction_horizon)
self._zero_refs = zero_refs
#: array(bool): True, if the state variable on the index is a zero_reference. For fast access
self._zero_ref_flags = np.isin(self.state_vars, self._zero_refs)
self.load_model = load_models.Load(load_parameter)
self.motor_model = dcmotor_model.make(motor_type, self.load_model.load, motor_parameter)
self.converter_model = converter_models.Converter.make(converter_type, self._tau, interlocking_time, dead_time)
self._k = 0
self._dashboard = None
self._state = np.zeros(len(state_vars))
self._reference = np.zeros((len(self.state_vars), episode_length + prediction_horizon))
self._reward_weights = np.zeros(len(self._state))
self.reference_vars = np.zeros_like(self.state_vars, dtype=bool)
self._on_dashboard = np.ones_like(self.state_vars, dtype=bool)
if on_dashboard[0] == 'True':
self._on_dashboard *= True
elif on_dashboard[0] == 'False':
self._on_dashboard *= False
else:
self._on_dashboard *= False
for key in on_dashboard:
self._on_dashboard[self._state_var_positions[key]] = True
for key, val in reward_weight:
self._reward_weights[self._state_var_positions[key]] = val
for i in range(len(state_vars)):
if self._reward_weights[i] > 0 and self.state_vars[i] not in self._zero_refs:
self.reference_vars[i] = True
integrators = ['euler', 'dopri5']
assert integrator in integrators, f'Integrator was {integrator}, but has to be in {integrators}'
if integrator == 'euler':
self.system = EulerSolver(self._system_eq, nsteps)
else:
self.system = ode(self._system_eq, self._system_jac).set_integrator(integrator, nsteps=nsteps)
self.integrate = self.system.integrate
self.action_space = self.converter_model.action_space
self._limits = np.zeros(len(self.state_vars))
self._set_limits()
self._set_observation_space()
self._noise_levels = np.zeros(len(state_vars))
if type(noise_levels) is tuple:
for state_var, noise_level in noise_levels:
self._noise_levels[self._state_var_positions[state_var]] = noise_level
else:
self._noise_levels = np.ones(len(self.state_vars)) * noise_levels
self._noise = None
self._resetDashboard = True
def seed(self, seed=None):
"""
Seed the random generators in the environment
Args:
seed: The value to seed the random number generator with
"""
np.random.seed(seed)
def _set_observation_space(self):
"""
Child classes need to write their concrete observation space into self.observation_space here
"""
raise NotImplementedError
def _set_limits(self):
"""
Child classes need to write their concrete limits of the state variables into self._limits here
"""
raise NotImplementedError
def _step_integrate(self, action):
"""
The integration is done for one time period. The converter considers the dead time and interlocking time.
Args:
action: switching state of the converter that should be applied
"""
raise NotImplementedError
def step(self, action):
"""
Clips the action to its limits and performs one step of the environment.
Args:
action: The action from the action space that will be performed on the motor
Returns:
Tuple(array(float), float, bool, dict):
**observation:** The observation from the environment \n
**reward:** The reward for the taken action \n
**bool:** Flag if the episode has ended \n
**info:** An always empty dictionary \n
"""
last_state = np.array(self._state, copy=True)
self._step_integrate(action)
rew = self._reward(self._state/self._limits, self._reference[:, self._k].T)
done, punish = self.limit_observer(self._state)
observation_references = self._reference[self.reference_vars, self._k:self._k + self._prediction_horizon + 1]
# normalize the observation
observation = np.concatenate((
self._state/self._limits + self._noise[:, self._k], observation_references.flatten()
))
self._k += 1
if done == 0: # Check if period is finished
done = self._k == self._episode_length
else:
rew = punish
return observation, rew, done, {}
def _set_initial_value(self):
"""
call self.system.set_initial_value(initial_state, 0.0) to reset the state to initial.
"""
self.system.set_initial_value(self._state[self.MOTOR_IDX], 0.0)
def reset(self):
"""
Resets the environment.
All state variables will be set to a random value in [-nominal value, nominal value].
New references will be generated.
Returns:
The initial observation for the episode
"""
self._k = 0
# Set new state
self._set_initial_state()
# New References
self._generate_references()
# Reset Integrator
self._set_initial_value()
# Reset Dashboard Flag
self._resetDashboard = True
# Generate new gaussian noise for the state variables
self._noise = (
np.sqrt(self._noise_levels/6) / self._safety_margin
* np.random.randn(self._episode_length+1, len(self.state_vars))
).T
# Calculate initial observation
observation_references = self._reference[self.reference_vars, self._k:self._k + self._prediction_horizon+1]
observation = np.concatenate((self._state/self._limits, observation_references.flatten()))
return observation
def render(self, mode='human'):
"""
Call this function once a cycle to update the visualization with the current values.
"""
if not self._on_dashboard.any():
return
if self._dashboard is None:
# First Call: No dashboard was initialised before
self._dashboard = MotorDashboard(self.state_vars[self._on_dashboard], self._tau,
self.observation_space.low[:len(self.state_vars)][self._on_dashboard]
* self._limits[self._on_dashboard],
self.observation_space.high[:len(self.state_vars)][self._on_dashboard]
* self._limits[self._on_dashboard],
self._episode_length,
self._safety_margin,
self._reward_weights[self._on_dashboard] > 0)
if self._resetDashboard:
self._resetDashboard = False
self._dashboard.reset((self._reference[self._on_dashboard].T * self._limits[self._on_dashboard]).T)
self._dashboard.step(self._state[self._on_dashboard], self._k) # Update the plot in the dashboard
def close(self):
"""
When the environment is closed the dashboard will also be closed.
This function does not need to be called explicitly.
"""
if self._dashboard is not None:
self._dashboard.close()
def _system_eq(self, t, state, u_in, noise):
"""
The differential equation of the whole system consisting of the converter, load and motor.
This function is called by the integrator.
Args:
t: Current time of the system
state: The current state as a numpy array.
u_in: Applied input voltage
Returns:
The solution of the system. The first derivatives of all the state variables of the system.
"""
t_load = self.load_model.load(state[self.OMEGA_IDX])
return self.motor_model.model(state, t_load, u_in + noise)
def _system_jac(self, t, state):
"""
The Jacobian matrix of the systems equation.
Args:
t: Current time of the system.
state: Current state
Returns:
The solution of the Jacobian matrix for the current state
"""
load_jac = self.load_model.jac(state)
return self.motor_model.jac(state, load_jac)
# region Reference Generation
def _reference_sin(self, bandwidth=20):
"""
Set sinus references for the state variables with a random amplitude, offset and phase shift
Args:
bandwidth: bandwidth of the system
"""
x = np.arange(0, (self._episode_length + self._prediction_horizon))
if self.observation_space.low[0] == 0.0:
amplitude = np.random.rand() / 2
offset = np.random.rand() * (1 - 2*amplitude) + amplitude
else:
amplitude = np.random.rand()
offset = (2 * np.random.rand() - 1) * (1 - amplitude)
t_min, t_max = self._set_time_interval_reference('sin', bandwidth) # specify range for period time
t_s = np.random.rand() * (t_max - t_min) + t_min
phase_shift = 2 * np.pi * np.random.rand()
self._reference = amplitude * np.sin(2 * np.pi / t_s * x * self.tau + phase_shift) + offset
self._reference = self._reference*np.ones((len(self.state_vars), 1))/self._safety_margin
def _reference_rect(self, bandwidth=20):
"""
Set rect references for the state variables with a random amplitude, offset and phase shift
Args:
bandwidth: bandwidth of the system
"""
x = np.arange(0, (self._episode_length + self._prediction_horizon))
if self.observation_space.low[self.OMEGA_IDX] == 0.0:
amplitude = np.random.rand()
offset = np.random.rand() * (1 - amplitude)
else:
amplitude = 2 * np.random.rand() - 1
offset = (-1 + np.random.rand() * (2 - np.abs(amplitude))) * np.sign(amplitude)
t_min, t_max = self._set_time_interval_reference('rect', bandwidth)
# specify range for period time
t_s = np.random.rand() * (t_max - t_min) + t_min
# time period on amplitude + offset value
t_on = np.random.rand() * t_s
# time period on offset value
t_off = t_s - t_on
reference = np.zeros(self._episode_length + self._prediction_horizon)
reference[x * self.tau % (t_on + t_off) > t_off] = amplitude
reference += offset
self._reference = reference * np.ones((len(self.state_vars), 1)) / self._safety_margin
def _reference_tri(self, bandwidth=20):
"""
Set triangular reference with random amplitude, offset, times for rise and fall for all state variables
Args:
bandwidth: bandwidth of the system
"""
t_min, t_max = self._set_time_interval_reference('tri', bandwidth) # specify range for period time
t_s = np.random.rand() * (t_max-t_min) + t_min
t_rise = np.random.rand() * t_s
t_fall = t_s - t_rise
if self.observation_space.low[self.OMEGA_IDX] == 0.0:
amplitude = np.random.rand()
offset = np.random.rand() * (1 - amplitude)
else:
amplitude = 2 * np.random.rand() - 1
offset = (-1 + np.random.rand() * (2 - np.abs(amplitude))) * np.sign(amplitude)
reference = np.ones(self._episode_length + self._prediction_horizon)
for t in range(0, (self._episode_length + self._prediction_horizon)):
# use a triangular function
if (t*self.tau) % t_s <= t_rise:
reference[t] = ((t * self.tau) % t_s) / t_rise * amplitude + offset
else:
reference[t] = -((t * self.tau) % t_s - t_s) / t_fall * amplitude + offset
self._reference = reference*np.ones((len(self.state_vars), 1))/self._safety_margin
def _reference_sawtooth(self, bandwidth=20):
"""
Sawtooth signal generator with random time period and amplitude
Args:
bandwidth: bandwidth of the system
"""
t_min, t_max = self._set_time_interval_reference('sawtooth', bandwidth) # specify range for period time
t_s = np.random.rand() * (t_max - t_min) + t_min
if self.observation_space.low[self.OMEGA_IDX] == 0.0:
amplitude = np.random.rand()
else:
amplitude = 2 * np.random.rand() - 1
x = np.arange(self.episode_length + self._prediction_horizon, dtype=float)
self._reference = np.ones_like(x, dtype=float)
self._reference *= (x * self.tau) % t_s * amplitude / t_s
self._reference = self._reference * np.ones((len(self.state_vars), 1)) / self._safety_margin
def _generate_references(self, bandwidth=20):
"""
Select which reference to generate. The shaped references (rect, sin, triangular, sawtooth) are equally probable
with 12.5% each, and a random reference is generated with a probability of 50%.
Args:
bandwidth: bandwidth of the system
"""
val = np.random.rand()
if val < 0.125:
self._reference_rect(bandwidth)
elif val < 0.25:
self._reference_sin(bandwidth)
elif val < 0.375:
self._reference_tri(bandwidth)
elif val < 0.5:
self._reference_sawtooth(bandwidth)
else:
self._generate_random_references()
# Set the supply voltage.
# In this step an additive noise to the supply voltage can be implemented in the future.
u_sup = np.ones(self.episode_length + self._prediction_horizon) * self.motor_model.u_sup \
/ self._limits[self._state_var_positions['u_sup']]
self._reference[self._state_var_positions['u_sup']] = u_sup
# Reset all zero references to zero.
self._reference[self._zero_ref_flags] = np.zeros((len(self._zero_refs),
self.episode_length + self._prediction_horizon))
def _generate_random_references(self):
"""
Each subclass needs to define its own random reference generation here.
"""
raise NotImplementedError()
def _generate_random_control_sequence(self, bw, maximum):
"""
Function that is called by the random reference generation in the motors to generate a random control sequence.
A random control sequence is applied onto the system and generates the reference trajectories.
Args:
bw: Bandwidth for the control sequence
maximum: Maximum value for the control sequence
Returns:
A random control sequence that is following the bandwidth and power constraints at most.
"""
ref_len = self.episode_length + self._prediction_horizon
rands = np.random.randn(2, ref_len // 2)
u = rands[0] + 1j * rands[1]
bw_noise = np.random.rand() * 0.5
bw *= bw_noise
delta_w = 2 * np.pi / ref_len / self._tau
u[int(bw / delta_w) + 1:] = 0.0
sigma = np.linspace(1, 0, int(bw / delta_w) + 1)
if len(sigma) < len(u):
u[:len(sigma)] *= sigma
else:
u *= sigma[:len(u)]
fourier = np.concatenate((np.random.randn(1), u, np.flip(np.conjugate(u))))
u = np.fft.ifft(fourier).real
power_noise = np.random.rand() + 0.5
u = u * maximum / np.sqrt((u ** 2).sum() / ref_len) * power_noise
leakage = np.random.rand(1) * 0.1
voltage_offset = maximum * ((self.converter_model.voltages[1] - self.converter_model.voltages[0])
* np.random.rand() + self.converter_model.voltages[0])
u += voltage_offset
u = np.clip(u, (self.converter_model.voltages[0] - leakage) * maximum,
(self.converter_model.voltages[1] + leakage) * maximum)
return u[:ref_len]
def _set_time_interval_reference(self, shape=None, bandwidth=20):
"""
This function returns the minimum and maximum time period specified by the bandwidth of the motor,
episode length and individual modifications for each shape
At least one time period of a shape should fit in an episode, but it should not be so fast that the motor cannot follow the
reference properly.
Args:
shape: shape of the reference
Returns:
Minimal and maximal time period
"""
bw = self._maximal_bandwidth(bandwidth) # Bandwidth of reference limited
t_episode = (self.episode_length+self._prediction_horizon)*self.tau
t_min = min(1 / bw, t_episode)
t_max = max(1 / bw, t_episode)
# In this part individual modifications can be made for each shape
# Modify the values to get useful references. Some testing necessary to find practical values.
if shape == 'sin':
t_min = t_min
t_max = t_max / 3
elif shape == 'rect':
t_min = t_min
t_max = t_max / 3
elif shape == 'tri':
t_min = t_min
t_max = t_max / 5
elif shape == 'sawtooth':
t_min = t_min
t_max = t_max / 5
else:
t_min = t_min
t_max = t_max/5
return min(t_min, t_max), max(t_min, t_max) # make sure that the order is correct
def _maximal_bandwidth(self, bandwidth=20):
"""
Computes the maximal allowed bandwidth, considering a user defined limit and the technical limit.
Args:
bandwidth: Maximal user defined value for the bandwidth
Returns:
Maximal bandwidth for the reference
"""
return min(self.motor_model.bandwidth(), bandwidth)
# endregion
def _set_initial_state(self):
"""
Defined in each motor itself. Sets the initial environment state.
"""
raise NotImplementedError
# region Reward Functions
def _reward_functions(self, key):
"""
Selector for the concrete reward function selected by the key string
Returns:
The selected reward function.
"""
return {
# (Reward Function, Reward Range)
'swae': (self._absolute_error, (-1, 0)),
'swse': (self._squared_error, (-1, 0)),
'swsae': (self._shifted_absolute_error, (0, 1)),
'swsse': (self._shifted_squared_error, (0, 1)),
}[key]
def _absolute_error(self, state, reference):
"""
The weighted, absolute error between the reference and state variables normalised to [-1,0]
Args:
state: the current state of the environment
reference: the current reference values of the observation variables
Returns:
The reward value
"""
return -(self._reward_weights * np.abs(state - reference)
/ (self.observation_space.high[:len(self.state_vars)]
- self.observation_space.low[:len(self.state_vars)])
).sum()
def _squared_error(self, state, reference):
"""
        The weighted, squared error between the reference and state variables normalised to [-1,0]
Args:
state: the current state of the environment
reference: the current reference values of the observation variables
Returns:
The reward value
"""
return -(self._reward_weights *
((state - reference)
/ (self.observation_space.high[:len(self.state_vars)]
- self.observation_space.low[:len(self.state_vars)])
)**2
).sum()
def _shifted_squared_error(self, state, reference):
"""
The weighted, squared error between the reference and state variables normalised to [0,1]
Args:
state: the current state of the environment
reference: the current reference values of the observation variables
Returns:
The reward value
"""
return 1 + self._squared_error(state, reference)
def _shifted_absolute_error(self, state, reference):
"""
The weighted, absolute error between the reference and state variables normalised to [0,1]
Args:
state: the current state of the environment
reference: the current reference values of the observation variables
Returns:
The reward value
"""
return 1 + self._absolute_error(state, reference)
# endregion
# region Limit Observers
def _limit_observers(self, key):
"""
Selector for the concrete limit observer by the key string.
Returns:
The selected limit observer function.
"""
return {
'off': self._no_observation,
'no_punish': self._no_punish,
'const_punish': self._const_punish,
}[key]
def _no_punish(self, state):
"""
No reward punishment, only break the episode when limits are violated. Recommended for positive rewards.
Args:
state: Current state of the environment
Returns:
Tuple of a flag if the episode should be terminated and the punishment for the reward
"""
if self._limits_violated(state):
return False, 0.0
else:
return True, 0.0
def _const_punish(self, state):
"""
Punishment, if constraints are violated and termination of the episode.
        The punishment equals -1 / (1 - self.gamma), which is the discounted return a learner would receive
        if it always obtained the minimum reward after the limit violation.
This punishment is recommended, when taking a negative reward function.
Args:
state: Current state of the environment
Returns:
Tuple of a flag if the episode should be terminated and the punishment for the reward
"""
if self._limits_violated(state):
return False, 0.0
else:
# Terminate the episode if constraints are violated
            return True, -1 / (1 - self._gamma)
def _limits_violated(self, state):
"""
        Check whether the current state complies with all limits. Despite its name, this method returns
        True when no limit is violated.
        Args:
            state: Current state of the environment
        Returns:
            True, if all observed state variables are within their limits; False, if any limit is violated.
"""
return (np.abs(state) <= self.observation_space.high[:len(self.state_vars)] * self._limits).all()
def _no_observation(self, *_):
"""
No limit violations are observed. No punishment and the episode continues even after limit violations.
Args:
state: Current state of the motor
Returns:
Tuple of a flag if the episode should be terminated (here always false)
and the punishment for the reward (here always 0)
"""
return False, 0.0
# endregion
def get_motor_param(self):
"""
Returns:
This function returns all motor parameters, sampling time, safety margin and converter limits
"""
params = self.motor_parameter
params['converter_voltage'] = self.converter_model.voltages
params['converter_current'] = self.converter_model.currents
return params
|
[
"wilhelmk@campus.uni-paderborn.de"
] |
wilhelmk@campus.uni-paderborn.de
|
db5af6c9ccb8290c2b3765b621537f4f20a2bf9b
|
fc85f6e336d4d5624af45d58e4d2b6a7b6edafaf
|
/image_diet/tests/test_commands.py
|
2948470b1b1960159a208d6f7081a138281c1f53
|
[
"MIT"
] |
permissive
|
ArabellaTech/django-image-diet
|
2d0cf77369035c2ebfe6685e2f4ffe347507e092
|
dcf904d89f65a5123509a0718ef3758ea5674579
|
refs/heads/master
| 2020-04-05T22:54:17.661895
| 2016-07-25T07:35:21
| 2016-07-25T07:35:21
| 31,337,651
| 1
| 1
| null | 2016-07-25T07:12:47
| 2015-02-25T21:52:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
import os
from os.path import join, dirname
from shutil import copyfile
from django.test import TestCase
from image_diet.management.commands import diet_images
TEST_DIR = join(dirname(__file__), 'test_files')
class DietCommandTest(TestCase):
def setUp(self):
image_path = join(TEST_DIR, 'stockholm.jpg')
self.nested_dir = join('dir1', 'dir2', 'dir3')
self.test_root_dir = join(TEST_DIR, 'dir1')
os.makedirs(join(TEST_DIR, self.nested_dir))
self.test_image_path = join(TEST_DIR, self.nested_dir, 'stockholm.jpg')
copyfile(image_path, self.test_image_path)
def tearDown(self):
os.remove(self.test_image_path)
os.chdir(TEST_DIR)
os.removedirs(self.nested_dir)
def test_diet_images(self):
old_size = os.stat(self.test_image_path).st_size
action = diet_images.Command()
action.handle(self.test_root_dir)
new_size = os.stat(self.test_image_path).st_size
self.assertTrue(new_size < old_size)
|
[
"markos@gaivo.net"
] |
markos@gaivo.net
|
466524b45d53aaa85ccff0695a52ed8c641b06bf
|
8a0297dbf9b90f001077ba487f6d7c9263e1242b
|
/setup.py
|
0205937a6520d9311cb7270c3353578328a97bbe
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
shea256/pybitcointools
|
b2173fe2735229380384460315f00185e3310d3c
|
f7223208e5ce260f27a447fcef22a98957e938c2
|
refs/heads/master
| 2021-01-17T11:52:18.686497
| 2013-12-10T02:13:02
| 2013-12-10T02:13:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='pybitcointools',
version='1.0',
description='Python Bitcoin Tools',
author='Vitalik Buterin',
author_email='vbuterin@gmail.com',
url='http://github.com/vbuterin/pybitcointools',
packages=['pybitcointools'],
scripts=['pybtctool']
)
|
[
"vub@gmail.com"
] |
vub@gmail.com
|
7df7704850cb5541240900662aa18de7e49573fc
|
ec9f242c13c271910cf9db0fb8202ab6f2fcdf9c
|
/Chapter_3/Chapter_3_1_1_1.py
|
78c42c8309e6f85f8ca0065f4b9ca077ec018a47
|
[
"Apache-2.0"
] |
permissive
|
flytian/python_machinelearning
|
0f32807c73e92b98b008cea1e6d8fb92702cb4fb
|
004707c3e66429f102272a7da97e532255cca293
|
refs/heads/master
| 2021-08-30T12:52:20.259662
| 2017-12-16T07:16:29
| 2017-12-16T07:16:29
| 114,345,987
| 0
| 0
|
Apache-2.0
| 2017-12-16T07:16:30
| 2017-12-15T08:23:23
|
Python
|
UTF-8
|
Python
| false
| false
| 5,622
|
py
|
# coding:utf-8
# Define a list of dictionaries representing several data samples (each dictionary is one sample).
measurements = [{'city': 'Dubai', 'temperature': 33.}, {'city': 'London', 'temperature': 12.},
{'city': 'San Fransisco', 'temperature': 18.}]
# Import DictVectorizer from sklearn.feature_extraction
from sklearn.feature_extraction import DictVectorizer
# Initialize the DictVectorizer feature extractor
vec = DictVectorizer()
# Print the feature matrix after the transformation.
print vec.fit_transform(measurements).toarray()
# Print the meaning of each feature dimension.
print vec.get_feature_names() # ['city=Dubai', 'city=London', 'city=San Fransisco', 'temperature']
# Import the 20-newsgroups text data fetcher from sklearn.datasets.
from sklearn.datasets import fetch_20newsgroups
# Download the news samples from the internet on the fly; subset='all' downloads all of the roughly 20,000 documents and stores them in the variable news.
news = fetch_20newsgroups(subset='all')
# Import the train_test_split module from sklearn.cross_validation to split the dataset.
from sklearn.cross_validation import train_test_split
# Split the data in news: 25% of the documents are used as the test set and 75% as the training set.
X_train, X_test, y_train, y_test = train_test_split(news.data, news.target, test_size=0.25, random_state=33)
# Import CountVectorizer from sklearn.feature_extraction.text
from sklearn.feature_extraction.text import CountVectorizer
# Initialize CountVectorizer with the default configuration (which does not remove English stop words) and assign it to the variable count_vec.
count_vec = CountVectorizer()
# Convert the raw training and test texts into feature vectors using word counts only.
X_count_train = count_vec.fit_transform(X_train)
X_count_test = count_vec.transform(X_test)
# Import the naive Bayes classifier from sklearn.naive_bayes.
from sklearn.naive_bayes import MultinomialNB
# Initialize the classifier with the default configuration.
mnb_count = MultinomialNB()
# Fit the naive Bayes classifier on the training samples vectorized by CountVectorizer (without stop word filtering).
mnb_count.fit(X_count_train, y_train)
# Print the accuracy of the model.
print 'The accuracy of classifying 20newsgroups using Naive Bayes (CountVectorizer without filtering stopwords):', mnb_count.score(
X_count_test, y_test)
# Store the classification predictions in the variable y_count_predict.
y_count_predict = mnb_count.predict(X_count_test)
# Import classification_report from sklearn.metrics.
from sklearn.metrics import classification_report
# Print more detailed metrics for evaluating the classification performance.
print classification_report(y_test, y_count_predict, target_names=news.target_names)
# Import TfidfVectorizer from sklearn.feature_extraction.text.
from sklearn.feature_extraction.text import TfidfVectorizer
# Initialize TfidfVectorizer with the default configuration (which does not remove English stop words) and assign it to the variable tfidf_vec.
tfidf_vec = TfidfVectorizer()
# Convert the raw training and test texts into feature vectors using tf-idf.
X_tfidf_train = tfidf_vec.fit_transform(X_train)
X_tfidf_test = tfidf_vec.transform(X_test)
# Use a naive Bayes classifier with the default configuration again to evaluate the new feature representation on the same training and test data.
mnb_tfidf = MultinomialNB()
mnb_tfidf.fit(X_tfidf_train, y_train)
print 'The accuracy of classifying 20newsgroups with Naive Bayes (TfidfVectorizer without filtering stopwords):', mnb_tfidf.score(
X_tfidf_test, y_test)
y_tfidf_predict = mnb_tfidf.predict(X_tfidf_test)
print classification_report(y_test, y_tfidf_predict, target_names=news.target_names)
# Reusing the packages imported above (in the same source file, or without closing the interpreter), initialize CountVectorizer and TfidfVectorizer with stop word filtering enabled.
count_filter_vec, tfidf_filter_vec = CountVectorizer(analyzer='word', stop_words='english'), TfidfVectorizer(
analyzer='word', stop_words='english')
# Vectorize the training and test texts with the CountVectorizer that filters stop words.
X_count_filter_train = count_filter_vec.fit_transform(X_train)
X_count_filter_test = count_filter_vec.transform(X_test)
# Vectorize the training and test texts with the TfidfVectorizer that filters stop words.
X_tfidf_filter_train = tfidf_filter_vec.fit_transform(X_train)
X_tfidf_filter_test = tfidf_filter_vec.transform(X_test)
# Initialize a naive Bayes classifier with the default configuration and evaluate predictions and accuracy on the CountVectorizer features.
mnb_count_filter = MultinomialNB()
mnb_count_filter.fit(X_count_filter_train, y_train)
print 'The accuracy of classifying 20newsgroups using Naive Bayes (CountVectorizer by filtering stopwords):', mnb_count_filter.score(
X_count_filter_test, y_test)
y_count_filter_predict = mnb_count_filter.predict(X_count_filter_test)
# Initialize another naive Bayes classifier with the default configuration and evaluate predictions and accuracy on the TfidfVectorizer features.
mnb_tfidf_filter = MultinomialNB()
mnb_tfidf_filter.fit(X_tfidf_filter_train, y_train)
print 'The accuracy of classifying 20newsgroups with Naive Bayes (TfidfVectorizer by filtering stopwords):', mnb_tfidf_filter.score(
X_tfidf_filter_test, y_test)
y_tfidf_filter_predict = mnb_tfidf_filter.predict(X_tfidf_filter_test)
# Produce a more detailed performance evaluation for the two models above.
from sklearn.metrics import classification_report
print classification_report(y_test, y_count_filter_predict, target_names=news.target_names)
print classification_report(y_test, y_tfidf_filter_predict, target_names=news.target_names)
|
[
"flytonus@sina.cn"
] |
flytonus@sina.cn
|
71d68c642a70a8d625599303258d762986ccd3f6
|
46537fe6906fa10ed515baf36598168ff948aeaf
|
/mq/apps.py
|
bd83a9c748c327fc22707100d3383b38124cd392
|
[] |
no_license
|
dima-kov/django-mq
|
6b0538499a6091601ada1ecb962875d5f5634104
|
08c458780173e64785d30f87536121fa9e8a29ae
|
refs/heads/master
| 2023-07-31T10:33:40.209376
| 2021-09-19T10:18:34
| 2021-09-19T10:18:34
| 408,092,675
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
from django.apps import AppConfig
class MqAppConfig(AppConfig):
def ready(self):
from mq.facade import QueuesFacade # noqa
|
[
"dima.kovalchuk.v@gmail.com"
] |
dima.kovalchuk.v@gmail.com
|
9f9baa8b7fa3b27ad62e12bd7f6621fcb8b83ba6
|
9bbf1cb7b0cd2444f2830efb696640ad42a2bfd4
|
/python/__init__.py
|
611fda76681e56175d3830d41a6a4dd31dbb5d14
|
[] |
no_license
|
idaohang/KalmanAnalyzer
|
aad4dfd209c1c160a5cdd8258d5ee77a01bfc769
|
12cdfc1f8ff480c2a2b8a5ca795eb982e1936ed9
|
refs/heads/master
| 2021-01-22T08:19:11.261942
| 2014-11-17T13:25:14
| 2014-11-17T13:25:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
#Automatically created by SCRAM
import os
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/UserCode/KalmanAnalyzer/',1)[0])+'/cfipython/slc6_amd64_gcc472/UserCode/KalmanAnalyzer')
|
[
"e.bouvier@ipnl.in2p3.fr"
] |
e.bouvier@ipnl.in2p3.fr
|
b31250d3654352faa232e299e85343692965b7ff
|
1951c50108892a1b89777749dd951cf49a4361ae
|
/blog/__init__.py
|
696460cf82c714d5d021fca0e6d398d58b4308b0
|
[] |
no_license
|
bluewhale1207/myblog
|
3e04c7b4a3d598d52890624a361b16cc752250d9
|
e8351cf68b36dfdbd8290cffaaa0915fc182a1b7
|
refs/heads/master
| 2016-09-05T16:16:30.627316
| 2015-11-16T11:25:07
| 2015-11-16T11:25:07
| 31,940,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 692
|
py
|
# -*- coding: utf-8 -*-
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from blog.models.model_user import User
app = Flask(__name__)
app.config.from_object(os.environ['BLOG_SETTINGS'])
# Login setup
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = '.user_login_required'
login_manager.login_message = u'请登录'
db = SQLAlchemy()
db.init_app(app)
@login_manager.user_loader
def load_user(username):
return User.query.filter_by(name=username).first()
from blog.views import general
from blog.views import user
app.register_blueprint(general.mod)
app.register_blueprint(user.mod)
|
[
"liushujie@papayamobile.com"
] |
liushujie@papayamobile.com
|
056bd878933e3e8f3a603973ac2a820ac07bce18
|
fd43e56e22254e8a653e32ad7262c1f41c670391
|
/lcov/__init__.py
|
468b86309784894323139b90053882aff64f7019
|
[
"MIT"
] |
permissive
|
hubiao7/scons-lcov
|
0ba681537aa8b81d1e5668b4bc011c182bf47eee
|
0c88bea03c787d001691f970faf5e9b7a3fe98ba
|
refs/heads/master
| 2021-06-01T05:49:25.356588
| 2016-06-01T20:47:08
| 2016-06-01T20:47:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,068
|
py
|
import SCons
from SCons.Builder import Builder
from SCons.Script import Dir, Flatten, Mkdir
from os import path
class ToolLCovWarning(SCons.Warnings.Warning):
pass
class LCovExecutableNotFound(ToolLCovWarning):
pass
def lcov_generator(source, target, env, for_signature):
cmd = ['lcov --capture']
cmd += ['--output-file', target[0].abspath]
if 'LCOVDIR' in env:
cmd += ['--directory', str(Dir(env['LCOVDIR']))]
if 'LCOVBASEDIR' in env:
cmd += ['--base-directory', str(Dir(env['LCOVBASEDIR']))]
return ' '.join(Flatten(cmd))
_lcov_builder = Builder(generator=lcov_generator)
def generate(env):
env['LCov'] = _detect(env)
env['BUILDERS']['LCov'] = _lcov_builder
def _detect(env):
try:
return env['LCov']
except KeyError:
pass
lcov = env.WhereIs('lcov')
if lcov:
return lcov
    raise SCons.Errors.StopError(LCovExecutableNotFound,
                                 'Could not detect lcov executable')
def exists(env):
return _detect(env)
|
[
"rhythm.mail@gmail.com"
] |
rhythm.mail@gmail.com
|
d5f4573a4e213f3cc96fd81d923189be3a18f7b8
|
e7b5561944ca0cbec321110e17618815e4ff627c
|
/backend/app/migrations/garpix_notify/0002_auto_20210720_2244.py
|
9371a081124611fbdb04a0a27df63a7bec829035
|
[] |
no_license
|
AlexandrMikhailovich/cms_test3
|
3a6ac4be10ef7ae5bda2bfdaf2ff38ad9bc5c641
|
1579f853cc2c526f0fdaab9f14baf9659c23d178
|
refs/heads/master
| 2023-06-23T01:34:19.806972
| 2021-07-21T06:08:39
| 2021-07-21T06:08:39
| 388,012,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,345
|
py
|
# Generated by Django 3.1 on 2021-07-20 19:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('auth', '0012_alter_user_first_name_max_length'),
('garpix_notify', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='notifyuserlistparticipant',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='user_lists', to=settings.AUTH_USER_MODEL, verbose_name='Пользователь (получатель)'),
),
migrations.AddField(
model_name='notifyuserlistparticipant',
name='user_list',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='participants', to='garpix_notify.notifyuserlist', verbose_name='Список пользователей для рассылки'),
),
migrations.AddField(
model_name='notifyuserlist',
name='user_groups',
field=models.ManyToManyField(blank=True, to='auth.Group', verbose_name='Группы пользователей'),
),
migrations.AddField(
model_name='notifytemplate',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='templates', to='garpix_notify.notifycategory', verbose_name='Категория'),
),
migrations.AddField(
model_name='notifytemplate',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Пользователь (получатель)'),
),
migrations.AddField(
model_name='notifytemplate',
name='user_lists',
field=models.ManyToManyField(blank=True, to='garpix_notify.NotifyUserList', verbose_name='Списки пользователей, которые получат копию уведомления'),
),
migrations.AddField(
model_name='notifyerrorlog',
name='notify',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='logs', to='garpix_notify.notify', verbose_name='Notify'),
),
migrations.AddField(
model_name='notify',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notifies', to='garpix_notify.notifycategory', verbose_name='Категория'),
),
migrations.AddField(
model_name='notify',
name='files',
field=models.ManyToManyField(to='garpix_notify.NotifyFile', verbose_name='Файлы'),
),
migrations.AddField(
model_name='notify',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='notifies', to=settings.AUTH_USER_MODEL, verbose_name='Пользователь (получатель)'),
),
]
|
[
"Alexandr1990@gitlab.com"
] |
Alexandr1990@gitlab.com
|
8e6db07c7045df813af567ff7094d94f80b3b8c6
|
fb6482e5d6c6f93b6d04165048e32ba47ab0f605
|
/CSCI 127/Labs/lab12.py
|
ba610d51fe0d1d80d44ec6d31c607ae79398ced5
|
[] |
no_license
|
Risauce/Pre2015Code
|
3a5a13bc38769708b151b9a790cf7ccfc0251574
|
fc0e641e7bebbaeec8202550ece880b98b48c1fc
|
refs/heads/master
| 2020-08-28T09:33:18.171859
| 2019-10-26T05:54:24
| 2019-10-26T05:54:24
| 217,662,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,931
|
py
|
import numpy as np
import matplotlib.pyplot as plt
# -----------------------------------------------------
# CSCI 127, Lab 12
# November 21, 2017
# Your Name
# -----------------------------------------------------
def read_file(name):
input_file = open(name, "r")
number_buckets = int(input_file.readline())
total_counties = int(input_file.readline())
county_populations = np.zeros([total_counties], dtype="int")
for county_number in range(total_counties):
line = input_file.readline().split(",")
county_populations[county_number] = int(line[1])
county_populations.sort()
input_file.close()
return number_buckets, county_populations
# -----------------------------------------------------
def print_summary(averages):
print("Population Grouping Summary")
print("---------------------------")
for grouping in range(len(averages)):
print("Grouping", grouping + 1, "has a population average of",
averages[grouping])
# -----------------------------------------------------
# Do not change anything above this line
# -----------------------------------------------------
def calculate_averages(number_buckets, county_populations):
    # Split the sorted county populations into equally sized groupings
    # (this assumes the number of counties divides evenly into the buckets)
    # and compute the average population of each grouping.
    bucket_size = len(county_populations) // number_buckets
    averages = np.zeros(number_buckets)
    for i in range(number_buckets):
        averages[i] = np.average(county_populations[i * bucket_size:(i + 1) * bucket_size])
    return averages
# -----------------------------------------------------
def graph_summary(averages):
pass
# -----------------------------------------------------
number_buckets, county_populations = read_file("montana-counties.txt")
averages = calculate_averages(number_buckets, county_populations)
print_summary(averages)
graph_summary(averages)
|
[
"noreply@github.com"
] |
noreply@github.com
|
92167807a2c7f1bf7c9b2fa2d8c101cf3984620c
|
2f1a2a175bd3b6ef646b6329169dda18127e34b2
|
/todoclass/urls.py
|
2397da1dd7a14e1820678879959d92ca02b8dd8b
|
[] |
no_license
|
Alisher007/todomain
|
13608ca796a47b69f86ca709c1fafd2b424978b4
|
cdd08a21f2bdd0b80bf2f6ae2ebc6825ed760869
|
refs/heads/master
| 2022-12-14T04:50:09.869081
| 2020-09-11T11:47:00
| 2020-09-11T11:47:00
| 294,675,849
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
from django.contrib import admin
from django.urls import path
from .views import TodoListView, TodoDetailView, TodoCreateView, TodoUpdateView, TodoDeleteView
app_name = 'todoclass'
urlpatterns = [
path('', TodoListView.as_view(), name='list'),
path('create/', TodoCreateView.as_view(), name='create'),
path('<int:pk>/', TodoDetailView.as_view(), name='detail'),
path('<int:pk>/update/', TodoUpdateView.as_view(), name='update'),
path('<int:pk>/delete/', TodoDeleteView.as_view(), name='delete'),
]
|
[
"alisher.khalikulov@jaresorts.com"
] |
alisher.khalikulov@jaresorts.com
|
11fffdf455b5b2de1d41093e5db837b67414fb80
|
5cd8fb2e84e5f50f39505a97e9021198700920e3
|
/src/employees/models.py
|
9e10dd1d3e77cc6a7f661f1a78ffe127e549711f
|
[] |
no_license
|
danliu277/openbag_python
|
81a597f72bfc943f8ff98e8b732fe7d6fb936999
|
aef1596709042f66a93883d67114b5b08f8f504f
|
refs/heads/master
| 2022-12-04T03:23:09.670440
| 2020-08-26T20:49:18
| 2020-08-26T20:49:18
| 287,374,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
from django.db import models
class Employee(models.Model):
name = models.CharField(max_length=120)
username = models.CharField(max_length=120)
password = models.CharField(max_length=120)
address = models.CharField(max_length=120)
email = models.CharField(max_length=120)
def __str__(self):
return self.name
|
[
"danliu277@gmail.com"
] |
danliu277@gmail.com
|
145e5904cf2bc4e6e47030788b2461978b486ece
|
6318f1458f9c6cca91cb00aa415638a599d8ba26
|
/arcade/python/arcade-theCore/11_SpringOfIntegration/091_Combs.py
|
ec81b4e9bfbc202b226d08d5d49310be3d66ef37
|
[
"MIT"
] |
permissive
|
netor27/codefights-solutions
|
836016a048086cd2bc644b2c40b7686102b6f179
|
69701ab06d45902c79ec9221137f90b75969d8c8
|
refs/heads/master
| 2021-10-28T13:04:42.940059
| 2019-01-16T23:12:08
| 2019-01-16T23:12:08
| 110,753,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
'''
Miss X has only two combs in her possession, both of which are old and miss a tooth or two. She also has many purses of different length, in which she carries the combs. The only way they fit is horizontally and without overlapping. Given teeth' positions on both combs, find the minimum length of the purse she needs to take them with her.
It is guaranteed that there is at least one tooth at each end of the comb.
It is also guaranteed that the total length of two strings is smaller than 32.
Note, that the combs can not be rotated/reversed.
Example
For comb1 = "*..*" and comb2 = "*.*", the output should be
combs(comb1, comb2) = 5.
Although it is possible to place the combs like on the first picture, the best way to do this is either picture 2 or picture 3.
'''
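# Approach: each comb is encoded as a bit mask (a tooth = 1). For a given horizontal offset the two
# combs fit together iff the AND of the correspondingly shifted masks is zero; the answer is the
# smallest total span over all offsets, tried in both directions.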
def combs(comb1, comb2):
n1, n2 = len(comb1), len(comb2)
res = n1 + n2
m1, m2 = mask(comb1), mask(comb2)
for i in range(n1 + 1):
if (m2 << i) & m1 == 0:
temp = max(n2 + i, n1)
if temp < res:
res = temp
for i in range(n2 + 1):
if (m1 << i) & m2 == 0:
temp = max(n1 + i, n2)
if temp < res:
res = temp
return res
def mask(s):
r = 0
for c in s:
digit = 0
if c == '*':
digit = 1
r = (r << 1) + digit
return r
|
[
"neto.r27@gmail.com"
] |
neto.r27@gmail.com
|
b2a3440874508491785688101a4108cfd7f6edcc
|
5d2ad10a424fd71cc2f12e1ca78d278362435c3b
|
/Day4/newPkg/src/myPkg/scripts/comp_sub.py
|
4617429fc49187b0c83bde193dc654ee8ec0815b
|
[] |
no_license
|
IsraaFahmy/ROS-training-
|
ec7034f55692c062ed42aa2cd9a63d9201db67e8
|
61924149a170292b9e7b049cfd704ed807c01e9a
|
refs/heads/master
| 2022-12-03T00:51:11.341733
| 2020-08-23T22:06:41
| 2020-08-23T22:06:41
| 287,059,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
#!/usr/bin/env python
import rospy
from myPkg.msg import comp
def callback(message):
rospy.loginfo("complex number recieved: %d + %d i", message.real,message.imaginary)
rospy.init_node('comp_node2', anonymous=True)
rospy.Subscriber("comp_topic", comp, callback)
rospy.spin()
|
[
"israafahmy@aucegypt.edu"
] |
israafahmy@aucegypt.edu
|
f723cbdc7b1832a6b9940919ccfcb541b77cc299
|
a6b010255c544b51edef707fa675a2f2f120c159
|
/_site/lib/ml_level.py
|
4e13a8dd97eb2bbb2361ca372f122f4491c5faa6
|
[] |
no_license
|
StonyBrookNLP/irene
|
f30d3dcdc5b336f4816c37017d6cbfd9d4eb80a5
|
54862c65f65bd4eb302344e110401d45c36af49c
|
refs/heads/master
| 2023-07-18T03:54:40.400352
| 2021-09-08T22:01:28
| 2021-09-08T22:01:28
| 371,047,659
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,211
|
py
|
import argparse
from typing import Tuple, List, Dict
import json
import copy
import pickle  # maybe change to dill?
from collections import defaultdict
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn import linear_model
from lib.tree_node import TreeNode
np.random.seed(13370)
def train_linear_regressor(features: np.array, ground_truths: np.array) -> Tuple:
"""
Scales data and trains a simple linear regressor.
"""
regressor = linear_model.LinearRegression()
scale_standardizer = StandardScaler().fit(features)
features = scale_standardizer.transform(features)
transformations = [scale_standardizer]
regressor = regressor.fit(features, ground_truths)
return regressor, transformations
def predict_linear_regressor(
regressor, transformations: List, features: np.array, ids: List[str]
) -> Dict[str, float]:
"""
Generates prediction using trained regressor on the passed features
and returns a dictionary of id to predictions.
"""
for transformation in transformations:
features = transformation.transform(features)
predicted_outputs = regressor.predict(features)
id_to_predicted_values = {
id_: pred for id_, pred in zip(list(ids), list(predicted_outputs))
}
return id_to_predicted_values
def train_ml_level_models(train_trees: List[TreeNode]) -> Tuple[Dict, Dict]:
"""
Trains ML-level regressor on the leaf nodes of training trees and outputs
trained regressor and scalars.
"""
operationwise_ml_level_instances = defaultdict(list)
for tree in train_trees:
for operation_type, ml_level_instances in tree.get_ml_level_data().items():
operationwise_ml_level_instances[operation_type].extend(ml_level_instances)
operationwise_ml_level_model = {}
operationwise_ml_level_transformations = {}
for operation_type, ml_level_instances in operationwise_ml_level_instances.items():
features = np.stack(
[np.array(instance["features"]) for instance in ml_level_instances], axis=0
)
ground_truths = np.array(
[instance["gold_energy"] for instance in ml_level_instances]
)
regressor, transformations = train_linear_regressor(
features=features, ground_truths=ground_truths
)
operationwise_ml_level_model[operation_type] = regressor
operationwise_ml_level_transformations[operation_type] = transformations
return operationwise_ml_level_model, operationwise_ml_level_transformations
def predict_ml_level_models(
operationwise_ml_level_model: Dict,
operationwise_ml_level_transformations: Dict,
predict_trees: List[TreeNode],
) -> List[TreeNode]:
"""
    Runs the regressor on the leaf/ML-level nodes of the predict_trees and stores the predicted_energy
    field in them. Returns the trees annotated with predicted_energy.
"""
assert set(operationwise_ml_level_model.keys()) == set(
operationwise_ml_level_transformations.keys()
)
predict_trees = copy.deepcopy(predict_trees)
for predict_tree in predict_trees:
operationwise_ml_level_instances = predict_tree.get_ml_level_data()
for (
operation_type,
ml_level_instances,
) in operationwise_ml_level_instances.items():
if operation_type not in operationwise_ml_level_model:
raise Exception(
f"Given model isn't trained on operation_type {operation_type}"
)
regressor = operationwise_ml_level_model[operation_type]
transformations = operationwise_ml_level_transformations[operation_type]
features = np.stack(
[np.array(instance["features"]) for instance in ml_level_instances],
axis=0,
)
ids = [instance["id"] for instance in ml_level_instances]
id_to_predicted_values = predict_linear_regressor(
regressor, transformations, features, ids
)
predict_tree.update_tree_node_attributes(
"predicted_energy", id_to_predicted_values
)
return predict_trees
|
[
"yklal95@gmail.com"
] |
yklal95@gmail.com
|
2baa1ddb9774e7deee027fc888daf5f6d3280f5e
|
a63bd0c2b9ce527a163ebc7f326316dc1d8c50b0
|
/tests/fileSizeTest.py
|
b4346abaa5df91e031f403931282400d1a85f57c
|
[] |
no_license
|
jan-polaczek/isodBot-demo
|
902c20d49ffce225736f82a696fef69e914a7c44
|
e963b1835f6706f526249f83237223557ef27f02
|
refs/heads/master
| 2020-06-04T16:49:33.686587
| 2019-06-15T18:20:45
| 2019-06-15T18:20:45
| 192,110,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,433
|
py
|
import os,sys,inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
from Registration import *
def run():
errorMessages = {}
number_of_test = 5
counter = 0
if Registration.fileSize('anonek.tst') == 1:
errorMessages['anonek'] = 'test: podano tylko login'
elif Registration.fileSize('anonek.tst') == 2:
errorMessages['anonek'] = 'test: podano login i hasło'
elif Registration.fileSize('anonek.tst') > 2:
errorMessages['anonek'] = 'test: za dużo linijek w pliku'
else:
#errorMessages['anonek'] = 'test: plik nie istnieje'
counter += 1
if Registration.fileSize('balasm.tst') == 1:
errorMessages['balasm'] = 'test: podano tylko login'
elif Registration.fileSize('balasm.tst') == 2:
errorMessages['balasm'] = 'test: podano login i hasło'
elif Registration.fileSize('balasm.tst') > 2:
#errorMessages['balasm'] = 'test: za dużo linijek w pliku'
counter += 1
else:
errorMessages['balasm'] = 'test: plik nie istnieje'
if Registration.fileSize('boguszj.tst') == 1:
errorMessages['boguszj'] = 'test: podano tylko login'
elif Registration.fileSize('boguszj.tst') == 2:
errorMessages['boguszj'] = 'test: podano login i hasło'
elif Registration.fileSize('boguszj.tst') > 2:
#errorMessages['boguszj'] = 'test: za dużo linijek w pliku'
counter += 1
else:
errorMessages['boguszj'] = 'test: plik nie istnieje'
if Registration.fileSize('polaczej.tst') == 1:
#errorMessages['polaczej'] = 'test: podano tylko login'
counter += 1
elif Registration.fileSize('polaczej.tst') == 2:
errorMessages['polaczej'] = 'test: podano login i hasło'
elif Registration.fileSize('polaczej.tst') > 2:
errorMessages['polaczej'] = 'test: za dużo linijek w pliku'
else:
errorMessages['polaczej'] = 'test: plik nie istnieje'
if Registration.fileSize('ktokolwiek.tst') == 1:
errorMessages['ktokolwiek'] = 'test: podano tylko login'
elif Registration.fileSize('ktokolwiek.tst') == 2:
errorMessages['ktokolwiek'] = 'test: podano login i hasło'
elif Registration.fileSize('ktokolwiek.tst') > 2:
errorMessages['ktokolwiek'] = 'test: za dużo linijek w pliku'
else:
#errorMessages['ktokolwiek'] = 'test: plik nie istnieje'
counter += 1
errorMessages['ilość testów'] = number_of_test
errorMessages['ilość testów zaliczonych'] = counter
return errorMessages
|
[
"jan.polaczek@interia.pl"
] |
jan.polaczek@interia.pl
|
cbae2ad04a7ab972f74b8a1132069e8c30ab885a
|
dac5f6f1314fa1b2cc19ccc5e3f6ba35dcb04672
|
/Loop_for.py
|
2be2ca49cbe2584c3de74e2729b3d964c1b88a72
|
[] |
no_license
|
mohamedasa2019/PYTHON-ALL-CODE
|
f0942f3c37847dc301a4e873efdfa279dfa175f0
|
5ab782662c0c4489130b841cc0d953b5ef485bf5
|
refs/heads/master
| 2020-04-30T09:40:20.204921
| 2019-03-20T15:08:01
| 2019-03-20T15:08:01
| 176,754,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
def main():
L=[1,3,4.5,"hi"] # تستخدم for loop لطباعه كل عنصر قي سطر في
for item in L:
print(item)
if __name__ == '__main__': main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
261f0667d897b235c0624553e90745571f971418
|
43e8e14e9ffa1a85d4383334d1e9bd0a041131fb
|
/setup.py
|
5fc4d7d525f17172d05a6ce9bc7d98c975aaafbf
|
[
"BSD-3-Clause"
] |
permissive
|
Python3pkg/PandaRSS
|
a952d24762ceec0e65a44859590a6e9e49ae49fb
|
8e8727744c8a876b314879193ae01422831a76dd
|
refs/heads/master
| 2021-01-21T17:38:48.551614
| 2017-05-21T17:48:59
| 2017-05-21T17:48:59
| 91,976,509
| 0
| 0
| null | 2017-05-21T17:48:57
| 2017-05-21T17:48:57
| null |
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
#!/usr/bin/python
from setuptools import setup, find_packages
import pandarss
install_requires = [
'Twisted>=15.0.0',
'bottle>=0.12.7'
]
package_data={
'pandarss': [
'views/css/*',
'views/js/*',
'views/*.html'
]
}
setup(name='pandarss',
version='0.2',
author='pandaman',
author_email='pandaman1999@foxmail.com',
url='https://github.com/PandaPark/PandaRSS',
license='BSD',
description='ToughRADIUS Self-service Portal',
long_description=open('README.md').read(),
classifiers=[
'Development Status :: 6 - Mature',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Systems Administration :: Authentication/Directory',
],
packages=find_packages(),
package_data=package_data,
keywords=['radius','toughradius','self-service ','pandarss'],
zip_safe=True,
include_package_data=True,
eager_resources=['pandarss'],
install_requires=install_requires,
entry_points={
'console_scripts': [
'pandarss = pandarss.pandarss:main',
'pandarss_txrun = pandarss.pandarss:txrun',
]
}
)
|
[
"pandaman1999@foxmail.com"
] |
pandaman1999@foxmail.com
|
5429b29c77f91823ead2f1173cbc0e47dd660763
|
d95e6dbbcd0673b8adb81b4bd8c6bf5b8917a6c4
|
/spatial_paper_data_archive.py
|
d4feb4e672e31530d051c3031367e3ff7b3d0e81
|
[
"MIT"
] |
permissive
|
Timothy-W-Hilton/COS_Spatial_Analyses
|
ab907c811605ab6cbd406451bd1dbb386e88695c
|
dfb6f99f8c7181739c2079936dce83a4d86f5c1f
|
refs/heads/master
| 2021-01-20T20:39:00.748486
| 2016-06-17T20:56:25
| 2016-06-17T20:56:25
| 61,402,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,481
|
py
|
"""Calculate total size of COS spatial paper data files and copy the
data files to a single directory tree for archiving
"""
import os
import os.path
import shutil
from stem_pytools import NERSC_data_paths as ndp
def Get_Human_Readable(size, precision=2):
"""http://stackoverflow.com/questions/5194057/better-way-to-convert-file-sizes-in-python
"""
suffixes = ['B', 'KB', 'MB', 'GB', 'TB']
suffixIndex = 0
while size > 1024 and suffixIndex < 4:
suffixIndex += 1 # increment the index of the suffix
size = size/1024.0 # apply the division
return "%.*f%s" % (precision, size, suffixes[suffixIndex])
def get_spatial_paper_data_total(runs):
all_data_sum = 0
for k, this_run in runs.items():
for this_file in (this_run.aqout_path, this_run.gpp_path,
this_run.gppraw_path, this_run.fcos_path):
if this_file is not None:
all_data_sum += os.path.getsize(this_file)
print "Spatial paper data total: " + Get_Human_Readable(all_data_sum)
return all_data_sum
def make_data_archive(root_dir, runs):
"""Copy all non-regridded GPP, regridded GPP, STEM AQOUT, and fCOS
netcdf files to a single directory tree for archiving.
"""
if os.path.exists(root_dir):
try:
shutil.rmtree(root_dir)
except:
print "unable to delete".format(root_dir)
try:
os.makedirs(root_dir)
except:
print "unable to create {}".format(root_dir)
for k, this_run in runs.items():
print "copying {} files".format(k)
this_run_dir = os.path.join(root_dir, k)
os.makedirs(this_run_dir)
for this_file in (this_run.aqout_path, this_run.gpp_path,
this_run.gppraw_path, this_run.fcos_path):
if this_file is not None:
print " copying {}".format(os.path.basename(this_file))
shutil.copy(this_file, this_run_dir)
        if k == 'climatological_bnd':
for this_bnd in (runs[k].top_bounds_path,
runs[k].lateral_bounds_path):
print " copying {}".format(os.path.basename(this_bnd))
shutil.copy(this_bnd, this_run_dir)
if __name__ == "__main__":
runs = ndp.get_Spatial_Paper_runs()
total = get_spatial_paper_data_total(runs)
archive_dir = os.path.join(os.getenv('SCRATCH'), 'SpatialPaperData')
make_data_archive(archive_dir, runs)
|
[
"thilton@ucmerced.edu"
] |
thilton@ucmerced.edu
|
85e36f729f2df6e0df523fa8d40795f65a763c64
|
44576656e6be64c8a8f6823f989ecaae1ffc32c8
|
/blog/migrations/0007_auto_20200225_2025.py
|
456bf4d0c25b73c63c0ce130bb7946c4d9bdca5f
|
[] |
no_license
|
Rainysponge/mysite
|
ecbaf5d08a0b4863894e9037af82d4c7b18818a7
|
4ee8aff0c5b90a91041853cea0a14d2a3d063144
|
refs/heads/master
| 2021-01-26T01:56:21.129558
| 2020-02-26T13:37:10
| 2020-02-26T13:37:10
| 243,265,221
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
# Generated by Django 2.1.15 on 2020-02-25 20:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0006_auto_20200212_1741'),
]
operations = [
migrations.AlterField(
model_name='blog',
name='content',
field=models.TextField(),
),
]
|
[
"375364412@qq.com"
] |
375364412@qq.com
|
2d2699072eae36c651fe088d627f69f90b657d58
|
b39ec77a8f5a5779edcecf5a09c39224472fd139
|
/Clase03/contar.py
|
0b60d0a7381ff2765efd0891e4b3ce879ffe2a47
|
[] |
no_license
|
GonzaloMonteodorisio/ejercicios-python-unsam
|
76b6288491ccba8f44b819c26bed4811268e995e
|
37ba16197107717a4c582eb552175e1c981c286b
|
refs/heads/main
| 2023-07-28T07:18:10.178029
| 2021-09-15T05:42:46
| 2021-09-15T05:42:46
| 406,627,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 56
|
py
|
for n in range(10,0,-1):
print(n, end=' ')
|
[
"gonzalomonteodorisio@gmail.com"
] |
gonzalomonteodorisio@gmail.com
|
a5e6bca6ad202cb0a10c51af6c42e62ce5c65b3c
|
a113ca707799131092e5e5ad9feb71e69c3659e7
|
/Employee_project/urls.py
|
7816a4acebf308e0c7be98e0f351ad69e90bb4b0
|
[] |
no_license
|
thuytran-team/Employee_Project
|
648680e9a1fb9ab7827ae17d21b569b05e007ccc
|
6bf588b5d294da12a5cfb380b062203bfd68f9e2
|
refs/heads/master
| 2022-12-17T06:53:39.358047
| 2020-09-12T22:55:12
| 2020-09-12T22:55:12
| 295,039,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 823
|
py
|
"""Employee_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('employee/',include('employee_register.urls'))
]
|
[
"thuytran898@gmail.com"
] |
thuytran898@gmail.com
|
9041c058921688f8d5835b092cb95e45d74fffcf
|
042f3881f11f9fc7f7d70aa8d7822c40f21c8fd0
|
/crankycoin/__init__.py
|
5526dd401e47ec089c0926d4f6b50b5e590db7c3
|
[
"MIT"
] |
permissive
|
benthomasson/crankycoin
|
1e3801c06a1e9e217de0a171f2b6d5f6926d2446
|
37dc3048cef9b17745da0d21b0c9095a081a87a0
|
refs/heads/master
| 2021-01-01T15:21:37.481370
| 2017-07-17T09:00:04
| 2017-07-17T09:00:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
from block import *
from blockchain import *
from node import *
from wallet import *
from errors import *
|
[
"cranklin@gmail.com"
] |
cranklin@gmail.com
|
20635375b97f4e276fbfab8866c1ba60fc8aff05
|
ffddf50985bd61a6bb4d7001fe838f8d5c709bf4
|
/Builders/TechnologyBUILD
|
3833e5eef716d6e008e45dbb6d8221286f31d5c2
|
[] |
no_license
|
NoahBarrett98/Lost-and-Found
|
9a7216e401aa5f3e31da637f1c20e75d681eb12d
|
0d828029c466aeda9e5aac27695d22335e574e26
|
refs/heads/master
| 2021-01-26T07:44:06.501000
| 2020-03-22T19:47:42
| 2020-03-22T19:47:42
| 243,370,004
| 1
| 1
| null | 2020-03-10T13:58:11
| 2020-02-26T21:31:06
|
Python
|
UTF-8
|
Python
| false
| false
| 752
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 8 09:27:37 2020
@author: hannahmacdonell
"""
#H: Technology(itemID, tBrand, device, serialNo)
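# Generates 1000 random Technology rows into tech.csv, sampling the brand and device from the
# tBrand.txt and device.txt word lists and drawing a random seven-digit serial number.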
import random
import csv
d = []
c = open("device.txt", "r")
for line in c:
d.append(line.strip().split('\n')[0])
c.close()
l = []
c = open("tBrand.txt", "r")
for line in c:
l.append(line.strip().split('\n')[0])
c.close()
with open('tech.csv', mode='w') as data_file:
data_writer = csv.writer(data_file, delimiter=',')
data_writer.writerow(['tBrand','Device','SerialNo'])
for x in range(1000):
data_writer.writerow([l[random.randint(0,len(l)-1)],d[random.randint(0,len(d)-1)],str(random.randint(4123456,9123456))])
data_file.close()
|
[
"noreply@github.com"
] |
noreply@github.com
|
|
3c70c973d79447bece9afe2d49c5fd583a4173dd
|
4bfe4afd1b1e11f9a03d8e3640aa297c875c076d
|
/demos/basic.py
|
9a86954581726ae9f13bad67294d6355e90d696a
|
[] |
no_license
|
pankajti/capstone
|
81cdd2187e71e8d1bf327579b574ea7cf91a7e76
|
af57a52d34dbcdd40e8e81f1d72c142263a98893
|
refs/heads/master
| 2021-03-02T09:49:51.054153
| 2020-07-09T02:28:58
| 2020-07-09T02:28:58
| 245,857,468
| 0
| 0
| null | 2020-03-22T00:54:01
| 2020-03-08T17:26:43
| null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
from tensorflow.keras.layers import Dense,SimpleRNN
from tensorflow.keras import Sequential
import numpy as np
from tensorflow.keras.utils import plot_model
model =Sequential()
model.add(Dense(2))
model.add(Dense(1))
plot_model(model)
|
[
"pankaj.tiwari2@gmail.com"
] |
pankaj.tiwari2@gmail.com
|
add2368027110c4b923645c5840c3a6f70084c32
|
a354f18367975097f0b19de816e763425e31f599
|
/lists/admin.py
|
ea9e3c8fc9bfcb1226d740be04827446a4be89a3
|
[] |
no_license
|
egibney/listapp
|
56f02c4a3311059aca0c73933241bff4d01f177a
|
f15875c304ff622985eb2dad7b8a20cc4def8b3f
|
refs/heads/master
| 2021-01-22T17:47:59.312735
| 2012-09-14T03:04:55
| 2012-09-14T03:04:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
from lists.models import List
from lists.models import Item
from django.contrib import admin
admin.site.register(List)
admin.site.register(Item)
|
[
"epgibney@gmail.com"
] |
epgibney@gmail.com
|
237ae19dcd1861ce4f5ee4f9d3bcf53f20e82e1f
|
ca152095b72ce93b6ca79042084f5ef70c658576
|
/search_hparam.py
|
ae2d717cf0cdbbb6e9de860e789f8fc2287a1fcc
|
[] |
no_license
|
fl16180/SeqModel
|
72806eca1ec21b564262f8d444366a984ede7c64
|
3bba92bc23d0fef55a479f18e731c50e1feed186
|
refs/heads/master
| 2020-12-14T16:49:13.468564
| 2020-05-12T00:24:17
| 2020-05-12T00:24:17
| 234,813,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,639
|
py
|
import argparse
import numpy as np
# hide sklearn deprecation message triggered within skorch
from warnings import simplefilter
simplefilter('ignore', category=FutureWarning)
import torch
from skorch import NeuralNetClassifier
from skorch.callbacks import LRScheduler
from skorch.callbacks import EpochScoring
from sklearn.metrics import plot_roc_curve, roc_auc_score
from sklearn.metrics import plot_precision_recall_curve, average_precision_score
from constants import *
from datasets import *
import models
from utils.model_utils import *
from utils.data_utils import get_roadmap_col_order
DATA_CHOICES = ['mpra', 'mpra+scores', 'neighbor', 'neighbor+scores']
MODEL_CFG = {
'mpra': None,
'mpra+scores': None,
'neighbor': None,
'neighbor+scores': None
}
def fit_model(args):
torch.manual_seed(1000)
print(f'Fitting model for {args.data}:')
project = args.project
auc = EpochScoring(scoring='roc_auc', lower_is_better=False)
apr = EpochScoring(scoring='average_precision', lower_is_better=False)
if args.data == 'mpra':
train_df = load_train_set(project, datasets=['roadmap'])
proc = Processor(project)
train_df = proc.fit_transform(train_df, na_thresh=0.05)
proc.save(args.data)
X_train = train_df.drop(['chr', 'pos', 'Label'], axis=1) \
.values \
.astype(np.float32)
y_train = train_df['Label'].values.astype(np.int64)
net = NeuralNetClassifier(
models.MpraDense,
batch_size=256,
optimizer=torch.optim.Adam,
optimizer__weight_decay=2e-6,
lr=1e-4,
max_epochs=20,
module__n_input=1016,
module__n_units=(400, 250),
module__dropout=0.3,
callbacks=[auc, apr],
iterator_train__shuffle=True,
train_split=None
)
elif args.data == 'mpra+scores':
train_df = load_train_set(project, datasets=['roadmap', 'eigen', 'regbase'])
proc = Processor(project)
train_df = proc.fit_transform(train_df, na_thresh=0.05)
proc.save(args.data)
X_train = train_df.drop(['chr', 'pos', 'Label'], axis=1) \
.values \
.astype(np.float32)
y_train = train_df['Label'].values.astype(np.int64)
net = NeuralNetClassifier(
models.MpraDense,
batch_size=256,
optimizer=torch.optim.Adam,
optimizer__weight_decay=2e-6,
lr=1e-4,
max_epochs=20,
module__n_input=1079,
module__n_units=(400, 250),
module__dropout=0.3,
callbacks=[auc, apr],
iterator_train__shuffle=True,
train_split=None
)
elif args.data == 'neighbor':
X_train = load_train_neighbors(project).astype(np.float32)
tmp = load_train_set(project, datasets=['roadmap', 'eigen', 'regbase'],
make_new=False)
y_train = tmp['Label'].values.astype(np.int64)
assert X_train.shape[0] == y_train.shape[0]
net = NeuralNetClassifier(
models.MpraCNN,
batch_size=256,
optimizer=torch.optim.Adam,
optimizer__weight_decay=1e-4,
lr=5e-4,
max_epochs=20,
callbacks=[auc, apr],
iterator_train__shuffle=True
)
elif args.data == 'neighbor+scores':
print('\tLoading neighbors')
X_neighbor = load_train_neighbors(project).astype(np.float32)
print('\tLoading scores')
train_df = load_train_set(project, datasets=['roadmap', 'eigen', 'regbase'])
proc = Processor(project)
train_df = proc.fit_transform(train_df, na_thresh=0.05)
proc.save(args.data)
print('\tArranging data')
rm_cols = [f'{x}-E116' for x in ROADMAP_MARKERS]
# rm_cols = [x for x in get_roadmap_col_order(order='marker') if 'E116' in x]
X_score = train_df.drop(['chr', 'pos', 'Label'] + rm_cols, axis=1) \
.values \
.astype(np.float32)
y_train = train_df['Label'].values.astype(np.int64)
X_train = (X_neighbor, X_score)
net = NeuralNetClassifier(
models.MpraFullCNN,
batch_size=256,
optimizer=torch.optim.Adam,
optimizer__weight_decay=0,
lr=5e-4,
max_epochs=20,
callbacks=[auc, apr],
iterator_train__shuffle=True
)
# import sys; sys.exit()
net.fit(X_train, y_train)
class_pred = net.predict(X_train)
score_pred = net.predict_proba(X_train)
print('\tAUROC: ', roc_auc_score(y_train, score_pred[:, 1]))
print('\tAUPR: ', average_precision_score(y_train, score_pred[:, 1]))
save_model(net, project, args.data)
def evaluate_model(args):
print(f"Evaluating model for {args.data}:")
project = args.project
net = load_model(project, args.data)
X_test = load_test_neighbors(project)
X_test = X_test.astype(np.float32)
tmp = load_test_set(project, datasets=['roadmap', 'eigen', 'regbase'])
y_test = tmp['Label'].values.astype(np.int64)
# test_df = load_test_set(project, datasets=['roadmap', 'eigen', 'roadmap'])
# proc = Processor(project)
# proc.load(args.data)
# test_df = proc.transform(test_df)
# X_test = test_df.drop(['chr', 'pos', 'Label'], axis=1) \
# .values \
# .astype(np.float32)
# y_test = test_df['Label'].values.astype(np.int64)
class_pred = net.predict(X_test)
score_pred = net.predict_proba(X_test)
print('\tAUROC: ', roc_auc_score(y_test, score_pred[:, 1]))
print('\tAUPR: ', average_precision_score(y_test, score_pred[:, 1]))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--project', '-p', choices=PROJ_CHOICES, required=True)
parser.add_argument('--data', '-d', default='mpra+scores', choices=DATA_CHOICES,
help='Which data/model to train on')
parser.add_argument('--full', default=False,
help='Fit all models (overrides --data)')
parser.add_argument('--evaluate', '-e', action='store_true', default=False,
help='Evaluate model on test set after fitting')
args = parser.parse_args()
fit_model(args)
if args.evaluate:
evaluate_model(args)
|
[
"fredlu.flac@gmail.com"
] |
fredlu.flac@gmail.com
|
55dc1576cdd1996d90d1c4d72010b67b9c856d33
|
a54809b430481f1b0047f046d412ffc3f0c7fe68
|
/myenv/lib/python3.6/encodings/mbcs.py
|
fef3c7ee1eaa9a11983fb638e663bd09cc620156
|
[] |
no_license
|
vineet0713/PaaS-ProberS-AWS-EB-
|
2521d4ee7e41e6d25c839cfea672d5706b4dbd85
|
1f9ca9e5d59ddcb3f94d7aaa96ca66719bc805cf
|
refs/heads/master
| 2020-04-30T05:47:55.793035
| 2019-03-20T02:54:43
| 2019-03-20T02:54:43
| 176,635,164
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 105
|
py
|
/usr/local/Cellar/python/3.6.4_3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/encodings/mbcs.py
|
[
"vineet0713@gmail.com"
] |
vineet0713@gmail.com
|
46296617c7f07b0473680d71fe3811728becd60d
|
5f3902453ad41436382bb77e3e6a06dfe94deaed
|
/ask7.py
|
b2b9666cc40348812f8a3686d3d5bcd7c8b2a150
|
[] |
no_license
|
dimitrisadam/askhseis
|
23a6c45a33004c2b50837a42e10dae72479adc34
|
504f270056e2afa6e331bd501a8f476bf35dd991
|
refs/heads/master
| 2021-01-21T08:21:33.339580
| 2017-02-27T18:13:38
| 2017-02-27T18:13:38
| 83,341,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,647
|
py
|
import tweepy
from tweepy import OAuthHandler
import sys
consumer_key = ''
consumer_secret = ''
access_token = ''
access_secret = ''
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
# Takes the list of tweets (10 in total) and processes them one by one,
# removing http(s) links, since they are not words, and words that contain the character
# '\', since that indicates a photo. It also removes standalone dashes and double spaces.
# Finally, it returns the total number of words counted in the tweets.
def countWords(alltweets):
httpFlag = "http"
delimeter = "\\"
paula="-"
doublespace=""
totalWords = 0
test =" "
for i in range(len(alltweets)):
test = str(alltweets[i])
test = test.split(" ")
for j in range(len(test)):
            if delimeter not in test[j] and httpFlag not in test[j] and test[j] != paula and test[j] != doublespace:
totalWords+=1
#print test[j]
return totalWords
firstUser = raw_input("Dwste to tweeter username tou prwtou xrhsth: \n")
secondUser = raw_input("Dwste to tweeter username tou deuterou xrhsth: \n")
#firstUserTweets = api.user_timeline(screen_name="RollingStones",count=10)
# Read the 10 most recent tweets of the first user and store them in firsttweets
firstUserTweets = api.user_timeline(screen_name=firstUser,count=10)
firsttweets = [[tweet.text.encode('utf-8')] for tweet in firstUserTweets]
#print firsttweets
#secondUserTweets = api.user_timeline(screen_name="rogerfederer",count=10)
# Read the 10 most recent tweets of the second user and store them in secondtweets
secondUserTweets = api.user_timeline(screen_name=secondUser,count=10)
secondtweets = [[tweet.text.encode('utf-8')] for tweet in secondUserTweets]
#print secondtweets
# Check whether the 10 tweets actually exist. If not, execution simply continues; a sys.exit(0) could also be done here.
if len(firsttweets) < 10:
print '\nWARNING: O xrhsths',firstUser,'den exei kanei 10 tweets'
if len(secondtweets) < 10:
print '\nWARNING: O xrhsths',secondUser,'den exei kanei 10 tweets'
firstUserTotalWorlds = countWords(firsttweets)
secondUserTolalWorlds = countWords(secondtweets)
if firstUserTotalWorlds > secondUserTolalWorlds:
print '\nPerissoteres lexeis exei o user',firstUser,'pou exei',firstUserTotalWorlds,'lexeis.O user',secondUser,'exei',secondUserTolalWorlds,'lexeis'
else:
print '\nPerissoteres lexeis exei o user',secondUser,'pou exei',secondUserTolalWorlds,'lexeis.O user',firstUser,'exei',firstUserTotalWorlds,'lexeis'
#print 'totalwords =',countWords(firsttweets)
#print 'totalwords =',countWords(secondtweets)
|
[
"mitsoseleysina2@gmail.com"
] |
mitsoseleysina2@gmail.com
|
cc74a182310695445168f47a0e42b74f72ac72f7
|
6026c5fa42c561256510fd997286c21cb935690b
|
/volumeWidgets.py
|
be70dd6721a0fd5913db0939ec99b7a62eba1e57
|
[] |
no_license
|
frankbx/Volume
|
b5894e7ac13491e0c52af2ec39ebfea5a695ecf2
|
516815a1498e26b43d73f0c7f55da5fb2765b2d2
|
refs/heads/master
| 2020-04-03T22:40:43.346951
| 2017-03-13T04:37:05
| 2017-03-13T04:37:05
| 58,268,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,676
|
py
|
import pyqtgraph as pg
from PyQt4 import QtCore, QtGui
# Create a subclass of GraphicsObject.
# The only required methods are paint() and boundingRect()
# (see QGraphicsItem documentation)
class CandlestickItem(pg.GraphicsObject):
def __init__(self):
pg.GraphicsObject.__init__(self)
self.flagHasData = False
def set_data(self, data):
self.data = data # data must have fields: time, open, close, min, max
self.flagHasData = True
self.generatePicture()
self.informViewBoundsChanged()
def generatePicture(self):
# pre-computing a QPicture object allows paint() to run much more quickly,
# rather than re-drawing the shapes every time.
self.picture = QtGui.QPicture()
p = QtGui.QPainter(self.picture)
p.setPen(pg.mkPen('w'))
barWidth = 1 / 3.
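        # For each entry draw the high-low line and a body rectangle from open to close;
        # falling candles (open > close) are filled red, rising ones green.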
for (open, close, min, max, index) in self.data:
p.drawLine(QtCore.QPointF(index, min), QtCore.QPointF(index, max))
if open > close:
p.setBrush(pg.mkBrush('r'))
else:
p.setBrush(pg.mkBrush('g'))
p.drawRect(QtCore.QRectF(index - barWidth, open, barWidth * 2, close - open))
p.end()
def paint(self, p, *args):
if self.flagHasData:
p.drawPicture(0, 0, self.picture)
def boundingRect(self):
# boundingRect _must_ indicate the entire area that will be drawn on
# or else we will get artifacts and possibly crashing.
        # (in this case, QPicture does all the work of computing the bounding rect for us)
return QtCore.QRectF(self.picture.boundingRect())
class CandleWidget(pg.PlotWidget):
def __init__(self, raw_data):
super(CandleWidget, self).__init__()
self.update(raw_data)
# self.candle_data = raw_data.loc[:, ['open', 'close', 'low', 'high']]
# r, c = self.candle_data.shape
# self.candle_data['num'] = range(1, r + 1)
# self.item = CandlestickItem()
# self.item.set_data(self.candle_data.values)
self.addItem(self.item)
def update(self, raw_data):
# raw_data.sort_index(axis=0, inplace=True)
self.candle_data = raw_data.loc[:, ['open', 'close', 'low', 'high']]
r, c = self.candle_data.shape
self.candle_data['num'] = range(1, r + 1)
self.item = CandlestickItem()
self.item.set_data(self.candle_data.values)
# app = QtGui.QApplication([])
# df = ts.get_hist_data('000681', '2015-01-01', ktype='w')
# r, c = df.shape
# print(r)
# cData = df.copy().loc[:, ['open', 'close', 'low', 'high']]
# cData['num'] = range(1, r + 1)
#
# print(cData)
# # cData = np.array(cData)
# item = CandlestickItem()
# item.set_data(cData.values)
#
# plt = pg.plot()
# plt.addItem(item)
# plt.setWindowTitle('pyqtgraph example: customGraphicsItem')
#
#
# def update():
# global item
# df = ts.get_hist_data('000681', '2015-01-01', ktype='d')
# r, c = df.shape
# print(r)
# cData = df.loc[:, ['open', 'close', 'low', 'high']]
# cData['num'] = range(1, r + 1)
# item.set_data(cData.values)
# # app.processEvents() ## force complete redraw for every plot
#
#
# timer = QtCore.QTimer()
# timer.timeout.connect(update)
# timer.start(10000)
# df = ts.get_hist_data('000681', '2015-01-01', ktype='w')
# print(enumerate(df))
# for (value) in df.head(10).values:
# print(value)
# print(type(value))
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|
[
"xiang.bao.frank@gmail.com"
] |
xiang.bao.frank@gmail.com
|
5f1d4ae6e02e4e33bd1e5716d22ee7da2b0c0cbd
|
f774ccfe88871fbe37b52487253108144c07f968
|
/exer95.py
|
d4a749738c38516b4e2e8805d75fdd3495a48c8e
|
[] |
no_license
|
capy-larit/exercicios_python
|
4bcfdc8985983dc69c63f315931c200c4c9f1100
|
c92b8ff31e2eb0c87f2dfdad9d97149db6f1181e
|
refs/heads/master
| 2023-03-23T05:29:07.409948
| 2020-09-08T22:42:48
| 2020-09-08T22:42:48
| 245,882,895
| 0
| 0
| null | 2021-03-15T21:43:15
| 2020-03-08T20:28:15
|
Python
|
UTF-8
|
Python
| false
| false
| 675
|
py
|
"""
Faça um programa utilizando um dict (dicionário) que leia dados de entrada do usuário. O
usuário deve entrar com os dados de uma pessoa como nome, idade e cidade onde mora.
Após isso, você deve imprimir os dados como o exemplo abaixo:
nome: João
idade: 20
cidade: São Paulo
"""
def chamar_menu():
nome = input('Digite seu nome: ')
idade = int(input('Digite sua idade: '))
cidade = input('Digite sua cidade: ')
dados[nome] = [idade, cidade]
dados = {}  # renamed from "dict" so the built-in type is not shadowed
try:
chamar_menu()
except ValueError:
print('A idade deve ser um número inteiro.')
chamar_menu()
for chave, item in dados.items():
print(f'Nome: {chave}\nIdade: {item[0]}\nCidade: {item[1]}')
|
[
"larissa.laritt@icloud.com"
] |
larissa.laritt@icloud.com
|
48201b6182773eb907fb42c0093c1f0bf47efc96
|
853c189602a667990eda858db98d163fb597caa1
|
/tfx/orchestration/experimental/core/constants.py
|
e5a5208afa70ef912a346dc02d4fe9ccce962866
|
[
"Apache-2.0"
] |
permissive
|
swap-10/tfx
|
9bef96fc592810ed2d7dfa5dd60044c9ac481e02
|
8e80ce2486b4d7b219dcff906d6930e62c5fdd45
|
refs/heads/master
| 2023-07-15T22:54:18.642120
| 2021-09-06T06:17:48
| 2021-09-06T06:17:48
| 402,296,955
| 0
| 0
|
Apache-2.0
| 2021-09-02T05:49:03
| 2021-09-02T05:09:23
| null |
UTF-8
|
Python
| false
| false
| 823
|
py
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants shared across modules."""
EXECUTION_ERROR_MSG_KEY = '__execution_error_msg__'
IMPORTER_NODE_TYPE = 'tfx.dsl.components.common.importer.Importer'
RESOLVER_NODE_TYPE = 'tfx.dsl.components.common.resolver.Resolver'
|
[
"tensorflow-extended-nonhuman@googlegroups.com"
] |
tensorflow-extended-nonhuman@googlegroups.com
|
71a4c69ae3c773949b3127804ef78ee861a60fee
|
f9407b6f9454640b35753a39ac3fc57c1a105630
|
/parse_out_email_text.py
|
16017ff5240a650c9e78703bc219d8a033e475aa
|
[] |
no_license
|
saeidmoha/tools
|
5e91c7efc79fe75a1a780565233cdcd9b23c000d
|
18f20dfdade5a374c5ec2cbd71f4b661d61788db
|
refs/heads/master
| 2021-08-14T14:43:31.181434
| 2017-11-16T02:00:27
| 2017-11-16T02:00:27
| 110,909,987
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,703
|
py
|
#!/usr/bin/python
from nltk.stem.snowball import SnowballStemmer
import string
def parseOutText(f):
""" given an opened email file f, parse out all text below the
metadata block at the top
(in Part 2, you will also add stemming capabilities)
and return a string that contains all the words
in the email (space-separated)
example use case:
f = open("email_file_name.txt", "r")
text = parseOutText(f)
"""
f.seek(0) ### go back to beginning of file (annoying)
all_text = f.read()
### split off metadata
content = all_text.split("X-FileName:")
#print ("content[0] = ", content[0], "content[1] = ", content[1])
words = ""
stemmer = SnowballStemmer("english")
if len(content) > 1:
### remove punctuation
#text_string = content[1].translate(string.maketrans("", ""), string.punctuation)
text_string = content[1].translate(str.maketrans("", "", string.punctuation))
### project part 2: comment out the line below
#words = text_string
### split the text string into individual words, stem each word,
### and append the stemmed word to words (make sure there's a single
### space between each stemmed word)
text_string = ' '.join(text_string.split())
for word in text_string.split(" "):
stemword = stemmer.stem(word)
words += stemword + ' '
return words
def main():
ff = open("../text_learning/test_email.txt", "r")
#ff = open("../maildir/bailey-s/deleted_items/101.", "r")
text = parseOutText(ff)
print (text)
if __name__ == '__main__':
main()
|
[
"saeid@saeidm.com"
] |
saeid@saeidm.com
|
482b54447b3f7cd5d3fb519221920951b5b68ed0
|
d9764a604c85c134ff217747d243eac8fe28e792
|
/src/demo2.py
|
e3c0801f18c91206c2e18df08c2caacf8e0007bf
|
[] |
no_license
|
afcarl/INF421-project
|
5a0130c3ba6e0c767323001048d3f191379dbc6e
|
dc6eef684f6d277b6a9bbbc227a9e20a1525e115
|
refs/heads/master
| 2020-03-19T21:21:53.465240
| 2017-08-14T13:39:52
| 2017-08-14T13:39:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
#!/usr/bin/env python3
"""
Special notes :
This implementation supports MULTIPLE shortest path.
(except for the number_of_possible_locations_with_mindist_simple function)
"""
import random
from Graph import Graph
from algo import *
from unused import *
from Dijkstra import *
from util import timeit
from reach import reach
####################
data = '/Users/louisabraham/Downloads/RoadNetworks/data/france.in'
logging = '/Users/louisabraham/Downloads/RoadNetworks/vis/points.js'
hour = 3600000
# We can control the display of chronos using timeit.activated
timeit.activated = True
####################
# graph importation
g = Graph.from_file(data)
# we chose a random starting point
v = random.choice(list(g.keys()))
#
# # Question 1.1
# print(number_of_possible_locations(g, v, 1 * hour))
#
# # the same result is computed
# print(number_of_possible_locations_with_mindist_dijkstra(
# g, v, 1 * hour, 0))
# print(number_of_possible_locations_with_mindist_dijkstra(
# g, v, 1 * hour, 0))
print(number_of_possible_locations_with_mindist_dijkstra(
g, v, 1 * hour, 2 * hour, logging=logging))
input()
g.generate_converse()
print(number_of_possible_locations_with_mindist_dijkstra(
g.converse, v, 1 * hour, 2 * hour, logging=logging))
# print(reach(g, v))
#
# # We can free memory like this
# dijkstra.clean()
|
[
"louis.abraham@yahoo.fr"
] |
louis.abraham@yahoo.fr
|
e253dc4bc39f59c0d01f1734c35d33edfc76853a
|
c5e7926ffa9af44e3d3fea7c854c013898b8f346
|
/scrap_tenders/scrap_tenders/items.py
|
da60390af0bf3875f87e8865f44838ec5e8df41d
|
[
"MIT"
] |
permissive
|
Salomari1987/med-tenders-egypt
|
a821cd1064a5c68cbd7318c8ade254667692b7d9
|
31b5061fe28c56d5e9a8bb4b267148848bfeaf5a
|
refs/heads/master
| 2021-01-19T18:32:36.773483
| 2017-04-19T00:36:02
| 2017-04-19T00:40:26
| 88,364,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
# from scrapy.item import Item, Field
#
#
# class StackItem(Item):
# Tender_Notice_Type = Field()
# Country = Field()
# Category = Field()
# Description = Field()
# Deadline = Field()
# Ref = Field()
from scrapy_djangoitem import DjangoItem
from tenders_django_app.models import Tender
class StackItem(DjangoItem):
django_model = Tender
|
[
"s.z.alomari.1987@gmail.com"
] |
s.z.alomari.1987@gmail.com
|
5c80ed9e14391ad32e4cc6fd9fcae8dce388c672
|
479518429066a4200b0c9ffbc42f22620dee1749
|
/app.py
|
5074f7904d2af983e17faf125c1a1f1f6874b9a4
|
[] |
no_license
|
nikhilkumarsingh/nitdhack
|
d2b4871c2aa3ef461c409a2f75e4f346759f1797
|
633ddf770c19fb8b0dd66479bc8e865e36181ffa
|
refs/heads/master
| 2021-01-19T21:33:27.880021
| 2017-04-18T23:43:06
| 2017-04-18T23:43:06
| 88,665,337
| 0
| 1
| null | 2018-10-03T05:33:57
| 2017-04-18T19:59:40
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,291
|
py
|
import flask
import requests
app = flask.Flask(__name__,static_folder='static')
app.config.from_object(__name__)
app.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b6176a'
@app.route('/')
def home():
return flask.render_template('index.html')
def NearbySearch(lat,lng,keyword,radius=1000):
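# Query the Google Places Nearby Search API around (lat, lng) for the given place type, then fetch each result's details and return a list of {name, lat, lng} dicts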
key="AIzaSyApuFoKxVMRQ2einlsA0rkx2S4WJjJIh34"
url="https://maps.googleapis.com/maps/api/place/nearbysearch/json?"
url+="location=%f,%f&" % (lat,lng)
url+="radius=%i&" % radius
url+="type=%s&" % keyword
url+="key=%s" % key
response=requests.get(url)
json_dict=response.json()
res=json_dict['results']
info_pack=[]
for x in res:
placeid = x['place_id']
url = "https://maps.googleapis.com/maps/api/place/details/json?placeid={}&key={}".format(placeid,key)
r = requests.get(url).json()['result']
info = {}
info['name'] = r['name']
info['lat'] = r['geometry']['location']['lat']
info['lng'] = r['geometry']['location']['lng']
info_pack.append(info)
return info_pack
@app.route('/query', methods = ['POST'])
def query():
if flask.request.method == 'POST':
# lat, lng would normally be parsed from the request; hard-coded placeholder values here
lat, lng = 28, 76
data = {'locations': NearbySearch(lat, lng, 'doctor')}
print(flask.request.form['query'])
return data
if __name__ == "__main__":
app.run(debug = True, port=5003)
|
[
"nikhilksingh97@gmail.com"
] |
nikhilksingh97@gmail.com
|
d43e3d2d2a4cade3e15bd4256deff2b47f891672
|
2e4c9dafb6fc03d48df9f80d506474b87438056d
|
/fibers.py
|
55b6a4bd996a1723437447420a402201d3353313
|
[] |
no_license
|
ericwang915/pypbc
|
fd35d2d91d2f50c6b0353abc84b3dcd72261006f
|
22c97949c549867103e667d998e8be2cfb1911a6
|
refs/heads/master
| 2020-06-11T00:01:54.795875
| 2009-09-23T09:00:41
| 2009-09-23T09:00:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,479
|
py
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Basic functions to read and write TrackVis .trk files and to play
with fibers.
Copyright (c) 2009 Emanuele Olivetti <emanuele_AT_relativita.com>
This library is free software; you can redistribute it and/or modify
it either under the terms of the GNU General Public License version 3
as published by the Free Software Foundation.
"""
import numpy as N
import sys
# Definition of trackvis header structure.
# See http://www.trackvis.org/docs/?subsect=fileformat
# See http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html
trk_header_structure = [['id_string', 1, 'S6'],
['dim', 3, '<h'],
['voxel_size', 3, '<f4'],
['origin', 3, '<f4'],
['n_scalars', 1, '<h'],
['scalar_name', 10, 'S20'],
['n_properties', 1, '<h'],
['property_name', 10, 'S20'],
['reserved', 1, 'S508'],
['voxel_order', 1, 'S4'],
['pad2', 1, 'S4'],
['image_orientation_patient', 6, '<f4'],
['pad1', 1, 'S2'],
['invert_x', 1, 'S1'],
['invert_y', 1, 'S1'],
['invert_z', 1, 'S1'],
['swap_xy', 1, 'S1'],
['swap_yz', 1, 'S1'],
['swap_zx', 1, 'S1'],
['n_count', 1, '<i4'],
['version', 1, '<i4'],
['hdr_size', 1, '<i4'],
]
def read_header(f):
""" Read and parse .trk file header structure.
"""
header = {}
for field_name, count, dtype in trk_header_structure:
header[field_name] = N.fromfile(f, dtype=dtype, count=count)
pass
assert(f.tell()==1000) # header is always 1000 bytes.
return header
def write_header(f, header):
"""Write .trk header to file.
"""
for field_name, count, dtype in trk_header_structure:
# Note that ".astype(dtype)" is just to be sure or correct types:
header[field_name].astype(dtype).tofile(f)
pass
assert(f.tell()==1000) # header is always 1000 bytes.
return
def print_header(header):
"""Print relevant info of .trk header.
"""
print "Header:"
relevant_fields = ['dim', 'voxel_size', 'origin', 'n_count' ]
for field in relevant_fields:
print '\t',field, ':', header[field]
pass
return
def progress_meter(position, total, message, steps=10):
"""Simple progress meter.
"""
if position%(int(total/steps))==0:
print message, str(1+int(100.0*position/total))+'%'
sys.stdout.flush()
pass
return
def read_fibers(f, header):
"""Read fibers from .trk file and fill a list.
"""
fiber = []
# structure of each entry of the list:
# [[X1,Y1,Z1,SCALAR1...],...,[Xn,Yn,Zn,SCALARn...]], [PROPERTIES]
# Note that in PBC2009 trckvis files there are no scalars or
# properties, which means that the actual structure of the fiber
# list is simply:
# fiber_id : [[X1,Y1,Z1],...,[Xn,Yn,Zn]], []
n_scalars = header['n_scalars'][0]
n_fibers = header['n_count'][0]
for fiber_id in range(n_fibers):
num_points = N.fromfile(f, dtype='<i4', count=1)[0]
xyz_scalar = N.fromfile(f, dtype='<f4', count=num_points*(3+n_scalars)).reshape(num_points, 3+n_scalars)
properties = N.fromfile(f, dtype='<f4', count=header['n_properties'][0])
fiber.append([xyz_scalar, properties])
progress_meter(fiber_id, n_fibers, 'Reading fibers...')
pass
return fiber
def write_fibers(f, fiber, header):
"""Write fibers to file in .trk format. Assumption: header has
already been written.
"""
n_scalars = header['n_scalars'][0]
n_fibers = header['n_count'][0]
for fiber_id in range(n_fibers):
num_points = N.array((fiber[fiber_id][0]).shape[0], dtype='<i4')
num_points.tofile(f)
xyz_scalar = N.array(fiber[fiber_id][0], dtype='<f4')
xyz_scalar.tofile(f)
properties = N.array(fiber[fiber_id][1], dtype='<f4')
properties.tofile(f)
progress_meter(fiber_id, n_fibers, 'Writing fibers...')
pass
return
def mm2voxel(xyz, header):
"""Converts coordinates from mm to voxel.
"""
return N.floor(xyz/header['voxel_size']).astype('i')
def voxel2mm(Vxyz, header):
"""Converts coordinates from voxel to mm.
"""
return (Vxyz+0.5)*header['voxel_size']
def build_voxel_fibers_dict(fiber, header):
"""Build a dictionary that given a voxel returns all fibers (IDs)
crossing it.
"""
voxel2fibers = {}
n_fibers = len(fiber)
for fiber_id in range(n_fibers):
xyz = fiber[fiber_id][0]
ijk = mm2voxel(xyz, header)
for i in range(xyz.shape[0]):
try:
voxel2fibers[tuple(ijk[i,:])].append(fiber_id)
except KeyError:
voxel2fibers[tuple(ijk[i,:])] = [fiber_id]
pass
pass
progress_meter(fiber_id, n_fibers, 'Mapping voxels to fibers...')
pass
n_voxels = len(voxel2fibers.keys())
# Now transform each list of IDs in an array of IDs:
for n, ijk in enumerate(voxel2fibers.keys()):
voxel2fibers[ijk] = N.array(voxel2fibers[ijk])
progress_meter(n, n_voxels, 'Converting lists to arrays...')
pass
return voxel2fibers
if __name__=="__main__":
print "This simple program reads a TrackVis .trk file, parse it, build"
print "structures to represent fibers as Python list of arrays"
print "and then saves structures in TrackVis .trk file format."
print "The resulting file is expected to be identical to the original."
print "As a further step a dictionary, mapping voxel to fibers, is built"
print "and some examples using it are shown."
# filename = "dsi.trk"
# filename = "dti.trk"
filename = "hardiO10.trk"
print
print "file:", filename
f = open(filename)
header = read_header(f)
print_header(header)
fiber = read_fibers(f, header)
f.close()
print
fiber_id = 1000
print "Example: fiber_id=",fiber_id
print fiber[fiber_id]
print "Convert points from mm to voxel coordinates:"
Vxyz = mm2voxel(fiber[fiber_id][0], header)
print Vxyz
print "Convert back and check whether differences are less than grid size...",
assert(((voxel2mm(Vxyz, header)-fiber[fiber_id][0])<header['voxel_size']).all())
print "OK."
print
filename2 = filename+"_COPY.trk"
print "Saving to:", filename2
f = open(filename2,'w')
write_header(f, header)
write_fibers(f, fiber, header)
f.close()
print
print "Building voxel2fibers dictionary:"
voxel2fibers = build_voxel_fibers_dict(fiber, header)
voxel = tuple(header['dim'] / 2)
print "Example: fibers crossing voxel", voxel
try:
print voxel2fibers[voxel]
except KeyError:
print []
print "There are no fibers crossing this voxel."
pass
print
x = header['dim'][0] / 2
print "Example: counting fibers crossing plane x =", x
counter = 0
for y in range(header['dim'][1]):
for z in range(header['dim'][2]):
try:
counter += voxel2fibers[(x,y,z)].size
except KeyError:
pass
pass
pass
print "Number of fibers:", counter
print
fiber_id = 2000
print "Which fibers cross (the voxels of) fiber[fiber_id=",fiber_id,"] ?"
xyz = fiber[fiber_id][0]
ijk = mm2voxel(xyz, header)
fiber_id_list = N.unique(N.hstack([voxel2fibers[i,j,k] for i,j,k in ijk]))
print fiber_id_list
print fiber_id_list.size, "fibers."
print
print "Saving .trk file with just the previous list of fibers."
filename3 = filename+'_cross_fiber_id_'+str(fiber_id)+'.trk'
print "Saving to:", filename3
import copy
fiber2 = [fiber[fiber_id] for fiber_id in fiber_id_list]
header2 = copy.deepcopy(header)
header2['n_count'] = N.array([fiber_id_list.size])
f = open(filename3, 'w')
write_header(f, header2)
write_fibers(f, fiber2, header2)
f.close()
|
[
"emanuele@relativita.com"
] |
emanuele@relativita.com
|
73ea759b8c4f767004d46136a4cb1eec0f7feabe
|
eb99e1d5008f90e5a54724863dacba4878fb2cea
|
/tests/test_basic.py
|
875207f11e43de580d9a19a086b9fd20315d8529
|
[] |
no_license
|
machow/hoof
|
fe529ef6573ecae35ba51704cd5c95c188c50295
|
4c9460492f283abd539ab3577982226efe15db5a
|
refs/heads/master
| 2022-05-20T03:43:33.847717
| 2020-04-25T21:47:48
| 2020-04-25T21:47:48
| 256,900,890
| 1
| 0
| null | 2020-04-25T21:38:26
| 2020-04-19T02:56:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,519
|
py
|
from hoof import Hoof, AntlrAst
class Program(AntlrAst):
_fields = ["body"]
class BinaryExpr(AntlrAst):
_fields = ["left", "right", "op"]
class RunExpr(AntlrAst):
_fields = ["op", "expr"]
_remap = ["RUN->op"]
_rules = "RunExpr"
hoof = Hoof("hoof_examples.Expr")
class AstVisitor(hoof.Visitor):
def visitParentheses(self, ctx):
# skip parentheses
return self.visit(ctx.expr())
def visitTerminal(self, ctx):
return ctx.getText()
hoof.register("Prog", Program, ["expr->body"]) # no config on node
hoof.register("BinaryExpr", BinaryExpr) # no need to remap
hoof.register(RunExpr) # rule and remap on node
hoof.bind(AstVisitor)
def test_program():
node = hoof.parse("1 + 2; 3 - 4;", "prog")
assert isinstance(node, Program)
assert len(node.body) == 2
assert isinstance(node.body[0], BinaryExpr)
def test_binary():
node = hoof.parse("1 + 2", "expr")
assert isinstance(node, BinaryExpr)
assert node.left == "1"
assert node.right == "2"
assert node.op == "+"
def test_put():
node = hoof.parse("run 2", "expr")
assert isinstance(node, RunExpr)
assert node.expr == "2"
def test_parentheses():
node = hoof.parse("(1 + 1)", "expr")
assert isinstance(node, BinaryExpr)
def test_expr_integer():
# this is a Token (INT) with no explicit shaping, so is result of visitTerminal
node = hoof.parse("1", "expr")
node == "1"
|
[
"machow@princeton.edu"
] |
machow@princeton.edu
|
4faba1910def77457e265813a6749d9fcdc2c9fa
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/ec2_write_3/managed-prefix-list_create.py
|
a37a54b7d58925db27ffcd48c98d760451977f82
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,393
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_three_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/create-managed-prefix-list.html
if __name__ == '__main__':
"""
delete-managed-prefix-list : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/delete-managed-prefix-list.html
describe-managed-prefix-lists : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-managed-prefix-lists.html
modify-managed-prefix-list : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/modify-managed-prefix-list.html
"""
parameter_display_string = """
# prefix-list-name : A name for the prefix list.
Constraints: Up to 255 characters in length. The name cannot start with com.amazonaws .
# max-entries : The maximum number of entries for the prefix list.
# address-family : The IP address type.
Valid Values: IPv4 | IPv6
"""
add_option_dict = {}
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_three_parameter("ec2", "create-managed-prefix-list", "prefix-list-name", "max-entries", "address-family", add_option_dict)
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
c081b2cc5c19aacf9997f2dcf145d5c6d6a94c75
|
a985c0797ed10fc7eef59c527b0490dbfeadd2af
|
/Docker Model/utils/makeprediction.py
|
c5e55cdb241d98d033c0a7d78c01784c14785250
|
[] |
no_license
|
etheleon/data-science-for-fun
|
1bd0c9f04a8c5f0e533d42816a085c8e0656092d
|
7488c3f9a3a0e36371905c71fdf7f2528e9d0e95
|
refs/heads/master
| 2021-07-23T14:21:08.860090
| 2017-10-30T02:30:43
| 2017-10-30T02:30:43
| 108,791,255
| 0
| 0
| null | 2017-10-30T02:19:55
| 2017-10-30T02:19:54
| null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
def predict(inputFeatures):
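# fit a default KNeighborsClassifier on the bundled iris dataset and map the predicted class index back to its species name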
iris = datasets.load_iris()
knn = KNeighborsClassifier()
knn.fit(iris.data, iris.target)
predictInt = knn.predict(inputFeatures)
if predictInt[0] == 0:
predictString = 'setosa'
elif predictInt[0] == 1:
predictString = 'versicolor'
elif predictInt[0] == 2:
predictString = 'virginica'
else:
predictString = 'null'
return predictString
|
[
"A0134553@u.nus.edu"
] |
A0134553@u.nus.edu
|
837341341225792eaf8191f24e39c25694df9f97
|
726d8518a8c7a38b0db6ba9d4326cec172a6dde6
|
/0909. Snakes and Ladders/Solution.py
|
29b98be3c2e7335ac5e219780c47380b948657d7
|
[] |
no_license
|
faterazer/LeetCode
|
ed01ef62edbcfba60f5e88aad401bd00a48b4489
|
d7ba416d22becfa8f2a2ae4eee04c86617cd9332
|
refs/heads/master
| 2023-08-25T19:14:03.494255
| 2023-08-25T03:34:44
| 2023-08-25T03:34:44
| 128,856,315
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
from collections import deque
from typing import List
class Solution:
def snakesAndLadders(self, board: List[List[int]]) -> int:
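# BFS over board squares 1..n*n, one die roll per level, so the first time square n*n is reached gives the minimum number of moves; odd rows are numbered right-to-left (boustrophedon order)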
n = len(board)
visited = [False] * (n * n + 1)
queue = deque([1])
steps = 0
while queue:
size = len(queue)
for _ in range(size):
pos = queue.popleft()
for i in range(pos + 1, min(pos + 7, n * n + 1)):
if visited[i]:
continue
visited[i] = True
if i == n * n:
return steps + 1
r, c = divmod(i - 1, n)
if r & 1:
c = n - 1 - c
r = n - 1 - r
if board[r][c] != -1 and board[r][c] == n * n:
return steps + 1
if board[r][c] == -1:
queue.append(i)
else:
queue.append(board[r][c])
steps += 1
return -1
|
[
"yubowen.ssr@bytedance.com"
] |
yubowen.ssr@bytedance.com
|
d6b4abc7fbe0628b62ce4ae5c4de91acedb25971
|
962feeffee41625ff841f6590f97bb09cef9be4c
|
/torch_glow/tests/nodes/avgpool3d_test.py
|
93e26349ac4e677a2d89d2388568725436963f2f
|
[
"Apache-2.0"
] |
permissive
|
SushantDaga/glow
|
8c4c3fbc58c3ae760bdd8e1df2e8c05a72ff07bc
|
aab22c3e0421dadd29950c2ebfa88b86027cecf5
|
refs/heads/master
| 2022-11-03T08:39:33.958233
| 2020-06-19T17:03:14
| 2020-06-19T17:05:42
| 273,568,864
| 2
| 0
|
Apache-2.0
| 2020-06-19T19:12:31
| 2020-06-19T19:12:30
| null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests.utils import jitVsGlow
import unittest
class TestAvgPool3d(unittest.TestCase):
def test_avg_pool3d_basic(self):
"""Basic test of the PyTorch avg_pool3d Node on Glow."""
def test_f(inputs):
return F.avg_pool3d(inputs, 3)
inputs = torch.randn(1, 4, 5, 5, 5)
jitVsGlow(test_f, inputs, expected_fused_ops={"aten::avg_pool3d"})
def test_avg_pool3d_with_args(self):
"""Test of the PyTorch avg_pool3d Node with arguments on Glow."""
def test_f(inputs):
return F.avg_pool3d(inputs, padding=2, kernel_size=(4, 7, 7))
inputs = torch.randn(1, 4, 10, 10, 10)
jitVsGlow(test_f, inputs, expected_fused_ops={"aten::avg_pool3d"})
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
3e4c84c039144eaf018a0dbe4dfa92d68101bbe8
|
f745231568d2f15c75a82638ffa4fd86c5b682ea
|
/assignment_4/shapes.py
|
2856f2733a4a28e8f7ad12c5634c596f054b3aef
|
[
"WTFPL"
] |
permissive
|
gauravjuvekar/ppl
|
c53dccd274e93207f543afc8ded787cff9319085
|
fc5592623fa294c18a6e24444b9e06e2a08b2f6c
|
refs/heads/master
| 2016-09-12T21:31:07.960658
| 2016-04-26T07:16:00
| 2016-04-26T07:16:00
| 57,103,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,886
|
py
|
#!/usr/bin/env python3
import math
class Shape(object):
def __init__(self, turtle=None):
self.turtle = turtle
class Polygon(Shape):
def __init__(self, points, turtle=None):
Shape.__init__(self, turtle)
self.points = points
def draw(self, turtle=None):
if turtle is None:
turtle = self.turtle
turtle.penup()
pos = turtle.pos()
relative = lambda x, y: (pos[0] + x, pos[1] + y)
turtle.goto(relative(*(self.points[-1])))
turtle.pendown()
for point in self.points:
turtle.goto(relative(*point))
turtle.penup()
turtle.goto(pos)
turtle.pendown()
def transform(self, matrix):
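# apply a 2x2 linear transformation to every vertex of the polygon (each point is treated as a column vector)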
if not (len(matrix) == 2 and
(len(matrix[0]) == len(matrix[1]) == 2)):
raise ValueError("Transformation matrix must be order 2 square")
apply = lambda point, matrix: (
(point[0] * matrix[0][0]) + (point[1] * matrix[0][1]),
(point[0] * matrix[1][0]) + (point[1] * matrix[1][1]))
self.points = [apply(point, matrix) for point in self.points]
class RegularPolygon(Polygon):
def __init__(self, sides, radius, turtle=None):
step_angle = 360 / sides
points = []
angle = 0
while angle < 360:
points.append((
radius * math.cos(math.radians(angle)),
radius * math.sin(math.radians(angle))))
angle += step_angle
Polygon.__init__(self, points, turtle)
class Ellipse(RegularPolygon):
def __init__(self, rad_x, rad_y, turtle=None):
sides = max((rad_x, rad_y))
RegularPolygon.__init__(self, sides, min((rad_x, rad_y)), turtle)
if rad_x < rad_y:
self.transform(((1, 0), (0, rad_y / rad_x)))
else:
self.transform(((rad_x / rad_y, 0), (0, 1)))
|
[
"gauravjuvekar@gmail.com"
] |
gauravjuvekar@gmail.com
|
1186138ee1bd98ce6cc3c24b6d4b5d7158920d79
|
f81099738d3ab7d4a4773a04ed9e36e493632590
|
/angelos-portfolio/test/test_domain_update.py
|
2ccd8c81f1a7ea5f7e2d64656a9b8ccd5a5df49a
|
[
"MIT"
] |
permissive
|
kristoffer-paulsson/angelos
|
eff35753e4d7e4465d2aadac39265f206b09fcf9
|
d789f47766fe3a63a6752b92e4ea955f420dbaf9
|
refs/heads/master
| 2022-05-05T15:16:59.340527
| 2022-03-27T16:05:51
| 2022-03-27T16:05:51
| 142,691,235
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,287
|
py
|
#
# Copyright (c) 2018-2020 by Kristoffer Paulsson <kristoffer.paulsson@talenten.se>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# Kristoffer Paulsson - initial implementation
#
"""Security tests putting the policies to the test."""
from unittest import TestCase
from angelos.common.policy import evaluate
from angelos.lib.policy.types import PersonData
from angelos.portfolio.domain.create import CreateDomain
from angelos.portfolio.domain.update import UpdateDomain
from angelos.portfolio.entity.create import CreatePersonEntity
from test.fixture.generate import Generate
class TestUpdateDomain(TestCase):
def test_perform(self):
data = PersonData(**Generate.person_data()[0])
portfolio = CreatePersonEntity().perform(data)
CreateDomain().perform(portfolio)
self.assertIsNotNone(portfolio.domain)
with evaluate("Domain:Update") as report:
domain = UpdateDomain().perform(portfolio)
self.assertIs(domain, portfolio.domain)
self.assertTrue(report)
|
[
"kristoffer.paulsson@talenten.se"
] |
kristoffer.paulsson@talenten.se
|
8b95e2ada92485e2e3e8915583d7b6c7899d04f7
|
5022b48f311ba4710e1851855552b9546a3142c5
|
/unittest/case_test.py
|
3b355326b97f14c7a95801f1b8d7f47cb5b04d82
|
[] |
no_license
|
18786262315/python_lx
|
a7a15a294312b8382c3d1fd97a8d0ede38f1c5a5
|
a870d49cc4ca6efd1b54c2b89dfbf5e3d911a568
|
refs/heads/master
| 2020-03-21T12:37:30.748759
| 2020-03-18T09:31:31
| 2020-03-18T09:31:31
| 138,563,274
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,228
|
py
|
'''
unittest conditional assertions
tester: cc
This file is a translated reference only; it does not describe concrete usage.
'''
Skiptest()  # Raise this exception inside a test to skip it.
_ShouldStop()  # Stop the test run.
_UnexpectedSuccess()  # The test was expected to fail, but it did not fail.
Skip()  # Skip a test unconditionally.
skipIf(condition, reason)  # Skip the test when the condition is true.
skipUnless(condition, reason)  # Skip the test when the condition is false.
expectedFailure(test_item)  # Mark the test as an expected failure; if it fails when run, it is not counted as a failed case.
_is_subtype(expected, basetype)  # Check whether the type matches what is expected.
addTypeEqualityFunc(typeobj, function)  # Register an equality-check function for a custom type.
addCleanup(function, *args, **kwargs)  # Add a cleanup function to run after tearDown() for each test case; the functions run in last-in-first-out (LIFO) order. If setUp() fails, tearDown() is not run, and neither are the functions added with addCleanup().
setUp()  # Runs before each test case; any exception (other than unittest.SkipTest and AssertionError) is treated as an error rather than a failure and aborts the current test case.
tearDown()  # Runs after setUp() has executed, whether or not the test case succeeded. An exception raised here (other than unittest.SkipTest and AssertionError) counts as one extra error.
setUpClass(cls) and tearDownClass(cls)  # Run before/after the class's test cases; they must be declared with the classmethod decorator.
countTestCases()  # Return the number of test cases; for a TestCase instance this is always 1.
defaultTestResult()  # If run() is called without a result argument, this returns a TestResult object containing this test case's results.
shortDescription()  # Return the test case's description, i.e. the function's docstring, or None if there is none; useful for describing the test in result output.
id()  # Return the test case's id, usually of the form module.class.function; useful in result output.
subTest(msg=_subtest_msg_sentinel, **params)  # Return a context manager that runs the enclosed block as a sub-test identified by an optional message and keyword parameters. A failure in the sub-test marks the test case as failed, but execution resumes after the block so further test code can run.
run(result=None)  # Run one test case and collect the outcome into result; the outcome is not returned to the caller. If result is None, the outcome goes into the object returned by defaultTestResult().
doCleanups()  # Unconditionally call the functions added with addCleanup(); useful when setUp() failed but cleanup is still needed, or when the cleanups should run before tearDown().
debug()  # Unlike run(), which stores the outcome in a result object, debug() runs the test case and raises any exception to the caller.
fail(msg=None)  # Unconditionally mark a test case as failed; msg is the failure message.
assertEqual(set1, set2, msg=None)  # Check that two values are equal.
assertFalse(expr, msg=None)  # Check that the expression is false.
assertTrue(expr, msg=None)  # Check that the expression is true.
assertAlmostEqual and assertNotAlmostEqual(first, second, places=None, msg=None, delta=None)  # Check whether two values are (not) approximately equal; places is the number of decimal places of precision.
assertSequenceEqual(seq1, seq2, msg=None, seq_type=None)  # Equality assertion for ordered sequences, such as tuples and lists.
assertListEqual(list1, list2, msg=None)  # List-specific equality assertion.
assertTupleEqual(tuple1, tuple2, msg=None)  # Tuple-specific equality assertion.
assertSetEqual(set1, set2, msg=None)  # Set-specific equality assertion.
assertIn and assertNotIn(member, container, msg=None)  # Check whether a is (not) contained in b.
assertIs and assertIsNot(expr1, expr2, msg=None)  # Check whether a is (not) b.
assertDictEqual(d1, d2, msg=None)  # Check that two dicts are equal.
assertDictContainsSubset(subset, dictionary, msg=None)  # Check that the dictionary is a superset of the subset.
assertCountEqual(first, second, msg=None)  # Check that two unordered sequences contain the same elements.
assertMultiLineEqual(first, second, msg=None)  # Assert that two multi-line strings are equal.
assertLess(a, b, msg=None)  # Assert a < b.
assertLessEqual(a, b, msg=None)  # Assert a <= b.
assertGreater(a, b, msg=None)  # Assert a > b.
assertGreaterEqual(a, b, msg=None)  # Assert a >= b.
assertIsNone and assertIsNotNone(obj, msg=None)  # Check whether obj is (not) None.
assertIsInstance(a, b) and assertNotIsInstance(a, b)  # Like assertTrue; b can be a single type or a tuple of types.
assertRaisesRegex(expected_exception, expected_regex, *args, **kwargs)  # Assert that the message of the raised exception matches the regular expression.
assertWarnsRegex(expected_warning, expected_regex, *args, **kwargs)  # Assert that the message of the triggered warning matches the regex; like assertWarns(), but only a warning whose message matches the regex counts as a successful match.
assertRegex and assertNotRegex(text, expected_regex, msg=None)  # Check whether the text matches (does not match) the regular expression.
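# Minimal illustrative sketch of a few of the assertions listed above (added example, not part of the original cheat sheet):
import unittest
class DemoTest(unittest.TestCase):
    @unittest.skipIf(True, 'example of a conditional skip')
    def test_skipped(self):
        self.fail('never runs')
    def test_asserts(self):
        self.assertEqual(1 + 1, 2)
        self.assertIn('a', 'abc')
        self.assertAlmostEqual(0.1 + 0.2, 0.3, places=7)
if __name__ == '__main__':
    unittest.main()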
|
[
"843092012@qq.com"
] |
843092012@qq.com
|
a720e9e9dba4c9e67cd739029eb1e94d9b40b70b
|
7b532a7d7c79601a5a8dd7beaf6b06e7a23b3666
|
/Inception_model/softmax.py
|
7f2dd55bc8779ae70bfbfb3f2fe90788f2300a17
|
[] |
no_license
|
lanardo/Image_processing_server
|
e3d3151cf825ebca01a64d851642bca0e99b0646
|
957ab8d82a453049885f85f440efcfc60c0e7d7f
|
refs/heads/master
| 2021-05-14T13:13:27.358287
| 2018-07-04T19:41:06
| 2018-07-04T19:41:06
| 116,435,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,287
|
py
|
import tensorflow as tf
import csv
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", help="path to folder containing the trainning data")
parser.add_argument("--output_dir", help="path to folder containing the result coef files")
parser.add_argument("--restore", default="yes", help="restore from the checkpoint")
parser.add_argument("--rate", type=float, default=0.0001, help="rate(alpha) for trainning")
parser.add_argument("--epochs", type=int, default=200000, help="max epoches")
parser.add_argument("--strip", type=int, default=50, help="step for writing the result on loop")
a = parser.parse_args()
# a.input_dir = './model'
# a.output_dir = './model'
# a.restore = "no"
def xaver_init(n_inputs, n_outputs, uniform=True):
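# Xavier/Glorot-style initializer: bounds the initial weights by a range derived from the layer's fan-in and fan-out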
if uniform:
init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
return tf.random_uniform_initializer(-init_range, init_range)
else:
stddev = tf.sqrt(3.0 / (n_inputs + n_outputs))
return tf.truncated_normal_initializer(stddev=stddev)
def acc(d1, d2):
cnt = 0
for i in range(d1.__len__()):
if d1[i] == d2[i]:
cnt += 1
return float(cnt)/d1.__len__()
def sel_max(data):
ret_ind = []
for i in range(data.__len__()):
if data[i][0] == 1:
ret_ind.append(0)
else:
ret_ind.append(1)
return ret_ind
if __name__ == '__main__':
learning_rate = a.rate
in_dir = a.input_dir
out_dir = a.output_dir
epochs = a.epochs
strip = a.strip
train_data_path = os.path.join(in_dir, 'train_data.csv')
w_coef_path = os.path.join(out_dir, 'w.csv')
b_coef_path = os.path.join(out_dir, 'b.csv')
ckpt_path = os.path.join(out_dir, 'model_bin.ckpt')
labels = ['front', 'front_3_quarter', 'side', 'rear_3_quarter', 'rear', 'interior', 'tire']
directions = [
[1, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1]
]
x_data = []
y_data = []
""" Loading training data from csv files """
print('[Step 1] Loading training data ...')
# for python 2x
with open(train_data_path) as fp:
csv_reader = csv.reader(fp, delimiter=',')
for row in csv_reader:
x_data.append([float(row[i]) for i in range(0, len(row)-7)])
y_data.append([float(row[i]) for i in range(len(row)-7, len(row))])
print("total features :" + str(len(x_data)))
print("length of feature :" + str(len(x_data[0])))
print("length of label :" + str(len(y_data[0])))
""" Placeholder """
print('[Step 2] Placeholder')
x = tf.placeholder('float', [None, 2048]) # len(feature) = 2048
y = tf.placeholder('float', [None, 7]) # len(Directions) = 7 : classes
W1 = tf.get_variable('W1', shape=[2048, 7], initializer=xaver_init(2048, 7))
b1 = tf.Variable(tf.zeros([7]))
activation = tf.add(tf.matmul(x, W1), b1)
t1 = tf.nn.softmax(activation)
""" Minimize error using cross entropy """
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=activation, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Gradient Descent
""" Initializing the variables """
print('[Step 3] Initializing the variables.')
# init = tf.initialize_all_variables()  # deprecated TF API
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
saver = tf.train.Saver()
print(a.restore)
if a.restore == "yes":
print('Loading the last learning Session.')
saver.restore(sess, ckpt_path)
""" Training cycle """
print('[Step 4] Training...')
for step in range(epochs):
sess.run(optimizer, feed_dict={x: x_data, y: y_data})
if step % strip == 0:
ret = sess.run(t1, feed_dict={x: x_data})
acc1 = acc(sess.run(tf.arg_max(ret, 1)), sess.run(tf.arg_max(y_data, 1))) * 100
print(' ' + str(step) + ' ' + str(sess.run(cost, feed_dict={x: x_data, y: y_data})) + ' ' + str(acc1))
saver.save(sess, ckpt_path)
print('Optimization Finished!')
|
[
"williams.lanardo@gmail.com"
] |
williams.lanardo@gmail.com
|
e1bea179688f6a672cd83a7f2b9f861bbb702425
|
4b5c21db88a80fcca418c0c8b431d93774d9336a
|
/envfarmaciaveterinaria/Scripts/viewer.py
|
fb21137537dab877fe83180aef6958ef73bea3df
|
[] |
no_license
|
laMoradaPostrera/FarmaciaVeterinariaUnillanos
|
e9620b1b108ab53956a50e754dd7f339e237f150
|
2312ccee591c4991c3ee0627ea4815de65e7a1eb
|
refs/heads/master
| 2020-10-01T23:43:19.395012
| 2018-06-14T05:15:36
| 2018-06-14T05:15:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
#!c:\users\lenovo~1\mispro~1\unilla~1\farmac~2\envfar~1\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
#
# an image viewer
class UI(tkinter.Label):
def __init__(self, master, im):
if im.mode == "1":
# bitmap image
self.image = ImageTk.BitmapImage(im, foreground="white")
tkinter.Label.__init__(self, master, image=self.image, bd=0,
bg="black")
else:
# photo image
self.image = ImageTk.PhotoImage(im)
tkinter.Label.__init__(self, master, image=self.image, bd=0)
#
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python viewer.py imagefile")
sys.exit(1)
filename = sys.argv[1]
root = tkinter.Tk()
root.title(filename)
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
|
[
"diegoasencio96@gmail.com"
] |
diegoasencio96@gmail.com
|
e9f935855c936f7be736e9cada0f8dfb9d5cbf2c
|
6f444f025f27a10dd7b1bf61083ea2832ffcb196
|
/backend/location/api/v1/serializers.py
|
f4a37f977e26a6abd08e6dffcee6108c10dadd98
|
[] |
no_license
|
crowdbotics-apps/ledger-wallet-29295
|
2fe0eee9e06cb1f5c8e514ad650df8276aac789b
|
d96542a71685ce6d335882c10cf840355c8252f7
|
refs/heads/master
| 2023-06-24T00:46:30.889717
| 2021-07-30T20:37:03
| 2021-07-30T20:37:03
| 391,182,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
from rest_framework import serializers
from location.models import TaskLocation, CustomerLocation, TaskerLocation, MapLocation
class CustomerLocationSerializer(serializers.ModelSerializer):
class Meta:
model = CustomerLocation
fields = "__all__"
class MapLocationSerializer(serializers.ModelSerializer):
class Meta:
model = MapLocation
fields = "__all__"
class TaskerLocationSerializer(serializers.ModelSerializer):
class Meta:
model = TaskerLocation
fields = "__all__"
class TaskLocationSerializer(serializers.ModelSerializer):
class Meta:
model = TaskLocation
fields = "__all__"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
f8a4758b89fce2ae01dfdac0c57540060c9a0e3f
|
1f416c5f06c6ccf14e0f9778e52a2e556e6888b7
|
/Config/ssd_config.py
|
73fd796b7d333e67b702743a2931a59f96af67bd
|
[] |
no_license
|
LLLibra/yolo-v3
|
0a8961b3e7c8d099174c72685176b82c3e627f59
|
6dedf28f5b9d07cb609dc1c91119c328d02b6e17
|
refs/heads/master
| 2020-09-24T17:54:22.993738
| 2019-12-28T08:04:11
| 2019-12-28T08:04:11
| 225,812,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,108
|
py
|
# -*- coding:UTF-8 -*-
from utils.ssd_loss import *
from Model.SSD import build_SSD
extras = {
'300': [[1024,256,512],[512,128,256],[256,128,256],[256,128,256]],
'512': [],
}
mbox = {
'300': [4, 6, 6, 6, 4, 4], # number of default boxes per location in each final feature map
'512': [],
}
##SSD 300 config
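# prior-box settings for the six SSD300 detection feature maps (38x38 down to 1x1); the min/max sizes and aspect ratios below follow the usual SSD300 VOC configuration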
voc = {
'num_classes': 21,
'feature_maps':[38,19,10,5,3,1],
'min_dim':300,
'img_size':300,
'xywh':False,
'steps':[8,16,32,64,100,300],
'min_sizes':[30,60,111,162,216,264],
'max_sizes':[60,111,162,213,264,315],
'aspect_ratio':[[2],[2,3],[2,3],[2,3],[2],[2]],
'variance':[0.1,0.2],
'clip':True,
'name':'VOC',
}
coco = {
'num_classes': 201,
'lr_steps': (280000, 360000, 400000),
'max_iter': 400000,
'feature_maps': [38, 19, 10, 5, 3, 1],
'min_dim': 300,
'img_size':300,
'steps': [8, 16, 32, 64, 100, 300],
'min_sizes': [21, 45, 99, 153, 207, 261],
'max_sizes': [45, 99, 153, 207, 261, 315],
'aspect_ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
'variance': [0.1, 0.2],
'clip': True,
'name': 'COCO',
}
|
[
"245407754@qq.com"
] |
245407754@qq.com
|
4fe2cf1c3b72558c9e48a68238cf6abf7425b930
|
073e5c1775886ec42ed741378e682534e79bb856
|
/kdb/MTS_patch.py
|
3ff2e1324c47467096b8afed52a839baa28a898d
|
[] |
no_license
|
tt9024/huan
|
97edd01e280651720a7556ff75dd64cc91184a04
|
48dcc7ef0ea40902e33bc67faf0298736a3ebe6b
|
refs/heads/master
| 2023-07-26T12:30:53.116852
| 2023-07-11T02:30:14
| 2023-07-11T02:30:14
| 134,997,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,463
|
py
|
import numpy as np
import repo_dbar as repo
import l1
import os
import glob
import dill
# patch vbs of various barseconds
def patch_vbs(dbar, day, utc, vbs, barsec):
bar, col, bs = dbar.load_day(day)
if bar is None or len(bar)==0:
print('problem getting bars from repo on ', day)
return
# make sure it's a multiple
bs_mul = barsec//bs
if bs_mul*bs != barsec:
print('barsec ', barsec, ' is not a multiple of repo barsec ', bs, ' on ', day)
return
utc_bs = dbar._make_daily_utc(day, barsec)
nbar = len(utc)
ix = np.clip(np.searchsorted(utc, utc_bs),0,nbar-1)
ixz = np.nonzero(utc[ix] == utc_bs)[0]
if len(ixz) == 0:
print('nothing found in repo on ', day)
return
# reuse the existing if not provided, but aggregated at barsec
#vbs_bs = np.zeros(len(utc_bs))
vbs_bs = np.sum(bar[:,repo.vbsc].reshape((len(utc_bs),bs_mul)),axis=1)
vbs_bs[ixz] = vbs[ix][ixz]
# calculate the weight to be vol within the barsec
vbs0 = bar[:,repo.volc].reshape((len(utc_bs),bs_mul))
vbs0 = (vbs0.T/np.sum(vbs0,axis=1)).T
vbs0[np.isinf(vbs0)] = 1.0/bs_mul
vbs0[np.isnan(vbs0)] = 1.0/bs_mul
vbs_bs0 = (vbs0.T*vbs_bs).T.reshape((len(utc_bs)*bs_mul,1))
# write this day back
dbar.overwrite([vbs_bs0], [day], [[repo.vbsc]], bs)
print('!!DONE ', day)
def update_array(dbar, vbs_array, barsec):
"""
vbs_array shape [nndays, 2], of utc and vbs
"""
nndays, nc = vbs_array.shape
assert nc == 2, 'vbs_array expected shape 2 (utc,vbs)'
utc=vbs_array[:,0]
vbs=vbs_array[:,1]
assert utc[1]-utc[0] == barsec, 'barsec mismatch! ' + str((utc[1]-utc[0],barsec))
start_day = l1.trd_day(vbs_array[0,0])
end_day = l1.trd_day(vbs_array[-1,0])
tdi = l1.TradingDayIterator(start_day)
day = tdi.yyyymmdd()
while day != end_day:
patch_vbs(dbar, day, utc, vbs, barsec)
tdi.next()
day = tdi.yyyymmdd()
def update_array_path(array_path='/home/bfu/kisco/kr/vbs/2021_1125_2022_0114', barsec=15, repo_path = '/home/bfu/kisco/kr/repo'):
os.system('gunzip ' + os.path.join(array_path,'*.npy.gz'))
fn = glob.glob(os.path.join(array_path, '*.npy'))
for f in fn:
print('processing ', f)
# expect file name as CL.npy
symbol = f.split('/')[-1].split('.')[0]
vsarr = np.load(open(f,'rb'))
dbar = repo.RepoDailyBar(symbol, repo_path=repo_path)
update_array(dbar, vsarr, barsec)
def update_dict(dict_file, barsec, repo_path='/home/bfu/kisco/kr/repo', symbol_list=None):
"""dict: {symbol : { 'utc': shape [ndays,2], 'vbs': shape [ndays, n] } }
where utc has each day's first/last utc
the barsec is given for verification purpose: barsec = (utc1-utc0)/n
"""
d = dill.load(open(dict_file, 'rb'))
for symbol in d.keys():
if symbol_list is not None:
if symbol not in symbol_list:
continue
utc=d[symbol]['utc']
vbs=d[symbol]['vbs']
ndays, nc = utc.shape
assert nc==2, 'utc shape not 2 for ' + symbol
print('got ',ndays,' for ', symbol)
dbar = repo.RepoDailyBar(symbol, repo_path=repo_path)
for u, v in zip(utc, vbs):
(u0,u1)=u
day = l1.trd_day(u0)
# LCO could have utc up until 18:00
# turn it on when fixed in mts_repo
#assert day == l1.trd_day(u1), 'not same trade day for %s on %d: %f-%f'%(symbol, day, u0, u1)
utc0 = np.arange(u0,u1+barsec,barsec).astype(int)
n = len(v)
assert len(utc0)==n, 'vbs shape mismatch with utc for %s on %s: %d-%d'%(symbol, day, (u1-u0)//barsec,n)
print('process %s on %s'%(symbol, day))
patch_vbs(dbar, day, utc0, v, barsec)
def update_dict_all():
# a scripted update, modify as needed
# the 2 _N1 from 20220223 to 20220415 with barsec=5
path = '/home/bfu/kisco/kr/vbs/update_0415'
dict_files = ['0223_0302_5s.dill', '0303_0415_5s.dill']
barsec=5
repo_path = '/home/bfu/kisco/kr/repo'
for df in dict_files:
update_dict(os.path.join(path, df), barsec, repo_path=repo_path)
# the _N2 from 20211125 to 20220415 with barsec=30
dict_files = ['20211125_2022_0415_N2_30s.dill']
barsec=30
repo_path = '/home/bfu/kisco/kr/repo_nc'
for df in dict_files:
update_dict(os.path.join(path, df), barsec, repo_path=repo_path)
|
[
"joy@joy.com"
] |
joy@joy.com
|
81f726744a38d25f6099ad36107663ac8a5d3212
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/stdlib-big-2805.py
|
b2c7ae07ef65cab60cc16a7073cc6a18c9d869b1
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,992
|
py
|
# ChocoPy library functions
def int_to_str(x: int) -> str:
digits:[str] = None
result:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str2(x: int, x2: int) -> str:
digits:[str] = None
digits2:[str] = None
result:str = ""
result2:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str3(x: int, x2: int, x3: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str4(x: int, x2: int, x3: int, x4: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str5(x: int, x2: int, x3: int, x4: int, x5: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
digits5:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
result5:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def str_to_int(x: str) -> int:
result:int = 0
digit:int = 0
char:str = ""
sign:int = 1
first_char:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int2(x: str, x2: str) -> int:
result:int = 0
result2:int = 0
digit:int = 0
digit2:int = 0
char:str = ""
char2:str = ""
sign:int = 1
sign2:int = 1
first_char:bool = True
first_char2:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int3(x: str, x2: str, x3: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
char:str = ""
char2:str = ""
char3:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int4(x: str, x2: str, x3: str, x4: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int5(x: str, x2: str, x3: str, x4: str, x5: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
result5:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
digit5:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
char5:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
sign5:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
first_char5:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
# Input parameters
c:int = 42
c2:int = 42
c3:int = 42
c4:int = 42
c5:int = 42
n:int = 10
n2:int = 10
n3:int = 10
n4:int = 10
n5:int = 10
# Run [-nc, nc] with step size c
s:str = ""
s2:str = ""
s3:str = ""
s4:str = ""
s5:str = ""
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
i = -n * c
# Crunch
while i <= n * c:
s = int_to_str(i)
print(s)
i = str_to_int(s) + c
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
7a8949c381732e07d4f492876a8541503b3d5db0
|
8f7b755a7d21332ae26a9d2e59dc0da00da8affb
|
/euler-081.py
|
ab42d76c52e73ce1d22a8a8550abf38c61c371fb
|
[] |
no_license
|
msbelal/Project-Euler
|
95204d1ea455f45a49e9ce517d427db80fe15e36
|
1eda6b8a1786f0613023193d3dcde3090edaac9a
|
refs/heads/master
| 2020-04-12T12:07:41.921989
| 2012-04-01T15:41:12
| 2012-04-01T15:41:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
from __future__ import with_statement
with open ("matrix-81.txt") as f:
lines = [ line.strip().split(",") for line in f.readlines() ]
q = {}
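# dynamic programming over anti-diagonals: q[x, y] is the minimal path sum from (0, 0) to (x, y) moving only right or down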
for i in xrange(159) :
for j in xrange(0,i+1) :
x, y = j, i - j
if (0 <= x < 80) and (0 <= y < 80) :
if x == 0 and y == 0:
q[x,y] = 0
elif x == 0 :
q[x,y] = q[x,y-1]
elif y == 0 :
q[x,y] = q[x-1,y]
else :
q[x,y] = min(q[x-1,y], q[x, y-1])
q[x,y] += int(lines[x][y])
print q[79,79]
|
[
"hughdbrown@.(none)"
] |
hughdbrown@.(none)
|
4120b422aab2e14830f4047978f9995ac06fa5c4
|
430722ea44c3704706f506554bb3ce64a7ee6596
|
/tests/image/test_backbones.py
|
60369275557cfe450f81d029b1bc533270f6f016
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
ethanwharris/lightning-flash
|
462907c7e7dbbbf0840917ae72a67b5ddbdc7138
|
48bdfd86639aa4aad493d264cd8a6eeeb50a394f
|
refs/heads/master
| 2023-06-07T10:06:06.672478
| 2021-07-12T11:55:15
| 2021-07-12T11:55:15
| 385,288,776
| 0
| 0
|
Apache-2.0
| 2021-07-12T15:09:39
| 2021-07-12T15:09:39
| null |
UTF-8
|
Python
| false
| false
| 2,080
|
py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib.error
import pytest
from pytorch_lightning.utilities import _TORCHVISION_AVAILABLE
from flash.core.utilities.imports import _BOLTS_AVAILABLE, _TIMM_AVAILABLE
from flash.image.backbones import catch_url_error, IMAGE_CLASSIFIER_BACKBONES
@pytest.mark.parametrize(["backbone", "expected_num_features"], [
pytest.param("resnet34", 512, marks=pytest.mark.skipif(not _TORCHVISION_AVAILABLE, reason="No torchvision")),
pytest.param("mobilenetv2_100", 1280, marks=pytest.mark.skipif(not _TIMM_AVAILABLE, reason="No timm")),
pytest.param("simclr-imagenet", 2048, marks=pytest.mark.skipif(not _BOLTS_AVAILABLE, reason="No bolts")),
pytest.param("swav-imagenet", 2048, marks=pytest.mark.skipif(not _BOLTS_AVAILABLE, reason="No bolts")),
pytest.param("mobilenet_v2", 1280, marks=pytest.mark.skipif(not _TORCHVISION_AVAILABLE, reason="No torchvision")),
])
def test_image_classifier_backbones_registry(backbone, expected_num_features):
backbone_fn = IMAGE_CLASSIFIER_BACKBONES.get(backbone)
backbone_model, num_features = backbone_fn(pretrained=False)
assert backbone_model
assert num_features == expected_num_features
def test_pretrained_backbones_catch_url_error():
def raise_error_if_pretrained(pretrained=False):
if pretrained:
raise urllib.error.URLError('Test error')
with pytest.warns(UserWarning, match="Failed to download pretrained weights"):
catch_url_error(raise_error_if_pretrained)(pretrained=True)
|
[
"noreply@github.com"
] |
noreply@github.com
|
820ed298b2d0d51b64a647c759fec6a4a95c79e1
|
0c4b33d04cf7fb73b3752b03af89eeaf76b8a0d2
|
/第14章-网络编程/client.py
|
93a57207689113ca5cbd684fb77a81dba69d2db4
|
[] |
no_license
|
kingflyfly/python_study
|
3b3ab427d23174b61b8f14c223059cfa9f303219
|
8a63a7c11b408bbc11a2b636517beaa424b37725
|
refs/heads/master
| 2020-06-11T01:39:52.655730
| 2020-03-24T16:09:39
| 2020-03-24T16:09:39
| 193,817,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
import socket
import sys
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
port = 9992
# Connect to the service on the specified host and port
s.connect((host, port))
# Receive up to 1024 bytes of data
msg = s.recv(1024)
s.close()
print (msg.decode('utf-8'))
|
[
"542001608@qq.com"
] |
542001608@qq.com
|
096c2a0a7401aae836823744ed882e946775d8c3
|
74309d28c3c966ab46fe1d7bd7c6d6ca9e7009d4
|
/setup.py
|
86192f497fb7f45cf50128f2fc1870d69363a8a8
|
[
"MIT"
] |
permissive
|
seporaitis/graphqlpy
|
c476b4632c3d117a95663ee88d1710a4999f22e7
|
c16623a00a851a785eaef7b27a72c35d49b0c4a4
|
refs/heads/master
| 2023-01-05T06:52:14.647528
| 2017-09-07T20:56:48
| 2017-09-07T20:56:48
| 102,777,202
| 1
| 0
|
MIT
| 2022-12-26T19:45:27
| 2017-09-07T19:25:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,497
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
from setuptools import find_packages, setup
def get_version(package):
"""
Return package version as listed in `__version__` in `init.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
version = get_version('graphqlpy')
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = []
test_requirements = []
setup(
name='graphqlpy',
version=version,
description="A humble attempt at a library generating GraphQL queries programatically.",
long_description=readme + '\n\n' + history,
author="Julius Seporaitis",
author_email='julius@seporaitis.net',
url='https://github.com/seporaitis/graphqlpy',
packages=find_packages(exclude=['tests', 'tests.*']),
package_dir={
'graphqlpy': 'graphqlpy',
},
include_package_data=True,
install_requires=requirements,
license="MIT",
zip_safe=False,
keywords='graphql',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
tests_require=test_requirements
)
|
[
"julius@seporaitis.net"
] |
julius@seporaitis.net
|
5258a863174e0b8e1845a0504b06107374ae09f5
|
357eaed9e37bc97357261098e06a1219cfba73ff
|
/fpeg_helion/wsgi.py
|
9cc2d70e23adb4bb095d5e1310b1c62a795b0075
|
[
"MIT"
] |
permissive
|
andrewschreiber/fpeg
|
7acc6d39b7eb3fb6e662a5ac205f38f5372a3680
|
16fc3afb795040aea2e41216d6a9f88cedff2ba4
|
refs/heads/master
| 2021-07-12T09:19:31.461056
| 2020-06-30T21:43:17
| 2020-06-30T21:43:17
| 30,467,410
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 937
|
py
|
import bottle
from bottle import route, request, post, template
import logging
import json
import os
logging.basicConfig()
log = logging.getLogger("fpeg")
log.setLevel(logging.DEBUG)
STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')
@route('/')
def home():
bottle.TEMPLATE_PATH.insert(0, './views')
return bottle.template('home', sent=False, body=None)
@post('/compress')
def compress():
data = request.files.get("upload")
if data and data.file:
raw = data.file.read()
filename = data.filename
log.debug("uploaded {} ({} bytes).".format(filename, len(raw)))
else:
log.error("upload failed")
@route('/static/:filename')
def serve_static(filename):
log.debug("serving static assets")
return bottle.static_file(filename, root=STATIC_ROOT)
application = bottle.app()
application.catchall = False
bottle.run(application, host='0.0.0.0', port=os.getenv('PORT', 8080))
|
[
"Andrew Stocker"
] |
Andrew Stocker
|
ace0c793df344ee3d16d8b97ce61547ac0670a0d
|
7accb98587c694db57507468525261458e707138
|
/fabfile.py
|
f12579758e7aff044a31e3975c7fa50ea643997a
|
[] |
no_license
|
kotechkice/kicekriea
|
47f6ce4b9fa162b3dafe8dda45c640876a3e4aeb
|
6457e97aeea13f768488287abc4a8afcf40f8131
|
refs/heads/master
| 2021-01-01T03:46:39.199835
| 2016-04-26T05:57:16
| 2016-04-26T05:57:16
| 57,111,058
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| true
| false
| 22,058
|
py
|
from __future__ import print_function, unicode_literals, with_statement
from future.builtins import input, open
import os
import re
import sys
from functools import wraps
from getpass import getpass, getuser
from glob import glob
from contextlib import contextmanager
from posixpath import join
from os.path import basename, dirname
from fabric.api import env, cd, prefix, sudo as _sudo, run as _run, hide, task
#from fabric.api import settings
from fabric.api import puts
from fabric.contrib.files import exists, upload_template
from fabric.colors import yellow, green, blue, red
from fabric.utils import warn
import pdb
###############
# Fab Command #
###############
#fab command
#fab install
################
# Config setup #
################
conf = {}
if sys.argv[0].split(os.sep)[-1] in ("fab", "fab-script.py"):
# Ensure we import settings from the current dir
try:
#conf = __import__("settings", globals(), locals(), [], 0).FABRIC
#conf = __import__("project.settings", globals(), locals(), [], 0).FABRIC
from project import settings
conf = settings.FABRIC
try:
conf["HOSTS"][0]
except (KeyError, ValueError):
raise ImportError
except (ImportError, AttributeError):
print("Aborting, no hosts defined.")
exit()
env.db_pass = conf.get("DB_PASS", None)
env.db_root_pass = env.db_pass
#env.admin_pass = conf.get("ADMIN_PASS", None)
env.user = conf.get("SSH_USER", getuser())
env.password = conf.get("SSH_PASS", None)
env.key_filename = conf.get("SSH_KEY_PATH", None)
env.hosts = conf.get("HOSTS", [""])
env.proj_name = conf.get("PROJECT_NAME", os.getcwd().split(os.sep)[-1])
env.venv_home = conf.get("VIRTUALENV_HOME", "/home/%s" % env.user)
env.venv_path = "%s/%s" % (env.venv_home, env.proj_name)
env.proj_dirname = "project"
env.proj_path = "%s/%s" % (env.venv_path, env.proj_dirname)
env.manage = "%s/bin/python %s/project/manage.py" % ((env.venv_path,) * 2)
env.domains = conf.get("DOMAINS", [conf.get("LIVE_HOSTNAME", env.hosts[0])])
env.domains_nginx = " ".join(env.domains)
env.domains_python = ", ".join(["'%s'" % s for s in env.domains])
env.ssl_disabled = "#" if len(env.domains) > 1 else ""
env.repo_url = conf.get("REPO_URL", "")
env.git = env.repo_url.startswith("git") or env.repo_url.endswith(".git")
env.reqs_path = conf.get("REQUIREMENTS_PATH", None)
env.locale = conf.get("LOCALE", "en_US.UTF-8")
env.secret_key = conf.get("SECRET_KEY", "")
env.nevercache_key = conf.get("NEVERCACHE_KEY", "")
env.django_user = conf.get("DJANGO_USER", "duser")
env.django_user_group = env.django_user
env.django_project_settings = "settings"
env.gunicorn_workers = 2
env.gunicorn_logfile = '%(venv_path)s/logs/projects/%(proj_name)s_gunicorn.log' % env
#env.rungunicorn_script = '%(venv_path)s/scripts/rungunicorn_%(proj_name)s.sh' % env
env.rungunicorn_script = '%(venv_path)s/bin/gunicorn_start' % env
env.gunicorn_worker_class = "eventlet"
env.gunicorn_loglevel = "info"
env.gunicorn_port = conf.get("GUNICORN_PORT", 8000)
env.supervisor_program_name = env.proj_name
env.supervisorctl = '/usr/bin/supervisorctl'
env.supervisor_autostart = 'true'
env.supervisor_autorestart = 'true'
env.supervisor_redirect_stderr = 'true'
env.supervisor_stdout_logfile = '%(venv_path)s/logs/projects/supervisord_%(proj_name)s.log' % env
#env.supervisord_conf_file = '%(venv_path)s/configs/supervisord/%(proj_name)s.conf' % env
env.supervisord_conf_file = '/etc/supervisor/conf.d/%(proj_name)s.conf' % env
##################
# Template setup #
##################
# Each template gets uploaded at deploy time, but only if its
# contents have changed, in which case the reload command is
# also run.
templates = {
"nginx": {
"local_path": "deploy/nginx.conf",
"remote_path": "/etc/nginx/sites-enabled/%(proj_name)s.conf",
},
"supervisor": {
"local_path": "deploy/supervisord.conf",
"remote_path": env.supervisord_conf_file,
},
"cron": {
"local_path": "deploy/crontab",
"remote_path": "/etc/cron.d/%(proj_name)s",
"owner": "root",
"mode": "600",
},
"gunicorn": {
"local_path": "deploy/gunicorn_start",
"remote_path": "%(venv_path)s/bin/gunicorn_start",
},
"settings": {
"local_path": "deploy/local_settings",
"remote_path": "%(proj_path)s/project/local_settings.py",
},
"mysql": {
"local_path": "deploy/mysql.cnf",
"remote_path": "/etc/mysql/my.cnf",
}
}
######################################
# Context for virtualenv and project #
######################################
@contextmanager
def virtualenv():
"""
Runs commands within the project's virtualenv.
"""
with cd(env.venv_path):
with prefix("source %s/bin/activate" % env.venv_path):
yield
@contextmanager
def project():
"""
Runs commands within the project's directory.
"""
with virtualenv():
with cd(env.proj_dirname):
yield
@contextmanager
def update_changed_requirements():
"""
Checks for changes in the requirements file across an update,
and gets new requirements if changes have occurred.
"""
reqs_path = join(env.proj_path, env.reqs_path)
get_reqs = lambda: run("cat %s" % reqs_path, show=False)
old_reqs = get_reqs() if env.reqs_path else ""
yield
if old_reqs:
new_reqs = get_reqs()
if old_reqs == new_reqs:
# Unpinned requirements should always be checked.
for req in new_reqs.split("\n"):
if req.startswith("-e"):
if "@" not in req:
# Editable requirement without pinned commit.
break
elif req.strip() and not req.startswith("#"):
if not set(">=<") & set(req):
# PyPI requirement without version.
break
else:
# All requirements are pinned.
return
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
###########################################
# Utils and wrappers for various commands #
###########################################
def _print(output):
print()
print(output)
print()
def print_command(command):
_print(blue("$ ", bold=True) +
yellow(command, bold=True) +
red(" ->", bold=True))
@task
def run(command, show=True):
"""
    Runs a shell command on the remote server.
"""
if show:
print_command(command)
with hide("running"):
return _run(command)
@task
def sudo(command, show=True):
"""
Runs a command as sudo.
"""
if show:
print_command(command)
with hide("running"):
return _sudo(command)
def log_call(func):
@wraps(func)
    def logged(*args, **kwargs):
        header = "-" * len(func.__name__)
        _print(green("\n".join([header, func.__name__, header]), bold=True))
        return func(*args, **kwargs)
return logged
def get_templates():
"""
Returns each of the templates with env vars injected.
"""
injected = {}
for name, data in templates.items():
injected[name] = dict([(k, v % env) for k, v in data.items()])
return injected
def upload_template_and_reload(name):
"""
    Uploads a template only if it has changed, and if so, reloads the
    related service.
"""
template = get_templates()[name]
local_path = template["local_path"]
if not os.path.exists(local_path):
project_root = os.path.dirname(os.path.abspath(__file__))
local_path = os.path.join(project_root, local_path)
remote_path = template["remote_path"]
reload_command = template.get("reload_command")
owner = template.get("owner")
mode = template.get("mode")
remote_data = ""
if exists(remote_path):
with hide("stdout"):
remote_data = sudo("cat %s" % remote_path, show=False)
with open(local_path, "r") as f:
local_data = f.read()
# Escape all non-string-formatting-placeholder occurrences of '%':
local_data = re.sub(r"%(?!\(\w+\)s)", "%%", local_data)
if "%(db_pass)s" in local_data:
env.db_pass = db_pass()
local_data %= env
clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
if clean(remote_data) == clean(local_data):
return
upload_template(local_path, remote_path, env, use_sudo=True, backup=False)
if owner:
sudo("chown %s %s" % (owner, remote_path))
if mode:
sudo("chmod %s %s" % (mode, remote_path))
if reload_command:
sudo(reload_command)
def db_pass():
"""
Prompts for the database password if unknown.
"""
if not env.db_pass:
env.db_pass = getpass("Enter the database password: ")
return env.db_pass
@task
def apt(packages):
"""
Installs one or more system packages via apt.
"""
return sudo("apt-get install -y -q " + packages)
@task
def pip(packages):
"""
Installs one or more Python packages within the virtual environment.
"""
with virtualenv():
return sudo("pip install %s" % packages)
def postgres(command):
"""
Runs the given command as the postgres user.
"""
show = not command.startswith("psql")
return run("sudo -u root sudo -u postgres %s" % command, show=show)
@task
def psql(sql, show=True):
"""
Runs SQL against the project's database.
"""
out = postgres('psql -c "%s"' % sql)
if show:
print_command(sql)
return out
@task
def backup(filename):
"""
Backs up the database.
"""
return postgres("pg_dump -Fc %s > %s" % (env.proj_name, filename))
@task
def restore(filename):
"""
Restores the database.
"""
return postgres("pg_restore -c -d %s %s" % (env.proj_name, filename))
@task
def python(code, show=True):
"""
Runs Python code in the project's virtual environment, with Django loaded.
"""
#pdb.set_trace()
setup = "import os; os.environ[\'DJANGO_SETTINGS_MODULE\']=\'settings\';"
full_code = 'python -c "%s%s"' % (setup, code.replace("`", "\\\`"))
with project():
result = run(full_code, show=False)
if show:
print_command(code)
return result
def static():
"""
Returns the live STATIC_ROOT directory.
"""
return python("from django.conf import settings;"
"print settings.STATIC_ROOT", show=False).split("\n")[-1]
@task
def manage(command):
"""
Runs a Django management command.
"""
return run("%s %s" % (env.manage, command))
#########################
# Install and configure #
#########################
@task
@log_call
def all():
"""
Installs everything required on a new system and deploy.
From the base software, up to the deployed project.
"""
install()
create_virtualenv()
create_SSH()
create_git()
#create_DB()
set_SSL()
create_django_user()
set_password_django_user()
upload_rungunicorn_script()
upload_supervisord_conf()
create_nginx()
set_project()
@task
@log_call
def install():
"""
Installs the base system and Python requirements for the entire server.
"""
#locale = "LC_ALL=%s" % env.locale
#with hide("stdout"):
# if locale not in sudo("cat /etc/default/locale"):
# sudo("update-locale %s" % locale)
# run("exit")
sudo("apt-get update -y -q")
apt("nginx libjpeg-dev python-dev python-setuptools git-core "
"libpq-dev memcached supervisor")
#apt("mysql-server mysql-client")
apt("openssh-server libev-dev python-all-dev build-essential")
apt("debconf-utils")
sudo("easy_install pip")
#sudo("pip install virtualenv mercurial")
apt("python-virtualenv virtualenvwrapper")
#sudo("apt-get install -y python-virtualenv virtualenvwrapper")
@task
@log_call
def create_virtualenv():
"""
    Create a new virtual environment.
"""
#pdb.set_trace()
if not exists(env.venv_home):
run("mkdir %s" % env.venv_home)
with cd(env.venv_home):
if exists(env.proj_name):
prompt = input("\nVirtualenv exists: %s"
"\nWould you like to replace it? (yes/no) "
% env.proj_name)
if prompt.lower() != "yes":
print("\nAborting!")
return False
remove()
run("export WORKON_HOME=$HOME/.virtualenvs")
run("export PIP_VIRTUALENV_BASE=$WORKON_HOME")
run("source /usr/share/virtualenvwrapper/virtualenvwrapper.sh && mkvirtualenv %s"% env.proj_name)
@task
@log_call
def create_SSH():
"""
Create a new ssh key.
"""
#pdb.set_trace()
ssh_path = "/home/%s/.ssh" % env.user
if not exists(ssh_path):
run("mkdir %s" % env.ssh_path)
pub_path = ssh_path+"/id_rsa.pub"
with cd(ssh_path):
if not exists(pub_path):
run('ssh-keygen -t rsa')
run("cat %s"% pub_path)
input("\nSet SSH & Press Enter!")
@task
@log_call
def create_git():
"""
    Clone the project repository with git.
"""
if not exists(env.venv_path):
print("\nVirtual env path isn't exists!")
return False
run("git clone %s %s" % (env.repo_url, env.proj_path))
def mysql_execute(sql, user, password):
""" Executes passed sql command using mysql shell. """
#user = user or env.conf.DB_USER
from fabric.api import prompt
sql = sql.replace('"', r'\"')
#if password == None:
# password = prompt('Please enter MySQL root password:')
return run('echo "%s" | mysql --user="%s" --password="%s"' % (sql, user , password))
@task
@log_call
def create_DB():
"""
Create DB and DB user.
"""
from fabric.api import settings, prompt
with settings(hide('warnings', 'stderr'), warn_only=True):
result = sudo('dpkg-query --show mysql-server')
if result.failed is False:
warn('MySQL is already installed')
else:
#sudo('echo "mysql-server-5.0 mysql-server/root_password password %s" | debconf-set-selections' % env.db_root_pass)
#sudo('echo "mysql-server-5.0 mysql-server/root_password_again password %s" | debconf-set-selections' % env.db_root_pass)
run('echo "mysql-server-5.0 mysql-server/root_password password %s" | sudo debconf-set-selections' % env.db_root_pass)
run('echo "mysql-server-5.0 mysql-server/root_password_again password %s" | sudo debconf-set-selections' % env.db_root_pass)
apt('mysql-server mysql-client')
upload_template_and_reload("mysql")
sql = 'CREATE DATABASE %(proj_name)s DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci' % env
mysql_execute(sql, 'root', env.db_root_pass)
sql = """CREATE USER '%(proj_name)s'@'%%' IDENTIFIED BY '%(db_pass)s';""" % env
#sql = """CREATE USER '%(proj_name)s'@'localhost' IDENTIFIED BY '%(db_pass)s';""" % env
mysql_execute(sql, 'root', env.db_root_pass)
sql = """GRANT ALL ON %(proj_name)s.* TO '%(proj_name)s'@'%%'; FLUSH PRIVILEGES;""" % env
#sql = """GRANT ALL ON %(proj_name)s.* TO '%(proj_name)s'@'localhost'; FLUSH PRIVILEGES;""" % env
mysql_execute(sql, 'root', env.db_root_pass)
sudo('service mysql restart')
@task
@log_call
def remove_DB():
"""
Remove DB and DB user.
"""
sql = 'DROP DATABASE %(proj_name)s' % env
mysql_execute(sql, 'root', env.db_root_pass)
sql = """DROP USER '%(proj_name)s';""" % env
mysql_execute(sql, 'root', env.db_root_pass)
sudo("service mysql stop")
sudo("apt-get remove -y --purge mysql-server mysql-client")
#sudo("netstat -tap | grep mysql")
sudo("apt-get remove -y --purge mysql-server*")
sudo("apt-get remove -y --purge mysql-client*")
@task
@log_call
def set_SSL():
"""
# Set up SSL certificate.
"""
if not env.ssl_disabled:
conf_path = "/etc/nginx/conf"
if not exists(conf_path):
sudo("mkdir %s" % conf_path)
with cd(conf_path):
crt_file = env.proj_name + ".crt"
key_file = env.proj_name + ".key"
if not exists(crt_file) and not exists(key_file):
try:
crt_local, = glob(join("deploy", "*.crt"))
key_local, = glob(join("deploy", "*.key"))
except ValueError:
parts = (crt_file, key_file, env.domains[0])
sudo("openssl req -new -x509 -nodes -out %s -keyout %s "
"-subj '/CN=%s' -days 3650" % parts)
else:
upload_template(crt_local, crt_file, use_sudo=True)
upload_template(key_local, key_file, use_sudo=True)
@task
@log_call
def migrate():
"""
migrate.
"""
manage('migrate')
@task
@log_call
def set_project():
"""
Set up project.
"""
upload_template_and_reload("settings")
with project():
if env.reqs_path:
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
apt('libmysqlclient-dev')
pip("fabric django python-social-auth "
"gunicorn django-hosts mysql-python django-crontab pytz django-dbbackup")
manage('migrate')
manage('createsuperuser')
@task
@log_call
def create_django_user():
"""
create django user
"""
sudo('groupadd --system %(django_user)s' % env)
sudo('useradd --system --gid %(django_user)s --home %(venv_path)s %(django_user)s' % env)
sudo('chown -R %(django_user)s:%(django_user)s %(venv_path)s' % env)
sudo('chmod -R g+w %(venv_path)s' % env)
sudo('usermod -a -G %(django_user)s %(user)s' % env)
@task
@log_call
def set_password_django_user():
"""
set password django user
"""
sudo('passwd %(django_user)s' % env)
@task
@log_call
def upload_rungunicorn_script():
"""
upload rungunicorn conf
"""
sudo('mkdir -p %s' % dirname(env.gunicorn_logfile))
sudo('chown %s %s' % (env.django_user, dirname(env.gunicorn_logfile)))
sudo('chmod -R 775 %s' % dirname(env.gunicorn_logfile))
sudo('touch %s' % env.gunicorn_logfile)
sudo('chown %s %s' % (env.django_user, env.gunicorn_logfile))
sudo('mkdir -p %s' % dirname(env.rungunicorn_script))
upload_template_and_reload("gunicorn")
sudo('chmod u+x %s' % env.rungunicorn_script)
sudo('chown -R %(django_user)s:%(django_user)s %(rungunicorn_script)s' % env)
@task
@log_call
def upload_supervisord_conf():
''' upload supervisor conf '''
sudo('mkdir -p %s' % dirname(env.supervisor_stdout_logfile))
sudo('chown %s %s' % (env.django_user, dirname(env.supervisor_stdout_logfile)))
sudo('chmod -R 775 %s' % dirname(env.supervisor_stdout_logfile))
sudo('touch %s' % env.supervisor_stdout_logfile)
sudo('chown %s %s' % (env.django_user, env.supervisor_stdout_logfile))
sudo('mkdir -p %s' % dirname(env.supervisord_conf_file))
upload_template_and_reload("supervisor")
sudo('%(supervisorctl)s reread' % env)
sudo('%(supervisorctl)s update' % env)
@task
@log_call
def create_nginx():
'''
create nginx
'''
upload_template_and_reload("nginx")
sudo('unlink /etc/nginx/sites-enabled/default')
sudo("service nginx restart")
@task
@log_call
def restart():
"""
Restart gunicorn worker processes for the project.
"""
pid_path = "%s/gunicorn.pid" % env.proj_path
if exists(pid_path):
#sudo("kill -HUP `cat %s`" % pid_path)
#$sudo("kill -HUP $(cat %s)" % pid_path)
run("cat %s" % pid_path)
prompt = input("\npid number(upper number) : ")
sudo("kill -HUP %s" % prompt)
else:
start_args = (env.proj_name, env.proj_name)
sudo("supervisorctl start %s:gunicorn_%s" % start_args)
##########
# Deploy #
##########
@task
@log_call
def pull_git():
"""
run git pull
"""
with cd(env.proj_path):
run("git pull")
@task
@log_call
def collectstatic():
"""
collect static for mangae django
"""
manage('collectstatic')
@task
@log_call
def restart_supervisor():
"""
restart supervisor
"""
sudo("supervisorctl restart %(proj_name)s" % env)
@task
@log_call
def upload_local_settings():
"""
upload_local_settings
"""
upload_template_and_reload("settings")
@task
@log_call
def upload_nginx():
'''
create nginx
'''
upload_template_and_reload("nginx")
sudo("service nginx restart")
@task
@log_call
def deploy():
"""
Deploy latest version of the project.
Check out the latest version of the project from version
control, install new requirements, sync and migrate the database,
    collect any new static assets, and restart gunicorn's worker
    processes for the project.
"""
for name in get_templates():
upload_template_and_reload(name)
with project():
#backup("last.db")
#static_dir = static()
#if exists(static_dir):
# run("tar -cf last.tar %s" % static_dir)
git = env.git
last_commit = "git rev-parse HEAD"
run("%s > last.commit" % last_commit)
with update_changed_requirements():
run("git pull origin master -f")
#manage("collectstatic -v 0 --noinput")
#manage("syncdb --noinput")
#manage("migrate --noinput")
restart()
return True
@task
@log_call
def remove():
"""
Blow away the current project.
"""
if exists(env.venv_path):
sudo("rm -rf %s" % env.venv_path)
#for template in get_templates().values():
# remote_path = template["remote_path"]
# if exists(remote_path):
# sudo("rm %s" % remote_path)
#psql("DROP DATABASE IF EXISTS %s;" % env.proj_name)
#psql("DROP USER IF EXISTS %s;" % env.proj_name)
|
[
"wogud86@gmail.com"
] |
wogud86@gmail.com
|
136b1182e8e9b3bb6006d82097af6a64457a1413
|
817965ef6ee70672eabedbbafe336ca07d6443ff
|
/0x0B-python-input_output/8-load_from_json_file.py
|
34f8ae593948ca8fc24e3410cf357a351c626b5f
|
[] |
no_license
|
julianfrancor/holbertonschool-higher_level_programming
|
f021086eb2a86b366c391452b13581c87587a3a8
|
bd2a291c725ba09d88e9a629d0b22cf4ed7122e7
|
refs/heads/master
| 2022-12-23T05:27:27.942300
| 2020-09-24T21:22:56
| 2020-09-24T21:22:56
| 257,935,813
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
#!/usr/bin/python3
"""
function that creates an Object from a “JSON file”
"""
import json
def load_from_json_file(filename):
"""
Args
filename: JSON file form where the string
is going to be read
json.dumps() method can convert a Python object into a JSON string.
json.dump() method can be used to write to file a JSON file directly.
can Write in an open file
json.loads() expects to get its text from a string object
json.load() expects to get the text from a file
can Read from an open file an convert
"""
with open(filename, mode="r", encoding="UTF8") as file:
return json.load(file)
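# Editor's addition, a minimal usage sketch (the file name below is a placeholder):
#     my_list = load_from_json_file("my_list.json")
# json.load() reads straight from the open file object, whereas json.loads('[1, 2]')
# parses a JSON document that is already held in a string.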
|
[
"julianfrancor@gmail.com"
] |
julianfrancor@gmail.com
|
0fd5b6297580acd6887f5d68daa551292c7e1c7a
|
ab3e6cee73c76e1bda1ac8d4e9cb82286de757fe
|
/190605_Day7_Class_Model_CRUD/django/crud_review/boards/migrations/0001_initial.py
|
998b0a6ce4811fcda7f8db94df926fc4d27a1226
|
[] |
no_license
|
arara90/TIL_django
|
cc961535feba95e55d531c90a5d274cb5ec5f02e
|
5aa5fcb839dceb0abc9c5b09fdcb5a478dca34f4
|
refs/heads/master
| 2020-05-27T20:15:46.663200
| 2020-03-21T06:13:40
| 2020-03-21T06:13:40
| 188,775,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 676
|
py
|
# Generated by Django 2.2.1 on 2019-06-10 05:49
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Board',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=15)),
('content', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
|
[
"arara90@hotmail.com"
] |
arara90@hotmail.com
|
73d6c871a33247c5a769ff502a2741d904f94c16
|
8813753442439c5408db80ed07a97f54ee90a115
|
/check_memavail.py
|
3a7da71db15642d156598096f164d05ee7b87032
|
[
"Unlicense"
] |
permissive
|
Rattkener/Check_MemAvail
|
7a0e801e01ca9aa4677a9e9646b36c30881902da
|
e963636d7421533d0d0019c98805bfd810262af3
|
refs/heads/master
| 2021-06-20T14:16:11.478011
| 2021-05-06T20:37:21
| 2021-05-06T20:37:21
| 206,613,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,867
|
py
|
#!/usr/bin/env python
#import#
import paramiko
import argparse
#end import#
parser = argparse.ArgumentParser(
description='Remote Memory check for Linux servers. Intended for use on OpsView/Nagios monitoring systems.',
usage = '%(prog)s -n [--hostname] HOSTNAME -w [--warning] warning%% -c [--critical] critical%% -m [--metric] {commit,consumed,swap,hybrid} -v [--verbose] -s [--swap] swap_limit%%',
)
### define arguments to be used. secondary metric will be the only non-required metric for now given the progression of the script.
parser.add_argument("-n","--hostname", type=str, required=True, help='hostname which check should run against. Assumes passwordless access')
parser.add_argument("-w","--warning", type=int, required=False, default=85, help='Warning alert threshold in percent, defaults to 85')
parser.add_argument("-c","--critical", type=int, required=False, default=95, help='Critical alert thresehold in percent, defaults to 95')
parser.add_argument("-m","--metric", type=str, required=True, choices=('commit','consumed','swap','hybrid'), help='Select alert metric. If Hybrid you should supply \'-s\' otherwise default is 85%%')
parser.add_argument("-v","--verbose", action='store_true', help='Display more memory stats used in determining alert status.')
parser.add_argument("-s","--swap", type=int, required=False, default=85, help='Value that is only used in Hybrid mode. Percentage of swap used to trigger hybrid alert defaults to 85')
### define argument catchall for future use
args = parser.parse_args()
### Ensure that Critical is greater than Warning
if args.warning > args.critical:
parser.error("Warning threshold is higher than Critical threshold!")
### predefine metrics array
a = {}
####Paramiko SSH & SFTP link to target host####
tgt_client = paramiko.SSHClient() # create paramiko client
#tgt_client.load_system_host_keys() # load system host keys to allow recognition of known hosts
tgt_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # allow paramiko to add hosts. This opens the script up to man-in-the-middle attacks but may be necessary for our environment.
tgt_client.connect(args.hostname, username='root') # open connection to host to allow SFTP link
tgt_sftp = tgt_client.open_sftp() # define SFTP method
meminfo = tgt_sftp.open('/proc/meminfo') # method for grabbing mem info
low_Watermark = int(tgt_sftp.open('/proc/sys/vm/min_free_kbytes').readline().strip()) # grab absolute minimum amount of memory system can run on
try:
for entry in map( lambda x: x.strip().split( 'kB' )[0].strip(), meminfo.readlines()):
a[ entry.split( ':' )[0].strip() ] = int( entry.split( ':' )[1].split( 'kB' )[0].strip() )
finally:
#close files we're working with. Don't trust garbage collectors
meminfo.close()
tgt_client.close()
### define metrics that aren't available on all systems ###
if 'MemAvailable' in a: #define what "memory available" looks like. Older OS's do not calculate this in /proc/meminfo
memAvail = a['MemAvailable'] # But if they do why not use it?
else:
memAvail = a['MemFree'] - low_Watermark + (a['Cached'] - min(a['Cached'] / 2, low_Watermark)) #and if they don't then we'll make our own. https://github.com/torvalds/linux/blob/master/mm/page_alloc.c#L5089
### set testing metrics ###
total = a['MemTotal'] # Set memory total
commit = a['Committed_AS'] # Define the current system committed memory. This is NOT memory in use, just committed
pressure = ((commit * 100.0) / total)
ptotal_used = (100.0 - (memAvail * 100.0 / total) )
pswap = (100.0 - (a['SwapFree'] * 100.0 / a['SwapTotal']))
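### Worked example (editor's addition, illustrative numbers only): with MemTotal=8000000 kB,
### Committed_AS=6000000 kB, MemAvailable=2000000 kB, SwapTotal=2000000 kB and
### SwapFree=1500000 kB, the metrics come out as pressure = 75.0%, ptotal_used = 75.0%
### and pswap = 25.0%.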
### High verbosity output ###
if args.verbose:
print("Memory Available: " + str(memAvail) + " kb")
print("Lower Watermark: " + str(low_Watermark) + " kb")
print("Total Memory: " + str(total) + " kb")
print("Total Commit: " + str(commit) + " kb")
print("Total Memory Used: %.2f%%" % ptotal_used)
print("Swap Used: %.2f%%" % pswap)
### Alert logic based on primary metric. Start with highest check first
if args.metric == "commit":
if pressure >= args.critical:
print('CRITICAL - Commit: {0:.2f}'.format(pressure,))
exit(2)
elif pressure >= args.warning:
print('WARNING - Commit: {0:.2f}'.format(pressure,))
exit(1)
else:
print('OK - Commit: {0:.2f}'.format(pressure,))
exit(0)
elif args.metric == "consumed":
if ptotal_used >= args.critical:
print("CRITICAL - UsedMemory: {0:.2f}".format( ptotal_used, ) )
exit(2)
elif ptotal_used >= args.warning:
print("WARNING - UsedMemory: {0:.2f}".format( ptotal_used, ) )
exit(1)
else:
print("OK - UsedMemory: {0:.2f}".format( ptotal_used, ) )
exit(0)
elif args.metric == "swap":
if pswap >= args.critical:
print("CRITICAL - SwapUsed: {0:.2f}".format( pswap, ) )
exit(2)
elif pswap >= args.warning:
print("WARNING - SwapUsed: {0:.2f}".format( pswap, ) )
exit(1)
else:
print("OK - SwapUsed: {0:.2f}".format( pswap, ) )
exit(0)
elif args.metric == "hybrid":
if ptotal_used >= args.critical:
if pswap >= args.swap:
print("CRITICAL - UsedMemory: {0:.2f} -- UsedSwap: {1:.2f}".format( ptotal_used, pswap ) )
exit(2)
elif ptotal_used >= args.warning:
if pswap >= args.swap:
print("WARNING - UsedMemory: {0:.2f} -- UsedSwap: {1:.2f}".format( ptotal_used, pswap ) )
exit(1)
else:
print("OK - UsedMemory: {0:.2f} -- UsedSwap: {1:.2f}".format( ptotal_used, pswap ) )
exit(0)
|
[
"29561978+ProficientPanda@users.noreply.github.com"
] |
29561978+ProficientPanda@users.noreply.github.com
|
18bd370f71f589cf2bcef712de9b7795ea1f4538
|
d6a182d1ab766f47ccdfbb7862bf4cd4c1d5cf48
|
/delete.py
|
025abad5ef6777be447639a89ed1c3ee6a504fbe
|
[] |
no_license
|
mlnsvbd/CRUD_SqLite_Python
|
e7db43bf154776b92b27f5489e563f3caf968b25
|
18f88ecb036017a92ac308f6aac3df3294e5192f
|
refs/heads/master
| 2021-05-28T14:16:35.306800
| 2015-01-25T22:21:12
| 2015-01-25T22:21:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
import sqlite3 as lite
con = lite.connect('text.db')
cur = con.cursor()
sql = "DELETE FROM users WHERE id = '1'"
try:
cur.execute(sql)
con.commit()
print("Delete ok!!!")
except Exception as e:
print(e.args)
finally:
con.close()
|
[
"welser.m.r@gmail.com"
] |
welser.m.r@gmail.com
|
b9691e61dfe1e73f0cfed348461860d2ce4d6495
|
16ecabb5d9010c7fa4aebb8ab852f7c6a19193db
|
/src/0809.py
|
0ba2428a1bbf7638358e2412cd9b40399abf0b68
|
[] |
no_license
|
LeeSM0518/OpenCV-python
|
74ff0d899d291a35f9cd82d2ef37835a0c5ccdf2
|
46c234879f5d48876ca0888bdede8bfb347b7c30
|
refs/heads/master
| 2020-04-30T19:35:33.201278
| 2020-02-25T14:35:20
| 2020-02-25T14:35:20
| 177,043,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
# 0809.py
import cv2
import numpy as np
#1
src = cv2.imread('./data/momentTest.jpg')
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
ret, bImage = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)
#2
##M = cv2.moments(bImage)
M = cv2.moments(bImage, True)
for key, value in M.items():
print('{}={}'.format(key, value))
#3
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
dst = src.copy()
cv2.circle(dst, (cx, cy), 5, (0,0,255), 2)
cv2.imshow('dst', dst)
cv2.waitKey()
cv2.destroyAllWindows()
|
[
"nalsm98@naver.com"
] |
nalsm98@naver.com
|
9af53ed594299e5bca7f79a0631bb772ce8737c6
|
f2f96ef63c721dbc985dae99f294aa49e7c5fe48
|
/Server/database/__init__.py
|
5825c14821c76e90269bc986588ee40a40b21363
|
[
"MIT"
] |
permissive
|
Ricky-Hao/IMPK-Server
|
6e44e7ea81563908dfad3ea6347b2ca0da6cbb0c
|
786e24269e7cc506a82ae8aa0fa0d1df8c478f51
|
refs/heads/master
| 2018-09-25T05:17:24.551553
| 2018-06-07T05:42:20
| 2018-06-07T05:42:20
| 124,077,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 62
|
py
|
from Server.database.database import Database
db = Database()
|
[
"a471558277@gmail.com"
] |
a471558277@gmail.com
|
b08a51aeb6644672aa2d6a3f7fcc2d9b19c3f3a1
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/insights/v20210401/data_collection_rule_association.py
|
e5cc8f03d180c23ad08149bb40a76e212462e4f5
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 12,201
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = ['DataCollectionRuleAssociationArgs', 'DataCollectionRuleAssociation']
@pulumi.input_type
class DataCollectionRuleAssociationArgs:
def __init__(__self__, *,
resource_uri: pulumi.Input[str],
association_name: Optional[pulumi.Input[str]] = None,
data_collection_endpoint_id: Optional[pulumi.Input[str]] = None,
data_collection_rule_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a DataCollectionRuleAssociation resource.
:param pulumi.Input[str] resource_uri: The identifier of the resource.
:param pulumi.Input[str] association_name: The name of the association. The name is case insensitive.
:param pulumi.Input[str] data_collection_endpoint_id: The resource ID of the data collection endpoint that is to be associated.
:param pulumi.Input[str] data_collection_rule_id: The resource ID of the data collection rule that is to be associated.
:param pulumi.Input[str] description: Description of the association.
"""
pulumi.set(__self__, "resource_uri", resource_uri)
if association_name is not None:
pulumi.set(__self__, "association_name", association_name)
if data_collection_endpoint_id is not None:
pulumi.set(__self__, "data_collection_endpoint_id", data_collection_endpoint_id)
if data_collection_rule_id is not None:
pulumi.set(__self__, "data_collection_rule_id", data_collection_rule_id)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter(name="resourceUri")
def resource_uri(self) -> pulumi.Input[str]:
"""
The identifier of the resource.
"""
return pulumi.get(self, "resource_uri")
@resource_uri.setter
def resource_uri(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_uri", value)
@property
@pulumi.getter(name="associationName")
def association_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the association. The name is case insensitive.
"""
return pulumi.get(self, "association_name")
@association_name.setter
def association_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "association_name", value)
@property
@pulumi.getter(name="dataCollectionEndpointId")
def data_collection_endpoint_id(self) -> Optional[pulumi.Input[str]]:
"""
The resource ID of the data collection endpoint that is to be associated.
"""
return pulumi.get(self, "data_collection_endpoint_id")
@data_collection_endpoint_id.setter
def data_collection_endpoint_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_collection_endpoint_id", value)
@property
@pulumi.getter(name="dataCollectionRuleId")
def data_collection_rule_id(self) -> Optional[pulumi.Input[str]]:
"""
The resource ID of the data collection rule that is to be associated.
"""
return pulumi.get(self, "data_collection_rule_id")
@data_collection_rule_id.setter
def data_collection_rule_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_collection_rule_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the association.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
class DataCollectionRuleAssociation(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
association_name: Optional[pulumi.Input[str]] = None,
data_collection_endpoint_id: Optional[pulumi.Input[str]] = None,
data_collection_rule_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Definition of generic ARM proxy resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] association_name: The name of the association. The name is case insensitive.
:param pulumi.Input[str] data_collection_endpoint_id: The resource ID of the data collection endpoint that is to be associated.
:param pulumi.Input[str] data_collection_rule_id: The resource ID of the data collection rule that is to be associated.
:param pulumi.Input[str] description: Description of the association.
:param pulumi.Input[str] resource_uri: The identifier of the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DataCollectionRuleAssociationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Definition of generic ARM proxy resource.
:param str resource_name: The name of the resource.
:param DataCollectionRuleAssociationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DataCollectionRuleAssociationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
association_name: Optional[pulumi.Input[str]] = None,
data_collection_endpoint_id: Optional[pulumi.Input[str]] = None,
data_collection_rule_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DataCollectionRuleAssociationArgs.__new__(DataCollectionRuleAssociationArgs)
__props__.__dict__["association_name"] = association_name
__props__.__dict__["data_collection_endpoint_id"] = data_collection_endpoint_id
__props__.__dict__["data_collection_rule_id"] = data_collection_rule_id
__props__.__dict__["description"] = description
if resource_uri is None and not opts.urn:
raise TypeError("Missing required property 'resource_uri'")
__props__.__dict__["resource_uri"] = resource_uri
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:insights/v20210401:DataCollectionRuleAssociation"), pulumi.Alias(type_="azure-native:insights:DataCollectionRuleAssociation"), pulumi.Alias(type_="azure-nextgen:insights:DataCollectionRuleAssociation"), pulumi.Alias(type_="azure-native:insights/v20191101preview:DataCollectionRuleAssociation"), pulumi.Alias(type_="azure-nextgen:insights/v20191101preview:DataCollectionRuleAssociation")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DataCollectionRuleAssociation, __self__).__init__(
'azure-native:insights/v20210401:DataCollectionRuleAssociation',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DataCollectionRuleAssociation':
"""
Get an existing DataCollectionRuleAssociation resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = DataCollectionRuleAssociationArgs.__new__(DataCollectionRuleAssociationArgs)
__props__.__dict__["data_collection_endpoint_id"] = None
__props__.__dict__["data_collection_rule_id"] = None
__props__.__dict__["description"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
return DataCollectionRuleAssociation(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="dataCollectionEndpointId")
def data_collection_endpoint_id(self) -> pulumi.Output[Optional[str]]:
"""
The resource ID of the data collection endpoint that is to be associated.
"""
return pulumi.get(self, "data_collection_endpoint_id")
@property
@pulumi.getter(name="dataCollectionRuleId")
def data_collection_rule_id(self) -> pulumi.Output[Optional[str]]:
"""
The resource ID of the data collection rule that is to be associated.
"""
return pulumi.get(self, "data_collection_rule_id")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the association.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
Resource entity tag (ETag).
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The resource provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.DataCollectionRuleAssociationProxyOnlyResourceResponseSystemData']:
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
|
[
"noreply@github.com"
] |
noreply@github.com
|
d1a7559941c43363cdb920c3cdf95dfd113e5caa
|
4ba0b403637e7aa3e18c9bafae32034e3c394fe4
|
/python/PyProfiler/profiler6/test.py
|
bfa05d76fd1e3862c546d51554804efb9d66d939
|
[] |
no_license
|
ASMlover/study
|
3767868ddae63ac996e91b73700d40595dd1450f
|
1331c8861fcefbef2813a2bdd1ee09c1f1ee46d6
|
refs/heads/master
| 2023-09-06T06:45:45.596981
| 2023-09-01T08:19:49
| 2023-09-01T08:19:49
| 7,519,677
| 23
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,620
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2023 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../")
import py_profiler as pprof
from common import test_common as _tc
def test() -> None:
pprof.start_stats()
_tc.TestEntry().run()
pprof.print_stats()
if __name__ == "__main__":
test()
|
[
"ASMlover@126.com"
] |
ASMlover@126.com
|
59c2ab248f18fdadf951a4ecbbc12b55c6db470a
|
8e050e70e597102ccfebe9dce91cf804ae73260b
|
/cdd.py
|
4b16530762ea2f40ef1414f12a4fa8da9fbb5d2a
|
[] |
no_license
|
AngeloMendes/LogDel12
|
aac23176f9bb3357f38443692285d735009b8f20
|
266c99f3185242ac8e4b6e04d1ba9d4f50ed0634
|
refs/heads/master
| 2021-03-24T23:14:16.468273
| 2020-03-15T23:49:38
| 2020-03-15T23:49:38
| 247,571,030
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,951
|
py
|
# This code evaluates how many groups there are and clusters the distributors
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
def get_name(names, lat):
for key in names['latitude']:
if names['latitude'][key] == lat:
return names['client_name'][key]
def elbow_curve():
K_clusters = range(1, 10)
kmeans = [KMeans(n_clusters=i) for i in K_clusters]
Y_axis = df[['latitude']]
X_axis = df[['longitude']]
score = [kmeans[i].fit(Y_axis).score(Y_axis) for i in range(len(kmeans))]
# Visualize
plt.plot(K_clusters, score)
plt.xlabel('Numero de Grupos')
plt.ylabel('Score')
plt.title('Elbow Curve')
plt.show()
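# Editor's note (assumption about intended use): the elbow plot above is read
# visually; the k at which the score curve flattens out (the "elbow") is the value
# to pass as n_clusters to KMeans in cluster() below, which is currently hard-coded to 5.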
def cluster(df):
names = df[['client_name', 'latitude']].to_dict()
df = df.drop(['client_name', 'date'], axis=1)
kmeans = KMeans(n_clusters=5, init='k-means++')
kmeans.fit(df[df.columns[0:6]])
df['cluster_label'] = kmeans.fit_predict(df[df.columns[0:6]])
centers = kmeans.cluster_centers_
labels = kmeans.predict(df[df.columns[0:6]])
# print centers
# print labels
length = len(df)
df.plot.scatter(x='latitude', y='longitude', c=labels, s=100, cmap='viridis')
center_x = []
center_y = []
for i in centers:
center_x.append(i[4])
for i in centers:
center_y.append(i[5])
# print(center_x)
# print(center_y)
plt.scatter(center_x, center_y, c='black', s=200, alpha=0.5)
# plt.scatter(centers[5:6, 0], centers[5:6, 1], c='black', s=200, alpha=0.5)
for i in range(0, length):
plt.annotate(get_name(names, df['latitude'][i]), (df['latitude'][i], df['longitude'][i]),
horizontalalignment='right', fontsize=13, verticalalignment='bottom')
plt.title("Grupos de Bares Moema -SP")
plt.show()
if __name__ == '__main__':
df = pd.read_csv('dist.csv')
elbow_curve()
cluster(df)
|
[
"contato.angelomendes@gmail.com"
] |
contato.angelomendes@gmail.com
|
722bf8448ff08e49ce1034f948b5d66e67fbe025
|
9eeddfe1707dfd5a899fab157432b77e4a4892b5
|
/code/get_embeddings.py
|
eeab87a1e8ff1b27af73502273d48d2a0e725ac9
|
[] |
no_license
|
ksenia007/humor_recognition
|
f523870945480c8ba4a83a7cabb49e40da4a3073
|
2f4077ace36f1e961a30f358eb73ed21ded1ff6f
|
refs/heads/master
| 2023-02-21T01:36:31.688257
| 2021-01-22T00:35:57
| 2021-01-22T00:35:57
| 261,538,901
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,111
|
py
|
from dataset import *
from train import *
from models import *
import torch.optim as optim
import pickle
import uuid
import warnings
from helper_functions import *
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=UserWarning)
dataset_function = BasicWeighted
folder_data = 'data/training_datasets/'
datafile_opt = ['humicroedit', 'puns','oneliners', 'short']
base_file = 'output/embeddings/'
bert_model = BertModel.from_pretrained('bert-base-uncased')
bert_model = bert_model.eval()
bert_model = bert_model.cuda()
for idata, dataf in enumerate(datafile_opt):
train_set = dataset_function(filename = folder_data+dataf+'_train.csv', maxlen = 30, weight=1)
print('Work with', dataf)
results = np.zeros((len(train_set), 768))
for i in range(len(train_set)):
tokens = train_set[i][0].unsqueeze(0).cuda()
attn_mask = train_set[i][1].unsqueeze(0).cuda()
_, cls_head = bert_model(tokens, attention_mask = attn_mask)
results[i, :] = cls_head.cpu().detach()
filename = base_file+dataf+'_embeddings.npy'
np.save(filename, results)
|
[
"26440954+ksenia007@users.noreply.github.com"
] |
26440954+ksenia007@users.noreply.github.com
|
d5a4535689e5faed501055cb510fae7e65574690
|
f4e7b66391205df44ea15e3bd9e93e4439393df0
|
/inputcheck.py
|
8037747f04d28cb4eb79fef72fd11160dbda0821
|
[] |
no_license
|
thakurakhil/chemical-NER
|
a2fcf93ad3bfaec95e3e6af42e75fe044354284d
|
9b47ab96f178e0e665688e4bcaf677f44db2919b
|
refs/heads/master
| 2021-08-08T20:36:15.448621
| 2017-11-11T04:01:12
| 2017-11-11T04:01:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
import csv
f = open('./inputtext/ninefilefeatures.txt', 'rb')
reader = csv.reader(f,delimiter='\t')
for row in reader:
if(len(row)!=8):
break
else:
for i in row:
if(i==''):
print row
break
|
[
"singhakhil33@gmail.com"
] |
singhakhil33@gmail.com
|
a2c60c899f14d1dd9b97de4c9161123df14940e5
|
753a569a2ce6466d236220d0ba8c61c39656cb87
|
/BP_gradient_descent/gradient_descent.py
|
6b569c8056faf552630e34d6a8c8f3d7eef9b218
|
[] |
no_license
|
RabbitTea/AI_DS-Learning
|
e26c5fa453bf5434ddbefbc323a94c74faaa282e
|
66db4e6079c1210447776b3324b30b6667af2172
|
refs/heads/master
| 2020-04-05T18:00:27.943196
| 2018-11-21T09:45:17
| 2018-11-21T09:45:17
| 157,084,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,524
|
py
|
# Implementation of the gradient descent algorithm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xlrd
#Some helper functions for plotting and drawing lines
def plot_points(X, y):  # plot the points of the two classes according to their labels
admitted = X[np.argwhere(y==1)]
rejected = X[np.argwhere(y==0)]
plt.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s = 25, color = 'blue', edgecolor = 'k')
plt.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s = 25, color = 'red', edgecolor = 'k')
def display(m, b, color='g--'):  # draw the current separating line
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
x = np.arange(-10, 10, 0.1)
plt.plot(x, m*x+b, color)
# Activation (sigmoid) function
def sigmoid(x):  # activation function (sigmoid)
    return 1 / (1 + np.exp(-x))
# Compute sigmoid(w1*x1 + w2*x2 + b), i.e. the predicted output value
def output_formula(features, weights, bias):
    return sigmoid(np.dot(features, weights) + bias)  # dot is matrix multiplication; features is a single 1*2 sample or the full n*2 matrix, weights is 2*1
def error_formula(y, output):  # error (cross-entropy) computed for each yi
    return - y*np.log(output) - (1 - y) * np.log(1-output)
def update_weights(x, y, weights, bias, learnrate):  # weight update rule, following gradient descent
output = output_formula(x, weights, bias)
d_error = -(y - output)
weights -= learnrate * d_error * x
bias -= learnrate * d_error
return weights, bias
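# Worked example (editor's addition, purely illustrative numbers): with x = [1.0, 2.0],
# y = 1, weights = [0.0, 0.0], bias = 0.0 and learnrate = 0.1, the prediction is
# sigmoid(0) = 0.5, d_error = -(1 - 0.5) = -0.5, so the update yields
# weights = [0.05, 0.10] and bias = 0.05.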
# Training function, used to learn the decision boundary
def train(features, targets, epochs, learnrate, graph_lines=False):
    errors = []
    n_records, n_features = features.shape  # n_records=100, n_features=2
    last_loss = None
    weights = np.random.normal(scale=1 / n_features ** .5, size=n_features)  # initialize the weights (2*1) with random values
    bias = 0
    display(-weights[0] / weights[1], -bias / weights[1])  # draw the current decision boundary
    for e in range(epochs):  # iterate for `epochs` passes over the data
        del_w = np.zeros(weights.shape)
        for x, y in zip(features, targets):  # zip pairs each point of X with its label y
            output = output_formula(x, weights, bias)  # predicted value yi, where x is 1*2 and weights is 2*1
            error = error_formula(y, output)  # error of each yi
            weights, bias = update_weights(x, y, weights, bias, learnrate)
            print(weights,bias)
        print(e)  # note: each epoch updates the weights once per sample (100 updates), so 100*epochs updates in total; every epoch starts from the previous epoch's weights
        # Printing out the log-loss error on the training set
        out = output_formula(features, weights, bias)  # predictions after this epoch: features is n*2, weights is 2*1, out is a column of n predicted values
        loss = np.mean(error_formula(targets, out))  # arithmetic mean of the per-sample errors
errors.append(loss)
if e % (epochs / 10) == 0:
print("\n========== Epoch", e, "==========")
if last_loss and last_loss < loss:
print("Train loss: ", loss, " WARNING - Loss Increasing")
else:
print("Train loss: ", loss)
last_loss = loss
predictions = out > 0.5
accuracy = np.mean(predictions == targets)
print("Accuracy: ", accuracy)
        if graph_lines:  # and e % (epochs / 100) == 0
            display(-weights[0] / weights[1], -bias / weights[1])  # draw the boundary found so far
    # Plotting the solution boundary
    plt.title("Solution boundary")
    display(-weights[0] / weights[1], -bias / weights[1], 'black')  # draw the final decision boundary
# Plotting the data
plot_points(features, targets)
plt.show()
# Plotting the error
plt.title("Error Plot")
plt.xlabel('Number of epochs')
plt.ylabel('Error')
plt.plot(errors)
plt.show()
if __name__ == '__main__':
np.random.seed(44)
epochs = 100
learnrate = 0.01
data = xlrd.open_workbook('F:\工程实践\工作安排\work3_BPGradientDescent\data.xls')
X = []
    table = data.sheets()[0]  # open the first sheet
    X1 = table.col_values(0)
    X2 = table.col_values(1)
    X.append(X1)
    X.append(X2)
    X = np.array(X).T  # turn X into a 100*2 matrix
    Y = np.array(table.col_values(2))  # third column: the labels of the data points
plot_points(X,Y)
plt.show()
train(X, Y, epochs, learnrate, True)
|
[
"354496262@qq.com"
] |
354496262@qq.com
|
829fb8cdd606f109189879a5e3ad261af91f8278
|
ca5bac9deca017e02b8af87ffaaa91d1eb6c6d07
|
/Si_Nd/example_code/plot_2D_Seasonal.py
|
a194819969a902c3d8ba3f4bf7a50b45dd6fcae3
|
[] |
no_license
|
ndoyesiny/metrics_workshop
|
36dcc0b444a8ab3b8a0f897c81ada142a5ba6ad1
|
b74f062c27243eb0705eab367167d1fb9eaf0cd8
|
refs/heads/master
| 2020-06-14T10:29:58.282850
| 2017-03-30T11:20:19
| 2017-03-30T11:20:19
| 75,197,976
| 0
| 0
| null | 2016-11-30T15:04:27
| 2016-11-30T15:04:26
| null |
UTF-8
|
Python
| false
| false
| 1,738
|
py
|
'''
plot_Func.py
This function makes some plots
Author: Siny NDOYE, December 2016
'''
import os
import iris
import iris.quickplot as qplt
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.basemap as bm
#import pdb
#def plot_Func(cube2plot,outpath,mnth,nlevc):
def plot_Func_SAT(cube2plot,figpath,mnth,nlevc,xstart,xend,ystart,yend,title_name):
# pdb.set_trace()
# print cube2plot.collapsed(['time', 'latitude','longitude'],iris.analysis.MIN), nlevc
#levels = np.linspace(iris.analysis.MIN(cube2plot),iris.analysis.MAX(cube2plot) , nlevc)
plt.clf()
levels=np.linspace(282,302,nlevc)
levels=np.linspace(8,32,nlevc)
qplt.contourf(cube2plot, levels = levels, extend = 'max')
m = bm.Basemap(projection='cyl', llcrnrlat=ystart, urcrnrlat=yend, llcrnrlon=xstart, urcrnrlon=xend, resolution='c') # coarse resolution for grid
#m = bm.Basemap(projection='cyl', llcrnrlat=8.0, urcrnrlat=16.0, llcrnrlon=-20.0, urcrnrlon=20.0, resolution='c') # coarse resolution for grid
m.drawcoastlines(linewidth=2)
m.drawcountries(linewidth=1)
plt.title(title_name)
if not os.path.exists(figpath):
os.makedirs(figpath)
if mnth == 0:
plt.savefig(figpath +'Seasonal_average_DJF.png' )
plt.show()
if mnth == 1:
plt.savefig(figpath +'Seasonal_average_MAM.png' )
plt.show()
if mnth == 2:
plt.savefig(figpath +'Seasonal_average_JJA.png' )
plt.show()
if mnth == 3:
plt.savefig(figpath +'Seasonal_average_SON.png' )
plt.show()
#if __name__== '__main__':
# plot_Func(cube2plot,outpath,mnth,nlevc)
#plot_Func(cube2plot,outpath,mnth,nlevc,xstart,xend,ystart,yend)
|
[
"siny@lodyn416.locean-ipsl.upmc.fr"
] |
siny@lodyn416.locean-ipsl.upmc.fr
|
94d547688e8c427036b8536f3210d9fa20b16541
|
792d26133b5504fef31ab56138db28a2c7f666db
|
/LINETCR/Api/Talk.py
|
be91375f5bef0da83ce54dbdd2d4ae6fbc4df023
|
[] |
no_license
|
GieVh4/aisya
|
333f18f7806ca99d242213ef41248335ac111c4c
|
6f14e06fa7c9df13d4830a435a11c1751b230038
|
refs/heads/master
| 2020-03-07T10:17:14.854975
| 2018-04-24T07:46:47
| 2018-04-24T07:46:47
| 127,427,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,662
|
py
|
# -*- coding: utf-8 -*-
import os, sys
path = os.path.join(os.path.dirname(__file__), '../lib/')
sys.path.insert(0, path)
import requests, rsa
from thrift.transport import THttpClient
from thrift.protocol import TCompactProtocol
from curve import LineService
from curve.ttypes import *
class Talk:
client = None
auth_query_path = "/api/v4/TalkService.do";
http_query_path = "/S4";
wait_for_mobile_path = "/Q";
host = "gd2.line.naver.jp";
port = 443;
UA = "Line/2018.07421.2455.Tanduri/760.1.6 WIN10/18.2.1"
LA = "CHROMEOS 8.3.2 HELLO-WORLD 12.1.1"
authToken = None
cert = None
def __init__(self):
self.transport = THttpClient.THttpClient('https://gd2.line.naver.jp:443'+self.auth_query_path)
self.transport.setCustomHeaders({
"User-Agent" : self.UA,
"X-Line-Application" : self.LA,
})
self.transport.open()
self.protocol = TCompactProtocol.TCompactProtocol(self.transport);
self.client = LineService.Client(self.protocol)
def login(self, mail, passwd, cert=None, callback=None):
self.transport.path = self.auth_query_path
rsakey = self.client.getRSAKeyInfo(IdentityProvider.LINE)
crypt = self.__crypt(mail, passwd, rsakey)
result = self.client.loginWithIdentityCredentialForCertificate(
IdentityProvider.LINE,
rsakey.keynm,
crypt,
True,
'127.0.0.1',
'http://dg.b9dm.com/KoenoKatachi.mp4',
cert
)
if result.type == 3:
callback(result.pinCode)
header = {"X-Line-Access": result.verifier}
r = requests.get(url="https://" + self.host + self.wait_for_mobile_path, headers=header)
result = self.client.loginWithVerifierForCerificate(r.json()["result"]["verifier"])
self.transport.setCustomHeaders({
"X-Line-Application" : self.LA,
"User-Agent" : self.UA,
"X-Line-Access" : result.authToken
})
self.authToken = result.authToken
self.cert = result.certificate
self.transport.path = self.http_query_path
elif result.type == 1:
self.authToken = result.authToken
self.cert = result.certificate
self.transport.setCustomHeaders({
"X-Line-Application" : self.LA,
"User-Agent" : self.UA,
"X-Line-Access" : result.authToken
})
self.transport.path = self.http_query_path
def TokenLogin(self, authToken):
self.transport.setCustomHeaders({
"X-Line-Application" : self.LA,
"User-Agent" : self.UA,
"X-Line-Access" : authToken,
})
self.authToken = authToken
self.transport.path = self.http_query_path
def qrLogin(self, callback):
self.transport.path = self.auth_query_path
qr = self.client.getAuthQrcode(True, "Bot")
callback("Copy Kode QR nya Plak\nJangan Lama2\nBatas 1 menit:\n line://au/q/" + qr.verifier)
r = requests.get("https://" + self.host + self.wait_for_mobile_path, headers={
"X-Line-Application": self.LA,
"X-Line-Access": qr.verifier,
})
vr = r.json()["result"]["verifier"]
lr = self.client.loginWithVerifierForCerificate(vr)
self.transport.setCustomHeaders({
"X-Line-Application" : self.LA,
"User-Agent" : self.UA,
"X-Line-Access": lr.authToken
})
self.authToken = lr.authToken
self.cert = lr.certificate
self.transport.path = self.http_query_path
def __crypt(self, mail, passwd, RSA):
message = (chr(len(RSA.sessionKey)) + RSA.sessionKey +
chr(len(mail)) + mail +
chr(len(passwd)) + passwd).encode('utf-8')
pub_key = rsa.PublicKey(int(RSA.nvalue, 16), int(RSA.evalue, 16))
        crypto = rsa.encrypt(message, pub_key).encode('hex')  # Python 2 only; on Python 3 use binascii.hexlify(...).decode() instead
return crypto
|
[
"noreply@github.com"
] |
noreply@github.com
|
1e342c9a885841dca5ceca8cad3414989c843045
|
abd2a91cb26dd7ca8d3fca6f9c4f5ef9dea2f066
|
/logReg.py
|
95a8eee77371997300560c19e27f423c142fc9fc
|
[] |
no_license
|
Saniewski/multiclass-perceptron
|
dd0018ce7cde93bec978c24e920853e19e16d938
|
36a475dc4c2f5142b5205259a69ee403248d6eea
|
refs/heads/master
| 2022-04-15T07:13:44.429956
| 2020-04-08T20:20:12
| 2020-04-08T20:20:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,638
|
py
|
import numpy as np
import matplotlib.pylab as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from scipy.special import expit
from plotka import plot_decision_regions
class LogisticRegressionGD(object):
def __init__(self, learningRate=0.05, epochs=100, random_state=1):
self.lr = learningRate
self.epochs = epochs
self.random_state = random_state
def fit(self, X, y):
rgen = np.random.RandomState(self.random_state)
self.weights = rgen.normal(loc=0.0, scale=0.01, size=X.shape[1])
self.bias = rgen.normal(loc=0.0, scale=0.01)
self.costs = []
for i in range(self.epochs):
net_input = self.net_input(X)
output = expit(net_input)
errors = (y - output)
self.weights += self.lr * X.T.dot(errors)
self.bias += self.lr * errors.sum()
cost = (-y.dot(np.log(output)) - ((1 - y).dot(np.log(1 - output))))
self.costs.append(cost)
return self
def net_input(self, X):
return np.dot(X, self.weights) + self.bias
def predict(self, X):
return np.where(self.net_input(X) >= 0.0, 1, 0)
class Multiclass(object):
def __init__(self, reg1, reg2):
self.reg1 = reg1
self.reg2 = reg2
def predict(self, X):
result = []
for data in X:
if self.reg1.predict(data) == 1:
result.append(0)
elif self.reg2.predict(data) == 1:
result.append(1)
else:
result.append(2)
return np.array(result)
def main():
r8 = float(input('Learning rate: '))
epochs = int(input('Epochs: '))
iris = datasets.load_iris()
X = iris.data[:, [1, 3]]
y = iris.target
y1 = y.copy()
y2 = y.copy()
y3 = y.copy()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1, stratify=y)
y1[(y1 != 0)] = -3
y1[y1 == 0] = 1
y1[y1 == -3] = 0
    # one-vs-rest labels for class 1; Multiclass.predict treats its second
    # classifier as the class-1 detector, so train it on y2 rather than on class 2
    y2[y2 != 1] = 0
    reg1 = LogisticRegressionGD(r8, epochs, 1)
    reg1.fit(X, y1)
    reg2 = LogisticRegressionGD(r8, epochs, 1)
    reg2.fit(X, y2)
    multi = Multiclass(reg1, reg2)
print(multi.predict(X_test))
print(reg1.predict(X_test))
plot_decision_regions(X=X_test, y=y_test, classifier=multi)
plt.xlabel(r'$x_1$')
plt.ylabel(r'$x_2$')
plt.legend(loc='upper left')
plt.show()
if __name__ == '__main__':
main()
|
[
"pawel.san16@gmail.com"
] |
pawel.san16@gmail.com
|
8d63e564dff2869969a823b0cef0bf2bc6eef4ef
|
064a954c8dd7d50720aa8fa748d24e8495b8f7d9
|
/OpenCv/字符投影.py
|
0258d496027be7b77d2b2ad6e748db532e8445a9
|
[] |
no_license
|
xianyichi/keras
|
73169c248dde73f0e49e19f117b21080d1b3ba14
|
7ca5ab7e0ef1291b97b985e5ec9c78785e2ff3ec
|
refs/heads/master
| 2021-06-10T23:02:02.354669
| 2021-05-20T12:59:41
| 2021-05-20T12:59:41
| 182,005,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,386
|
py
|
import cv2
import numpy
img = cv2.imread ('/Users/apple/PycharmProjects/keras/image/data/images/0_00h_0.png', cv2.IMREAD_COLOR)  # read as BGR; converted to grayscale below
height, width = img.shape [ :2 ]
# print height, width
# resized = cv2.resize(img, (2*width,2*height), interpolation=cv2.INTER_CUBIC)
gray = cv2.cvtColor (img, cv2.COLOR_BGR2GRAY)
(_, thresh) = cv2.threshold (gray, 140, 255, cv2.THRESH_BINARY)
# Grow the text strokes into solid blocks
kernel = cv2.getStructuringElement (cv2.MORPH_RECT, (2, 2))  # morphological processing: define a rectangular structuring element
closed = cv2.erode (thresh, None, iterations=7)
# cv2.imshow('erode',closed)
height, width = closed.shape [ :2 ]
# print height, width
z = [ 0 ] * height
v = [ 0 ] * width
hfg = [ [ 0 for col in range (2) ] for row in range (height) ]
lfg = [ [ 0 for col in range (2) ] for row in range (width) ]
box = [ 0, 0, 0, 0 ]
# Horizontal projection
a = 0
emptyImage1 = numpy.zeros ((height, width, 3), numpy.uint8)
for y in range (0, height):
for x in range (0, width):
cp = closed [ y, x ]
# if np.any(closed[y,x]):
if cp == 0:
a = a + 1
else:
continue
z [ y ] = a
# print z[y]
a = 0
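# Equivalent vectorised sketch of the horizontal projection above (an illustrative
# assumption, not part of the original loop): count the black pixels in every row.
#   z = (closed == 0).sum(axis=1)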
# Choose the row split points from the horizontal projection values
inline = 1
start = 0
j = 0
for i in range (0, height):
    if inline == 1 and z [ i ] >= 150:  # transition from a blank region into a text region
        start = i  # record the starting row split point
# print i
inline = 0
    elif (i - start > 3) and z [ i ] < 150 and inline == 0:  # transition from a text region into a blank region
inline = 1
        hfg [ j ] [ 0 ] = start - 2  # save the row split position
hfg [ j ] [ 1 ] = i + 2
j = j + 1
# Vertical projection and segmentation within each detected row
a = 0
for p in range (0, j):
for x in range (0, width):
for y in range (hfg [ p ] [ 0 ], hfg [ p ] [ 1 ]):
cp1 = closed [ y, x ]
if cp1 == 0:
a = a + 1
else:
continue
        v [ x ] = a  # save the black-pixel count of this column
a = 0
# print width
    # vertical split points
incol = 1
start1 = 0
j1 = 0
z1 = hfg [ p ] [ 0 ]
z2 = hfg [ p ] [ 1 ]
for i1 in range (0, width):
        if incol == 1 and v [ i1 ] >= 20:  # transition from a blank region into a text region
            start1 = i1  # record the starting column split point
            incol = 0
        elif (i1 - start1 > 3) and v [ i1 ] < 20 and incol == 0:  # transition from a text region into a blank region
            incol = 1
            lfg [ j1 ] [ 0 ] = start1 - 2  # save the column split position
lfg [ j1 ] [ 1 ] = i1 + 2
l1 = start1 - 2
l2 = i1 + 2
j1 = j1 + 1
cv2.rectangle (img, (l1, z1), (l2, z2), (255, 0, 0), 2)
cv2.imshow ('result', img)
cv2.waitKey (0)
|
[
"1369362296@qq.com"
] |
1369362296@qq.com
|
7dd4a52eccf53f87ba02f0c31ca36819d8d641e7
|
6cd799da036e019e61ca25351c3c2b368aeda6f4
|
/lossScreenTest.py
|
30158367f332718455312c2b49e234747e0a7977
|
[] |
no_license
|
Tammon23/IceCream-Jump-recreate
|
d78f3c7c6352d8bef540df4cc5b182fdd76f543e
|
bfcdf2bb823b9ebae6e9e399c974f358d8d2c61e
|
refs/heads/master
| 2021-05-09T20:56:00.489803
| 2018-01-28T03:47:05
| 2018-01-28T03:47:05
| 118,714,755
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,336
|
py
|
import math, random
import pygame
from pygame.locals import *
from settings import *
from functions import *
lossScreen = True
while lossScreen:
pygame.event.pump()
k = pygame.key.get_pressed()
if k[pygame.K_q] or k[pygame.K_ESCAPE]:
break
#start Splash screen
screen.fill(sBackground)
line1 = font2.render("You Lose!", True, BLACK)
line2 = font5.render("Your final score was: " + str(points), True, BLACK)
screen.blit(line1, (90, 100))
screen.blit(line2, (90, 210))
line3 = font1.render("- By Ikenna Uduh", True, BLACK)
screen.blit(line3, (w - 150, h - 25))
x,y = pygame.mouse.get_pos()
pygame.draw.circle(screen, sPAgainButtonClr, (int(w/2), int(h/2 + 50)), RAD3)
pygame.draw.circle(screen, BLACK, (int(w/2), int(h/2 + 50)), RAD3, 10)
line3 = font3.render("PLAY", True, BLACK)
line4 = font3.render("AGAIN", True, BLACK)
screen.blit(line3, (int(w/2) - 120, 400))
screen.blit(line4, (int(w/2) - 120, 500))
# Checking to see if the clicked mouse is pressing the PLAY or HELP buttons
if checkInCir(int(w/2), int(h/2 + 50), y, x, RAD3):
sPAgainButtonClr = sButtonClrPressed
if pygame.mouse.get_pressed()[0]:
gameStart = True
else:
sPAgainButtonClr = sButtonClr
pygame.display.flip()
pygame.quit()
|
[
"Tammon2000@gmail.com"
] |
Tammon2000@gmail.com
|
1c6a094af068444ca3d28073d89315729267ff26
|
e57613c79e9a7a014ae67c00ccaf7c8014011954
|
/lab3/Ast.py
|
fbe23583ed7d7db393eef7caeaf51eec4008e320
|
[] |
no_license
|
szymon-rogus/CompilersLabs
|
cfebbab381e8ded24a122b03baba23c1a011b60b
|
d0f878bdaf8cf584ff28cd2449e2fe2dd4aa6c90
|
refs/heads/master
| 2021-04-02T15:28:58.725704
| 2020-06-10T09:01:59
| 2020-06-10T09:01:59
| 248,289,803
| 0
| 0
| null | 2020-04-30T11:44:18
| 2020-03-18T16:51:10
|
Python
|
UTF-8
|
Python
| false
| false
| 6,214
|
py
|
class Node(object):
def __init__(self, type, children=None, leaf=None):
self.type = type
self.leaf = leaf
if children:
self.children = children
else:
self.children = []
class BinaryExpression(Node):
def __init__(self, left, operator, right):
super().__init__(self.__class__, [left, right], operator)
self.left = left
self.operator = operator
self.right = right
def __repr__(self):
return '{} {} {}'.format(self.left, self.operator, self.right)
class UnaryExpression(Node):
def __init__(self, operator, operand, left=True):
super().__init__(self.__class__, [operand], operator)
self.operator = operator
self.operand = operand
self.left = left
def __repr__(self):
order = [self.operator, self.operand] if self.left else [self.operand, self.operator]
return '{}{}'.format(order[0], order[1])
class Negation(UnaryExpression):
def __init__(self, operand):
super().__init__('-', operand)
class Transposition(UnaryExpression):
def __init__(self, operand):
super().__init__('\'', operand, False)
class Assignment(BinaryExpression):
pass
class Function(Node):
def __init__(self, name, argument):
super().__init__(self.__class__, [argument], name)
self.name = name
self.argument = argument
def __repr__(self):
return "{}({})".format(self.name, self.argument)
class Variable(Node):
def __init__(self, name):
super().__init__(self.__class__, [], name)
self.name = name
def __repr__(self):
return '{}'.format(self.name)
class If(Node):
def __init__(self, condition, expression, else_expression=None):
super().__init__(self.__class__, [condition, expression, else_expression], ["IF", "THEN", "ELSE"])
self.condition = condition
self.expression = expression
self.else_expression = else_expression
if else_expression == None:
self.children = self.children[:-1]
self.leaf = self.leaf[:-1]
def __repr__(self):
representation = 'IF {} THEN {}'.format(self.condition, self.expression)
result = representation + ' ELSE {}'.format(self.else_expression) \
if self.else_expression else representation
return result
class While(Node):
def __init__(self, condition, body):
super().__init__(self.__class__, [condition, body], "WHILE")
self.condition = condition
self.body = body
def __repr__(self):
return 'WHILE {} DO {}'.format(self.condition, self.body)
class Range(Node):
def __init__(self, start, end, step=1):
super().__init__(self.__class__, [start, end, step], "RANGE")
if step == 1: self.children = self.children[:-1]
self.start = start
self.end = end
self.step = step
def __repr__(self):
return '{}:{}:{}'.format(self.start, self.end, self.step)
class For(Node):
def __init__(self, id, range, body):
super().__init__(self.__class__, [id, range, body], "FOR")
self.id = id
self.range = range
self.body = body
def __repr__(self):
return 'FOR {} IN {} DO {}'.format(self.id, self.range, self.body)
class Break(Node):
def __init__(self):
super().__init__(self.__class__, [], "BREAK")
def __repr__(self):
return 'BREAK'
class Continue(Node):
def __init__(self):
super().__init__(self.__class__, [], "CONTINUE")
def __repr__(self):
return 'CONTINUE'
class Return(Node):
def __init__(self, result):
super().__init__(self.__class__, [result], "RETURN")
self.result = result
def __repr__(self):
return 'RETURN( {} )'.format(self.result)
class Print(Node):
def __init__(self, expression):
super().__init__(self.__class__, [expression], "PRINT")
self.expression = expression
def __repr__(self):
return 'PRINT( {} )'.format(self.expression)
class VariableAttribute(Node):
def __init__(self, variable, key):
super().__init__(self.__class__, [variable, key], "REF")
self.variable = variable
self.key = key
def __repr__(self):
return '{}[{}]'.format(self.variable, self.key)
class Error(Node):
pass
class CodeBlock(Node):
def __init__(self, instruction):
super().__init__(self.__class__, [instruction])
self.instructions = self.children
def __repr__(self):
return "{\n" + "\n".join(map(str, self.instructions)) + "\n}"
class Program(Node):
def __init__(self, program):
super().__init__(self.__class__, [program])
self.program = program
def __repr__(self):
return str(self.program)
class Instruction(Node):
def __init__(self, line):
super().__init__(self.__class__, [line])
self.line = line
def __repr__(self):
return str(self.line)
class Matrix(Node):
def __init__(self, rows):
super().__init__(self.__class__, [rows], "MATRIX")
self.dims = len(rows), len(rows[0])
self.rows = rows
def __repr__(self):
return str(self.rows)
class Value(Node):
def __init__(self, val):
super().__init__(self.__class__, [], val)
self.val = val
def __repr__(self):
return "{}({})".format(type(self.val).__name__, self.val)
class Rows(Node):
def __init__(self, sequence):
super().__init__(self.__class__, [sequence])
self.row_list = self.children
def __repr__(self):
return "[" + ", ".join(map(str, self.row_list)) + "]"
def __len__(self):
return len(self.row_list)
def __getitem__(self, item):
return self.row_list[item]
class Sequence(Node):
def __init__(self, expression):
super().__init__(self.__class__, [expression], "SEQ")
self.expressions = self.children
def __repr__(self):
return "[" + ", ".join(map(str, self.expressions)) + "]"
def __len__(self):
return len(self.expressions)
def __getitem__(self, item):
return self.expressions[item]
|
[
"benroszko@gmail.com"
] |
benroszko@gmail.com
|
0572ff0cec28243d6b72452f3f61deda3e6df64b
|
5be2bbf713c09e4f03f29a1c2fd071f3a8e90b5f
|
/src/main/local.py
|
684bfa9db184d454a15afb02d248836c50bdb193
|
[
"MIT"
] |
permissive
|
amrutadharmapurikar/hmr2.0
|
47a4c5ebfb64ce6349ad4e6446d84a033c8f0e05
|
a566fe424618f4cbdefe6441f8b91c9efeaa1219
|
refs/heads/master
| 2023-06-26T08:06:10.593071
| 2021-07-12T00:58:49
| 2021-07-12T00:58:49
| 382,423,981
| 0
| 0
|
MIT
| 2021-07-02T17:54:05
| 2021-07-02T17:54:04
| null |
UTF-8
|
Python
| false
| false
| 1,265
|
py
|
import os
from datetime import datetime
from main.config import Config
from main.model import Model
class LocalConfig(Config):
ROOT_DATA_DIR = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
LOG_DIR = os.path.join(ROOT_DATA_DIR, 'logs', datetime.now().strftime("%d%m%Y-%H%M%S"))
DATA_DIR = os.path.join(ROOT_DATA_DIR, 'src', 'tests', 'files')
SMPL_DATA_DIR = os.path.join(ROOT_DATA_DIR, 'src', 'tests', 'files')
SMPL_MODEL_PATH = os.path.join(ROOT_DATA_DIR, 'models', 'neutral_smpl_coco_regressor.pkl')
SMPL_MEAN_THETA_PATH = os.path.join(ROOT_DATA_DIR, 'models', 'neutral_smpl_mean_params.h5')
CUSTOM_REGRESSOR_PATH = os.path.join(ROOT_DATA_DIR, 'src', 'tests', 'files', 'regressors')
CUSTOM_REGRESSOR_IDX = {
0: 'regressor_test.npy',
}
DATASETS = ['dataset']
SMPL_DATASETS = ['smpl']
BATCH_SIZE = 2
JOINT_TYPE = 'cocoplus'
NUM_KP2D = 19
NUM_KP3D = 14
def __init__(self):
super(LocalConfig, self).__init__()
self.SEED = 1
self.NUM_TRAINING_SAMPLES = 1
self.NUM_TRAIN_SMPL_SAMPLES = 4
self.NUM_VALIDATION_SAMPLES = 1
self.NUM_TEST_SAMPLES = 1
if __name__ == '__main__':
LocalConfig()
model = Model()
model.train()
|
[
"alessandro.russo@allianz.de"
] |
alessandro.russo@allianz.de
|
bc0b91140f22fc81bcbba5bcd8f3452133cf725e
|
207f0427e0ffb10941db14d8de08ccbeac83dac1
|
/gmail.py
|
45dc9d762624648a1e30049e1f655efb972a3d08
|
[] |
no_license
|
appollo88/py
|
0d9182b64928bcda6be0a3a36906b6144371acd7
|
1644d3f45a9b948a76f2a08df046db05d2f329a3
|
refs/heads/master
| 2021-01-20T14:39:24.128069
| 2017-02-22T05:46:33
| 2017-02-22T05:46:33
| 82,765,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 901
|
py
|
"""import smtplib
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login("liuxun931@gmail.com", "lx061511")
msg = "YOUR MESSAGE!"
server.sendmail("liuxun931@gmail.com", "liuxun931@163.com", msg)
server.quit()
"""
# smtplib module send mail
import smtplib
TO = 'liuxun931@163.com'
SUBJECT = 'TEST MAIL'
TEXT = 'Here is a message from python.'
# Gmail Sign In
gmail_sender = 'liuxun931@gmail.com'
gmail_passwd = 'lx061511'
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login(gmail_sender, gmail_passwd)
BODY = '\r\n'.join(['To: %s' % TO,
'From: %s' % gmail_sender,
'Subject: %s' % SUBJECT,
'', TEXT])
try:
server.sendmail(gmail_sender, [TO], BODY)
print ('email sent')
except:
print ('error sending mail')
server.quit()
|
[
"noreply@github.com"
] |
noreply@github.com
|
339289d6118565d385d545357077d0aeb36d8cc1
|
2a2def196a68319147631a4af93095d1a03de754
|
/MuZero/game/gym_wrappers.py
|
62ee3e3e4cc0c785f3b6090d3fd5fecc49ca4076
|
[] |
no_license
|
colindbrown/columbia-deep-learning-project
|
8b7d2dc791046426ff6030ec52d1c9dddc99de3c
|
9046552bd631270838b0e49a2b8c9c524d40f1ed
|
refs/heads/master
| 2023-05-25T14:39:55.978535
| 2020-04-29T20:16:59
| 2020-04-29T20:16:59
| 248,585,231
| 2
| 2
| null | 2022-06-22T01:52:03
| 2020-03-19T19:13:58
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 631
|
py
|
import gym
import numpy as np
class ScalingObservationWrapper(gym.ObservationWrapper):
"""
    Wrapper that applies min-max scaling to observations.
"""
def __init__(self, env, low=None, high=None):
super().__init__(env)
assert isinstance(env.observation_space, gym.spaces.Box)
low = np.array(self.observation_space.low if low is None else low)
high = np.array(self.observation_space.high if high is None else high)
self.mean = (high + low) / 2
self.max = high - self.mean
def observation(self, observation):
return (observation - self.mean) / self.max
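# A minimal usage sketch (the environment id and the low/high bounds below are
# illustrative assumptions, not part of this project):
#   env = ScalingObservationWrapper(gym.make("CartPole-v1"),
#                                   low=[-2.4, -2.0, -0.21, -2.0],
#                                   high=[2.4, 2.0, 0.21, 2.0])
#   obs = env.reset()  # each observation component is now scaled to roughly [-1, 1]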
|
[
"jayantsubramanian2020@Jayants-MacBook-Air.local"
] |
jayantsubramanian2020@Jayants-MacBook-Air.local
|
980bdcafecbd81a687de64b1aa498e359f541eb6
|
6a90c88cd3898a0936f83c7d2a8f713943d440db
|
/POSTagging/rnn_easy.py
|
245cb834e3e35727bcafae2471703f73190745f7
|
[
"Apache-2.0"
] |
permissive
|
LindgeW/POS-Tagging
|
3be4bc5da30444b22722a15e3e39350231d42c76
|
358570047e8ad8403bcab4a1e9e3b082b9bea5fc
|
refs/heads/master
| 2022-02-17T23:21:58.504742
| 2019-07-25T09:11:03
| 2019-07-25T09:11:03
| 186,325,676
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,426
|
py
|
import torch
import torch.nn as nn
'''
LSTMCell
Input: input, (h_0, c_0)
    input (batch, input_size): tensor containing the input features for the current time step.
    h_0 (batch, hidden_size): tensor containing the initial hidden state for each element in the batch.
    c_0 (batch, hidden_size): tensor containing the initial cell state for each element in the batch.
Output: h_1, c_1
    h_1 (batch, hidden_size): the hidden state for the next time step.
    c_1 (batch, hidden_size): the cell state for the next time step.

LSTM
Input: input, (h_0, c_0)
    input (seq_len, batch, input_size): tensor containing the features of the input sequence; it can also be a packed variable, see torch.nn.utils.rnn.pack_padded_sequence(input, lengths, batch_first=False).
    h_0 (num_layers * num_directions, batch, hidden_size): tensor containing the initial hidden state for each element in the batch.
    c_0 (num_layers * num_directions, batch, hidden_size): tensor containing the initial cell state for each element in the batch.
Output: output, (h_n, c_n)
    output (seq_len, batch, hidden_size * num_directions): tensor containing the output of the last RNN layer for every time step. If the input is a torch.nn.utils.rnn.PackedSequence, the output is a PackedSequence as well.
    h_n (num_layers * num_directions, batch, hidden_size): tensor containing the hidden state for the last time step.
    c_n (num_layers * num_directions, batch, hidden_size): tensor containing the cell state for the last time step.
'''
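# A short shape-check sketch of the two interfaces described above (illustrative only;
# the sizes 20/50, batch 3 and seq_len 6 below are arbitrary assumptions):
#   cell = nn.LSTMCell(input_size=20, hidden_size=50)
#   h1, c1 = cell(torch.rand(3, 20), (torch.zeros(3, 50), torch.zeros(3, 50)))  # both (3, 50)
#   lstm = nn.LSTM(input_size=20, hidden_size=50, num_layers=2, bidirectional=True)
#   out, (hn, cn) = lstm(torch.rand(6, 3, 20))  # out: (6, 3, 100); hn, cn: (4, 3, 50)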
class RNNEncoder(nn.Module):
def __init__(self, input_size=0, hidden_size=0, num_layers=1, batch_first=False, bidirectional=False, dropout=0.0, rnn_type='lstm'):
super(RNNEncoder, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.batch_first = batch_first
self.bidirectional = bidirectional
self.dropout = dropout
self.num_directions = 2 if self.bidirectional else 1
self._rnn_types = ['RNN', 'LSTM', 'GRU']
self.rnn_type = rnn_type.upper()
assert self.rnn_type in self._rnn_types
        # Look up the matching cell constructor (RNNCell / LSTMCell / GRUCell) on torch.nn
        self._rnn_cell = getattr(nn, self.rnn_type+'Cell')  # getattr fetches an attribute or method of an object by name
        # ModuleList is a subclass of Module; when it is used inside a Module it is automatically recognised as a submodule.
        # When an nn.ModuleList is added as a member of an nn.Module (i.e. when we add modules to our network),
        # the parameters of every nn.Module inside the nn.ModuleList are registered as parameters of our network as well.
self.fw_cells, self.bw_cells = nn.ModuleList(), nn.ModuleList()
for layer_i in range(self.num_layers):
layer_input_size = self.input_size if layer_i == 0 else self.num_directions * self.hidden_size
self.fw_cells.append(self._rnn_cell(input_size=layer_input_size, hidden_size=self.hidden_size))
if self.bidirectional:
self.bw_cells.append(self._rnn_cell(input_size=layer_input_size, hidden_size=self.hidden_size))
# self.cell = nn.LSTMCell(
        #     input_size=self.input_size,    # dimensionality of the input features
        #     hidden_size=self.hidden_size   # dimensionality of the hidden state
# )
def init_hidden(self, batch_size=1, retain=True, device=torch.device('cpu')):
        if retain:  # whether to re-seed so the hidden state is initialised identically on every iteration
torch.manual_seed(3357)
# hidden = torch.randn(batch_size, self.hidden_size, device=device)
hidden = torch.zeros(batch_size, self.hidden_size, device=device)
if self.rnn_type == 'LSTM':
hidden = (hidden, hidden)
return hidden
def _forward_mask(self, cell, inputs, lens, init_hidden, drop_mask=None):
out_fw = []
seq_len = inputs.size(0)
hx_fw = init_hidden
assert torch.is_tensor(lens)
for xi in range(seq_len):
# print('data in device: ', inputs.device, hx_fw.device)
# print('cell: ', next(cell.parameters()).is_cuda)
hidden = cell(input=inputs[xi], hx=hx_fw)
if self.rnn_type == 'LSTM':
h_next, c_next = hidden
mask = (xi < lens).float().unsqueeze(1).expand_as(h_next)
# mask = torch.tensor((xi < lens), dtype=torch.float, device=inputs.device).unsqueeze(1).expand_as(h_next)
h_next = h_next * mask + init_hidden[0] * (1 - mask)
c_next = c_next * mask + init_hidden[1] * (1 - mask)
out_fw.append(h_next)
                if drop_mask is not None:  # apply dropout on the recurrent connection
h_next = h_next * drop_mask
hx_next = (h_next, c_next)
else:
h_next = hidden
mask = (xi < lens).float().unsqueeze(1).expand_as(h_next)
# mask = torch.tensor((xi < lens), dtype=torch.float, device=inputs.device).unsqueeze(1).expand_as(h_next)
h_next = h_next * mask + init_hidden * (1 - mask)
out_fw.append(h_next)
                if drop_mask is not None:  # apply dropout on the recurrent connection
h_next = h_next * drop_mask
hx_next = h_next
hx_fw = hx_next
out_fw = torch.stack(tuple(out_fw), dim=0)
return out_fw, hx_fw
def _backward_mask(self, cell, inputs, lens, init_hidden, drop_mask=None):
out_bw = []
seq_len = inputs.size(0)
hx_bw = init_hidden
assert torch.is_tensor(lens)
for xi in reversed(range(seq_len)):
hidden = cell(input=inputs[xi], hx=hx_bw)
if self.rnn_type == 'LSTM':
h_next, c_next = hidden
mask = (xi < lens).float().unsqueeze(1).expand_as(h_next)
# mask = torch.tensor((xi < lens), dtype=torch.float, device=inputs.device).unsqueeze(1).expand_as(h_next)
h_next = h_next * mask + init_hidden[0] * (1 - mask)
c_next = c_next * mask + init_hidden[1] * (1 - mask)
out_bw.append(h_next)
                if drop_mask is not None:  # apply dropout on the recurrent connection
h_next = h_next * drop_mask
hx_next = (h_next, c_next)
else:
h_next = hidden
mask = (xi < lens).float().unsqueeze(1).expand_as(h_next)
# mask = torch.tensor((xi < lens), dtype=torch.float, device=inputs.device).unsqueeze(1).expand_as(h_next)
h_next = h_next * mask + init_hidden * (1 - mask)
out_bw.append(h_next)
                if drop_mask is not None:  # apply dropout on the recurrent connection
h_next = h_next * drop_mask
hx_next = h_next
hx_bw = hx_next
out_bw.reverse()
out_bw = torch.stack(tuple(out_bw), dim=0)
return out_bw, hx_bw
def forward(self, inputs, seq_lens, init_hidden=None):
if self.batch_first:
inputs = inputs.transpose(0, 1)
batch_size = inputs.size(1)
if init_hidden is None:
init_hidden = self.init_hidden(batch_size, device=inputs.device)
# init_hidden = inputs.data.new(batch_size, self.hidden_size).zero_()
# if self.rnn_type == 'LSTM':
# init_hidden = (init_hidden, init_hidden)
hx = init_hidden
hn, cn = [], []
for layer in range(self.num_layers):
input_drop_mask, hidden_drop_mask = None, None
seq_len, batch_size, input_size = inputs.size()
if self.training:
# print('use dropout...')
if layer != 0:
input_drop_mask = torch.zeros(batch_size, input_size, device=inputs.device).fill_(1 - self.dropout)
                    # create a tensor of the same dtype as inputs, on the same device
# input_drop_mask = inputs.data.new(batch_size, input_size).fill_(1 - self.dropout)
input_drop_mask = torch.bernoulli(input_drop_mask)
input_drop_mask = torch.div(input_drop_mask, (1 - self.dropout))
input_drop_mask = input_drop_mask.unsqueeze(-1).expand((-1, -1, seq_len)).permute((2, 0, 1))
inputs = inputs * input_drop_mask
hidden_drop_mask = torch.zeros(batch_size, self.hidden_size, device=inputs.device).fill_(1 - self.dropout)
# hidden_drop_mask = inputs.data.new(batch_size, self.hidden_size).fill_(1 - self.dropout)
                hidden_drop_mask = torch.bernoulli(hidden_drop_mask)  # outputs 1 with probability p (the input value) and 0 with probability 1 - p
                hidden_drop_mask = torch.div(hidden_drop_mask, (1 - self.dropout))  # keeps the expected value consistent between training and inference
# print('data is in cuda: ', inputs.device, mask.device, hx.device, hidden_drop_mask.device)
out_fw, (hn_f, cn_f) = self._forward_mask(cell=self.fw_cells[layer], inputs=inputs, lens=seq_lens, init_hidden=hx, drop_mask=hidden_drop_mask)
# print(out_fw.shape, hn_f.shape, cn_f.shape)
out_bw, hn_b, cn_b = None, None, None
if self.bidirectional:
out_bw, (hn_b, cn_b) = self._backward_mask(cell=self.bw_cells[layer], inputs=inputs, lens=seq_lens, init_hidden=hx, drop_mask=hidden_drop_mask)
# print(out_bw.shape, hn_b.shape, cn_b.shape)
hn.append(torch.cat((hn_f, hn_b), dim=1) if self.bidirectional else hn_f)
cn.append(torch.cat((cn_f, cn_b), dim=1) if self.bidirectional else cn_f)
inputs = torch.cat((out_fw, out_bw), dim=2) if self.bidirectional else out_fw
# print('input shape:', inputs.shape) # (6, 3, 10)
hn = torch.stack(tuple(hn), dim=0)
cn = torch.stack(tuple(cn), dim=0)
output = inputs.transpose(0, 1) if self.batch_first else inputs
return output, (hn, cn)
    # by default, inputs: [seq_len, batch_size, input_size]
    # with batch_first: [batch_size, seq_len, input_size]
# def forward(self, inputs, init_hidden=None):
# assert torch.is_tensor(inputs) and inputs.dim() == 3
#
# if self.batch_first:
# inputs = inputs.permute(1, 0, 2)
#
# batch_size = inputs.size(1)
# if init_hidden is None:
# init_hidden = self.init_hidden(batch_size)
#
# hx = init_hidden
#
# hn, cn = [], []
# for layer in range(self.num_layers):
# input_drop_mask, hidden_drop_mask = None, None
# seq_len, batch_size, input_size = inputs.size()
# if self.training:
# print('use dropout...')
# if layer != 0:
# input_drop_mask = torch.empty(batch_size, input_size).fill_(1 - self.dropout)
# input_drop_mask = torch.bernoulli(input_drop_mask)
# input_drop_mask = torch.div(input_drop_mask, (1 - self.dropout))
# input_drop_mask = input_drop_mask.unsqueeze(-1).expand((-1, -1, seq_len)).permute((2, 0, 1))
# inputs = inputs * input_drop_mask
#
# hidden_drop_mask = torch.empty(batch_size, self.hidden_size).fill_(1 - self.dropout)
    #             hidden_drop_mask = torch.bernoulli(hidden_drop_mask)  # outputs 1 with probability p (the input value) and 0 with probability 1 - p
    #             hidden_drop_mask = torch.div(hidden_drop_mask, (1 - self.dropout))  # keeps the expected value consistent between training and inference
#
# out_fw, (hn_f, cn_f) = RNNEncoder._forward(cell=self.fw_cells[layer], inputs=inputs, init_hidden=hx, drop_mask=hidden_drop_mask)
# # print(out_fw.shape, hn_f.shape, cn_f.shape)
#
# out_bw, hn_b, cn_b = None, None, None
# if self.bidirectional:
# out_bw, (hn_b, cn_b) = RNNEncoder._backward(cell=self.bw_cells[layer], inputs=inputs, init_hidden=hx, drop_mask=hidden_drop_mask)
# # print(out_bw.shape, hn_b.shape, cn_b.shape)
#
# hn.append(torch.cat((hn_f, hn_b), dim=1) if self.bidirectional else hn_f)
# cn.append(torch.cat((cn_f, cn_b), dim=1) if self.bidirectional else cn_f)
#
# inputs = torch.cat((out_fw, out_bw), dim=2) if self.bidirectional else out_fw
# # print('input shape:', inputs.shape) # (6, 3, 10)
#
# hn = torch.stack(tuple(hn), dim=0)
# cn = torch.stack(tuple(cn), dim=0)
#
# output = inputs.permute((1, 0, 2)) if self.batch_first else inputs
#
# return output, (hn, cn)
if __name__ == '__main__':
    # [batch_size, seq_len, input_size]
    inputs = torch.rand(3, 6, 20)
    mask = torch.zeros(3, 6)
    mask[0, :3] = torch.ones(3)
    mask[1, :2] = torch.ones(2)
    seq_lens = mask.sum(dim=1)  # forward() expects per-sequence lengths, not the raw mask
    lstm = RNNEncoder(input_size=20, hidden_size=100, num_layers=3, batch_first=True, bidirectional=True, dropout=0.2)
    # h0, c0 = torch.randn(3, 10), torch.randn(3, 10)
    # out, (hn, cn) = lstm(inputs, (h0, c0))
    out, (hn, cn) = lstm(inputs, seq_lens)
    print(out.shape)  # [3, 6, 200] -> (batch, seq_len, hidden_size * num_directions)
    print(hn.shape, cn.shape)  # [3, 3, 200] each -> (num_layers, batch, hidden_size * num_directions)
|
[
"ncu151wlz@qq.com"
] |
ncu151wlz@qq.com
|
4e98ab90157e2164c540617da24de059870e5e34
|
3071ce441681abbfea11c9cc5a5ba853aecff2d2
|
/game_over.py
|
56bb93d1293913866d464c7cc38a5f883a36e269
|
[] |
no_license
|
xodapi/python_learning
|
d75ffc7c8312f52be3c5123fd003537943d75fe7
|
afd7ff56b8ccdfea42ccb3dc52ef25dfd44d3d68
|
refs/heads/master
| 2016-09-11T04:58:55.524656
| 2015-04-21T10:51:28
| 2015-04-21T10:51:28
| 28,742,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20
|
py
|
print('Game over')
|
[
"faneropi@gmail.com"
] |
faneropi@gmail.com
|
5d8cd2c7638647e1cdd05a42eaf90febc0a95726
|
5ebe757ed6a2a339525c349922a3218b9d2b3f94
|
/lstm-language-model/preprocess.py
|
3930b2bf16a8a4194f5abff4da1756b269b70a3c
|
[] |
no_license
|
esun0087/self_parser
|
aa3ef6103c470c5f85627fe59e6d82239bcd63d6
|
cae1f45be1c954839980334e16d343bfae27dbe6
|
refs/heads/master
| 2020-03-21T10:27:18.247597
| 2018-08-07T08:26:29
| 2018-08-07T08:26:29
| 138,451,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,516
|
py
|
import torch
import argparse
import data
def preprocess(opt):
print('Begin preprocessing')
train_dataset = data.DataSet(opt.train_data, display_freq=opt.display_freq)
train_dataset.max_dict = opt.dict_size
train_dataset.build_dict()
print('Save training data')
torch.save(train_dataset, opt.train_data + '.prep.train.pt')
val_dataset = data.DataSet(opt.val_data, display_freq=opt.display_freq)
val_dataset.change_dict(train_dataset.dictionary)
print('Save validation data')
torch.save(val_dataset, opt.val_data + '.prep.val.pt')
print('Preprocessing done')
if __name__ == '__main__':
parser = argparse.ArgumentParser('Preprocessing')
parser.add_argument('--train_data', type=str, default='data/penn/train.txt',
help='Training data path')
parser.add_argument('--val_data', type=str, default='data/penn/valid.txt',
help='Validation data path')
parser.add_argument('--dict_size', type=int, default=50000,
                        help='Reduce dictionary if over this size')
parser.add_argument('--display_freq', type=int, default=100000,
                        help='Display progress every N sentences; 0 for no display')
parser.add_argument('--max_len', type=int, default=100,
                        help='Maximum length of sentence')
    parser.add_argument('--trunc_len', type=int, default=100,
                        help='Truncate sentences that are longer than the maximum length')
opt = parser.parse_args()
preprocess(opt)
|
[
"a1a2a3a4a5"
] |
a1a2a3a4a5
|
f555f4db1b57f5a3fdb41deb28cc1c6151bd4ea2
|
6cde76beabb943b4de4ab9f7516ebffca51f6da6
|
/generate.py
|
ea8517289fa84e335e308416701d7c7449ebf6f2
|
[] |
no_license
|
A1exRey/ReflectionOfNAR
|
801bf23eb098f03e663f89f553355f43eb6a7d9e
|
79ed86873322d45cbfc28f98a4e224c961d5bad2
|
refs/heads/main
| 2023-05-13T23:00:54.703916
| 2021-06-01T11:01:02
| 2021-06-01T11:01:02
| 372,795,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,600
|
py
|
#!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Translate pre-processed data with a trained model.
"""
import torch
from fairseq import bleu, checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.meters import StopwatchMeter, TimeMeter
import re
from interactive import translate_corpus, parse_head_pruning_descriptors, mask_heads
def main(args):
assert args.path is not None, '--path required for generation!'
assert not args.sampling or args.nbest == args.beam, \
'--sampling requires --nbest to be equal to --beam'
assert args.replace_unk is None or args.raw_text, \
'--replace-unk requires a raw text dataset (--raw-text)'
utils.import_user_module(args)
if args.max_tokens is None and args.max_sentences is None:
args.max_tokens = 12000
print(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
# Set dictionaries
try:
src_dict = getattr(task, 'source_dictionary', None)
except NotImplementedError:
src_dict = None
tgt_dict = task.target_dictionary
# Load ensemble
print('| loading model(s) from {}'.format(args.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
args.path.split(':'),
arg_overrides=eval(args.model_overrides),
task=task,
)
to_prune = {'E': {}, 'A': {}, 'D': {}}
to_prune = {'E': {0: {0, 1, 2, 3, 4, 5, 6, 7}, 1: {2, 4, 7}, 2: {3}}, 'A': {}, 'D': {}}
to_prune = {'E': {0: {0, 1, 2, 3, 4, 5, 6, 7}, 1: {2, 4, 7}, 2: {3, 7}, 4: {0, 3, 7}, 5: {0, 7}, 11: {2, 5}, 6: {0}, 9: {3}, 3: {0}}, 'A': {}, 'D': {}}
to_prune = {'E': {0: {0, 1, 2, 3, 4, 5, 6, 7}, 1: {0, 1, 2, 4, 6, 7}, 2: {3, 5, 7}, 3: {0, 1, 4}, 4: {0, 2, 3, 7}, 5: {0, 7}, 6: {0, 1}, 9: {3}, 11: {2, 5}, 10: {0, 3}}, 'A': {0: {1}}, 'D': {}}
to_prune = {'E': {0: {0, 1, 2, 3, 4, 5, 6, 7}, 1: {0, 1, 2, 4, 6, 7}, 2: {3, 5, 7}, 3: {0, 1, 4, 6}, 4: {0, 2, 3, 7}, 5: {0, 3, 7}, 6: {0, 1, 2}, 9: {1, 3, 6}, 10: {0, 3, 5}, 11: {2, 5, 7}, 8: {3, 4, 5, 7}}, 'A': {0: {1}}, 'D': {}}
to_prune = {'E': {0: {0, 1, 2, 3, 4, 5, 6, 7}, 1: {0, 1, 2, 4, 5, 6, 7}, 2: {0, 3, 5, 7}, 3: {0, 1, 4, 5, 6}, 4: {0, 1, 2, 3, 7}, 5: {0, 2, 3, 4, 5, 7}, 6: {0, 1, 2, 3, 6}, 8: {3, 4, 5, 7}, 9: {1, 3, 6}, 10: {0, 3, 5}, 11: {2, 5, 7}, 7: {2, 4}}, 'A': {0: {1}}, 'D': {}}
to_prune = {'E': {0: {0, 1, 2, 3, 4, 5, 6, 7}, 1: {0, 1, 2, 4, 5, 6, 7}, 2: {0, 2, 3, 4, 5, 6, 7}, 3: {0, 1, 4, 5, 6}, 4: {0, 1, 2, 3, 6, 7}, 5: {0, 2, 3, 4, 5, 7}, 6: {0, 1, 2, 3, 6}, 7: {2, 4, 6}, 8: {0, 3, 4, 5, 6, 7}, 9: {1, 3, 6}, 10: {0, 1, 3, 5, 7}, 11: {0, 2, 5, 7}}, 'A': {0: {1}}, 'D': {0: {1, 4}}}
to_prune = {'E': {0: {0, 1, 2, 3, 4, 5, 6, 7}, 1: {0, 1, 2, 3, 4, 5, 6, 7}, 2: {0, 2, 3, 4, 5, 6, 7}, 3: {0, 1, 3, 4, 5, 6, 7}, 4: {0, 1, 2, 3, 4, 5, 6, 7}, 5: {0, 2, 3, 4, 5, 6, 7}, 6: {0, 1, 2, 3, 6, 7}, 7: {0, 2, 3, 4, 6}, 8: {0, 3, 4, 5, 6, 7}, 9: {1, 2, 3, 6, 7}, 10: {0, 1, 3, 5, 7}, 11: {0, 2, 5, 7}}, 'A': {0: {1}}, 'D': {0: {1, 4}}}
to_prune = {'E': {0: {0, 1, 2, 3, 4, 5, 6, 7}, 1: {0, 1, 2, 3, 4, 5, 6, 7}, 2: {0, 1, 2, 3, 4, 5, 6, 7}, 3: {0, 1, 3, 4, 5, 6, 7}, 4: {0, 1, 2, 3, 4, 5, 6, 7}, 5: {0, 1, 2, 3, 4, 5, 6, 7}, 6: {0, 1, 2, 3, 5, 6, 7}, 7: {0, 2, 3, 4, 6, 7}, 8: {0, 1, 3, 4, 5, 6, 7}, 9: {1, 2, 3, 6, 7}, 10: {0, 1, 2, 3, 5, 7}, 11: {0, 2, 5, 6, 7}}, 'A': {0: {1, 4, 7}}, 'D': {0: {0, 1, 4, 7}}}
#to_prune = {'E': {0: {0, 1, 2, 3, 4, 5, 6, 7}, 1: {0, 1, 2, 3, 4, 5, 6, 7}, 2: {0, 1, 2, 3, 4, 5, 6, 7}, 3: {0, 1, 2, 3, 4, 5, 6, 7}, 4: {0, 1, 2, 3, 4, 5, 6, 7}, 5: {0, 1, 2, 3, 4, 5, 6, 7}, 6: {0, 1, 2, 3, 4, 5, 6, 7}, 7: {0, 2, 3, 4, 5, 6, 7}, 8: {0, 1, 2, 3, 4, 5, 6, 7}, 9: {1, 2, 3, 4, 5, 6, 7}, 10: {0, 1, 2, 3, 5, 7}, 11: {0, 2, 5, 6, 7}}, 'A': {0: {0, 1, 2, 3, 4, 5, 6, 7}}, 'D': {0: {0, 1, 4, 7}}}
# Optimize ensemble for generation
for model in models:
mask_heads(model, to_prune, False)
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.replace_unk)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
# Initialize generator
gen_timer = StopwatchMeter()
generator = task.build_generator(models,args)
# Generate and compute BLEU score
if args.sacrebleu:
scorer = bleu.SacrebleuScorer()
else:
scorer = bleu.Scorer(tgt_dict.pad(), tgt_dict.eos(), tgt_dict.unk())
num_sentences = 0
has_target = True
with progress_bar.build_progress_bar(args, itr) as t:
wps_meter = TimeMeter()
for sample in t:
sample = utils.move_to_cuda(sample) if use_cuda else sample
if 'net_input' not in sample:
continue
prefix_tokens = None
if args.prefix_size > 0:
prefix_tokens = sample['target'][:, :args.prefix_size]
gen_timer.start()
hypos = task.inference_step(generator, models, sample, prefix_tokens)
num_generated_tokens = sum(len(h[0]['tokens']) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i, sample_id in enumerate(sample['id'].tolist()):
has_target = sample['target'] is not None
# Remove padding
src_tokens = utils.strip_pad(sample['net_input']['src_tokens'][i, :], tgt_dict.pad())
target_tokens = None
if has_target:
target_tokens = utils.strip_pad(sample['target'][i, :], tgt_dict.pad()).int().cpu()
# Either retrieve the original sentences or regenerate them from tokens.
if align_dict is not None:
src_str = task.dataset(args.gen_subset).src.get_original_text(sample_id)
target_str = task.dataset(args.gen_subset).tgt.get_original_text(sample_id)
else:
if src_dict is not None:
src_str = src_dict.string(src_tokens, args.remove_bpe)
else:
src_str = ""
if has_target:
target_str = tgt_dict.string(target_tokens, args.remove_bpe, escape_unk=True)
if not args.quiet:
if src_dict is not None:
print('S-{}\t{}'.format(sample_id, src_str))
if has_target:
print('T-{}\t{}'.format(sample_id, target_str))
# Process top predictions
for j, hypo in enumerate(hypos[i][:args.nbest]):
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo['tokens'].int().cpu(),
src_str=src_str,
alignment=hypo['alignment'].int().cpu() if hypo['alignment'] is not None else None,
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=args.remove_bpe,
)
if not args.quiet:
print('H-{}\t{}\t{}'.format(sample_id, hypo['score'], hypo_str))
print('P-{}\t{}'.format(
sample_id,
' '.join(map(
lambda x: '{:.4f}'.format(x),
hypo['positional_scores'].tolist(),
))
))
if args.print_alignment:
print('A-{}\t{}'.format(
sample_id,
' '.join(map(lambda x: str(utils.item(x)), alignment))
))
# Score only the top hypothesis
if has_target and j == 0:
if align_dict is not None or args.remove_bpe is not None:
# Convert back to tokens for evaluation with unk replacement and/or without BPE
target_tokens = tgt_dict.encode_line(target_str, add_if_not_exist=True)
if hasattr(scorer, 'add_string'):
if args.dehyphenate:
print('dehyphenating')
target_str = dehyphenate(target_str)
hypo_str = dehyphenate(hypo_str)
scorer.add_string(target_str, hypo_str)
else:
scorer.add(target_tokens, hypo_tokens)
wps_meter.update(num_generated_tokens)
t.log({'wps': round(wps_meter.avg)})
num_sentences += sample['nsentences']
print('| Translated {} sentences ({} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)'.format(
num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1. / gen_timer.avg))
if has_target:
print('| Generate {} with beam={}: {}'.format(args.gen_subset, args.beam, scorer.result_string()))
return scorer
def cli_main():
parser = options.get_generation_parser()
args = options.parse_args_and_arch(parser)
main(args)
def dehyphenate(sent):
return re.sub(r'(\S)-(\S)', r'\1 ##AT##-##AT## \2', sent).replace('##AT##', '@')
if __name__ == '__main__':
cli_main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
a60cce92b01defcbf4760f93cdbc9f636e0e3cef
|
1503bb33834c463657977765e821620f189a4685
|
/p007.py
|
79f340ac96bd0e0708ffbde2fc2002a0b35e7944
|
[] |
no_license
|
JackPound/Euler-Problems
|
94a2ff36d92cc28c4a23586847698d33710f24b0
|
fac5975d4fa323b3f992daedc12aec1246dbdb82
|
refs/heads/master
| 2020-03-22T20:53:26.655150
| 2018-07-12T22:51:57
| 2018-07-12T22:51:57
| 140,639,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
# What is the 10 001st prime number?
def is_prime(number_to_check):
prime = True
for x in range (2, number_to_check):
if number_to_check % x == 0:
prime = False
break
return prime
def prime_position(at_position):
prime_list = []
count = 2
while len(prime_list) < at_position:
if is_prime(count):
prime_list.append(count)
count += 1
else:
count += 1
print(prime_list[-1])
prime_position(10001)
|
[
"jackpound@live.com"
] |
jackpound@live.com
|
4edfcf8e234bf582b8a3e06752421dff27a5d562
|
679b923d6ba62d00ab5ad8aef3f82f42df71a58c
|
/Server_Kapfumvuti_Patel.py
|
3a730631bb548c6f480757df43a85d6b5b03bea9
|
[] |
no_license
|
GurenMarkV/Go-Back-N-Protocol
|
957086dbca5e4c60ed18ff2ee418016cb102e8f6
|
949c3db7bd38cc9e09a847853bc45531517a3620
|
refs/heads/master
| 2020-03-18T22:30:12.789811
| 2018-05-29T20:21:56
| 2018-05-29T20:21:56
| 135,348,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,023
|
py
|
# Project 1: Implementation of Go-Back-N Protocol
# Group Member: Daksh Patel ID: 104 030 031
# Group Member: Nyasha Kapfumvuti ID: 104 121 166
# Date: Mar 30th, 2018
import socket
import numpy
import time
import json
from random import randint
acked = [] # acknowledged packets
unAcked = [] # unacknowledged packets
ticker = 0 # 0.2 loss rate = 1/5 packets get "lost" => placed in unAcked
lostItem = 5 # every 5th item gets placed in unacked
returnVals = [] # array of values to be returned as acks/unacks
timer = time.localtime
packets = []
packet = ''
server_address = ('localhost', 10000)
serverSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
serverSocket.bind(server_address)
serverSocket.listen(1)
print('The server is ready to receive')
while True:
print('waiting for a connection')
connection, client_address = serverSocket.accept()
try:
print('client connected:', client_address)
while True:
data = connection.recv(1024) # data arrives as a string. Need to convert this back to an array
newPack = int(data)
            if randint(1, 5) == 5:  # ~0.2 loss rate, as described in the comment above
print('packet was lost/corrupted')
connection.sendto(str(newPack).encode(), server_address)
else:
if newPack not in acked:
acked.append(newPack)
                    print('received sequence # ', str(newPack), ' successfully. Sending ack')
connection.sendto(str(newPack).encode(), server_address)
print('sent')
ticker += 1 # loss rate leads to every nth item getting lost
if data:
# send acknowledgement
# connection.sendto(str(newPack).encode(), server_address)
print('')
else:
break
finally:
connection.close()
print(acked)
|
[
"noreply@github.com"
] |
noreply@github.com
|
8177accba9ea1009914fc04bc9522e187b19a4bc
|
82008bbe06f77d17898565e20d08bf34bf28c313
|
/test/functional/wallet_scriptaddress2.py
|
4c431a5ed81f6d5e0aac1749f833e51d0f3e9782
|
[
"MIT"
] |
permissive
|
hantobi-europe/aox
|
6e6884c852fcb08f8c5d89384c9ae60746d3f149
|
74cd6d07a0d4058648dbb5bc42d829a04a0e5327
|
refs/heads/main
| 2023-02-07T20:36:36.487504
| 2020-12-28T18:11:13
| 2020-12-28T18:11:13
| 323,902,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,968
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test new aox multisig prefix functionality.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
)
class ScriptAddress2Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.setup_clean_chain = True
self.extra_args = [['-addresstype=legacy', '-deprecatedrpc=accounts', '-txindex=1'], [], ['-txindex=1']]
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
self.sync_all()
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
cnt = self.nodes[0].getblockcount()
# Mine some blocks
self.nodes[1].generate(101)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 101):
raise AssertionError("Failed to mine 100 blocks")
addr = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
multisig_addr = self.nodes[0].addmultisigaddress(2, [addr, addr2], "multisigaccount")['address']
assert_equal(multisig_addr[0], 'Q')
# Send to a new multisig address
txid = self.nodes[1].sendtoaddress(multisig_addr, 1)
self.nodes[1].generate(101)
self.sync_all()
tx = self.nodes[0].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr in dest_addrs)
# Spend from the new multisig address
addr3 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr3, 0.8)
self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("*", 1) < 0.2)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr3)
# Send to an old multisig address. The api addmultisigaddress
# can only generate a new address so we manually compute
# multisig_addr_old beforehand using an old client.
priv_keys = ["cU7eeLPKzXeKMeZvnEJhvZZ3tLqVF3XGeo1BbM8dnbmV7pP3Qg89",
"cTw7mRhSvTfzqCt6MFgBoTBqwBpYu2rWugisXcwjv4cAASh3iqPt"]
addrs = ["mj6gNGRXPXrD69R5ApjcsDerZGrYKSfb6v",
"mqET4JA3L7P7FoUjUP3F6m6YsLpCkyzzou"]
self.nodes[0].importprivkey(priv_keys[0])
self.nodes[0].importprivkey(priv_keys[1])
multisig_addr_new = self.nodes[0].addmultisigaddress(2, addrs, "multisigaccount2")['address']
assert_equal(multisig_addr_new, 'QZ974ZrPrmqMmm1PSVp4m8YEgo3bCQZBbe')
multisig_addr_old = "2N5nLwYz9qfnGdaFLpPn3gS6oYQbmLTWPjq"
# Let's send to the old address. We can then find it in the
# new address with the new client. So basically the old
# address and the new one are the same thing.
txid = self.nodes[1].sendtoaddress(multisig_addr_old, 1)
self.nodes[1].generate(1)
self.sync_all()
tx = self.nodes[2].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr_new in dest_addrs)
assert(multisig_addr_old not in dest_addrs)
# Spend from the new multisig address
addr4 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr4, 0.8)
self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("*", 1) < 0.4)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr4)
if __name__ == '__main__':
ScriptAddress2Test().main()
|
[
"hantobieurope@gmail.com"
] |
hantobieurope@gmail.com
|
d670fc71f610fb31b49e00a8c5c71b54ca6ed4ef
|
83a59e255f681e85828399c6c2323f2cf0997e10
|
/kibble/scanners/scanners/git-evolution.py
|
8f4a83698faccdae147d2985f32bfb605884f6ff
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
kaxil/kibble
|
f4ab6f1039086adcb37c544c60bbbc27e8538128
|
96959acec06fed4d91d5da73fee1aa1200ffbb3c
|
refs/heads/main
| 2023-02-01T03:14:53.813091
| 2020-12-16T23:04:45
| 2020-12-16T23:04:45
| 320,881,184
| 1
| 0
|
Apache-2.0
| 2020-12-12T17:04:54
| 2020-12-12T17:04:54
| null |
UTF-8
|
Python
| false
| false
| 8,447
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Git Evolution scanner """
import calendar
import datetime
import hashlib
import os
import subprocess
import time
from kibble.configuration import conf
from kibble.scanners.utils import sloc
title = "Git Evolution Scanner"
version = "0.1.0"
def accepts(source):
""" Do we accept this source? """
if source["type"] == "git":
return True
# There are cases where we have a github repo, but don't wanna analyze the code, just issues
if source["type"] == "github" and source.get("issuesonly", False) == False:
return True
return False
def get_first_ref(gpath):
try:
return subprocess.check_output(
"cd %s && git log `git rev-list --max-parents=0 HEAD` --pretty=format:%%ct"
% gpath,
shell=True,
)
except: # pylint: disable=bare-except
print("Could not get first ref, exiting!")
return None
def acquire(kibble_bit, source):
source["steps"]["evolution"] = {
"time": time.time(),
"status": "Evolution scan started at "
+ time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
"running": True,
"good": True,
}
kibble_bit.update_source(source)
def release(kibble_bit, source, status, exception=None, good=False):
source["steps"]["evolution"] = {
"time": time.time(),
"status": status,
"running": False,
"good": good,
}
if exception:
source["steps"]["evolution"].update({"exception": exception})
kibble_bit.update_source(source)
def check_branch(gpath, date, branch):
try:
subprocess.check_call(
'cd %s && git rev-list -n 1 --before="%s" %s' % (gpath, date, branch),
shell=True,
)
return True
except: # pylint: disable=bare-except
return False
def checkout(gpath, date, branch):
# print("Ready to cloc...checking out %s " % date)
try:
ref = (
subprocess.check_output(
'cd %s && git rev-list -n 1 --before="%s" "%s"' % (gpath, date, branch),
shell=True,
stderr=subprocess.STDOUT,
)
.decode("ascii", "replace")
.strip()
)
subprocess.check_output(
"cd %s && git checkout %s -- " % (gpath, ref),
shell=True,
stderr=subprocess.STDOUT,
)
except subprocess.CalledProcessError as err:
print(err.output)
def find_branch(date, gpath):
try:
os.chdir(gpath)
subprocess.check_call(
'cd %s && git rev-list -n 1 --before="%s" master' % (gpath, date),
shell=True,
stderr=subprocess.DEVNULL,
)
return "master"
except: # pylint: disable=bare-except
os.chdir(gpath)
try:
return (
subprocess.check_output(
"cd %s && git rev-parse --abbrev-ref HEAD" % gpath,
shell=True,
stderr=subprocess.DEVNULL,
)
.decode("ascii", "replace")
.strip()
.strip("* ")
)
except: # pylint: disable=bare-except
# print("meh! no branch")
return None
def scan(kibble_bit, source):
rid = source["sourceID"]
rootpath = "%s/%s/git" % (
conf.get("scanner", "scratchdir"),
source["organisation"],
)
gpath = os.path.join(rootpath, rid)
gname = source["sourceID"]
kibble_bit.pprint("Doing evolution scan of %s" % gname)
inp = get_first_ref(gpath)
if inp:
ts = int(inp.split()[0])
ts -= ts % 86400
date = time.strftime("%Y-%b-%d 0:00", time.gmtime(ts))
# print("Starting from %s" % date)
now = time.time()
rid = source["sourceID"]
url = source["sourceURL"]
rootpath = "%s/%s/git" % (
conf.get("scanner", "scratchdir"),
source["organisation"],
)
gpath = os.path.join(rootpath, rid)
if source["steps"]["sync"]["good"] and os.path.exists(gpath):
acquire(kibble_bit, source)
branch = find_branch(date, gpath)
if not branch:
            release(
                kibble_bit,
                source,
                "Could not do evolutionary scan of code",
                "No default branch was found in this repository",
            )
return
branch_exists = check_branch(gpath, date, branch)
if not branch_exists:
kibble_bit.pprint("Not trunk either (bad repo?), skipping")
            release(
                kibble_bit,
                source,
                "Could not do evolutionary scan of code",
                "No default branch was found in this repository",
            )
return
try:
d = time.gmtime(now)
year = d[0]
quarter = d[1] - (d[1] % 3)
if quarter <= 0:
quarter += 12
year -= 1
while now > ts:
pd = (
datetime.datetime(year, quarter, 1)
.replace(tzinfo=datetime.timezone.utc)
.timetuple()
)
date = time.strftime("%Y-%b-%d 0:00", pd)
unix = calendar.timegm(pd)
# Skip the dates we've already processed
dhash = hashlib.sha224(
(source["sourceID"] + date).encode("ascii", "replace")
).hexdigest()
found = kibble_bit.exists("evolution", dhash)
if not found:
checkout(gpath, date, branch)
kibble_bit.pprint(
"Running cloc on %s (%s) at %s"
% (gname, source["sourceURL"], date)
)
languages, codecount, comment, blank, years, cost = sloc.count(
gpath
)
js = {
"time": unix,
"sourceID": source["sourceID"],
"sourceURL": source["sourceURL"],
"organisation": source["organisation"],
"loc": codecount,
"comments": comment,
"blank": blank,
"years": years,
"cost": cost,
"languages": languages,
}
kibble_bit.index("evolution", dhash, js)
quarter -= 3
if quarter <= 0:
quarter += 12
year -= 1
# decrease month by 3
now = time.mktime(datetime.date(year, quarter, 1).timetuple())
except Exception as e:
kibble_bit.pprint(e)
release(
kibble_bit,
source,
"Evolution scan failed at "
+ time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
str(e),
)
return
release(
kibble_bit,
source,
"Evolution scan completed at "
+ time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
good=True,
)
|
[
"noreply@github.com"
] |
noreply@github.com
|
e0c97f958b39a77c224ebe75cd5b1fe26876f2f1
|
0c265021768e72b91b40d77e0c7d78fcf0e70935
|
/Recursion/Module1/SumOfNnumbers.py
|
6ea101cae243b483a2db6144bc28d7b927e62a97
|
[] |
no_license
|
pawarvishal/cninjads_python_problems
|
0b49fb987cb3b8571ff0fe2e6f617174d36fc7d6
|
380fea5e9e507087dbb5743a30770cae2d9bc0ae
|
refs/heads/master
| 2020-12-12T12:33:34.759314
| 2020-02-02T06:24:53
| 2020-02-02T06:24:53
| 234,127,793
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
# Calculate sum of n numbers
def sum_n(n):
if n == 0:
return 0
small_output = sum_n(n-1)
output = small_output + n
return output
num = int(input())
print(sum_n(num))
|
[
"openstack.vishal@gmail.com"
] |
openstack.vishal@gmail.com
|