blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b31cc65cdb5b6ea03050d5d5c46b68e09160387e | 850341339b20e8f31bd9464f8029452fb104ee03 | /pymc3_hierarchical_tutorial/scripts/clean_raw_data.py | 8120eeb1d223704fc0a114c32dc19e6bb4e3ce52 | [] | no_license | ariutta/pymc3_hierarchical_tutorial_parent | 7dde1c399e83a94b50082887cac4601ed2487ba4 | 03444649beecd91c1c38ca9a58b6fa33fc1ab4e8 | refs/heads/master | 2021-08-07T19:38:08.508832 | 2019-09-12T23:09:36 | 2019-09-12T23:09:36 | 205,431,021 | 1 | 0 | null | 2020-07-28T04:44:00 | 2019-08-30T17:48:52 | Nix | UTF-8 | Python | false | false | 1,302 | py | #!/usr/bin/env python3
"""
Data munging code taken almost verbatim from
<https://github.com/fonnesbeck/multilevel_modeling>
USAGE:
python3 clean_raw_data.py [srrs2.dat] [cty.dat] > [clean_data.tsv]
"""
import sys
import pandas as pd
import numpy as np
srrs2_path = sys.argv[1]
cty_path = sys.argv[2]

# Household radon measurements: one row per measurement.
srrs2 = pd.read_csv(srrs2_path)
srrs2.columns = srrs2.columns.map(str.strip)
srrs2.rename(columns={'cntyfips': 'ctfips'}, inplace=True)

# County-level table carrying the uranium concentration ('Uppm'),
# keyed by state FIPS + county FIPS.
cty = pd.read_csv(cty_path)
data = srrs2.merge(cty[['stfips', 'ctfips', 'Uppm']],
                   on=['stfips', 'ctfips'])

# Normalize county names.  BUGFIX: the dots must be removed as *literal*
# characters; with pandas' historical default of regex=True, the pattern
# '.' matches every character and would erase the entire county name.
data.county = (data.county.str.strip()
               .str.replace(' ', '_', regex=False)
               .str.replace('.', '', regex=False))
data['state_county'] = data.state + '-' + data.county
data['is_basement'] = (data.floor == 0)
data.drop_duplicates(subset=['idnum'], inplace=True)
data.county = data.county.apply(str.strip)
data.dropna(subset=['county'], inplace=True)
# counties = data.state_county.unique()
# county_lookup = dict(zip(counties, range(len(counties))))
# data['county_idx'] = data.state_county.replace(county_lookup)
data.rename(columns={'Uppm': 'county_uranium', 'activity': 'radon'},
            inplace=True)
# Emit the cleaned table as TSV on stdout (see USAGE in the module docstring).
(data[['state', 'state_county',
       # 'county_idx',
       'county_uranium', 'is_basement', 'radon']]
 .to_csv(sys.stdout, sep='\t', index=False))
| [
"git@andersriutta.com"
] | git@andersriutta.com |
f7e5bfb4b78221a0682bbdf5be4a896892b59ecf | a27fc866f01dad5aae92e0fbffcbc4ebbf7c8d3a | /cancer/urls.py | c77f9d6cb558a03c6b6859f02438d2a559cfadff | [] | no_license | skeerthiswaoop4301/Cancer_prediction | 6f90a5f9366154d51ccf62597103059bec9f054f | 067db80bf93515f83dab33cb0af67bab9d2d6ddf | refs/heads/master | 2023-06-29T05:01:56.671336 | 2021-08-03T14:34:04 | 2021-08-03T14:34:04 | 392,345,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | from django.contrib import admin
from django.urls import path,include
# URL routing for the cancer-prediction project.
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # Everything else is delegated to the 'detect' app's URLconf.
    path('',include('detect.urls'))
]
| [
"vigneshdugyala980@gmail.com"
] | vigneshdugyala980@gmail.com |
30e695755c1dd9210013905b1f2924907d50dd99 | cf14b6ee602bff94d3fc2d7e712b06458540eed7 | /gs64/gs64/urls.py | 534e0ccf1552e3ee0aed99ffb66b12f599c32ff3 | [] | no_license | ManishShah120/Learning-Django | 8b0d7bfe7e7c13dcb71bb3d0dcdf3ebe7c36db27 | 8fe70723d18884e103359c745fb0de5498b8d594 | refs/heads/master | 2023-03-29T09:49:47.694123 | 2021-03-28T16:04:34 | 2021-03-28T16:04:34 | 328,925,596 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | from django.contrib import admin
from django.urls import path
from enroll import views
# URL routing: admin site plus the enroll app's auth/session views.
urlpatterns = [
    path('admin/', admin.site.urls),
    # Account registration and session management.
    path('signup/', views.sign_up, name='signup'),
    path('login/', views.user_login, name='login'),
    path('profile/', views.user_profile, name='profile'),
    path('logout/', views.user_logout, name='logout'),
    # Two password-change endpoints; presumably one checks the old password
    # and the other does not — TODO confirm against the view implementations.
    path('changepass/', views.user_change_pass, name='changepass'),
    path('changepass1/', views.user_change_pass1, name='changepass1'),
]
| [
"mkshah141@gmail.com"
] | mkshah141@gmail.com |
b201783f01576c2ee718c08177725bdf596ad82e | 31cedd498711cec95bc045ed4f441899659f363c | /app/core/tests/test_admin.py | 18aa66715b3f29076274373923e28b773240098e | [] | no_license | jsewai/recipi-api-app | d02207e38f99fd3ff15f242f42193c0cb9a41d75 | 5fb62535d80cdedbefd9d648a1c649ce2f360880 | refs/heads/master | 2022-04-26T13:18:22.431729 | 2020-01-30T22:04:27 | 2020-01-30T22:04:27 | 229,966,348 | 0 | 0 | null | 2022-04-22T22:54:48 | 2019-12-24T15:43:45 | Python | UTF-8 | Python | false | false | 1,334 | py | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    """Exercise the Django admin pages for the custom user model."""

    def setUp(self):
        """Log an admin client in and create one regular user fixture."""
        self.client = Client()
        # Superuser must exist before force_login can authenticate it.
        self.admin_user = get_user_model().objects.create_superuser(
            email='admin@test.com',
            password='test123'
        )
        self.client.force_login(self.admin_user)
        # A plain user that the admin pages will display/edit.
        self.user = get_user_model().objects.create_user(
            email='test@test.com',
            password='testqqq',
            name='Test user full name'
        )

    def test_users_listed(self):
        """Test that users are listed on user page"""
        # Resolves to /admin/core/user/
        page_url = reverse('admin:core_user_changelist')
        response = self.client.get(page_url)
        self.assertContains(response, self.user.name)
        self.assertContains(response, self.user.email)

    def test_user_change_page(self):
        """Test that the user edit page works"""
        page_url = reverse('admin:core_user_change', args=[self.user.id])
        response = self.client.get(page_url)
        self.assertEqual(response.status_code, 200)

    def test_create_user_page(self):
        """Test that the create user page works"""
        page_url = reverse('admin:core_user_add')
        response = self.client.get(page_url)
        self.assertEqual(response.status_code, 200)
| [
"junyasewai@gmail.com"
] | junyasewai@gmail.com |
75de8f81a41930929ed9d5cba1698f1a0e450792 | cbed6cc260259616fa786d55f1587ee7aa463a49 | /c_to_f.py | af2c3b8358e53cc8d11e88529e26ade0b448fd2a | [] | no_license | IamMasonL/celsius-to-fahrenheit | 449bd86e8ac3fd544e3853710c6dfc578ca642f2 | 310f6e323b704f20922bffde8397baff2a836d1c | refs/heads/main | 2023-06-07T06:26:46.375519 | 2021-06-30T04:10:11 | 2021-06-30T04:10:11 | 381,569,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | celsius = input("請輸入攝氏溫度: ")
fahrenheit = (float(celsius) * 9 / 5 + 32)
print("華氏溫度為: ", fahrenheit)
| [
"mason19950217@gmail.com"
] | mason19950217@gmail.com |
71c684ff83b63d36e51b76bc98514f36cd96d176 | 064c190e9d0dcf0de8c3d70defe94b7718566bf0 | /flaskr/blog.py | d7a1c37b93ab568748b3ef9082246feaf05a7d80 | [] | no_license | caiorrs/flask-tutorial | 7fac1a8c91957d7511d6974bf6226f7b20411ac6 | 7790624ea5f26c6a1ecf46e229c101bdda41fbee | refs/heads/master | 2020-05-24T12:11:27.473458 | 2019-05-17T18:39:28 | 2019-05-17T18:39:28 | 187,262,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,555 | py | from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for
)
from werkzeug.exceptions import abort
from flaskr.auth import login_required
from flaskr.db import get_db
bp = Blueprint('blog', __name__)
@bp.route('/')
def index():
    """Render the front page: every post with its author, newest first."""
    rows = get_db().execute(
        'SELECT p.id, title, body, created, author_id, username'
        ' FROM post p JOIN user u ON p.author_id = u.id'
        ' ORDER BY created DESC'
    ).fetchall()
    return render_template('blog/index.html', posts=rows)
@bp.route('/create', methods=('GET', 'POST'))
@login_required
def create():
    """Create a new post authored by the logged-in user.

    GET renders the empty form; POST validates the title, inserts the
    post and redirects home, or flashes the error and re-renders.
    """
    if request.method == 'POST':
        title = request.form['title']
        body = request.form['body']
        error = None if title else 'Title is required'
        if error is None:
            db = get_db()
            db.execute(
                'INSERT INTO post (title, body, author_id)'
                ' VALUES (?, ?, ?)',
                (title, body, g.user['id'])
            )
            db.commit()
            return redirect(url_for('blog.index'))
        flash(error)
    return render_template('blog/create.html')
def get_post(id, check_author=True):
    """Fetch a post (joined with its author) by id.

    Aborts with 404 when the post does not exist and, unless
    check_author is False, with 403 when the current user is not
    the post's author.
    """
    row = get_db().execute(
        'SELECT p.id, title, body, created, author_id, username'
        ' FROM post p JOIN user u ON p.author_id = u.id'
        ' WHERE p.id = ?',
        (id,)
    ).fetchone()

    if row is None:
        abort(404, "Post id {0} doesn't exist.".format(id))

    if check_author and row['author_id'] != g.user['id']:
        abort(403)

    return row
@bp.route('/<int:id>/update', methods=('GET', 'POST'))
@login_required
def update(id):
    """Edit an existing post owned by the current user.

    GET shows the pre-filled form; POST validates the title, saves the
    changes and redirects home, or flashes the error and re-renders.
    """
    post = get_post(id)  # enforces existence and ownership

    if request.method == 'POST':
        title = request.form['title']
        body = request.form['body']
        error = None if title else 'Title is required'
        if error is None:
            db = get_db()
            db.execute(
                'UPDATE post SET title = ?, body = ?'
                ' WHERE id = ?',
                (title, body, id)
            )
            db.commit()
            return redirect(url_for('blog.index'))
        flash(error)
    return render_template('blog/update.html', post=post)
@bp.route('/<int:id>/delete', methods=('POST',))
@login_required
def delete(id):
    """Delete a post and return to the index page."""
    get_post(id)  # raises 404/403 if the post is missing or not ours
    database = get_db()
    database.execute('DELETE FROM post WHERE id = ?', (id,))
    database.commit()
    return redirect(url_for('blog.index'))
| [
"caiorrs@gmail.com"
] | caiorrs@gmail.com |
d941fdc1ef243b8f367d6f6f07b9aea767ceb95e | f789867a1503c5f2917227b77fd8f8d8953e18d6 | /.venv/bin/jupyter | 2db52def8eba69ed9a1395b06a7c86386054194f | [] | no_license | taras0024/dw.project | 9902f75d33d856721c42fcf83cd9cd8e6d470522 | 2fcd5b8f6fef5c0194930d9962d66d59ef8e09e0 | refs/heads/master | 2023-03-23T14:38:46.182456 | 2021-03-12T10:24:57 | 2021-03-12T10:24:57 | 344,570,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/home/taras0024/Datawiz.io/les03/blog/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_core.command import main
if __name__ == '__main__':
    # Strip the setuptools wrapper suffix ('-script.pyw' or '.exe') from
    # argv[0] so jupyter's command dispatch sees a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"shyichuk.taras@gmail.com"
] | shyichuk.taras@gmail.com | |
29fe44c9d25dd16a579cda1752070e50666bac8b | e92bf0ccadde0b968e387b9edfe58383e99506eb | /0x0B-python-input_output/9-add_item.py | 790d43935007bb4366b4a361e6e8b7e67620912e | [] | no_license | Achilik/holbertonschool-higher_level_programming-2 | 047f31c24405457e87ce3bc328f06e713b688ab6 | 63c644b0e2317ef4387f3596dc3dd542b3adcb84 | refs/heads/master | 2023-03-18T06:37:57.894474 | 2020-11-06T17:17:00 | 2020-11-06T17:17:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | #!/usr/bin/python3
""" adds all arguments to a list, and then save them to a file """
import sys
import os
save_to_json_file = __import__('7-save_to_json_file').save_to_json_file
load_from_json_file = __import__('8-load_from_json_file').load_from_json_file
filename = 'add_item.json'
if os.path.isfile(filename):
argument_list = load_from_json_file(filename)
else:
argument_list = []
for argument in range(1, len(sys.argv)):
argument_list.append(sys.argv[argument])
save_to_json_file(argument_list, filename)
| [
"exploitpnk@gmail.com"
] | exploitpnk@gmail.com |
429fa0629162224f974ab4f8f324ab6c02c5e645 | c95dcf6b385c6619e1e7213b040db211b767225f | /cnn_experiment/cnn_multiclass.py | dec63c6fe10ab47bbdfff2d2f0ab8965a17a8649 | [] | no_license | agusrdlv/deeplearning | 04a8df673b8ac16341537c9d146d71636661df89 | 9ca16b186a4c520c451d6a748aab3602ac07187d | refs/heads/main | 2023-03-24T04:04:35.585655 | 2021-03-22T21:43:36 | 2021-03-22T21:43:36 | 349,267,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,297 | py | import argparse
import gzip
import json
import logging
import mlflow
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import balanced_accuracy_score
from torch.utils.data import DataLoader
from tqdm import tqdm, trange
from .dataset import MeliChallengeDataset
from .utils import PadSequences
logging.basicConfig(
format="%(asctime)s: %(levelname)s - %(message)s",
level=logging.INFO
)
class CNNClassifier(nn.Module):
    """Text CNN for category classification.

    Pipeline: pretrained word embeddings -> several parallel 1-D
    convolutions (one per filter length) -> global max pooling ->
    single linear layer over the concatenated pooled features.
    """
    def __init__(self,
                 pretrained_embeddings_path,
                 token_to_index,
                 n_labels,
                 dropout=0.3,
                 vector_size=300,
                 freeze_embedings=True,
                 FILTERS_LENGTH = [2, 3, 4],
                 FILTERS_COUNT = 100,
                 cats=632):
        # NOTE(review): n_labels is accepted but never used — the output
        # layer is sized by `cats` instead; confirm which one is intended.
        # NOTE(review): FILTERS_LENGTH uses a mutable default argument;
        # harmless here (never mutated) but fragile.
        super().__init__()
        # token -> embedding-matrix row index, stored as gzipped JSON.
        with gzip.open(token_to_index, "rt") as fh:
            token_to_index = json.load(fh)
        # Random init for out-of-vocabulary tokens; row 0 is the padding
        # vector and is zeroed out.
        embeddings_matrix = torch.randn(len(token_to_index), vector_size)
        embeddings_matrix[0] = torch.zeros(vector_size)
        # Overwrite rows for every vocabulary word found in the pretrained
        # embedding file (word2vec text format: header line, then
        # "word v1 v2 ... vn" per line).
        with gzip.open(pretrained_embeddings_path, "rt") as fh:
            next(fh)
            for line in fh:
                word, vector = line.strip().split(None, 1)
                if word in token_to_index:
                    embeddings_matrix[token_to_index[word]] =\
                        torch.FloatTensor([float(n) for n in vector.split()])
        self.embeddings = nn.Embedding.from_pretrained(embeddings_matrix,
                                                       freeze=freeze_embedings,
                                                       padding_idx=0)
        # One Conv1d per filter length, each producing FILTERS_COUNT channels.
        self.convs = []
        for filter_lenght in FILTERS_LENGTH:
            self.convs.append(
                nn.Conv1d(vector_size, FILTERS_COUNT, filter_lenght)
            )
        # NOTE(review): dropout_ is created but never applied in forward().
        self.dropout_ = nn.Dropout(dropout)
        self.convs = nn.ModuleList(self.convs)
        self.fc = nn.Linear(FILTERS_COUNT * len(FILTERS_LENGTH), cats)
        self.vector_size = vector_size

    @staticmethod
    def conv_global_max_pool(x, conv):
        # Convolve, then take the max over the sequence dimension
        # (global max pooling), then ReLU.
        return F.relu(conv(x).transpose(1, 2).max(1)[0])

    def forward(self, x):
        x = self.embeddings(x).transpose(1, 2)  # Conv1d takes (batch, channel, seq_len)
        x = [self.conv_global_max_pool(x, conv) for conv in self.convs]
        x = torch.cat(x, dim=1)
        # NOTE(review): applying softmax here while training with
        # nn.CrossEntropyLoss (which expects raw logits) effectively applies
        # softmax twice — confirm; also F.softmax without dim= is deprecated.
        x = F.softmax(self.fc(x))
        return x
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--train-data",
help="Path to the the training dataset",
required=True)
parser.add_argument("--token-to-index",
help="Path to the the json file that maps tokens to indices",
required=True)
parser.add_argument("--pretrained-embeddings",
help="Path to the pretrained embeddings file.",
required=True)
parser.add_argument("--language",
help="Language working with",
required=True)
parser.add_argument("--test-data",
help="If given, use the test data to perform evaluation.")
parser.add_argument("--validation-data",
help="If given, use the validation data to perform evaluation.")
parser.add_argument("--embeddings-size",
default=300,
help="Size of the vectors.",
type=int)
parser.add_argument("--dropout",
help="Dropout to apply to each hidden layer",
default=0.3,
type=float)
parser.add_argument("--epochs",
help="Number of epochs",
default=5,
type=int)
parser.add_argument("--FILTERS_LENGTH",
help="filters lenght",
default=[2, 3, 4],
type=int)
args = parser.parse_args()
FILTERS_LENGTH = [2, 3, 4]
pad_sequences = PadSequences(
min_length=max(FILTERS_LENGTH)
)
logging.info("Building training dataset")
train_dataset = MeliChallengeDataset(
dataset_path=args.train_data,
random_buffer_size=2048 # This can be a hypterparameter
)
train_loader = DataLoader(
train_dataset,
batch_size=128, # This can be a hyperparameter
shuffle=False,
collate_fn=pad_sequences,
drop_last=False
)
if args.validation_data:
logging.info("Building validation dataset")
validation_dataset = MeliChallengeDataset(
dataset_path=args.validation_data,
random_buffer_size=1
)
validation_loader = DataLoader(
validation_dataset,
batch_size=128,
shuffle=False,
collate_fn=pad_sequences,
drop_last=False
)
else:
validation_dataset = None
validation_loader = None
if args.test_data:
logging.info("Building test dataset")
test_dataset = MeliChallengeDataset(
dataset_path=args.test_data,
random_buffer_size=1
)
test_loader = DataLoader(
test_dataset,
batch_size=128,
shuffle=False,
collate_fn=pad_sequences,
drop_last=False
)
else:
test_dataset = None
test_loader = None
mlflow.set_experiment(f"diplodatos.{args.language}")
with mlflow.start_run():
logging.info("Starting experiment")
# Log all relevent hyperparameters
mlflow.log_params({
"model_type": "CNN",
"embeddings": args.pretrained_embeddings,
"dropout": args.dropout,
"embeddings_size": args.embeddings_size,
"epochs": args.epochs
})
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
logging.info("Building classifier")
model = CNNClassifier(
pretrained_embeddings_path=args.pretrained_embeddings,
token_to_index=args.token_to_index,
n_labels=train_dataset.n_labels,
dropout=args.dropout,
vector_size=args.embeddings_size,
freeze_embedings=True,
FILTERS_LENGTH = [2, 3, 4],
FILTERS_COUNT = 100, # This can be a hyperparameter
)
model = model.to(device)
loss = nn.CrossEntropyLoss()
optimizer = optim.Adam(
model.parameters(),
lr=1e-3, # This can be a hyperparameter
weight_decay=1e-5 # This can be a hyperparameter
)
logging.info("Training classifier")
for epoch in trange(args.epochs):
model.train()
running_loss = []
for idx, batch in enumerate(tqdm(train_loader)):
optimizer.zero_grad()
data = batch["data"].to(device)
target = batch["target"].to(device)
output = model(data)
loss_value = loss(output, target)
loss_value.backward()
optimizer.step()
running_loss.append(loss_value.item())
mlflow.log_metric("train_loss", sum(running_loss) / len(running_loss), epoch)
if validation_dataset:
logging.info("Evaluating model on validation")
model.eval()
running_loss = []
targets = []
predictions = []
with torch.no_grad():
for batch in tqdm(validation_loader):
data = batch["data"].to(device)
target = batch["target"].to(device)
output = model(data)
running_loss.append(
loss(output, target).item()
)
targets.extend(batch["target"].numpy())
predictions.extend(output.argmax(axis=1).detach().cpu().numpy())
mlflow.log_metric("validation_loss", sum(running_loss) / len(running_loss), epoch)
mlflow.log_metric("validation_bacc", balanced_accuracy_score(targets, predictions), epoch)
if test_dataset:
logging.info("Evaluating model on test")
model.eval()
running_loss = []
targets = []
predictions = []
with torch.no_grad():
for batch in tqdm(test_loader):
data = batch["data"].to(device)
target = batch["target"].to(device)
output = model(data)
running_loss.append(
loss(output, target).item()
)
targets.extend(batch["target"].numpy())
predictions.extend(output.argmax(axis=1).detach().cpu().numpy())
mlflow.log_metric("test_loss", sum(running_loss) / len(running_loss), epoch)
mlflow.log_metric("test_bacc", balanced_accuracy_score(targets, predictions), epoch)
| [
"agustin@invera.com.ar"
] | agustin@invera.com.ar |
025c21a68f6ef201669824d5acdb336f1c8df155 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_fossil.py | 01002db1c7d13f979004d6f2dcdb901e5f504ea7 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py |
#calss header
class _FOSSIL():
def __init__(self,):
self.name = "FOSSIL"
self.definitions = [u'the shape of a bone, a shell, or a plant or animal that has been preserved in rock for a very long period', u'an old person, especially one who will not accept new ideas']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
c49ebc6508d44273a7fac48410a2ea89d2782c27 | ef942bf633d406e3f467287651fc101720c0adab | /src/shader-playground/glslhelpers/__init__.py | a44051fa5e053287b5f6be3d8e43bc568b2d2625 | [
"MIT"
] | permissive | wroldwiedbwe/pywonderland | c64055b19006f39baf1b4e4230ef90e34b16df68 | f9eceba86cc79f476fe99d3dcbe40906a3273713 | refs/heads/master | 2021-07-31T20:58:28.260743 | 2021-07-15T01:41:56 | 2021-07-15T01:41:56 | 177,250,782 | 0 | 0 | MIT | 2019-03-23T06:02:12 | 2019-03-23T06:02:11 | null | UTF-8 | Python | false | false | 135 | py | from .shader import Shader
from .framebuffer import FrameBuffer
from .texture import create_texture_from_ndarray, create_image_texture
| [
"mathzhaoliang@gmail.com"
] | mathzhaoliang@gmail.com |
c385bafd7dd66cb50043465256666c414a8b9171 | 13ef33cb9067419fae743be1edb46471374c3a64 | /finance/migrations/0002_auto_20161220_1952.py | d1f50a74c4d02eb11d4e8572caa94694a40ed2ba | [] | no_license | andrewidya/littleerp | 8c33ad0ee4dac2a85bea4e540b748a47d61f3886 | 0cf8fb1be8ac3c27304807ed7aac7eb0032c2cb6 | refs/heads/master | 2021-01-24T00:42:26.962248 | 2019-07-22T01:53:58 | 2019-07-22T01:53:58 | 68,295,804 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('finance', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='invoice',
name='fee',
field=models.DecimalField(decimal_places=3, max_digits=12, blank=True, help_text='Fee value must be decimal, ex: input 12\\% / as 0.12', null=True, verbose_name='Management Fee'),
),
migrations.AddField(
model_name='invoice',
name='pph21',
field=models.DecimalField(decimal_places=2, max_digits=12, blank=True, help_text='PPh21 value must be decimal, ex: input 12\\% / as 0.12', null=True, verbose_name='PPh21'),
),
migrations.AddField(
model_name='invoice',
name='ppn',
field=models.DecimalField(decimal_places=2, max_digits=12, blank=True, help_text='PPN value must be decimal, ex: input 12\\% / as 0.12', null=True, verbose_name='PPN'),
),
]
| [
"andrywidyaputra@gmail.com"
] | andrywidyaputra@gmail.com |
d59efa46b4a4985fee91c373b24141bb3231df57 | a0d9a807f997528d569633086d3ca4bc50bd5574 | /recipes/xorg-proto/all/conanfile.py | ad8e696b97953bdc1beb8ad00c26f01b8d845b66 | [
"MIT"
] | permissive | syoliver/conan-center-index | 017d3ed58c2933139c35c7854bcb2394d61fc876 | ccf1568efd045ca9af92f0e1bb64680b82985efa | refs/heads/master | 2023-03-15T20:18:14.246479 | 2022-09-28T19:24:24 | 2022-09-28T19:24:24 | 249,145,375 | 0 | 0 | MIT | 2022-07-06T11:31:15 | 2020-03-22T08:51:43 | null | UTF-8 | Python | false | false | 4,654 | py | from conan import ConanFile
from conan.tools.files import rmdir, mkdir, save, load
from conans import AutoToolsBuildEnvironment, tools
import contextlib
import glob
import os
import re
import yaml
required_conan_version = ">=1.33.0"
class XorgProtoConan(ConanFile):
    """Conan recipe for the xorgproto headers (autotools-based build)."""
    name = "xorg-proto"
    description = "This package provides the headers and specification documents defining " \
                  "the core protocol and (many) extensions for the X Window System."
    topics = ("conan", "xproto", "header", "specification")
    license = "X11"
    homepage = "https://gitlab.freedesktop.org/xorg/proto/xorgproto"
    url = "https://github.com/conan-io/conan-center-index"
    settings = "os", "arch", "compiler", "build_type"
    generators = "PkgConfigDeps"

    # Cached AutoToolsBuildEnvironment instance (built lazily).
    _autotools = None

    @property
    def _source_subfolder(self):
        # Where source() unpacks the upstream tarball.
        return "source_subfolder"

    @property
    def _settings_build(self):
        # Fall back to the host settings when no build profile is used.
        return getattr(self, "settings_build", self.settings)

    @property
    def _user_info_build(self):
        # user_info of build requirements; falls back for one-profile mode.
        return getattr(self, "user_info_build", self.deps_user_info)

    def build_requirements(self):
        """Tools needed only at build time (autotools stack)."""
        self.build_requires("automake/1.16.3")
        self.build_requires("xorg-macros/1.19.3")
        self.build_requires("pkgconf/1.7.4")
        # On Windows the configure script needs a POSIX shell.
        if self._settings_build.os == "Windows" and not tools.get_env("CONAN_BASH_PATH"):
            self.build_requires("msys2/cci.latest")

    def requirements(self):
        # In two-profile mode xorg-macros is also a host requirement.
        if hasattr(self, "settings_build"):
            self.requires("xorg-macros/1.19.3")

    def package_id(self):
        """Header-only in practice: the package id only keeps the os."""
        # self.info.header_only() would be fine too, but keep the os to add c3i test coverage for Windows.
        del self.info.settings.arch
        del self.info.settings.build_type
        del self.info.settings.compiler

    def source(self):
        """Download and unpack the upstream sources."""
        tools.get(**self.conan_data["sources"][self.version],
                  destination=self._source_subfolder, strip_root=True)

    @contextlib.contextmanager
    def _build_context(self):
        """Set up the MSVC-as-compiler environment when needed."""
        if self.settings.compiler == "Visual Studio":
            with tools.vcvars(self.settings):
                # Use automake's `compile` wrapper so cl.exe behaves like cc.
                env = {
                    "CC": "{} cl -nologo".format(self._user_info_build["automake"].compile).replace("\\", "/"),
                }
                with tools.environment_append(env):
                    yield
        else:
            yield

    def _configure_autotools(self):
        """Create (once) and return the configured autotools helper."""
        if self._autotools:
            return self._autotools
        self._autotools = AutoToolsBuildEnvironment(self, win_bash=self._settings_build.os == "Windows")
        self._autotools.libs = []
        self._autotools.configure(configure_dir=self._source_subfolder)
        return self._autotools

    def build(self):
        """Apply recipe patches, then run configure + make."""
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            tools.patch(**patch)
        with self._build_context():
            autotools = self._configure_autotools()
            autotools.make()

    @property
    def _pc_data_path(self):
        # YAML file recording name/version of each installed .pc file,
        # consumed later by package_info().
        return os.path.join(self.package_folder, "res", "pc_data.yml")

    def package(self):
        """Install headers, harvest pkg-config metadata, drop share/."""
        self.copy("COPYING-*", src=self._source_subfolder, dst="licenses")
        with self._build_context():
            autotools = self._configure_autotools()
            autotools.install()

        # Parse every installed .pc file for its Name/Version fields and
        # persist them, since share/ is removed below.
        pc_data = {}
        for fn in glob.glob(os.path.join(self.package_folder, "share", "pkgconfig", "*.pc")):
            pc_text = load(self, fn)
            filename = os.path.basename(fn)[:-3]
            name = next(re.finditer("^Name: ([^\n$]+)[$\n]", pc_text, flags=re.MULTILINE)).group(1)
            version = next(re.finditer("^Version: ([^\n$]+)[$\n]", pc_text, flags=re.MULTILINE)).group(1)
            pc_data[filename] = {
                "version": version,
                "name": name,
            }
        mkdir(self, os.path.dirname(self._pc_data_path))
        save(self, self._pc_data_path, yaml.dump(pc_data))

        rmdir(self, os.path.join(self.package_folder, "share"))

    def package_info(self):
        """Expose one component per proto .pc file recorded in package()."""
        for filename, name_version in yaml.safe_load(open(self._pc_data_path)).items():
            self.cpp_info.components[filename].filenames["pkg_config"] = filename
            self.cpp_info.components[filename].libdirs = []
            if hasattr(self, "settings_build"):
                self.cpp_info.components[filename].requires = ["xorg-macros::xorg-macros"]
            self.cpp_info.components[filename].version = name_version["version"]
            self.cpp_info.components[filename].set_property("pkg_config_name", filename)

        # xproto consumers include <X11/...> headers directly.
        self.cpp_info.components["xproto"].includedirs.append(os.path.join("include", "X11"))
| [
"noreply@github.com"
] | syoliver.noreply@github.com |
99385d068d559cd7f2af650d1277a77ba93f6938 | a82c0a872ad80992305556e9f11b897779c601f3 | /other/mnist_cgan.py | e5191bc0ce274a0a78fc85aa60f98d5a70df8249 | [] | no_license | fishfishin/procrustrean | 8a01164614bb8d40ce016648feb666ac3f69a415 | ab484131b446de4c65c892e71b373599b6311abd | refs/heads/master | 2021-07-23T23:45:25.441333 | 2020-08-28T18:50:03 | 2020-08-28T18:50:03 | 205,784,419 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,754 | py | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import imsave
import matplotlib.gridspec as gridspec
import os
from tensorflow.keras.optimizers import Adam
from collections import OrderedDict
def save(saver, sess, logdir, step):
    """Checkpoint the session into logdir as 'model-<step>'."""
    saver.save(sess, os.path.join(logdir, 'model'), global_step=step)
def xavier_init(size):
    """Xavier/Glorot-style normal initializer: stddev = sqrt(2 / fan_in)."""
    fan_in = size[0]
    return tf.random.normal(shape=size, stddev=1. / tf.sqrt(fan_in / 2.))
#mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
mb_size = 128
Z_dim = 100
unrolling =0
##[kernel h , kernel w, in ,out]
D_W1=tf.Variable(tf.random.normal(shape=[784+10,128],stddev=0.02 ))
D_b1=tf.Variable(tf.zeros(shape=[128]))
D_W2=tf.Variable(xavier_init([128,1]))
D_b2=tf.Variable(tf.zeros(shape=[1]))
para_D = [D_W1, D_W2, D_b1, D_b2]
#########################################################
G_W1 = tf.Variable(xavier_init([Z_dim+10, 128]))
G_b1 = tf.Variable(tf.zeros(shape=[128]))
G_W2 = tf.Variable(xavier_init([128, 784]))
G_b2 = tf.Variable(tf.zeros(shape=[784]))
para_G = [G_W1, G_W2, G_b1, G_b2]
X = tf.compat.v1.placeholder(tf.float32, shape=[None, 28*28])
y = tf.compat.v1.placeholder(tf.float32, shape=[None, 10])
Z = tf.compat.v1.placeholder(tf.float32, shape=[None, Z_dim])
def sample_Z(m, n):
    """Draw an (m, n) matrix of latent noise, uniform on [-1, 1)."""
    return np.random.uniform(low=-1.0, high=1.0, size=(m, n))
def generator(z, lab):
    """Map latent noise z, conditioned on the one-hot label lab, to a
    784-dim sigmoid image using the global G_* parameters."""
    inputs = tf.concat([z, lab], axis=1)
    hidden = tf.nn.relu(tf.matmul(inputs, G_W1) + G_b1)
    logits = tf.matmul(hidden, G_W2) + G_b2
    return tf.nn.sigmoid(logits)
def discriminator(x, lab, reuse=tf.AUTO_REUSE):
    """Score how real x looks, conditioned on label lab.

    Returns (probability, logit). Variables live under the
    'discriminator' scope so the graph can be built twice (real and
    fake branches) with shared weights.
    """
    with tf.variable_scope("discriminator", reuse=reuse):
        joined = tf.concat([x, lab], axis=1)
        pre_act = tf.layers.batch_normalization(tf.matmul(joined, D_W1) + D_b1)
        hidden = tf.nn.leaky_relu(pre_act)
        logit = tf.matmul(hidden, D_W2) + D_b2
        return tf.nn.sigmoid(logit), logit
def plot(samples):
    """Tile up to 16 flattened 28x28 samples into a 4x4 grayscale figure."""
    figure = plt.figure(figsize=(4, 4))
    grid = gridspec.GridSpec(4, 4)
    grid.update(wspace=0.05, hspace=0.05)
    for idx, image in enumerate(samples):
        axis = plt.subplot(grid[idx])
        plt.axis('off')
        axis.set_xticklabels([])
        axis.set_yticklabels([])
        axis.set_aspect('equal')
        plt.imshow(image.reshape([28,28]), cmap='Greys_r')
    return figure
G_sample = generator(Z,y)
#G_sample = tf.reshape(G_sample, shape=[-1,29,29,1])
D_real, D_logit_real = discriminator(X,y)
D_fake, D_logit_fake = discriminator(G_sample,y,reuse=True)
D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real, labels=tf.ones_like(D_logit_real)))
D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.zeros_like(D_logit_fake)))
D_loss = D_loss_real + D_loss_fake
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake)))
t = tf.compat.v1.summary.scalar("loss", G_loss+D_loss)
d_loss_sum = tf.compat.v1.summary.scalar("d_loss", D_loss)
g_loss_sum = tf.compat.v1.summary.scalar("g_loss", G_loss)
summary_writer = tf.compat.v1.summary.FileWriter('snapshots/', graph=tf.compat.v1.get_default_graph())
cgan_d = tf.compat.v1.train.AdamOptimizer(0.001, beta1=0.5)
cgan_g =tf.compat.v1.train.AdamOptimizer(0.001, beta1=0.5)
G_solver = cgan_g.minimize(loss =G_loss , var_list=para_G)
D_solver = cgan_g.minimize(loss =D_loss , var_list=para_D)
mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
sess = tf.compat.v1.Session()
initial= tf.compat.v1.global_variables_initializer()
sess.run(initial)
if not os.path.exists('out/'):
os.makedirs('out/')
if not os.path.exists('snapshots/'):
os.makedirs('snapshots/')
saver = tf.compat.v1.train.Saver(var_list=tf.compat.v1.global_variables(), max_to_keep=50)
ii = 0
n = 16
lab_sam = mnist.train.labels[20:20+n]
#lab_sam=lab_sam*0.99
for it in range(1):
with tf.device('/cpu:0'):
for i in range(int(100000)):
#X_mb, x_label = databatchset(mb_size)
X_mb, x_label = mnist.train.next_batch(mb_size)
_, D_loss_curr ,d_loss_sum_value= sess.run([D_solver, D_loss,d_loss_sum], feed_dict={X: X_mb, y:x_label,Z:sample_Z(mb_size, Z_dim)})
_, G_loss_curr,g_loss_sum_value = sess.run([G_solver, G_loss,g_loss_sum], feed_dict={Z: sample_Z(mb_size, Z_dim),y:x_label})
if i % 500 == 0:
summary_writer.add_summary(d_loss_sum_value, i)
summary_writer.add_summary(g_loss_sum_value, i)
samples = sess.run(G_sample, feed_dict={ Z: sample_Z(n, Z_dim),y:lab_sam})
fig = plot(samples)
plt.savefig('out/{}.png'.format(str(ii).zfill(3)), bbox_inches='tight')
ii += 1
print('Iter: {}'.format(i))
print('D loss: {:.4}'. format(D_loss_curr))
print('G_loss: {:.4}'.format(G_loss_curr))
print()
save(saver, sess, 'snapshots/', i)
| [
"noreply@github.com"
] | fishfishin.noreply@github.com |
56b2d515029b523b0b4b05a48db3be2b80e206ef | 43fe51c3261d0604dae053dbff7cad37a454b493 | /esfspider/middlewares.py | 8663bad86b0c430852b99dab0fcdb99a16311ea7 | [] | no_license | Stanton-chenlang/lianjia_ershoufang | 0408c01ea89d25d963efb75d77358075fc8f56fe | d927cf9b40fb35a397f46182c2c05eaf5b303b8f | refs/heads/main | 2023-03-07T19:41:47.237629 | 2021-02-23T08:02:35 | 2021-02-23T08:02:35 | 341,471,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,985 | py | # Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import random
import scrapy
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class EsfspiderSpiderMiddleware:
    """Spider middleware for the esfspider project.

    Every hook here is optional; leaving one out makes Scrapy behave as if
    this middleware does not modify the objects passing through it.  The
    current implementation is pass-through: nothing is altered.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware and subscribe to the
        # spider_opened signal so we can log when a spider starts.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        """Called for each response entering the spider; None lets it pass."""
        return None

    def process_spider_output(self, response, result, spider):
        """Forward every request/item produced by the spider unchanged."""
        yield from result

    def process_spider_exception(self, response, exception, spider):
        """Do nothing: let Scrapy's default exception handling apply."""
        pass

    def process_start_requests(self, start_requests, spider):
        """Forward the spider's start requests untouched (requests only)."""
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class EsfspiderDownloaderMiddleware:
    """Downloader middleware for the esfspider project.

    All hooks are optional; omitting one makes Scrapy act as if the
    middleware does not modify the objects involved.  This implementation
    is a no-op pass-through.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: instantiate and hook up spider_opened logging.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        """Returning None tells Scrapy to keep processing this request
        (other valid returns: a Response, a Request, or raise IgnoreRequest)."""
        return None

    def process_response(self, request, response, spider):
        """Hand the downloaded response back unchanged."""
        return response

    def process_exception(self, request, exception, spider):
        """Returning None continues normal exception processing."""
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class ProxyMiddleware:
    """Downloader middleware that routes each request through a random proxy."""

    def __init__(self, ip):
        # Proxy pool, normally taken from the PROXIES setting.
        self.ip = ip

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware from the crawler's PROXIES setting."""
        return cls(ip=crawler.settings.get('PROXIES'))

    def process_request(self, request, spider):
        # Pick one proxy at random and attach it to the outgoing request.
        chosen = random.choice(self.ip)
        request.meta['proxy'] = chosen
| [
"noreply@github.com"
] | Stanton-chenlang.noreply@github.com |
612a972e66080c0ccd9fd54b8e43498fa2610dca | ab3cc28aa2d5ba8d69fe4a77b91882caa47dd498 | /pat_pyramid.py | 11e1ddb750add364dbc31eecb1613dec6bbe778c | [] | no_license | hariketsheth/PyGrams | 90151e327dd98b9e672acd7c0def7092c1b0b1a8 | 0da21d25ed2dc59cddf0f43db9137fde542e3218 | refs/heads/master | 2022-03-25T01:21:30.518555 | 2019-04-05T12:38:05 | 2019-04-05T12:38:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | def main():
    # Height of the pyramid, read from stdin (Python 2: raw_input).
    num = int(raw_input())
    for i in range(num):
        s = ''
        # Leading blanks centre the row: one blank cell per missing column.
        for j in range(num-(i+1)):
            print ' ',
        # Row i contains 2*i + 1 stars.
        for k in range((i*2)+1):
            s += ' *'
        print s.strip()
main()
| [
"bborade@aurusinc.com"
] | bborade@aurusinc.com |
b97539904ac34c0adff62c9b36d5fa505cac1d19 | 0d067f73d0ca9fbb6c7803d243917ed91e18f287 | /tests/python/test_while.py | c87fead60cf60e81fd566a9c8d01e4e92713e0d5 | [
"MIT"
] | permissive | ericyao2013/3D-taichi | 4addc71f8324734545d1645a4dbb15cbf0e0bd96 | 3e775cac122e2dadbb5c2392ed78a0ce99c1bc9b | refs/heads/master | 2020-11-28T09:07:37.265645 | 2019-12-23T03:47:54 | 2019-12-23T03:47:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | import taichi as ti
@ti.all_archs
def test_while():
  # Sum 0..9 inside a Taichi kernel using a while loop; expect 45.
  x = ti.var(ti.f32)

  N = 1

  @ti.layout
  def place():
    # Single-element dense field holding the result.
    ti.root.dense(ti.i, N).place(x)

  @ti.kernel
  def func():
    i = 0
    s = 0
    while i < 10:
      s += i
      i += 1
    x[0] = s

  func()
  assert x[0] == 45  # 0 + 1 + ... + 9
@ti.all_archs
def test_break():
  # Exercise `break` inside a Taichi while-True loop (sum 0..10 = 55).
  ti.cfg.print_ir = True  # dump the generated IR for debugging
  ret = ti.var(ti.i32, shape=())

  @ti.kernel
  def func():
    i = 0
    s = 0
    while True:
      s += i
      i += 1
      if i > 10:
        break
    ret[None] = s

  func()
  # NOTE(review): prints instead of asserting — presumably 55; consider an assert.
  print(ret[None])
| [
"yuanmhu@gmail.com"
] | yuanmhu@gmail.com |
6d465675aebb12efbed82c2dea1046cefad57b88 | c906f4726bf55db9f0d64a755c1414a75b909d6d | /dev/test_board.py | 02efc0d4cc33eb5e180c0a2e0f4268cff0005d52 | [] | no_license | filipwodnicki/wood-cutting | 14773afb34e21d02c0ee9c074962760d9d10a189 | f3ecffced163f5128a5ee9e66ff79e8c19e4da5d | refs/heads/master | 2020-03-22T03:26:21.859381 | 2018-07-05T13:56:39 | 2018-07-05T13:56:39 | 139,432,199 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,740 | py | from unittest import TestCase
from dev.board import Board
class TestBoard(TestCase):
    """Unit tests for dev.board.Board.

    The board has a fixed total capacity of 2050 units; insert()/remove()
    must keep ``space_remaining`` consistent with the pieces on the board.
    """

    # --- Board.insert -------------------------------------------------

    def test_insert_type(self):
        """insert() accepts int/float sizes and rejects other types."""
        b = Board()
        self.assertRaises(Exception, b.insert, 'car')  # assert string fails
        self.assertRaises(Exception, b.insert, [0, 1, 2])  # assert [] fails
        try:
            b.insert(100)  # assert int OK
        except TypeError:
            self.fail("insert() raised TypeError unexpectedly!")
        try:
            b.insert(100.0)  # assert float OK
        except TypeError:
            self.fail("insert() raised TypeError unexpectedly!")

    def test_insert_size(self):
        """insert() rejects anything that would exceed the 2050-unit capacity."""
        b = Board()
        with self.assertRaises(Exception):
            # 1000 + 1000 + 51 = 2051 > 2050, so the last insert must raise.
            b.insert(1000)
            b.insert(1000)
            b.insert(51)

        b2 = Board()
        # A single piece larger than the whole board must also be rejected.
        self.assertRaises(Exception, b2.insert, 2051)

    def test_insert_space(self):
        """space_remaining decreases by exactly the inserted size."""
        b = Board()
        b.insert(100)
        self.assertEqual(b.space_remaining, 1950)
        b.insert(850)
        self.assertEqual(b.space_remaining, 1100)

    # --- Board.remove -------------------------------------------------

    def test_remove(self):
        """remove() restores space and rejects absent or already-removed sizes."""
        b = Board()
        b.insert(50)
        # Can't remove something that doesn't exist.
        self.assertRaises(Exception, b.remove, 100)
        # Space remaining reflects the single 50-unit piece.
        # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual(b.space_remaining, 2000)
        b.remove(50)
        # Can't remove something that was already removed.
        self.assertRaises(Exception, b.remove, 50)
        # Board is empty again.
        self.assertEqual(b.space_remaining, 2050)
"31481575+bananalytics@users.noreply.github.com"
] | 31481575+bananalytics@users.noreply.github.com |
4a16bd846b3d70edc39f06e69194b96432e5b4db | 3f2afe894e38b3d82e456e0c32573fa5ea2b1498 | /2020/python/day-04.py | 2ecf068ab7d414fa484bf91ba16d2fa5fd06d357 | [
"MIT"
] | permissive | luciferchase/advent-of-code | 5900c40dd27a94c0cfda23dd1b52fa6cf19b7949 | 5320e9a4447017ed1bbe1fb75b652971dbde088c | refs/heads/main | 2023-06-04T14:36:28.328928 | 2021-06-18T04:38:08 | 2021-06-18T04:38:08 | 376,226,748 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,084 | py | import re
# Read the puzzle input; each passport is a blank-line-separated group of
# whitespace-separated "key:value" tokens.
with open("input.txt", "r") as input_file:
    _input = input_file.read().splitlines()

# Add the EOF newline so the final passport is flushed by the loop below.
_input.append("")

# Regroup the raw lines: accumulate tokens until a blank line ends a passport.
temp_input = []
passport = []
for i in _input:
    if not i:
        temp_input.append(passport)
        passport = []
    else:
        passport += i.split()
_input = temp_input
# Part 1
def part_1(_input):
    """Return the passports that contain every required field.

    Each passport is a list of "key:value" strings; the "cid" field is
    optional and therefore not part of the required set.
    """
    required = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}
    valid = []
    for passport in _input:
        present = {entry.split(":")[0] for entry in passport}
        # A passport is valid for part 1 when all required keys are present.
        if required <= present:
            valid.append(passport)
    return valid
# Part 2
# Field validators, compiled once at import time (puzzle rules for day 4).
byr = re.compile("^(19[2-9]\\d)$|^(200[0-2])$")    # birth year 1920-2002
iyr = re.compile("^(201\\d)$|^(2020)$")            # issue year 2010-2020
eyr = re.compile("^(202\\d)$|^(2030)$")            # expiry year 2020-2030
hgt = re.compile("^(1[5-8]\\dcm)$|^(19[0-3]cm)$|^(59in)$|^(6\\din)$|^(7[0-6]in)$")
hcl = re.compile("^#([\\da-f]){6}$")               # hair colour: #rrggbb
pid = re.compile("^\\d{9}$")                       # passport id: exactly 9 digits

# Dispatch table: field name -> compiled pattern.  Fields absent from the
# table and from the special cases below (e.g. "cid") are always valid.
_PATTERNS = {"byr": byr, "iyr": iyr, "eyr": eyr, "hgt": hgt, "hcl": hcl, "pid": pid}
_EYE_COLOURS = {"amb", "blu", "brn", "gry", "grn", "hzl", "oth"}

def validate(key, value):
    """Return True when *value* is acceptable for passport field *key*."""
    if key == "ecl":
        return value in _EYE_COLOURS
    pattern = _PATTERNS.get(key)
    if pattern is None:
        return True
    return bool(pattern.match(value))
def part_2(valid_passports):
    """Count passports whose every "key:value" entry passes validate()."""
    total = 0
    for passport in valid_passports:
        entries = (entry.split(":") for entry in passport)
        # all() short-circuits on the first invalid field, mirroring the
        # original break/else loop.
        if all(validate(key, value) for key, value in entries):
            total += 1
    return total
# Print answers: part 1 counts structurally complete passports, part 2
# additionally validates every field value.
valid_passports = part_1(_input)
print(len(valid_passports))
print(part_2(valid_passports))
# [Finished in 301ms] | [
"udit2702@gmail.com"
] | udit2702@gmail.com |
461239fbc31618a5919392bafbc9f02663fe2c5d | 7c7a504092c66876ff37821812fac2269709a25b | /src/utils/qgis/algorithms/buffer_from_line.py | 932a7ebc68568f71b57b572ec59afdb6d367e6a7 | [
"MIT"
] | permissive | ibiroos/FloodTool | 4eef1cc4d1fbdcd7850e7fbe40c2ae4da85fb5fb | 5b6af01a1d1648e666909df6427ec1cfd1cc668a | refs/heads/main | 2023-06-25T14:52:08.088699 | 2021-07-16T10:16:32 | 2021-07-16T10:16:32 | 385,207,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,217 | py | # -*- coding: utf-8 -*-
"""
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from PyQt5.QtCore import QCoreApplication, QVariant
from qgis.core import (QgsProcessing,
QgsFeatureSink,
QgsProcessingException,
QgsProcessingAlgorithm,
QgsCoordinateReferenceSystem,
QgsCoordinateTransform,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterNumber,
QgsProcessingParameterFeatureSink,
QgsGeometry,
QgsPointXY,
QgsFields,
QgsField,
QgsFeature)
import processing
import numpy as np
from scipy.ndimage import gaussian_filter1d
from osgeo import ogr
class ExampleProcessingAlgorithm(QgsProcessingAlgorithm):
    """
    This is an example algorithm that takes a vector layer and
    creates a new identical one.

    It is meant to be used as an example of how to create your own
    algorithms and explain methods and variables used to do it. An
    algorithm like this will be available in all elements, and there
    is not need for additional work.

    All Processing algorithms should extend the QgsProcessingAlgorithm
    class.
    """

    # Constants used to refer to parameters and outputs. They will be
    # used when calling the algorithm from another algorithm, or when
    # calling from the QGIS console.

    INPUT = 'INPUT'
    OUTPUT = 'OUTPUT'

    def tr(self, string):
        """
        Returns a translatable string with the self.tr() function.
        """
        return QCoreApplication.translate('Processing', string)

    def createInstance(self):
        # Required factory method used by the QGIS processing framework.
        return ExampleProcessingAlgorithm()

    def name(self):
        """
        Returns the algorithm name, used for identifying the algorithm. This
        string should be fixed for the algorithm, and must not be localised.
        The name should be unique within each provider. Names should contain
        lowercase alphanumeric characters only and no spaces or other
        formatting characters.
        """
        return 'bufferbylines'

    def displayName(self):
        """
        Returns the translated algorithm name, which should be used for any
        user-visible display of the algorithm name.
        """
        return self.tr('Buffer by perpendicular lines')

    def group(self):
        """
        Returns the name of the group this algorithm belongs to. This string
        should be localised.
        """
        return self.tr('MyCoast')

    def groupId(self):
        """
        Returns the unique ID of the group this algorithm belongs to. This
        string should be fixed for the algorithm, and must not be localised.
        The group id should be unique within each provider. Group id should
        contain lowercase alphanumeric characters only and no spaces or other
        formatting characters.
        """
        return 'mycoast'

    def shortHelpString(self):
        """
        Returns a localised short helper string for the algorithm. This string
        should provide a basic description about what the algorithm does and the
        parameters and outputs associated with it..
        """
        return self.tr("This algorithm generates polygon buffer from simplified line")

    def initAlgorithm(self, config=None):
        """
        Here we define the inputs and output of the algorithm, along
        with some other properties.
        """

        # We add the input vector features source. It can have any kind of
        # geometry.
        self.addParameter(
            QgsProcessingParameterFeatureSource(
                self.INPUT,
                self.tr('Input layer'),
                [QgsProcessing.TypeVectorAnyGeometry]
            )
        )

        # Buffer radius in layer units (declared Integer, read back as a
        # double in processAlgorithm).
        self.addParameter(
            QgsProcessingParameterNumber(
                name='Radius',
                description=self.tr('Buffer radius (m)'),
                type=QgsProcessingParameterNumber.Integer,
                defaultValue=20,
                optional=False
            )
        )

        # Number of consecutive line segments grouped into each buffer polygon.
        self.addParameter(
            QgsProcessingParameterNumber(
                name='Length',
                description=self.tr('Length of each polygon of the buffer'),
                type=QgsProcessingParameterNumber.Integer,
                defaultValue=100,
                optional=False
            )
        )

        # We add a feature sink in which to store our processed features (this
        # usually takes the form of a newly created vector layer when the
        # algorithm is run in QGIS).
        self.addParameter(
            QgsProcessingParameterFeatureSink(
                self.OUTPUT,
                self.tr('Output layer')
            )
        )

    def processAlgorithm(self, parameters, context, feedback):
        """
        Here is where the processing itself takes place.
        """

        # Retrieve the feature source and sink. The 'dest_id' variable is used
        # to uniquely identify the feature sink, and must be included in the
        # dictionary returned by the processAlgorithm function.
        source = self.parameterAsSource(
            parameters,
            self.INPUT,
            context
        )

        radio = self.parameterAsDouble(
            parameters,
            'Radius',
            context
        )
        feedback.pushInfo('Radius: %f' % radio)

        longitud = self.parameterAsInt(
            parameters,
            'Length',
            context
        )
        feedback.pushInfo('Length: %i' % longitud)

        # If source was not found, throw an exception to indicate that the algorithm
        # encountered a fatal error. The exception text can be any string, but in this
        # case we use the pre-built invalidSourceError method to return a standard
        # helper text for when a source cannot be evaluated
        if source is None:
            raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))

        # Fields to add to the resulting layer: polygon id plus its centroid
        # in layer coordinates and in lon/lat (EPSG:4326).
        campos = QgsFields()
        campos.append( QgsField('id',QVariant.Int) )
        campos.append( QgsField('X_centroid', QVariant.Double) )
        campos.append( QgsField('Y_centroid', QVariant.Double) )
        campos.append( QgsField('Lon_centroid', QVariant.Double) )
        campos.append( QgsField('Lat_centroid', QVariant.Double) )

        (sink, dest_id) = self.parameterAsSink(
            parameters,
            self.OUTPUT,
            context,
            campos, # source.fields(),
            3, # = QGis.WKBPolygon (was source.wkbType())
            source.sourceCrs()
        )

        # Send some information to the user
        crs_id = int(source.sourceCrs().authid().split(':')[1])
        feedback.pushInfo('CRS is {}'.format(crs_id))

        # Transform from the source CRS to EPSG:4326 for the lon/lat columns.
        #proyector = QgsCoordinateTransform(QgsCoordinateReferenceSystem(23029), QgsCoordinateReferenceSystem(4326), 23029, 4326)
        proyector = QgsCoordinateTransform(QgsCoordinateReferenceSystem(crs_id), QgsCoordinateReferenceSystem(4326), crs_id, 4326)

        # If sink was not created, throw an exception to indicate that the algorithm
        # encountered a fatal error. The exception text can be any string, but in this
        # case we use the pre-built invalidSinkError method to return a standard
        # helper text for when a sink cannot be evaluated
        if sink is None:
            raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))

        features = source.getFeatures()

        x = []
        y = []

        # Walk the line feature(s) collecting all vertex coordinates:
        for current, feature in enumerate(features):
            for punto in feature.geometry().vertices():
                x.append( punto.x() )
                y.append( punto.y() )

        x = np.array(x)
        y = np.array(y)

        feedback.pushInfo('Got coordinates')

        # Number of vertices contained in the line:
        n = len(x)
        feedback.pushInfo('Number of line vertices: %i' % n)

        lineas = []
        R = radio
        # For each segment, build a short line from the segment midpoint out
        # to distance R along the segment's normal (exterior side only).
        for i in range(0,n-1):
            # Perpendicular construction:
            # the two segment endpoints
            x0, y0 = x[i] ,y[i]
            x1, y1 = x[i+1],y[i+1]
            # the segment midpoint:
            x2, y2 = (x0+x1)/2, (y0+y1)/2
            # feedback.pushInfo('Punto medio del segmento: (%f, %f)' % (x2,y2))
            # Direction cosines of the segment (the normal is (-sin, cos)):
            d = np.sqrt((y1-y0)**2 + (x1-x0)**2)
            sin = (y1-y0)/d
            cos = (x1-x0)/d
            # m = -(x1-x0)/(y1-y0) # tan
            # Intercept so the perpendicular passes through the midpoint:
            # b = y2 - m*x2
            # X = np.linspace(-10,10,2000) + x2
            # Y = m*X + b
            # Coordinates of the endpoints:
            # lineas.append( [(sin*R + x2, -cos*R + y2), (-sin*R + x2, cos*R + y2)] )
            # lineas.append( [(sin*R + x2, -cos*R + y2), (x2, y2)] ) # Interior
            lineas.append( [(x2, y2), (-sin*R + x2, cos*R + y2)] ) # Exterior

        feedback.pushInfo('Number of perpendicular lines: %i' % len(lineas))

        # Build the polygons: group the perpendiculars in runs of `longitud`
        # (with one line of overlap so adjacent polygons share an edge).
        nl = longitud
        poligonos = [lineas[i*nl:(i+1)*nl+1] for i in range(len(lineas)//nl)]

        # Compute the number of steps to display within the progress bar and
        # get features from source
        total = 100.0 / len(poligonos) if len(poligonos) else 0

        for i, poligono in enumerate(poligonos):
            # Stop the algorithm if cancel button has been clicked
            if feedback.isCanceled():
                break

            # Ring outline: midpoints forward, then offset points backward.
            puntos = []
            for P1, P2 in poligono:
                puntos.append(P1)
            for P1, P2 in poligono[::-1]:
                puntos.append(P2)

            ring = ogr.Geometry(ogr.wkbLinearRing)
            for Xp,Yp in puntos:
                ring.AddPoint(Xp,Yp)

            poligono = ogr.Geometry(ogr.wkbPolygon)
            poligono.AddGeometry(ring)

            # Convert the OGR polygon to a QGIS geometry via WKT.
            geom = QgsGeometry.fromWkt(poligono.ExportToWkt())

            feature = QgsFeature()
            feature.setGeometry(geom)
            centroide_x = feature.geometry().centroid().asPoint().x()
            centroide_y = feature.geometry().centroid().asPoint().y()
            proyectado = proyector.transform(centroide_x, centroide_y)
            feature.setAttributes([int(i), centroide_x, centroide_y, proyectado.x(), proyectado.y() ])
            sink.addFeature(feature)

            feedback.setProgress(int(i * total))

        # Dead example code kept from the QGIS template (never executed):
        if False:
            for current, feature in enumerate(features):
                # Stop the algorithm if cancel button has been clicked
                if feedback.isCanceled():
                    break

                # Add a feature in the sink
                sink.addFeature(feature, QgsFeatureSink.FastInsert)

                # Update the progress bar
                feedback.setProgress(int(current * total))

        # To run another Processing algorithm as part of this algorithm, you can use
        # processing.run(...). Make sure you pass the current context and feedback
        # to processing.run to ensure that all temporary layer outputs are available
        # to the executed algorithm, and that the executed algorithm can send feedback
        # reports to the user (and correctly handle cancelation and progress reports!)
        if False:
            buffered_layer = processing.run("native:buffer", {
                'INPUT': dest_id,
                'DISTANCE': 1.5,
                'SEGMENTS': 5,
                'END_CAP_STYLE': 0,
                'JOIN_STYLE': 0,
                'MITER_LIMIT': 2,
                'DISSOLVE': False,
                'OUTPUT': 'memory:'
            }, context=context, feedback=feedback)['OUTPUT']

        # Return the results of the algorithm. In this case our only result is
        # the feature sink which contains the processed features, but some
        # algorithms may return multiple feature sinks, calculated numeric
        # statistics, etc. These should all be included in the returned
        # dictionary, with keys matching the feature corresponding parameter
        # or output names.
        return {self.OUTPUT: dest_id}
| [
"krinchy@gmail.com"
] | krinchy@gmail.com |
7b8afb4ccbc57e42415d6107a4c730f40e2acaba | bf4709f5cb6a13c9954a8bdce18715d6eb1ff685 | /sensorDataFileIO.py | b6cd8b81fc85d1baa2c2c80cbcb64f6d4ce73043 | [] | no_license | johnmonash/compsci-jmss-2016 | 670faab36c004be4cb82b55084fe8542b9756453 | 5f8752a91aee4b55f62bebacc228faf6c9de1585 | refs/heads/master | 2021-01-21T04:47:24.653999 | 2016-07-18T04:09:04 | 2016-07-18T04:09:04 | 51,118,365 | 0 | 32 | null | 2016-02-05T02:29:11 | 2016-02-05T01:27:51 | null | UTF-8 | Python | false | false | 442 | py | # read in and process temperature data from Blackburn Sensor for March
# Load the sensor CSV; assumes column 1 is temperature and column 2 is
# humidity (matches the variable names) — TODO confirm against the file header.
data = open("visdata.csv")
headers = data.readline()  # first row holds the column header names
print (headers)

templist = []  # temperature readings
humlist = []   # humidity readings
for line in data:
    line = line.strip()
    datalist = line.split(",")
    templist.append(float(datalist[1]))
    # BUG FIX: humidity was previously appended to templist as well,
    # leaving humlist empty; route it to humlist instead.
    humlist.append(float(datalist[2]))
data.close()  # release the file handle once fully read
print(templist)
# write code to separate the data into days, and that finds the maximum temp and humidity for each day. | [
"linda.mciver@jmss.vic.edu.au"
] | linda.mciver@jmss.vic.edu.au |
af6f722c68536974e119047083e150a08ffdfdf0 | 6933b96b9c10ca70da57b1b384126e20fa21d9b2 | /ftp/FTP4.0/linkFTP2012-11-8/mysql.py | 2b49cd406f624fc83d9f1ed13cd5c521b4c1ca2d | [] | no_license | Dawson0x00/scan | 5bb2e85756b8e86ba43f6d63182a7e806c560bfc | e9f274e26ac924a47cf3216e707dc1a724937775 | refs/heads/master | 2021-01-19T14:12:59.107938 | 2017-02-22T02:51:27 | 2017-02-22T02:51:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,655 | py | #!/usr/local/bin/python
#-*- coding: UTF-8 -*-
##################################################
#qq:316118740
#BLOG:http://hi.baidu.com/alalmn
# MYSQL 添加 删除 修改 查询
# 刚学写的不好请大家见谅
#网上搜索到一个http://www.technicalbard.com/files/MySQL-python-1.2.2.win32-py2.6.exe
##################################################
import time, MySQLdb
import ConfigParser #INI读取数据
def mysql_open(): # connect to the database server
    # Read connection settings from the [DATA] section of Server.ini and
    # open a module-global MySQL connection.  On success the globals `conn`
    # and `cursor` are set; returns 0 on any failure (INI read or connect).
    # Fallback defaults, overwritten by Server.ini when present:
    Server="localhost"
    Username="root"
    password="316118740"
    db="urldata"
    try:
        config = ConfigParser.ConfigParser()
        config.readfp(open("Server.ini"))
        Server = config.get("DATA","Server")
        Username = config.get("DATA","Username")
        password = config.get("DATA","password")
        db = config.get("DATA","db")
    except:
        print u"读取INI错误"
        return 0
    try:
        global conn # declare module-level global
        conn=MySQLdb.connect(host=Server,user=Username,passwd=password,db=db,init_command="set names utf8")
        global cursor # declare module-level global
        cursor = conn.cursor()
        #print u"服务器:",Server,u"用户名:",Username,u"密码:",password,u"连接数据库:",db,u"登录服务器成功"
        #print u"mysql:---登录服务器成功"
    except:
        print u"###服务器:",Server,u"用户名:",Username,u"密码:",password,u"连接数据库:",db,u"登录服务器失败###"
        return 0
def mysql_S(): # save (commit) pending changes
    # Commit the current transaction on the global connection; 0 on failure.
    try:
        conn.commit() # commit — original note: "this line caused me a lot of grief"
    except:
        print u"保存数据异常"
        return 0
def mysql_close(): # close the database connection
    # Close the global connection; returns 0 on failure.
    try:
        conn.close()
    except:
        print u"关闭数据异常"
        return 0
def mysql_select(data): # run a SELECT query
    # Execute *data* and return the first column of the first row.
    # Returns the sentinel string "null123456" on any error; implicitly
    # returns None when the query yields no rows.
    try:
        n = cursor.execute(data)
        cursor.scroll(0)
        for row in cursor.fetchall():
            #print '%s-%s-%s'%(row[0],row[1],row[2])
            return row[0] # returns on the first row only
    except:
        return "null123456"
#def mysql_select(): #查询数据
# n = cursor.execute("select * from url")
# cursor.scroll(0)
# for row in cursor.fetchall():
# print '%s-%s-%s'%(row[0],row[1],row[2])
# #print row[0]
# #print row[1]
# #print row[2]
def mysql_insert(data): # insert data
    # Execute an INSERT statement; returns the affected row count, 0 on error.
    # FIX: the original had `mysql_S()` *after* the return statement, i.e.
    # unreachable dead code — the commit is the caller's responsibility
    # (call mysql_S() explicitly after the write).  The dead line is removed.
    try:
        return cursor.execute(data)
    except Exception:  # narrowed from bare except so Ctrl-C still propagates
        return 0
def mysql_update(data): # modify data
    # Execute an UPDATE statement; returns the affected row count, 0 on error.
    # FIX: removed the unreachable `mysql_S()` that followed `return` —
    # callers must commit via mysql_S() themselves.
    try:
        return cursor.execute(data)
    except Exception:  # narrowed from bare except so Ctrl-C still propagates
        return 0
def mysql_delete(data): # delete data
    # Execute a DELETE statement; returns the affected row count, 0 on error.
    # FIX: removed the unreachable `mysql_S()` that followed `return` —
    # callers must commit via mysql_S() themselves.
    try:
        return cursor.execute(data)
    except Exception:  # narrowed from bare except so Ctrl-C still propagates
        return 0
#if __name__=='__main__':
# mysql_open() #连接数据库
######################################################
# sql = "insert into url(url,time,ftpsend) values('%s','%s','%s')"%("ttttoo","2222","33333")
# if mysql_insert(sql): #添加
# print "添加成功"
# else:
# print "添加失败"
######################################################
# sql = "update url set time='<----',ftpsend='---->' where url='%s'"%("111")
# if mysql_update(sql): #修改数据
# print "修改成功"
# else:
# print "修改失败"
######################################################
# sql = "delete from url where url='%s'"%("111")
# if mysql_delete(sql): #删除数据
# print "删除成功"
# else:
# print "删除失败"
######################################################
# mysql_S() #保存数据
# mysql_select() #查询数据
# mysql_close() #关闭数据库 | [
"voilet@voiletdeMacBook-Pro-2.local"
] | voilet@voiletdeMacBook-Pro-2.local |
ba8cb3646c5fa07225a17de7ff38d8260a00b236 | fa78d8b79ed0aeeba2d089a741195ee56bda98f9 | /DataIngestion/Sony/build/lib/Sony/settings.py | 390436f85dda801dcbdc131bfa68ed988c166f40 | [] | no_license | michaelgreis/GameInfo | d40bc2d4d526bb470feb721eafac43c2d126b8c3 | 8222b352dfb4a420f78c6b88faf01dabad42554c | refs/heads/master | 2021-01-16T18:26:05.357392 | 2019-02-03T19:34:35 | 2019-02-03T19:34:35 | 100,079,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,221 | py | # -*- coding: utf-8 -*-
# Scrapy settings for Sony project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Sony'

SPIDER_MODULES = ['Sony.spiders']
NEWSPIDER_MODULE = 'Sony.spiders'

# Route scraped items through the project pipeline (priority 300).
ITEM_PIPELINES = {
   'Sony.pipelines.SonyPipeline': 300,
}

# Request-fingerprint duplicate filter (this is Scrapy's default, stated
# explicitly here).
DUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'Sony (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 2
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'Sony.middlewares.SonySpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'Sony.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    'Sony.pipelines.SonyPipeline': 300,
#}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"michaelgreis@gmail.com"
] | michaelgreis@gmail.com |
2cdd6efb3875bec9432a48027643d9c449e8b838 | a65d7a87078a3990f6a39e716f8598a5218a96cf | /Files/KalmanFilterTest.py | cf8c906bf47cc99ff374cb03d043cbc0e63a2a1b | [
"MIT"
] | permissive | austindkoenig/Beach-Weather-Station-Kalman-Filter | 13147273fe7482846cac4fdf2d0ae265d00f7131 | 8079d2d07be095f780b9f817ceebffbeea7de5bd | refs/heads/master | 2022-04-24T11:13:56.901889 | 2020-04-16T20:31:55 | 2020-04-16T20:31:55 | 256,322,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,758 | py | import pandas as pd
import KalmanFilter
import matplotlib.pyplot as plt
plt.style.use('ggplot')

# Chicago beach-weather sensor export (relative to the scripts directory).
data_dir = '../Data/Beach_Weather_Stations_-_Automated_Sensors.csv'

weather_df = pd.read_csv(data_dir)
weather_df = weather_df[~weather_df['Measurement Timestamp'].str.contains('2015')] # remove year 2015 because format is weird
weather_df['Measurement Timestamp'] = pd.to_datetime(weather_df['Measurement Timestamp'], infer_datetime_format = True)
# Index by timestamp and drop the now-redundant column.
weather_df.index = weather_df['Measurement Timestamp']
weather_df = weather_df.drop(['Measurement Timestamp'], axis = 1)

stations = weather_df['Station Name'].unique()

for s in stations:
    # Sensor channels handed to the Kalman filter for this station.
    curr_df = weather_df[weather_df['Station Name'] == s][['Air Temperature',
                                                           'Wet Bulb Temperature', 'Humidity',
                                                           'Rain Intensity', 'Wind Speed',
                                                           'Barometric Pressure', 'Solar Radiation',
                                                           'Battery Life']]
    print(s)
    kf = KalmanFilter.KalmanFilter(dataframe = curr_df)
    kf.filter()
    # Two stacked panels: estimates vs. measurements (top), residuals (bottom),
    # restricted to the last 500 humidity samples.
    fig, (temp_ax, err_ax) = plt.subplots(nrows = 2, ncols = 1, figsize = (15, 10))
    err_ax.plot(range(len(kf.post_fit_residuals['Humidity'][-500:])), kf.post_fit_residuals['Humidity'][-500:], color = 'red', label = 'Estimated Humidity Residuals')
    err_ax.legend()
    temp_ax.plot(range(len(curr_df['Humidity'][-500:])), curr_df['Humidity'][-500:], '--', label = 'Measured Humidity')
    temp_ax.plot(range(len(kf.state_estimates['Humidity'][-500:])), kf.state_estimates['Humidity'][-500:], ':', label = 'Estimated Humidity')
    temp_ax.legend()
    plt.show()
    break  # intentional: only the first station is processed/plotted
"austindkoenig@gmail.com"
] | austindkoenig@gmail.com |
b975846d7e30a0a08796a7fd56ea90d115cd15c9 | 5e464a322726348922cbabd50e62fbeef8684209 | /one_shot_kg_app.py | 06ef47c62bd3e11e8a4fdc756461540caf248837 | [] | no_license | Yimsun97/TuRBOPlus | 6d95a0ce710f4cb5db48203b68f5749306ffb5d5 | 82bcfc815572549600080b731b4ed7712eaf94f7 | refs/heads/main | 2023-03-02T01:31:33.888412 | 2021-02-10T03:28:16 | 2021-02-10T03:28:16 | 337,601,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,213 | py | #!/usr/bin/env python3
# coding: utf-8
# The one-shot Knowledge Gradient acquisition function
import torch
import numpy as np
from botorch.fit import fit_gpytorch_model
from botorch.models import SingleTaskGP
from botorch.utils import standardize
from gpytorch.mlls import ExactMarginalLogLikelihood
from botorch.acquisition import qKnowledgeGradient, PosteriorMean
from botorch.test_functions import Ackley, Hartmann
from botorch.optim import optimize_acqf
from botorch.utils.sampling import manual_seed
from torch.quasirandom import SobolEngine
from botorch.utils.transforms import unnormalize
from matplotlib import pyplot as plt
# Fix all RNG seeds for reproducibility.
SEED = 1024
np.random.seed(SEED)
torch.manual_seed(SEED)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.double
# Benchmark: 6-D Hartmann, negated so that the optimisation MAXIMISES it,
# with the domain normalised to the unit hypercube [0, 1]^6.
dim = 6
fun = Hartmann(dim=dim, negate=True).to(dtype=dtype, device=device)
fun.bounds[0, :].fill_(0)
fun.bounds[1, :].fill_(1)
lb, ub = fun.bounds
Initial_Samples = 10  # size of the initial Sobol design
N_TRIALS = 20         # number of outer optimisation rounds
def get_initial_points(dim, n_pts):
    # Draw n_pts scrambled-Sobol points in [0, 1]^dim on the module device/dtype.
    sobol = SobolEngine(dimension=dim, scramble=True)
    X_init = sobol.draw(n=n_pts).to(dtype=dtype, device=device)
    return X_init
def eval_objective(x):
    """This is a helper function we use to unnormalize and evalaute a point"""
    return fun(unnormalize(x, fun.bounds))
# Initial design, evaluated under a fixed seed.
with manual_seed(1234):
    X_okg = get_initial_points(dim, Initial_Samples)
    Y_okg = torch.tensor(
        [eval_objective(x) for x in X_okg], dtype=dtype, device=device
    ).unsqueeze(-1)
# Outer loop: fit a GP, maximise the "proper" one-shot Knowledge Gradient
# (posterior-mean baseline subtracted), evaluate the candidate, then pad the
# data with 2 extra quasi-random points each round except the last.
for i in range(1, N_TRIALS + 1):
    model = SingleTaskGP(X_okg, Y_okg)
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    fit_gpytorch_model(mll)
    qKG = qKnowledgeGradient(model, num_fantasies=64)
    # Current best posterior-mean value, used as the KG baseline.
    argmax_pmean, max_pmean = optimize_acqf(
        acq_function=PosteriorMean(model),
        bounds=fun.bounds,
        q=1,
        num_restarts=4,
        raw_samples=256,
    )
    qKG_proper = qKnowledgeGradient(
        model,
        num_fantasies=64,
        sampler=qKG.sampler,
        current_value=max_pmean,
    )
    candidates_proper, acq_value_proper = optimize_acqf(
        acq_function=qKG_proper,
        bounds=fun.bounds,
        q=1,
        num_restarts=4,
        raw_samples=256,
    )
    candidates_proper_y = torch.tensor(
        [eval_objective(x) for x in candidates_proper], dtype=dtype, device=device
    ).unsqueeze(-1)
    X_okg = torch.cat((X_okg, candidates_proper), dim=0)
    Y_okg = torch.cat((Y_okg, candidates_proper_y), dim=0)
    print(np.maximum.accumulate(Y_okg.cpu())[-1])  # running best-so-far
    if i < N_TRIALS:
        # Augment the data with two fresh Sobol points between rounds.
        X_new = get_initial_points(dim, 2)
        Y_new = torch.tensor(
            [eval_objective(x) for x in X_new], dtype=dtype, device=device
        ).unsqueeze(-1)
        X_okg = torch.cat((X_okg, X_new), dim=0)
        Y_okg = torch.cat((Y_okg, Y_new), dim=0)
# ## Sobol
# Baseline: a pure quasi-random design with the same total budget as OKG.
X_Sobol = (SobolEngine(dim, scramble=True).draw(len(X_okg)).to(dtype=dtype, device=device))
Y_Sobol = torch.tensor([eval_objective(x) for x in X_Sobol],
                       dtype=dtype, device=device).unsqueeze(-1)
names = ["OKG", "Sobol"]
runs = [Y_okg, Y_Sobol]
# %% Plot the results
fig, ax = plt.subplots(figsize=(8, 6))
fx = np.maximum.accumulate(Y_okg.cpu())
for name, run in zip(names, runs):
    fx = np.maximum.accumulate(run.cpu())  # cumulative best per method
    plt.plot(fx, marker="", lw=3)
# Dashed reference line at the known optimum of (negated) Hartmann-6.
plt.plot([0, len(Y_okg)], [fun.optimal_value, fun.optimal_value], "k--", lw=3)
plt.xlabel("Function value", fontsize=12)  # NOTE(review): likely meant ylabel — confirm
plt.xlabel("Number of evaluations", fontsize=12)
plt.title(f"{dim}D {fun._get_name()}", fontsize=14)
plt.xlim([0, len(Y_okg)])
# plt.ylim([4, 5])
plt.grid(True)
plt.tight_layout()
plt.legend(
names + ["Global optimal value"],
# loc="lower center",
# bbox_to_anchor=(0, -0.01, 1, 1),
# bbox_transform=plt.gcf().transFigure,
# ncol=4,
fontsize=12,
)
plt.show()
max_idx = Y_okg.argmax()
X_max = X_okg[max_idx]
X_max_unnorm = unnormalize(X_max, fun.bounds).cpu().detach().numpy()
print(f"The distance to the optimum is "
f"{np.sqrt(((X_max_unnorm-np.array(fun._optimizers))**2.0).sum())}")
| [
"1668159091@qq.com"
] | 1668159091@qq.com |
e4575ff2a9c490c29305dce49d9640353abdff5c | 9e7ea743f052e1837a26ad76e6b642a240853950 | /path.py | fbbb37eec2a5bf0c6374db3975ed7d6bff06f313 | [] | no_license | yumianhuli2/cmake_generator | 46e8c1e85a3b67cae7f385ef9b659253f145c208 | 2fb6a9733d49cf5550f639021527b709a69d0316 | refs/heads/master | 2023-02-25T03:22:57.207453 | 2021-01-27T11:24:27 | 2021-01-27T11:24:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | from inspect import stack
from pathlib import Path
# ----------------------------------------------------------------
def abs_path_str_from_rel_to_this_file( path : str ) :
caller_frame = stack()[1]
caller_file_path = caller_frame.filename
caller_directory = Path( caller_file_path ).parent
full_path = caller_directory / path
abs_path = full_path.resolve()
abs_path_str = abs_path.as_posix()
return abs_path_str | [
"berryvansomeren@gmail.com"
] | berryvansomeren@gmail.com |
435ce544d208455758f15a9719e1dcb841a558d7 | faacfe864e04c78a816f19dbdfb74b3ed6cb55b2 | /Locatel_Master/config.py | 65577971297827df61447c9899e974294f945e72 | [] | no_license | TheBaxes/Puntos-Locatel | 6051ef39b64404b8cd07c69a2871c32c713b15b0 | febef3f1c0e2939a16fffec510ddafda50f8b0fb | refs/heads/master | 2020-05-29T17:51:10.136119 | 2019-06-04T05:34:17 | 2019-06-04T05:34:17 | 189,285,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | import urllib
import os
# Statement for enabling the development environment
DEBUG = True
# Define the application directory
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Define the database - we are working with
# params = urllib.parse.quote_plus(
# "Driver={ODBC Driver 13 for SQL Server};Server=tcp:topicossoftware.database.windows.net,1433;Database=Software;Uid=softwareadmin@topicossoftware;Pwd=pass123*;Encrypt=yes;TrustServerCertificate=no;Connection Timeout=30;")
# SQLALCHEMY_DATABASE_URI = "mssql+pyodbc:///?odbc_connect=%s" % params
SQLALCHEMY_DATABASE_URI = 'sqlite:///../../students.sqlite3'
DATABASE_CONNECT_OPTIONS = {}
# Application threads. A common general assumption is
# using 2 per available processor cores - to handle
# incoming requests using one and performing background
# operations using the other.
THREADS_PER_PAGE = 2
# Enable protection agains *Cross-site Request Forgery (CSRF)*
CSRF_ENABLED = True
# Use a secure, unique and absolutely secret key for
# signing the data.
CSRF_SESSION_KEY = "secret"
# Secret key for signing cookies
SECRET_KEY = "secret"
| [
"sebas1475@gmail.com"
] | sebas1475@gmail.com |
e039219f4d442484234712c3cbba56c16a5220c2 | 5872119b3607cbbacf7b026a6a46e567ef5a2a70 | /test.py | 3868fffe30229d74b60e004534baa8272a98ad13 | [] | no_license | houpeng99/whalerun | 35163464133505780796bbbb19f9f6048d4a1e92 | 4e264a8f200bd5df8b2af00f9b8812e9194366f3 | refs/heads/master | 2021-07-04T17:42:00.706282 | 2017-09-26T08:25:00 | 2017-09-26T08:25:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,591 | py | from flask import Flask, redirect, url_for, session, request, jsonify
from flask_oauthlib.client import OAuth
app = Flask(__name__)
app.debug = True
app.secret_key = 'development'
oauth = OAuth(app)
github = oauth.remote_app(
'github',
consumer_key='a11a1bda412d928fb39a',
consumer_secret='92b7cf30bc42c49d589a10372c3f9ff3bb310037',
request_token_params={'scope': 'user:email'},
base_url='https://api.github.com/',
request_token_url=None,
access_token_method='POST',
access_token_url='https://github.com/login/oauth/access_token',
authorize_url='https://github.com/login/oauth/authorize'
)
@app.route('/')
def index():
if 'github_token' in session:
me = github.get('user')
return jsonify(me.data)
return redirect(url_for('login'))
@app.route('/login')
def login():
return github.authorize(callback=url_for('authorized', _external=True))
@app.route('/logout')
def logout():
session.pop('github_token', None)
return redirect(url_for('index'))
@app.route('/login/authorized')
def authorized():
resp = github.authorized_response()
if resp is None or resp.get('access_token') is None:
return 'Access denied: reason=%s error=%s resp=%s' % (
request.args['error'],
request.args['error_description'],
resp
)
session['github_token'] = (resp['access_token'], '')
me = github.get('user')
return jsonify(me.data)
@github.tokengetter
def get_github_oauth_token():
return session.get('github_token')
if __name__ == '__main__':
app.run()
| [
"hanwuji99@outlook.com"
] | hanwuji99@outlook.com |
06fe6e0f1c1bc7166c8e270110fd34b8cf27f314 | dd97a08267b2197a73c7b19f630ab2742ada837c | /python/ThirteenTeV/HSCPgluino_M_2000_TuneCUETP8M1_13TeV_pythia8_cff.py | 4d0eeea7bec491f0baea7b770cc3b44316ddc69d | [] | no_license | Mohammed2/genproductions | 48dc93d15c070a02c9ce7c70060909587115e8f8 | 9e18bbd3be45e57b0ecaf3cbea94c8f50df939fa | refs/heads/master | 2020-06-16T16:59:50.959353 | 2017-06-06T16:43:27 | 2017-06-06T16:43:27 | 92,869,604 | 1 | 0 | null | 2017-05-30T19:40:46 | 2017-05-30T19:40:46 | null | UTF-8 | Python | false | false | 2,893 | py | FLAVOR = 'gluino'
COM_ENERGY = 13000. # GeV
MASS_POINT = 2000 # GeV
PROCESS_FILE = 'SimG4Core/CustomPhysics/data/RhadronProcessList.txt'
PARTICLE_FILE = 'Configuration/Generator/data/particles_%s_%d_GeV.txt' % (FLAVOR, MASS_POINT)
SLHA_FILE ='Configuration/Generator/data/HSCP_%s_%d_SLHA.spc' % (FLAVOR, MASS_POINT)
PDT_FILE = 'Configuration/Generator/data/hscppythiapdt%s%d.tbl' % (FLAVOR, MASS_POINT)
USE_REGGE = False
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(-1),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(COM_ENERGY),
crossSection = cms.untracked.double(-1),
maxEventsToPrint = cms.untracked.int32(0),
SLHAFileForPythia8 = cms.string('%s' % SLHA_FILE),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'SUSY:all = off',
'SUSY:gg2gluinogluino = on',
'SUSY:qqbar2gluinogluino = on',
'RHadrons:allow = on',
'RHadrons:allowDecay = off',
'RHadrons:setMasses = on',
'RHadrons:probGluinoball = 0.1',
),
parameterSets = cms.vstring(
'pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters'
)
)
)
generator.hscpFlavor = cms.untracked.string(FLAVOR)
generator.massPoint = cms.untracked.int32(MASS_POINT)
generator.particleFile = cms.untracked.string(PARTICLE_FILE)
generator.slhaFile = cms.untracked.string(SLHA_FILE)
generator.processFile = cms.untracked.string(PROCESS_FILE)
generator.pdtFile = cms.FileInPath(PDT_FILE)
generator.useregge = cms.bool(USE_REGGE)
dirhadrongenfilter = cms.EDFilter("MCParticlePairFilter",
Status = cms.untracked.vint32(1, 1),
MinPt = cms.untracked.vdouble(0., 0.),
MinP = cms.untracked.vdouble(0., 0.),
MaxEta = cms.untracked.vdouble(100., 100.),
MinEta = cms.untracked.vdouble(-100, -100),
ParticleCharge = cms.untracked.int32(0),
ParticleID1 = cms.untracked.vint32(1000993,1009213,1009313,1009323,1009113,1009223,1009333,1091114,1092114,1092214,1092224,1093114,1093214,1093224,1093314,1093324,1093334),
ParticleID2 = cms.untracked.vint32(1000993,1009213,1009313,1009323,1009113,1009223,1009333,1091114,1092114,1092214,1092224,1093114,1093214,1093224,1093314,1093324,1093334)
)
ProductionFilterSequence = cms.Sequence(generator*dirhadrongenfilter)
| [
"sheffield@physics.rutgers.edu"
] | sheffield@physics.rutgers.edu |
1f5e5c283bfdc3792a29cb1368b77d31bbf14e6d | 311e8519f62c8bcb7421b7b5a59bf3f5597710f8 | /nessusapi/scan.py | 5db01a7d8b3025f484ced5491364a61e5bd5591c | [
"MIT"
] | permissive | sait-berkeley-infosec/pynessus-api | ef4b9b075c8c0198c2c41d18822522fccee9b31f | 3e0c74fd5dc7df90e10a9aac3f9282d4f8b12372 | refs/heads/master | 2021-01-10T21:13:17.403362 | 2015-06-04T21:30:33 | 2015-06-04T21:30:33 | 20,785,281 | 1 | 3 | null | 2015-02-02T18:20:53 | 2014-06-12T22:46:16 | Python | UTF-8 | Python | false | false | 1,121 | py | # coding=utf-8
class Scan(object):
def __init__(self, nessus, target, scan_name, policy):
self.nessus = nessus
self.target = target
self.name = scan_name
self.policy = policy
self.uuid = self.nessus.request_single('scan/new', 'scan', 'uuid',
target=self.target,
scan_name=self.name,
policy_id=self.policy)
def stop(self):
if self.changeStatus('stop') == 'stopping':
self.uuid = None
return True
return False
def pause(self):
return self.changeStatus('pause') == 'pausing'
def resume(self):
return self.changeStatus('resume') == 'resuming'
def changeStatus(self, status):
if not self.uuid:
raise BadRequestError('Scan not started')
return self.nessus.request_single('scan/{0}'.format(status),
'scan', 'status',
scan_uuid=self.uuid)
class BadRequestError(Exception):
pass
| [
"ajaska@berkeley.edu"
] | ajaska@berkeley.edu |
8756ccb5c70e7e6d62e58aa47351827dbf95a137 | 920733e8fdedb436f35123d63335a280ae4c30a3 | /minor.py | 40e04d8a5b00007ed46d867f68d5b915fb0268c7 | [] | no_license | ritvikbhatia/Early-Prediction-of-Lifestyle-Diseases | 04977fdfc73888f5806be595d7d83384110c8c45 | 6361a70b17e50ebcd3be0f1e72ba7e3025b818f2 | refs/heads/master | 2021-07-14T09:53:54.281863 | 2020-07-23T12:37:37 | 2020-07-23T12:37:37 | 176,802,250 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,922 | py | from flask import Flask,render_template,url_for,request
from flask_material import Material
# EDA PKg
import pandas as pd
import numpy as np
# ML Pkg
from sklearn.externals import joblib
import requests
import dweepy
import os
app = Flask(__name__)
Material(app)
@app.route('/')
def index():
return render_template("index.html")
@app.route('/preview')
def preview():
# df = pd.read_csv("data/d_sih.csv")
df2 = pd.read_csv("C:\\Users\\Ritvik\\Desktop\\try-sih\\data\\d_sih.csv")
return render_template("preview.html",df_view = df2)
@app.route('/',methods=["POST"])
def analyze():
if request.method == 'POST':
age_input = request.form['age_input']
# height_input = request.form['height_input']
# bmi = (weight_input/(height_input/100)**2)
gender_choice = request.form['gender_choice']
smoking_input = request.form['smoking_input']
exercise_input = request.form['exercise_input']
drinking_input = request.form['drinking_input']
bmi_input = request.form['bmi_input']
# idhr se input le rhe hai html se
sleep_input = request.form['sleep_input']
# model_choice = request.form['model_choice']
# weight_input = request.form['weight_input']
junk_input = request.form['junk_input']
# h = float(height_input)
# w = float(weight_input)
#bmi = (w/(h/100)**2)
age = float(age_input);
sex = float(gender_choice)
bmi= float(bmi_input)
smoking = float(smoking_input)
excercise = float(exercise_input)
sleep = float(sleep_input)
drinking = float(drinking_input)
junk = float(junk_input)
a=0;
b=0;
c=0;
x=0;
y=0;
z=0;
if(smoking==1):
a=a+15.2;
b=b+15.2;
c=c+13.1;
else:
a=a-3.3;
b=b-3.3;
c=c-1.1;
if(sleep==1):
a=a+20;
b=b+11.4;
c=c+13.2;
elif(sleep==3):
a=a+5;
c=c+20;
b=b+7.3;
else:
a=a-3.3;
b=b-2.4;
c=c-4.6;
if(drinking==1):
a=a+10;
b=b+17.4;
c=c+4;
else:
a=a-2.1;
b=b-9.4;
c=c-1.1;
if(sex==1):
a=a+a*0.6;
b=b+b*0.3;
c=c+c*0.4;
else:
a=a+a*0.4;
b=b+b+0.6;
c=c+c*1.5;
if(excercise==1):
a=a+10;
b=b+5;
elif(excercise==2):
a=a-6.1;
b=b-3.1;
else:
a=a-9.1;
b=b-6.1;
if(junk==2):
a=a+5.1;
b=b+8.1;
elif(junk==3):
a=a+15;
b=b+15;
else:
a=a-7.4;
b=b-5.3;
if(bmi>30):
a=a+30;
b=b+30;
c=c+9.6;
elif(bmi<20):
a=a+10;
b=b+15.3;
c=c+9.4;
elif(bmi>26):
a=a+5.3;
b=b+6.1;
c=c+3.4;
if(age>45 and age<70):
a=a+20.3;
c=c+12.1;
b=b+15.5;
elif(age>70):
a=a+5;
c=c+10;
b=b+25
elif(age<45 and age>15):
c=c+24;
b=b+5;
if(age<18 and bmi<30 and bmi>20):
a=5;
c=5;
b=5;
if(a>100):
a=95;
if(b>100):
b=94;
if(c>100):
c=96;
if(a<0):
a=5;
if(b<0):
b=6;
if(c<0):
c=7;
if(a>70):
x=1;
if(b>70):
y=1;
if(c>70):
z=1;
dweetIO='https://dweet.io/dweet/for/';
myName='sih20';
myName2='sih201';
myName3='sih2019';
myName4='sihx';
myName5='sihy';
myName6='sihz';
myKey='Diabetes';
myKey2='hyper';
myKey3='depression';
myKey4='x';
myKey5='y';
myKey6='z';
rqString=dweetIO+myName+'?'+myKey+'='+str(a);
rqs=requests.get(rqString);
rqString2=dweetIO+myName2+'?'+myKey2+'='+str(b);
rqString3=dweetIO+myName3+'?'+myKey3+'='+str(c);
rqString4=dweetIO+myName4+'?'+myKey3+'='+str(x);
rqString5=dweetIO+myName5+'?'+myKey3+'='+str(y);
rqString6=dweetIO+myName6+'?'+myKey3+'='+str(z);
rqs=requests.get(rqString);
rqs2=requests.get(rqString2);
rqs3=requests.get(rqString3);
rqs4=requests.get(rqString4);
rqs5=requests.get(rqString5);
rqs6=requests.get(rqString6);
# Clean the data by convert from unicode to float
sample_data = [age_input,bmi_input,drinking_input,exercise_input,gender_choice,junk_input,sleep_input,smoking_input]
clean_data = [float(i) for i in sample_data]
# lean_data = [int(i) for i in sample_data]
# Reshape the Data as a Sample not Individual Features
ex1 = np.array(clean_data).reshape(1,-1)
# reloading model
logit_model = joblib.load('C:\\Users\\Ritvik\\Desktop\\try-sih\\data\\naman_sih.pkl')
result_prediction = logit_model.predict(ex1)
result_prediction = int(result_prediction)
return render_template('index.html',
age_input = age_input,
# height_input = height_input,
gender_choice = gender_choice,
# weight_input = weight_input,
sleep_input = sleep_input,
junk_input = junk_input,
smoking_input = smoking_input,
exercise_input = exercise_input,
drinking_input = drinking_input,
bmi_input = bmi_input,
clean_data=clean_data,
result_prediction=result_prediction)
# model_selected=model_choice)
if __name__ == '__main__':
app.run(debug=True)
dweett()
| [
"noreply@github.com"
] | ritvikbhatia.noreply@github.com |
8b8874992ff3eab1808f62d05bc05d8f4290d857 | 3b0b1ee6c5c0a2740035e3b68d4c4af7c7b2bcea | /src/python/accessor.py | 6634d657453e2c99d2274569dc4ce53019b01da5 | [] | no_license | Exokem/Charm | d90268df00c6304e9cabdd6d57e8ee8eaa69f2e8 | a592b3856cdbd5d94c3200f7d2ce0aeeff4d639b | refs/heads/main | 2023-01-03T23:41:36.170123 | 2020-11-03T23:24:40 | 2020-11-03T23:24:40 | 308,516,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,706 | py | """
Facilitates storage of Charm data.
Functions:
add_word(word, part)
recover_data()
recover_user_data()
recover_words()
save()
store_contents() -> list
Variables:
version
savek
greeting
alpha
book
Authors:
Samuel Henderson
"""
import os.path as path
from src.python.data import *
from src.python.listener import post_query
# The user-specific version, save key, and greeting
version: str = ""
savek: str = ""
greeting: str = ""
# The user-specific alphabet
alpha: list = []
# The user-specific book of words
book: dict = {}
def add_word(word: str, part: Part) -> None:
"""
Add a word to the book.
A new Word is created using a provided string and part of speech and added to the dictionary of all known words.
"""
word = Word(word, [part.indx()])
book[hash(word)] = word
def recover_data() -> None:
"""
Recovers all saved data.
This function exists because the order of execution for the separate data recovery functions matters.
The word recovery is a prerequisite of the userdata recovery.
"""
recover_words()
recover_user_data()
post_query("Connected to Charm interactive", version, mode='v')
def recover_user_data() -> None:
"""
Recovers user data from the user_data file.
LINES:
========= ==============
0 Alphabet
1 Version
2 Save Key
3 Greeting
========= ==============
"""
global alpha, version, savek, greeting
if path.exists("data/user_data"):
userdata = open("data/user_data").read()
userdata = userdata.split('\n')
for line in range(len(userdata)):
sections = userdata[line].split(",")
if line == 0:
# Alphabet is stored in the first line
alpha = sections[0]
elif 1 < len(sections):
if line == 1:
# Version is stored in the second line
version = sections[1][:-2] + str(len(book)) + "-"
end = len(alpha) - 1
# Append last three characters in alphabet to displayed version
version += alpha[end - 4] + alpha[end - 2] + alpha[end]
elif line == 2:
# Save key is stored in the third line
savek = sections[1]
elif line == 3:
# Greeting is stored in the fourth line
greeting = sections[1]
def recover_words() -> None:
"""
Parses each line in the 'words' file as a Word.
"""
dest = "data/words"
if path.exists(dest):
# If the file exists, open it and parse
word_file = open(dest)
for line in word_file:
# Attempt to parse a Word from the current line
word = parse_line(line)
if word is not None:
# Store Word if it has been parsed successfully
book[hash(word)] = word
else:
# Create the file if it is missing
file = open(dest, "w+")
file.close()
def save():
"""
Saves all stored word and user data.
"""
global savek, greeting
# First clear the words data file
open("data/words", "w").close()
words = open("data/words", "r+")
words.truncate(0)
# Write each formatted Word into a separate line in the empty file
for word in book.values():
words.write(word.format())
words.close()
# Collect the contents of the user data file before erasing it
data = store_contents("data/user_data", create=True)
if 4 <= len(data):
# Overwrite the third and fourth lines of the data file with the new save key and greeting values
# Only updated if not an empty string
if savek != "":
data[2] = "save," + savek + "\n"
if greeting != "":
data[3] = "greeting," + greeting + "\n"
# Open the data file and write its new contents
userdata = open("data/user_data", "r+")
for entry in data:
userdata.write(entry)
def store_contents(file: str, create: bool = False) -> list:
"""
Reads the contents of a file into a list.
Each entry in the list represents a line in the provided file, if it exists.
:param file: The path of the file to read
:param create: Should the file be created if it does not exist
:return: A list containing the contents of the file
"""
if not path.exists(file):
if create:
open(file, "w+").close()
return []
else:
lines = []
for line in open(file):
lines.append(line)
return lines
| [
"ex010@live.com"
] | ex010@live.com |
6fd81187e5fd26a209fcf5b54d72c04a1f58c692 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/73/usersdata/261/34558/submittedfiles/triangulo.py | e103c6c1e03699443640a502e2554120aa9b80a0 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | # -*- coding: utf-8 -*-
import math
def triangulo():
a=float(input("Qual o comprimento do 1º lado? "))
b=float(input("Qual o comprimento do 2º lado? "))
c=float(input("Qual o comprimento do 3º lado? "))
if not a<b+c:
print ("N")
elif a<b+c:
print ("S")
a=a1
b=b1
c=c1
if (a1**2)==(b1**2)+(c1**2):
print ("Re")
elif (a1**2)>(b1**2)+(c1**2):
print ("Ob")
elif (a1**2)<(b1**2)+(c1**2):
print ("Ac")
if a1==b1 and b1==c1:
print ("Eq")
elif b1==c1 and c1!=a1:
print ("Is")
elif a1!=b1 and b1!=c1 and a1!=c1:
print ("Es")
triangulo() | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
d6640fdc04f1ce77b67b469f8a16f8fd943643bd | 28b093c7626506b8e999df5de138227f8f627929 | /AlmacenLosYuYitos/AlmacenLosYuYitos/settings.py | 98a6b56e654cfbf6fa97943533c4e2bdb176de37 | [] | no_license | Core-out-of-void/LosYuYitos | 8c5d1d73df0121fe66f9f0a4bf2a32e23f8bbca1 | c7427575b24bf4e2762b638a4385a5b3e3dc5b9f | refs/heads/main | 2023-05-14T16:02:47.145961 | 2021-06-07T04:10:15 | 2021-06-07T04:10:15 | 368,354,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,023 | py | """
Django settings for AlmacenLosYuYitos project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-aixfa6mq+d^91q-j#hf*ym&c)apwo%)ter8up2@z%9urm&p-g6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
MESSAGE_STORAGE = "django.contrib.messages.storage.cookie.CookieStorage"
LOGIN_REDIRECT_URL = 'listar-proveedor'
LOGOUT_REDIRECT_URL = 'listar-proveedor'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'LosYuYitos',
'crispy_forms',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'AlmacenLosYuYitos.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates','registration'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'AlmacenLosYuYitos.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.oracle',
'NAME': '127.0.0.1:1521/xe',
'USER': 'c##yuyitos',
'PASSWORD': 'yuyitos',
'TEST': {
'USER': 'default_test',
'TBLSPACE': 'default_test_tbls',
'TBLSPACE_TMP': 'default_test_tbls_tmp',
},
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'es'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = 'almacen.los.yuyitos@gmail.com'
EMAIL_HOST_PASSWORD = 'yuyitos2021'
| [
"es.cabeza@alumnos.duoc.cl"
] | es.cabeza@alumnos.duoc.cl |
30becabf152e42058f58dcb632120e3cd17a2887 | 3d7bbd8e8fead2b1bd1f8976b47f86260045d664 | /two_pages/two_pages.py | 470cac93697248b91a7c0ac527b558e904cde741 | [] | no_license | Pappers88/web-app- | ff5da4f46e661977dcb39aad13eb47c73471d5c1 | 9d78fbe31b66b51550887eb6f9ae374c9dd25c00 | refs/heads/master | 2021-01-21T14:13:41.063457 | 2017-06-23T20:41:09 | 2017-06-23T20:41:09 | 95,254,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def home():
return render_template('home.html')
@app.route('/about')
def about():
return render_template('about.html')
if __name__ == '__main__':
app.run(debug=False) | [
"noreply@github.com"
] | Pappers88.noreply@github.com |
32a87eb6686563bdcc5519a45e396b95dab048f0 | cf200bf2edc476006f84299c5cd6e64f8b73e086 | /project/app/api/crud.py | 91ab19e57e07e4268f28fa57a0edf0654a56491d | [] | no_license | kenjinagai/fastapi-tdd-docker | b936148703513df7a6b76763b213a33d66003370 | 91feeec3ef6966377d5b46e183821841992e8c60 | refs/heads/main | 2023-08-04T23:56:04.021102 | 2021-09-29T07:02:18 | 2021-09-29T07:02:18 | 410,176,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | # project/app/api/crud.py
from app.models.pydantic import SummaryPayloadSchema
from app.models.tortoise import TextSummary
from typing import Union, List
async def post(payload: SummaryPayloadSchema) -> int:
summary = TextSummary(
url=payload.url,
summary="dummy summary",
)
await summary.save()
return summary.id
async def get(id: int) -> Union[dict, None]:
summary = await TextSummary.filter(id=id).first().values()
if summary:
return summary[0]
return None
async def get_all() -> List:
summaries = await TextSummary.all().values()
return summaries
| [
"s07430@gmail.com"
] | s07430@gmail.com |
d45575c100f6da8ba9c3f250658911fbba639b5c | 5cb3b2d2fe6cf136296ed206f021061774edf305 | /apps/home/urls.py | 248a4da48c3fa9a576467df37ba4acd926a86704 | [
"Apache-2.0"
] | permissive | whytheplatypus/sharemyhealth | 002e6a4b3633d8f5aaedbd9add0b9109723d7e5d | 79ac694686ebd7a9a121741e473afbd35f25cea5 | refs/heads/master | 2020-03-30T12:59:42.841594 | 2019-05-01T19:01:30 | 2019-05-01T19:01:30 | 151,251,593 | 0 | 0 | Apache-2.0 | 2018-10-02T12:35:16 | 2018-10-02T12:35:15 | null | UTF-8 | Python | false | false | 193 | py | from django.conf.urls import url
from django.contrib import admin
from .views import authenticated_home
admin.autodiscover()
urlpatterns = [
url(r'', authenticated_home, name='home'),
]
| [
"aviars@videntity.com"
] | aviars@videntity.com |
00703fcb17eb6a59005c59a62ad6ec9f2fbe3aaa | c5314844fcffb7ce1d9a9453d6f6d458b952bb1b | /notifications/pushover.py | 9374dc1263c05417cb4b5076565f639de24a6d29 | [] | no_license | Admin9705/pgtrak | 92560ba9bd49ac4c751d5603dfb20dc5dc1944ae | 339d5206649211237a8298c6e93e5b20372c93da | refs/heads/master | 2022-07-08T06:02:39.040208 | 2018-04-28T17:23:44 | 2018-04-28T17:23:44 | 126,560,840 | 1 | 0 | null | 2018-08-09T03:03:08 | 2018-03-24T03:02:32 | Python | UTF-8 | Python | false | false | 1,009 | py | import requests
from misc.log import logger
log = logger.get_logger(__name__)
class Pushover:
NAME = "Pushover"
def __init__(self, app_token, user_token):
self.app_token = app_token
self.user_token = user_token
log.debug("Initialized Pushover notification agent")
def send(self, **kwargs):
if not self.app_token or not self.user_token:
log.error("You must specify an app_token and user_token when initializing this class")
return False
# send notification
try:
payload = {
'token': self.app_token,
'user': self.user_token,
'message': kwargs['message']
}
resp = requests.post('https://api.pushover.net/1/messages.json', data=payload, timeout=30)
return True if resp.status_code == 200 else False
except Exception:
log.exception("Error sending notification to %r", self.user_token)
return False
| [
"l3uddz@gmail.com"
] | l3uddz@gmail.com |
b863c1fd30ec1e14e519c6a21166d6172a863880 | 738b4fd5d8ebb8c424947a6786bd41ba30df46d6 | /ibeatles/utilities/load_data.py | 3ddfd54996842b3ce1eabb81d9dceaa9bf2f0284 | [
"MIT"
] | permissive | indudhiman/bragg-edge | ba6e5c02e2bf2c2c5f87b626a4578238f7973e43 | 56af0a448534ef9cb5428879ba900e194dc05db2 | refs/heads/master | 2020-04-16T22:49:53.274903 | 2019-01-08T14:18:32 | 2019-01-08T14:18:32 | 165,985,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | class LoadData(object):
def __init__(self, parent=None, list_of_files=[]):
self.parent = parent
self.list_of_files = list_of_files
def load(self):
if (self.image_ext == '.tiff') or (self.image_ext == '.tif'):
self.load_tiff()
elif (self.image_ext == '.fits'):
self.load_fits()
else:
raise TypeError("Image Type not supported")
def load_tiff(self):
_list_of_files = self.list_of_files
_data = []
for _file in _list_of_files:
_image = mpimg.imread(_file)
_data.append(_image)
self.image_array = _data
def load_fits(self):
print("loading fits")
print(self.list_of_files)
| [
"bilheuxjm@ornl.gov"
] | bilheuxjm@ornl.gov |
7dc9755c8f6e958b44fe5ef77b3807297c3cfaab | 65b4522c04c2be071c2d42095956fe950fe1cebe | /tests/lib/viscojapan/epoch_3d_array/test_g.py | 4e35bbc0a44fe78a5f8e894fd772930bb2fab9b8 | [] | no_license | geodesy/viscojapan | ac0cd93f7a2134cd2651623b94879dcc21c0c46a | 03e70265b56eb5994e73bcb6066f0be338e42f27 | refs/heads/master | 2021-03-03T18:19:07.779601 | 2015-07-16T03:50:49 | 2015-07-16T03:50:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | from os.path import join
import unittest
import numpy as np
import h5py
import viscojapan as vj
class Test_G(vj.MyTestCase):
def setUp(self):
self.this_script = __file__
super().setUp()
def test_ordered_mask_sties(self):
fn = '/home/zy/workspace/viscojapan/tests/share/G0_He50km_VisM6.3E18_Rake83.h5'
with h5py.File(fn,'r') as fid:
sites = vj.utils.as_string(fid['sites'])
arr = vj.epoch_3d_array.G.load(
fid,
mask_sites = sites,
memory_mode=False)
#print(arr.get_array_3d())
def test_not_ordered_mask_sites(self):
fn = '/home/zy/workspace/viscojapan/tests/share/G0_He50km_VisM6.3E18_Rake83.h5'
with h5py.File(fn,'r') as fid:
sites = vj.utils.as_string(fid['sites'])
arr = vj.epoch_3d_array.G.load(
fid,
mask_sites = sites[2:]+sites[0:2],
memory_mode=False)
print(arr.get_array_3d())
if __name__ == '__main__':
unittest.main()
| [
"zy31415@gmail.com"
] | zy31415@gmail.com |
7446b5c185cd44dd66e7b7b4284a0cc1f5f0063c | 04103ca907cac88e3b8fe4e18ac4ced847690d7a | /data_app.py | ea6b82823643d9dd3058cf8e5a08404a90831c7f | [] | no_license | joaobermudez/web_app1 | 62556b5126ce71d42203f401e178d0b18e5806e0 | e1d193e3660153e0ae1237dc0eaadb23adb3ef6b | refs/heads/master | 2022-11-29T07:22:01.984894 | 2020-08-19T03:57:59 | 2020-08-19T03:57:59 | 288,620,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,950 | py | import streamlit as st
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from PIL import Image
st.title('Machine Learning Analysis')
#image = Image.open("C:\\Users\\mars\\Desktop\\mlwebsit.jpg")
#st.image(image, use_column_width =True)
st.write("""
# Explore different classifier and datasets
""")
dataset_name = st.sidebar.selectbox(
'Select Dataset',
('Iris', 'Breast Cancer', 'Wine')
)
st.write(f"## {dataset_name} Dataset")
classifier_name = st.sidebar.selectbox(
'Select classifier',
('KNN', 'SVM', 'Random Forest')
)
def get_dataset(name):
data = None
if name == 'Iris':
data = datasets.load_iris()
elif name == 'Wine':
data = datasets.load_wine()
else:
data = datasets.load_breast_cancer()
X = data.data
y = data.target
return X, y
X, y = get_dataset(dataset_name)
st.write('Shape of dataset:', X.shape)
st.write('number of classes:', len(np.unique(y)))
def add_parameter_ui(clf_name):
params = dict()
if clf_name == 'SVM':
C = st.sidebar.slider('C', 0.01, 10.0)
params['C'] = C
elif clf_name == 'KNN':
K = st.sidebar.slider('K', 1, 15)
params['K'] = K
else:
max_depth = st.sidebar.slider('max_depth', 2, 15)
params['max_depth'] = max_depth
n_estimators = st.sidebar.slider('n_estimators', 1, 100)
params['n_estimators'] = n_estimators
return params
params = add_parameter_ui(classifier_name)
def get_classifier(clf_name, params):
clf = None
if clf_name == 'SVM':
clf = SVC(C=params['C'])
elif clf_name == 'KNN':
clf = KNeighborsClassifier(n_neighbors=params['K'])
else:
clf = clf = RandomForestClassifier(n_estimators=params['n_estimators'],
max_depth=params['max_depth'], random_state=1234)
return clf
clf = get_classifier(classifier_name, params)
#### CLASSIFICATION ####
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
acc = accuracy_score(y_test, y_pred)
st.write(f'Classifier = {classifier_name}')
st.write(f'Accuracy =', acc)
#### PLOT DATASET ####
# Project the data onto the 2 primary principal components
pca = PCA(2)
X_projected = pca.fit_transform(X)
x1 = X_projected[:, 0]
x2 = X_projected[:, 1]
fig = plt.figure()
plt.scatter(x1, x2,
c=y, alpha=0.8,
cmap='viridis')
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.colorbar()
#plt.show()
st.pyplot()
| [
"noreply@github.com"
] | joaobermudez.noreply@github.com |
b8c3eea30bc246d692a31b63b8a60ec186d8ceb5 | 9eadbd115634a6f447af4657440fffafbf5648b1 | /com/python/learn/crawlertest/GluepartitionFilter.py | 9baa0fd874ba69571b5570763d4c0a00b8dd2505 | [] | no_license | awsbigdata/learnpython | 910657fe7d178c209992df682bf2a985689f7edb | 7d4d4782b07653b47190935eaa269ed6b2ba1cb2 | refs/heads/master | 2021-10-13T14:52:17.585320 | 2021-10-12T08:23:18 | 2021-10-12T08:23:18 | 161,269,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/python
#
# This program used to sync the glue table schema and partition upto 1000
#
import boto3
from sys import argv
import json
from datetime import date, datetime
dbname='default'
tablename='test1'
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
#dbname=argv[1]
#tablename=argv[2]
client = boto3.client('glue',region_name='us-east-1')
response = client.delete_partition(
DatabaseName=dbname,
TableName=tablename,PartitionValues=['__HIVE_DEFAULT_PARTITION__']
)
print(response) | [
"sivankumar86@gmail.com"
] | sivankumar86@gmail.com |
5767bfce1f275431992887884ac199ef1450f21d | 83449e5c1955868c521e47ef357f51b5d2229f46 | /sirbot/core/cli.py | 353d1b2e0c48704a99e7cd5c7255934eff4087e9 | [
"MIT"
] | permissive | alairock/sir-bot-a-lot | 3c87f56be290a0b8b9586b6f6ab5d8f7aef8d4a1 | 2af140e82cd43ee92cd1769b5f1a91d3184dc156 | refs/heads/master | 2021-01-24T07:02:17.437669 | 2017-05-28T17:44:21 | 2017-05-28T17:44:21 | 93,331,631 | 0 | 0 | null | 2017-06-04T17:58:02 | 2017-06-04T17:58:02 | null | UTF-8 | Python | false | false | 2,414 | py | import logging
import os
import sys
import argparse
import yaml
import asyncio
from .core import SirBot
def parse_args(arguments):
parser = argparse.ArgumentParser(description='The good Sir-bot-a-lot')
parser.add_argument('-P', '--port', dest='port', action='store',
type=int,
help='port where to run sirbot')
parser.add_argument('-c', '--config', action='store',
help='path to the Yaml config file')
parser.add_argument('-u', '--update', help='Run update of plugins'
'if necessary',
action='store_true', dest='update')
parser.add_argument('-p', '--plugins', help='Plugins to load',
dest='plugins', nargs='+')
return parser.parse_args(arguments)
def load_config(path=None):
if not path:
return dict()
if not os.path.isabs(path):
path = os.path.join(os.getcwd(), path)
with open(path) as file:
return yaml.load(file)
def cli_plugin(args, config):
if args.plugins:
try:
config['sirbot']['plugins'].extend(args.plugins)
except KeyError:
if 'sirbot' not in config:
config['sirbot'] = {'plugins': []}
elif 'plugins' not in config['sirbot']:
config['sirbot']['plugins'] = list()
config['sirbot']['plugins'] = args.plugins
return config
def main(): # pragma: no cover
args = parse_args(sys.argv[1:])
logging.basicConfig()
config_file = args.config or os.getenv('SIRBOT_CONFIG')
config = load_config(config_file)
config = cli_plugin(args, config)
try:
port = args.port or config['sirbot']['port']
except KeyError:
port = 8080
try:
if args.update:
update(config)
else:
start(config, port=port)
except Exception as e:
raise
def start(config, port, loop=None): # pragma: no cover
if not loop:
loop = asyncio.get_event_loop()
bot = SirBot(config=config, loop=loop)
bot.run(port=int(port))
return bot
def update(config, loop=None):
if not loop:
loop = asyncio.get_event_loop()
bot = SirBot(config=config, loop=loop)
loop.run_until_complete(bot.update())
return bot
if __name__ == '__main__':
main() # pragma: no cover
| [
"ovv@outlook.com"
] | ovv@outlook.com |
a6aada93732ceb43e1240aa9a5a19635365d827a | a9337f402b63a447e31ac7708aafa9bdbf0e3424 | /cleartest.py | f214182d6e4360c0743742e9b8751d587986c5cb | [] | no_license | brandizzi/netunong | d61756519fa41373a71e65db02f2d8af47acfc1a | 624ec73e2f3bfd760a0e697bb6f772eb3974fc79 | refs/heads/master | 2021-01-21T14:39:23.135130 | 2017-06-24T19:55:25 | 2017-06-24T19:55:25 | 95,319,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | #!/usr/bin/env DJANGO_SETTINGS_MODULE=settings python
from register.tests.test_utilities import clear_database
clear_database()
| [
"adam.brandizzi@seatecnologia.com.br"
] | adam.brandizzi@seatecnologia.com.br |
09f1d118e4bafca802bbd957d20e3534fb93bfeb | d206c0a8bb5dc3e2cfc0d7c3b7ceee4bdb665a4d | /one_page/admin.py | 66f05102838ca840025190a2c73edbcbf1ef4bc1 | [] | no_license | harshit-singh99/Sentiment-Analysis-and-Crowdsourcing | f0cbde5e1043b9460a473b26528a1088695eef87 | b764a317b9cc88ba771df4602019b3ee1ff73d04 | refs/heads/master | 2021-06-25T01:56:44.278542 | 2020-11-20T18:15:16 | 2020-11-20T18:15:16 | 158,106,956 | 0 | 7 | null | 2020-11-20T18:15:17 | 2018-11-18T17:06:07 | HTML | UTF-8 | Python | false | false | 230 | py | from django.contrib import admin
from .models import Movie, Rests, Unlabeled, Labeled
admin.site.register(Movie)
admin.site.register(Rests)
admin.site.register(Unlabeled)
admin.site.register(Labeled)
# Register your models here.
| [
"noreply@github.com"
] | harshit-singh99.noreply@github.com |
8eb1a9bce54d4bb8a6ff1d708e1ab92e53bb1dab | 359da93640b0f9cebe360d93dbdb834f88f568b9 | /mezzanine_pageview/admin.py | 8ea198650dfff51e25f2030b13ec011237a34604 | [
"BSD-2-Clause"
] | permissive | zgohr/mezzanine-pageview | 9005d34fc237cd6b1a7afc50515552e328888bc4 | 5bed5af0d3afce88332799ed8d161e3e683a978c | refs/heads/master | 2020-05-27T00:00:05.490532 | 2012-06-19T16:55:36 | 2012-06-19T16:55:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | from django.contrib import admin
from models import PageViewGroup
class PageViewGroupAdmin(admin.ModelAdmin):
list_display = ("group", "page")
admin.site.register(PageViewGroup, PageViewGroupAdmin)
| [
"zachary.gohr@gmail.com"
] | zachary.gohr@gmail.com |
bc674a16d2dd4ae18d79a0b0a0b77de5a114abab | 16fa2ec8c04bd23072ac6d1df687732f95391f66 | /petrolpy/Examples/bg_example.py | 9c8aa8c5c12162e2fc48e9c60d142010b75ede8a | [
"Apache-2.0"
] | permissive | rashidwadani/petrolpy | 872865ca8b2402bdd6818acc7f15df789ab077fb | eb9573a562fde2fc0374a26f706b432e8a4e04c5 | refs/heads/master | 2020-06-07T12:47:09.493862 | 2019-06-11T21:57:27 | 2019-06-11T21:57:27 | 193,025,890 | 2 | 0 | Apache-2.0 | 2019-06-21T03:41:40 | 2019-06-21T03:41:38 | null | UTF-8 | Python | false | false | 74 | py | import petrolpy
bg = petrolpy.calc_gas_vol_factor(z_value=0.6)
print(bg) | [
"michaelwiv@gmail.com"
] | michaelwiv@gmail.com |
6a9ce73123791da69f96c599b2edf8330df42542 | be46ea3043b2c2e9343fbfca7784613f2b6c97c0 | /models/seq2act_grounding.py | dd8a8bb9abbd0ff58062a5c7103d639011ebc6c9 | [] | no_license | andrei-ars/seq2act | 4f82c10685f25e5eacef0d9833f5a60aab2ed177 | 56879be8645e1d9b5475441d1b52ca65583d6e9a | refs/heads/main | 2023-08-19T03:24:22.120350 | 2021-09-17T20:32:57 | 2021-09-17T20:32:57 | 331,658,639 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,423 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The grounding models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_layers
import tensorflow.compat.v1 as tf
from seq2act.layers import area_utils
from seq2act.layers import common_embed
from seq2act.layers import encode_screen
from seq2act.models import seq2act_reference
def encode_screen_ffn(features, hparams, embed_scope):
"""Encodes a screen with feed forward neural network.
Args:
features: the feature dict.
hparams: the hyperparameter.
embed_scope: the name scope.
Returns:
encoder_outputs: a Tensor of shape
[batch_size, num_steps, max_object_count, hidden_size]
obj_mask: A tensor of shape
[batch_size, num_steps, max_object_count]
"""
object_embed, obj_mask, obj_bias = encode_screen.prepare_encoder_input(
features=features, hparams=hparams,
embed_scope=embed_scope)
for layer in range(hparams.num_hidden_layers):
with tf.variable_scope(
"encode_screen_ff_layer_%d" % layer, reuse=tf.AUTO_REUSE):
object_embed = tf.layers.dense(object_embed, units=hparams.hidden_size)
object_embed = common_layers.apply_norm(
object_embed, hparams.norm_type, hparams.hidden_size,
epsilon=hparams.norm_epsilon)
object_embed = tf.nn.relu(object_embed)
object_embed = tf.nn.dropout(
object_embed,
keep_prob=1.0 - hparams.layer_prepostprocess_dropout)
object_embed = object_embed * tf.expand_dims(obj_mask, 3)
return object_embed, obj_bias
def compute_logits(features, references, hparams):
"""Grounds using the predicted references.
Args:
features: the feature dict.
references: the dict that keeps the reference results.
hparams: the hyper-parameters.
Returns:
action_logits: [batch_size, num_steps, num_actions]
object_logits: [batch_size, num_steps, max_num_objects]
"""
lang_hidden_layers = hparams.num_hidden_layers
pos_embed = hparams.pos
hparams.set_hparam("num_hidden_layers", hparams.screen_encoder_layers)
hparams.set_hparam("pos", "none")
with tf.variable_scope("compute_grounding_logits", reuse=tf.AUTO_REUSE):
# Encode objects
if hparams.screen_encoder == "gcn":
screen_encoding, _, screen_encoding_bias = (
encode_screen.gcn_encoder(
features, hparams, references["embed_scope"],
discretize=False))
elif hparams.screen_encoder == "transformer":
screen_encoding, _, screen_encoding_bias = (
encode_screen.transformer_encoder(
features, hparams, references["embed_scope"]))
elif hparams.screen_encoder == "mlp":
screen_encoding, screen_encoding_bias = encode_screen_ffn(
features, hparams, references["embed_scope"])
else:
raise ValueError(
"Unsupported encoder: %s" % hparams.screen_encoder)
# Compute query
if hparams.compute_verb_obj_separately:
verb_hidden, object_hidden = _compute_query_embedding(
features, references, hparams, references["embed_scope"])
else:
verb_hidden = references["verb_hidden"]
object_hidden = references["object_hidden"]
# Predict actions
with tf.variable_scope("compute_action_logits", reuse=tf.AUTO_REUSE):
action_logits = tf.layers.dense(
verb_hidden, units=hparams.action_vocab_size)
# Predict objects
obj_logits, consumed_logits = _compute_object_logits(
hparams,
object_hidden,
screen_encoding,
screen_encoding_bias)
hparams.set_hparam("num_hidden_layers", lang_hidden_layers)
hparams.set_hparam("pos", pos_embed)
return action_logits, obj_logits, consumed_logits
def _compute_object_logits(hparams, object_hidden,
screen_encoding, screen_encoding_bias):
"""The output layer for a specific domain."""
with tf.variable_scope("compute_object_logits", reuse=tf.AUTO_REUSE):
if hparams.alignment == "cosine_similarity":
object_hidden = tf.layers.dense(
object_hidden, units=hparams.hidden_size)
screen_encoding = tf.layers.dense(
screen_encoding, units=hparams.hidden_size)
norm_screen_encoding = tf.math.l2_normalize(screen_encoding, axis=-1)
norm_obj_hidden = tf.math.l2_normalize(object_hidden, axis=-1)
align_logits = tf.matmul(norm_screen_encoding,
tf.expand_dims(norm_obj_hidden, 3))
elif hparams.alignment == "scaled_cosine_similarity":
object_hidden = tf.layers.dense(
object_hidden, units=hparams.hidden_size)
screen_encoding = tf.reshape(
screen_encoding,
common_layers.shape_list(
screen_encoding)[:-1] + [hparams.hidden_size])
screen_encoding = tf.layers.dense(
screen_encoding, units=hparams.hidden_size)
norm_screen_encoding = tf.math.l2_normalize(screen_encoding, axis=-1)
norm_obj_hidden = tf.math.l2_normalize(object_hidden, axis=-1)
dot_products = tf.matmul(norm_screen_encoding,
tf.expand_dims(norm_obj_hidden, 3))
align_logits = tf.layers.dense(dot_products, units=1)
elif hparams.alignment == "dot_product_attention":
object_hidden = tf.layers.dense(
object_hidden, units=hparams.hidden_size)
align_logits = tf.matmul(screen_encoding,
tf.expand_dims(object_hidden, 3))
elif hparams.alignment == "mlp_attention":
batch_size = tf.shape(screen_encoding)[0]
num_steps = tf.shape(screen_encoding)[1]
num_objects = tf.shape(screen_encoding)[2]
tiled_object_hidden = tf.tile(tf.expand_dims(object_hidden, 2),
[1, 1, num_objects, 1])
align_feature = tf.concat([tiled_object_hidden, screen_encoding], axis=-1)
align_feature = tf.reshape(
align_feature,
[batch_size, num_steps, num_objects, hparams.hidden_size * 2])
with tf.variable_scope("align", reuse=tf.AUTO_REUSE):
align_hidden = tf.layers.dense(align_feature, units=hparams.hidden_size)
align_hidden = common_layers.apply_norm(
align_hidden, hparams.norm_type, hparams.hidden_size,
epsilon=hparams.norm_epsilon)
align_hidden = tf.nn.tanh(align_hidden)
align_logits = tf.layers.dense(align_hidden, units=1)
else:
raise ValueError("Unsupported alignment: %s" % hparams.alignment)
obj_logits = tf.squeeze(align_logits, [3]) + screen_encoding_bias
# [batch_size, num_steps]
batch_size = common_layers.shape_list(obj_logits)[0]
num_steps = common_layers.shape_list(obj_logits)[1]
# [batch_size * num_steps, 1]
batch_indices = tf.to_int64(tf.reshape(
tf.tile(tf.expand_dims(tf.range(batch_size), 1), [1, num_steps]),
[-1, 1]))
step_indices = tf.to_int64(tf.reshape(
tf.tile(tf.expand_dims(tf.range(num_steps), 0), [batch_size, 1]),
[-1, 1]))
object_indices = tf.reshape(tf.argmax(obj_logits, -1), [-1, 1])
indices = tf.concat([batch_indices, step_indices, object_indices], -1)
# [batch_size, num_steps, depth]
depth = tf.shape(screen_encoding)[-1]
best_logits = tf.reshape(
tf.gather_nd(screen_encoding, indices=indices),
[batch_size, num_steps, depth])
consumed_logits = tf.layers.dense(
tf.reshape(tf.concat([object_hidden, best_logits], -1),
[batch_size, num_steps, hparams.hidden_size * 2]),
2)
with tf.control_dependencies([tf.assert_equal(
tf.reduce_all(tf.math.is_nan(consumed_logits)), False,
data=[tf.shape(best_logits), best_logits,
tf.constant("screen_encoding"), screen_encoding,
tf.constant("indices"), indices],
summarize=10000, message="consumed_logits_nan")]):
consumed_logits = tf.identity(consumed_logits)
return obj_logits, consumed_logits
def _compute_query_embedding(features, references, hparams, embed_scope=None):
"""Computes lang embeds for verb and object from predictions.
Args:
features: a dictionary contains "inputs" that is a tensor in shape of
[batch_size, num_tokens], "verb_id_seq" that is in shape of
[batch_size, num_actions], "object_spans" and "param_span" tensor
in shape of [batch_size, num_actions, 2]. 0 is used as padding or
non-existent values.
references: the dict that keeps the reference results.
hparams: the general hyperparameters for the model.
embed_scope: the embedding variable scope.
Returns:
verb_embeds: a Tensor of shape
[batch_size, num_steps, depth]
object_embeds:
[batch_size, num_steps, depth]
"""
pred_verb_refs = seq2act_reference.predict_refs(
references["verb_area_logits"],
references["areas"]["starts"],
references["areas"]["ends"])
pred_obj_refs = seq2act_reference.predict_refs(
references["obj_area_logits"],
references["areas"]["starts"],
references["areas"]["ends"])
input_embeddings, _ = common_embed.embed_tokens(
features["task"], hparams.task_vocab_size, hparams.hidden_size, hparams,
embed_scope=references["embed_scope"])
if hparams.obj_text_aggregation == "sum":
area_encodings, _, _ = area_utils.compute_sum_image(
input_embeddings, max_area_width=hparams.max_span)
shape = common_layers.shape_list(features["task"])
encoder_input_length = shape[1]
verb_embeds = seq2act_reference.span_embedding(
encoder_input_length, area_encodings, pred_verb_refs, hparams)
object_embeds = seq2act_reference.span_embedding(
encoder_input_length, area_encodings, pred_obj_refs, hparams)
elif hparams.obj_text_aggregation == "mean":
verb_embeds = seq2act_reference.span_average_embed(
input_embeddings, pred_verb_refs, embed_scope, hparams)
object_embeds = seq2act_reference.span_average_embed(
input_embeddings, pred_obj_refs, embed_scope, hparams)
else:
raise ValueError("Unrecognized query aggreggation %s" % (
hparams.span_aggregation))
return verb_embeds, object_embeds
def compute_losses(loss_dict, features, action_logits, obj_logits,
consumed_logits):
"""Compute the loss based on the logits and labels."""
valid_obj_mask = tf.to_float(tf.greater(features["verbs"], 1))
action_losses = tf.losses.sparse_softmax_cross_entropy(
labels=features["verbs"],
logits=action_logits,
reduction=tf.losses.Reduction.NONE) * valid_obj_mask
action_loss = tf.reduce_mean(action_losses)
object_losses = tf.losses.sparse_softmax_cross_entropy(
labels=features["objects"],
logits=obj_logits,
reduction=tf.losses.Reduction.NONE) * valid_obj_mask
object_loss = tf.reduce_mean(object_losses)
if "consumed" in features:
consumed_loss = tf.reduce_mean(
tf.losses.sparse_softmax_cross_entropy(
labels=features["consumed"],
logits=consumed_logits,
reduction=tf.losses.Reduction.NONE) * valid_obj_mask)
else:
consumed_loss = 0.0
loss_dict["grounding_loss"] = action_loss + object_loss + consumed_loss
loss_dict["verbs_loss"] = action_loss
loss_dict["objects_loss"] = object_loss
loss_dict["verbs_losses"] = action_losses
loss_dict["object_losses"] = object_losses
loss_dict["consumed_loss"] = consumed_loss
return loss_dict["grounding_loss"]
def compute_predictions(prediction_dict, action_logits, obj_logits,
consumed_logits):
"""Predict the action tuple based on the logits."""
prediction_dict["verbs"] = tf.argmax(action_logits, -1)
prediction_dict["objects"] = tf.argmax(obj_logits, -1)
prediction_dict["consumed"] = tf.argmax(consumed_logits, -1)
| [
"z@sp.sp"
] | z@sp.sp |
9bbdec2e3860fd5ff9ee707a7b8d5a07f51aaee9 | 529fba1e9dcf95696dd1022a829f4c8af9e404a3 | /algorithms-in-python/dynamicprogramming/countConstruct memonization.py | 67a207b1ddc49c8e0eebeb762151c13a0f073d98 | [] | no_license | nikhilsn01/algorithms-in-python | 415c8989db07d0ec36a76e05de22df6d57fa1129 | 8df088c1a714853ef864fc813b990f5f56c12249 | refs/heads/master | 2023-06-15T12:09:11.720286 | 2021-07-07T15:44:21 | 2021-07-07T15:44:21 | 382,078,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | def countConstruct(target,wordBank,memo={}):
if target in memo.keys():
return memo[target]
if target == '':
return 1
totalCount = 0
for word in wordBank:
if target.find(word) == 0:
numWaysForRest = countConstruct(target[len(word):],wordBank,memo)
totalCount+=numWaysForRest
memo[target] = totalCount
return totalCount
print( countConstruct('abcdef',['ab','abc','cd','def','abcd'],memo={}))
print( countConstruct('purple',['purp','p','ur','le','purpl'],memo={}))
print( countConstruct('skateboard',['bo','rd','ate','t','ska','sk','boar'],memo={}))
print( countConstruct('enterapotentpot',['a','p','ent','enter','ot','o','t'],memo={}))
print( countConstruct('eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeef',['e','ee','eeee','eee','eeeee'],memo={}))
| [
"nikhilsn360@gmail.com"
] | nikhilsn360@gmail.com |
0c2e8617661d090ec445720b79f089569ecc8ac3 | 81a62053841c03d9621fd31f8e7984c712c7aed2 | /mmdet_adv/projects/mmdet3d_plugin/datasets/custom_nuscenes_dataset.py | 1326c197618ce1a8859447f5489d89a495615843 | [] | no_license | Daniel-xsy/BEV-Attack | d0eb3a476875f9578c53df9bcb21564dea18ce0c | 7970b27396c1af450c80b12eb312e76a8ab52a0a | refs/heads/master | 2023-05-23T01:13:44.121533 | 2023-02-22T05:48:14 | 2023-02-22T05:48:14 | 540,328,937 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,245 | py | import numpy as np
from mmdet.datasets import DATASETS
from mmdet3d.datasets import NuScenesDataset
@DATASETS.register_module()
class DETR3DCustomNuScenesDataset(NuScenesDataset):
r"""NuScenes Dataset.
This datset only add camera intrinsics and extrinsics to the results.
"""
def get_data_info(self, index):
"""Get data info according to the given index.
Args:
index (int): Index of the sample data to get.
Returns:
dict: Data information that will be passed to the data \
preprocessing pipelines. It includes the following keys:
- sample_idx (str): Sample index.
- pts_filename (str): Filename of point clouds.
- sweeps (list[dict]): Infos of sweeps.
- timestamp (float): Sample timestamp.
- img_filename (str, optional): Image filename.
- lidar2img (list[np.ndarray], optional): Transformations \
from lidar to different cameras.
- ann_info (dict): Annotation info.
"""
info = self.data_infos[index]
# standard protocal modified from SECOND.Pytorch
input_dict = dict(
sample_idx=info['token'],
pts_filename=info['lidar_path'],
sweeps=info['sweeps'],
timestamp=info['timestamp'] / 1e6,
)
if self.modality['use_camera']:
image_paths = []
lidar2img_rts = []
lidar2cam_rts = []
cam_intrinsics = []
for cam_type, cam_info in info['cams'].items():
image_paths.append(cam_info['data_path'])
# obtain lidar to image transformation matrix
lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation'])
lidar2cam_t = cam_info[
'sensor2lidar_translation'] @ lidar2cam_r.T
lidar2cam_rt = np.eye(4)
lidar2cam_rt[:3, :3] = lidar2cam_r.T
lidar2cam_rt[3, :3] = -lidar2cam_t
intrinsic = cam_info['cam_intrinsic']
viewpad = np.eye(4)
viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic
lidar2img_rt = (viewpad @ lidar2cam_rt.T)
lidar2img_rts.append(lidar2img_rt)
cam_intrinsics.append(viewpad)
lidar2cam_rts.append(lidar2cam_rt.T)
input_dict.update(
dict(
img_filename=image_paths,
lidar2img=lidar2img_rts,
cam_intrinsic=cam_intrinsics,
lidar2cam=lidar2cam_rts,
))
if not self.test_mode:
annos = self.get_ann_info(index)
input_dict['ann_info'] = annos
return input_dict
import copy
import numpy as np
from mmdet.datasets import DATASETS
from mmdet3d.datasets import NuScenesDataset
import mmcv
from os import path as osp
from mmdet.datasets import DATASETS
import torch
import numpy as np
from nuscenes.eval.common.utils import quaternion_yaw, Quaternion
from .nuscnes_eval import NuScenesEval_custom
from projects.mmdet3d_plugin.models.utils.visual import save_tensor
from mmcv.parallel import DataContainer as DC
import random
@DATASETS.register_module()
class BEVFormerCustomNuScenesDataset(NuScenesDataset):
r"""NuScenes Dataset.
This datset only add camera intrinsics and extrinsics to the results.
"""
def __init__(self, queue_length=4, bev_size=(200, 200), overlap_test=False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.queue_length = queue_length
self.overlap_test = overlap_test
self.bev_size = bev_size
def prepare_train_data(self, index):
"""
Training data preparation.
Args:
index (int): Index for accessing the target data.
Returns:
dict: Training data dict of the corresponding index.
"""
queue = []
index_list = list(range(index-self.queue_length, index))
random.shuffle(index_list)
index_list = sorted(index_list[1:])
index_list.append(index)
for i in index_list:
i = max(0, i)
input_dict = self.get_data_info(i)
if input_dict is None:
return None
self.pre_pipeline(input_dict)
example = self.pipeline(input_dict)
if self.filter_empty_gt and \
(example is None or ~(example['gt_labels_3d']._data != -1).any()):
return None
queue.append(example)
return self.union2one(queue)
def union2one(self, queue):
imgs_list = [each['img'].data for each in queue]
metas_map = {}
prev_scene_token = None
prev_pos = None
prev_angle = None
for i, each in enumerate(queue):
metas_map[i] = each['img_metas'].data
if metas_map[i]['scene_token'] != prev_scene_token:
metas_map[i]['prev_bev_exists'] = False
prev_scene_token = metas_map[i]['scene_token']
prev_pos = copy.deepcopy(metas_map[i]['can_bus'][:3])
prev_angle = copy.deepcopy(metas_map[i]['can_bus'][-1])
metas_map[i]['can_bus'][:3] = 0
metas_map[i]['can_bus'][-1] = 0
else:
metas_map[i]['prev_bev_exists'] = True
tmp_pos = copy.deepcopy(metas_map[i]['can_bus'][:3])
tmp_angle = copy.deepcopy(metas_map[i]['can_bus'][-1])
metas_map[i]['can_bus'][:3] -= prev_pos
metas_map[i]['can_bus'][-1] -= prev_angle
prev_pos = copy.deepcopy(tmp_pos)
prev_angle = copy.deepcopy(tmp_angle)
queue[-1]['img'] = DC(torch.stack(imgs_list), cpu_only=False, stack=True)
queue[-1]['img_metas'] = DC(metas_map, cpu_only=True)
queue = queue[-1]
return queue
def get_data_info(self, index):
"""Get data info according to the given index.
Args:
index (int): Index of the sample data to get.
Returns:
dict: Data information that will be passed to the data \
preprocessing pipelines. It includes the following keys:
- sample_idx (str): Sample index.
- pts_filename (str): Filename of point clouds.
- sweeps (list[dict]): Infos of sweeps.
- timestamp (float): Sample timestamp.
- img_filename (str, optional): Image filename.
- lidar2img (list[np.ndarray], optional): Transformations \
from lidar to different cameras.
- ann_info (dict): Annotation info.
"""
info = self.data_infos[index]
# standard protocal modified from SECOND.Pytorch
input_dict = dict(
sample_idx=info['token'],
pts_filename=info['lidar_path'],
sweeps=info['sweeps'],
ego2global_translation=info['ego2global_translation'],
ego2global_rotation=info['ego2global_rotation'],
prev_idx=info['prev'],
next_idx=info['next'],
scene_token=info['scene_token'],
can_bus=info['can_bus'],
frame_idx=info['frame_idx'],
timestamp=info['timestamp'] / 1e6,
)
if self.modality['use_camera']:
image_paths = []
lidar2img_rts = []
lidar2cam_rts = []
cam_intrinsics = []
for cam_type, cam_info in info['cams'].items():
image_paths.append(cam_info['data_path'])
# obtain lidar to image transformation matrix
lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation'])
lidar2cam_t = cam_info[
'sensor2lidar_translation'] @ lidar2cam_r.T
lidar2cam_rt = np.eye(4)
lidar2cam_rt[:3, :3] = lidar2cam_r.T
lidar2cam_rt[3, :3] = -lidar2cam_t
intrinsic = cam_info['cam_intrinsic']
viewpad = np.eye(4)
viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic
lidar2img_rt = (viewpad @ lidar2cam_rt.T)
lidar2img_rts.append(lidar2img_rt)
cam_intrinsics.append(viewpad)
lidar2cam_rts.append(lidar2cam_rt.T)
input_dict.update(
dict(
img_filename=image_paths,
lidar2img=lidar2img_rts,
cam_intrinsic=cam_intrinsics,
lidar2cam=lidar2cam_rts,
))
if not self.test_mode:
annos = self.get_ann_info(index)
input_dict['ann_info'] = annos
rotation = Quaternion(input_dict['ego2global_rotation'])
translation = input_dict['ego2global_translation']
can_bus = input_dict['can_bus']
can_bus[:3] = translation
can_bus[3:7] = rotation
patch_angle = quaternion_yaw(rotation) / np.pi * 180
if patch_angle < 0:
patch_angle += 360
can_bus[-2] = patch_angle / 180 * np.pi
can_bus[-1] = patch_angle
return input_dict
def __getitem__(self, idx):
"""Get item from infos according to the given index.
Returns:
dict: Data dictionary of the corresponding index.
"""
if self.test_mode:
return self.prepare_test_data(idx)
while True:
data = self.prepare_train_data(idx)
if data is None:
idx = self._rand_another(idx)
continue
return data
def _evaluate_single(self,
result_path,
logger=None,
metric='bbox',
result_name='pts_bbox'):
"""Evaluation for a single model in nuScenes protocol.
Args:
result_path (str): Path of the result file.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
metric (str): Metric name used for evaluation. Default: 'bbox'.
result_name (str): Result name in the metric prefix.
Default: 'pts_bbox'.
Returns:
dict: Dictionary of evaluation details.
"""
from nuscenes import NuScenes
self.nusc = NuScenes(version=self.version, dataroot=self.data_root,
verbose=True)
output_dir = osp.join(*osp.split(result_path)[:-1])
eval_set_map = {
'v1.0-mini': 'mini_val',
'v1.0-trainval': 'val',
}
self.nusc_eval = NuScenesEval_custom(
self.nusc,
config=self.eval_detection_configs,
result_path=result_path,
eval_set=eval_set_map[self.version],
output_dir=output_dir,
verbose=True,
overlap_test=self.overlap_test,
data_infos=self.data_infos
)
self.nusc_eval.main(plot_examples=0, render_curves=False)
# record metrics
metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))
detail = dict()
metric_prefix = f'{result_name}_NuScenes'
for name in self.CLASSES:
for k, v in metrics['label_aps'][name].items():
val = float('{:.4f}'.format(v))
detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val
for k, v in metrics['label_tp_errors'][name].items():
val = float('{:.4f}'.format(v))
detail['{}/{}_{}'.format(metric_prefix, name, k)] = val
for k, v in metrics['tp_errors'].items():
val = float('{:.4f}'.format(v))
detail['{}/{}'.format(metric_prefix,
self.ErrNameMapping[k])] = val
detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']
detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']
return | [
"1491387884@qq.com"
] | 1491387884@qq.com |
79e354db7538f2508b3968bf27ad832defa3fe6d | 96b032b348e179483bb66f3844f4e68d6e6e52b7 | /newGame.py | 8cd8fed6c0d5f43ba153dfbd76b7cf24a3cbbc30 | [] | no_license | MohamedBelhedi/HandtrackingModule-Python | 2cd77ca9893be2ce57ce42549be1986d9f2d436b | 1da0ff0b074812b0459af47fa659c09ca2e6550b | refs/heads/main | 2023-06-29T13:51:43.968534 | 2021-07-29T06:15:07 | 2021-07-29T06:15:07 | 390,841,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py |
import cv2
import time
import HandbewegungModul as had
pT = 0
cT = 0
Bild = cv2.VideoCapture(1)
detector=had.handDetector()
while True:
success, img = Bild.read()
img=detector.findHands(img)
lmList=detector.findPosition(img)
if len(lmList) !=0:
print(lmList[4])
cT = time.time()
fps = 1 / (cT - pT)
pT = cT
cv2.putText(img, str(int(fps)), (5, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
cv2.imshow("Image", img)
cv2.waitKey(1) | [
"mbelhedi@outlook.com"
] | mbelhedi@outlook.com |
136f8b9de3645e4f3f6f19676e2d6cb1d3d5e2da | a1cde280b0e0403c99a96816876ea015a8322339 | /TimeTracker/TimeTracker/tests/test_dummy.py | e2ed5cd2194ba58b26531651b0b5e6d4631136f0 | [
"MIT"
] | permissive | TreasonableShorebirds/TimeTracker | a0e5c37bfc9b9cd749627a675f6ed8c46a1bf483 | 8f0593dbe976fab4d510a378919bef71cb56d1ee | refs/heads/master | 2020-04-16T20:34:04.453527 | 2019-02-12T20:46:18 | 2019-02-12T20:46:18 | 165,901,636 | 0 | 0 | MIT | 2019-02-23T00:44:35 | 2019-01-15T18:23:16 | Python | UTF-8 | Python | false | false | 573 | py | from django.test import TestCase
class MyTestClass(TestCase):
@classmethod
def setUpTestData(cls):
print("setUpTestData: Run once to set up non-modified data for all class methods.")
pass
def setUp(self):
print("setUp: Run once for every test method to setup clean data.")
pass
def test_false_is_false(self):
print("Method: test_false_is_false.")
self.assertFalse(False)
def test_one_plus_one_equals_two(self):
print("Method: test_one_plus_one_equals_two.")
self.assertEqual(1 + 1, 2)
| [
"gesparza3@mail.csuchico.edu"
] | gesparza3@mail.csuchico.edu |
19c481922231ef9bdcb388fbd4a0ea73cfecfa51 | ca701b0182230d950af2cd7ccc6671f442a5673b | /venv/bin/gunicorn | bdf16f7f2e5a959a707caa1d9b3d6fe2d9309151 | [] | no_license | BenBowersJr/SoftwareProject | ff53db943c12a6ba45399f4cb63631bf6cc58041 | e84979ff232de9497112d25e954479f51d362c10 | refs/heads/master | 2023-04-19T12:53:38.446228 | 2021-05-10T08:04:45 | 2021-05-10T08:04:45 | 357,283,252 | 0 | 1 | null | 2021-05-06T18:13:33 | 2021-04-12T17:33:00 | Python | UTF-8 | Python | false | false | 998 | #!/Users/ben/code/SoftwareProject/venv/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'gunicorn==20.1.0','console_scripts','gunicorn'
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'gunicorn==20.1.0'
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point('gunicorn==20.1.0', 'console_scripts', 'gunicorn')())
| [
"benbowersjr.22@gmail.com"
] | benbowersjr.22@gmail.com | |
36b1844a1371f1dae6b2a418d04ff3d5d96fa528 | 841982ab8df8490ed9be26b88f0895b220f7f542 | /excE_S2.py | 5fffc0428c6f71cb93ae8999eec2aea8ba3e775b | [] | no_license | GCatarina/ED_BLBQ | ac1f3b69b27c5f79962243f7a6e4ef70b3badd00 | 2f04f8976eddbfc28323b39aedd322851c5e7687 | refs/heads/main | 2023-06-15T10:26:11.168503 | 2021-07-13T03:03:01 | 2021-07-13T03:03:01 | 385,212,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,788 | py | # packages #
import sys
import numpy as np
import time
from quspin.basis import spin_basis_1d
from quspin.operators import hamiltonian
###############################################################################
# general functions #
# Hamiltonian: BLBQ, 1D
def H_BLBQ_1D(N,J,beta,BC,basis):
#J
SpSm = [[J/2,i,(i+1)%N] for i in range(N-1+BC)]
SmSp = [[J/2,i,(i+1)%N] for i in range(N-1+BC)]
SzSz = [[J,i,(i+1)%N] for i in range(N-1+BC)]
#β
SzSzSzSz = [[beta*J,i,i,(i+1)%N,(i+1)%N] for i in range(N-1+BC)]
SzSpSzSm = [[beta*J/2,i,i,(i+1)%N,(i+1)%N] for i in range(N-1+BC)]
SzSmSzSp = [[beta*J/2,i,i,(i+1)%N,(i+1)%N] for i in range(N-1+BC)]
SpSzSmSz = [[beta*J/2,i,i,(i+1)%N,(i+1)%N] for i in range(N-1+BC)]
SmSzSpSz = [[beta*J/2,i,i,(i+1)%N,(i+1)%N] for i in range(N-1+BC)]
SpSpSmSm = [[beta*J/4,i,i,(i+1)%N,(i+1)%N] for i in range(N-1+BC)]
SpSmSmSp = [[beta*J/4,i,i,(i+1)%N,(i+1)%N] for i in range(N-1+BC)]
SmSpSpSm = [[beta*J/4,i,i,(i+1)%N,(i+1)%N] for i in range(N-1+BC)]
SmSmSpSp = [[beta*J/4,i,i,(i+1)%N,(i+1)%N] for i in range(N-1+BC)]
static = [
["+-",SpSm],
["-+",SmSp],
["zz",SzSz],
["zzzz", SzSzSzSz],
["z+z-", SzSpSzSm],
["z-z+", SzSmSzSp],
["+z-z", SpSzSmSz],
["-z+z", SmSzSpSz],
["++--", SpSpSmSm],
["+--+", SpSmSmSp],
["-++-", SmSpSpSm],
["--++", SmSmSpSp]
]
dynamic = []
no_checks = dict(check_pcon=False,check_symm=False,check_herm=False)
H = hamiltonian(static,dynamic,basis=basis,dtype=np.float64,**no_checks)
return H
# Sz(i) operator
def Szi_op(i,basis):
Sz = [[1,i]]
static = [
['z',Sz]
]
dynamic = []
no_checks = dict(check_pcon=False,check_symm=False,check_herm=False)
Szi = hamiltonian(static,dynamic,basis=basis,dtype=np.float64,**no_checks)
return Szi
# Sz operator
def Sz_op(Nsites,basis):
Sz = 0
for i in range(Nsites):
Sz += Szi_op(i,basis)
return Sz
# S^2 operator
def S2_op(Nsites,basis):
Sz = Sz_op(Nsites,basis)
#S+S-
SpSm = [[1,i,j] for i in range(Nsites) for j in range(Nsites)]
static = [
['+-',SpSm]
]
dynamic = []
no_checks = dict(check_pcon=False,check_symm=False,check_herm=False)
SpSm = hamiltonian(static,dynamic,basis=basis,dtype=np.float64,**no_checks)
S2 = SpSm + np.dot(Sz,Sz) - Sz
return S2
###############################################################################
# main #
start_time = time.time()
# read inputs
if len(sys.argv) != 9:
print('Error: run code as #python_<code.py>_<s>_<N>_<J(meV)>_<beta>_<BC>'
+ '_<Sz>_<DeltaE(meV)>_<nLanczos>\n')
sys.exit(0)
# physical parameters
## s
try:
s = float(sys.argv[1])
except:
print('Error: insert integer or half-integer s > 0\n')
sys.exit(0)
if (2*s)%1 != 0 or s <= 0:
print('Error: insert integer or half-integer s > 0\n')
sys.exit(0)
## N
try:
N = int(sys.argv[2])
except:
print('Error: insert integer N >= 2\n')
sys.exit(0)
if N%1 != 0 or N <= 1:
print('Error: insert integer N >= 2\n')
sys.exit(0)
## J
try:
J = float(sys.argv[3])
except:
print('Error: insert real J != 0\n')
sys.exit(0)
if J==0:
print('Error: insert real J != 0\n')
sys.exit(0)
## beta
try:
beta = float(sys.argv[4])
except:
print('Error: insert real beta\n')
sys.exit(0)
## BC
try:
BC = int(sys.argv[5])
except:
print('Error: insert BC=0 (1) for open (periodic) boundary conditions\n')
sys.exit(0)
if BC!=0 and BC!=1:
print('Error: insert BC=0 (1) for open (periodic) boundary conditions\n')
sys.exit(0)
# other parameters
## Sz
try:
Sz = float(sys.argv[6])
except:
print('Error: insert Sz in [-N*s, -N*s+1, ..., N*s]\n')
sys.exit(0)
if Sz%1 != (N*s)%1 or abs(Sz) > N*s:
print('Error: insert Sz in [-N*s, -N*s+1, ..., N*s]\n')
sys.exit(0)
## DeltaE
try:
DeltaE = float(sys.argv[7])
except:
print('Error: insert real DeltaE >= 0\n')
sys.exit(0)
if DeltaE < 0:
print('Error: insert real DeltaE >= 0\n')
sys.exit(0)
## nLanczos
try:
nLanczos = int(sys.argv[8])
except:
print('Error: insert integer nLanczos > 0\n')
sys.exit(0)
if nLanczos%1 != 0 or nLanczos <= 0:
print('Error: insert integer nLanczos > 0\n')
sys.exit(0)
# open writing file
fw = open("results_excE-S2/s" + str(s) + "_N" + str(N) + "_J" + str(J)
+ "meV_beta" + str(beta) + "_BC" + str(BC) + "_Sz" + str(Sz) + "_DeltaE"
+ str(DeltaE) + "meV_nLanczos" + str(nLanczos) + ".txt", "w")
# basis
if (2*s)%2 == 0:
basis = spin_basis_1d(N, m=Sz/N, S=str(int(s)), pauli=False)
else:
basis = spin_basis_1d(N, m=Sz/N, S=str(int(2*s)) + '/2', pauli=False)
t1 = time.time() - start_time
# Hamiltonian
H = H_BLBQ_1D(N,J,beta,BC,basis)
t2 = time.time() - start_time
# diagonalization
En,psin = H.eigsh(k=nLanczos, which='SA')
t3 = time.time() - start_time
# nmax
nmax = len(En)
for n in range(1,len(En)):
if En[n]-En[0] > DeltaE:
nmax = n
break
if nmax==len(En):
print("Warning: larger nLanczos is required\n")
# excitation energies
excEn = [En[n]-En[0] for n in range(1,nmax)]
# S^2 operator
S2op = S2_op(N,basis)
# <psin|S^2|psin>
S2n = [np.dot(psin[:,n].conj(),S2op.dot(psin[:,n])) for n in range(nmax)]
t4 = time.time() - start_time
# outputs
for n in range(nmax):
fw.write("#E" + str(n) + " = " + str(En[n]) + " meV\n")
fw.write("--------------------\n\n")
fw.write("#List of excitation energies (meV):\n")
fw.write(str(excEn))
fw.write("\n\n")
fw.write("#List of S^2:\n")
fw.write(str(S2n))
fw.write("\n\n")
fw.write("--------------------\n")
fw.write("#time to initialize and find basis = " + str(t1) + " s\n")
fw.write("#time to build Hamiltonian = " + str(t2-t1) + " s\n")
fw.write("#time to diagonalize = " + str(t3-t2) + " s\n")
fw.write("#time to compute excitation energies and S^2 = " + str(t4-t3)
+ " s\n")
fw.write("#total time = " + str(time.time() - start_time) + " s\n")
## close file
fw.close()
###############################################################################
| [
"noreply@github.com"
] | GCatarina.noreply@github.com |
be5ad7ebe947af5dbbf5b7430189e72f8f740454 | 420f43ca145ac4482a56b57d77414b6b1ec45e83 | /app.py | 57be4cdd66d00efca142e15d2149188441bda55b | [] | no_license | msindrasena/surfs_up | fe918147b472810ca7018cf8345a2783142358ff | d1763018e0005c1efd9a515d0d4c9e75a9831429 | refs/heads/master | 2022-11-25T18:24:52.701925 | 2020-08-01T19:53:59 | 2020-08-01T19:53:59 | 283,278,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,379 | py | # import dependencies
import datetime as dt
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
# Set up the Database
engine = create_engine("sqlite:///hawaii.sqlite")
# Automap base function
Base = automap_base()
# Reflect tables
Base.prepare(engine, reflect=True)
# Save references to table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create session link
session = Session(engine)
# Flask App
app = Flask(__name__)
# Define the welcome route
@app.route("/")
def welcome():
return(
'''
Welcome to the Climate Analysis API!
Available Routes:
/api/v1.0/precipitation
/api/v1.0/stations
/api/v1.0/tobs
/api/v1.0/temp/start/end
''')
# Return precipitation data for last year
@app.route("/api/v1.0/precipitation")
def precipitation():
prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
precipitation = session.query(Measurement.date, Measurement.prcp).\
filter(Measurement.date >= prev_year).all()
precip = {date: prcp for date, prcp in precipitation}
return jsonify(precip)
# Define route and route name
@app.route("/api/v1.0/stations")
def stations():
results = session.query(Station.station).all()
stations = list(np.ravel(results))
return jsonify(stations=stations)
@app.route("/api/v1.0/tobs")
def temp_monthly():
prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
results = session.query(Measurement.tobs).\
filter(Measurement.station == 'USC00519281').\
filter(Measurement.date >= prev_year).all()
temps = list(np.ravel(results))
return jsonify(temps=temps)
# Summary Statistics
@app.route("/api/v1.0/temp/<start>")
@app.route("/api/v1.0/temp/<start>/<end>")
def stats(start=None, end=None):
sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
if not end:
results = session.query(*sel).\
filter(Measurement.date <= start).all()
temps = list(np.ravel(results))
return jsonify(temps)
results = session.query(*sel).\
filter(Measurement.date >= start).\
filter(Measurement.date <= end).all()
temps = list(np.ravel(results))
return jsonify(temps=temps)
| [
"65820576+msindrasena@users.noreply.github.com"
] | 65820576+msindrasena@users.noreply.github.com |
b3390cd0288fa39892f5fae94162d04d20576a13 | ee96ec6e09b0cc1af28ec7b77808eb4fa6611ca8 | /components/collector/src/source_collectors/jira/base.py | 294eed28eddef494b0449c7538f2de4fc936e8c0 | [
"Apache-2.0"
] | permissive | Erik-Stel/quality-time | eb1b8db2022a91f06fc0edfc966dbec7a972b88c | 602b6970e5d9088cb89cc6d488337349e54e1c9a | refs/heads/master | 2023-03-28T13:22:11.043108 | 2021-03-18T14:27:18 | 2021-03-18T14:27:18 | 269,277,099 | 0 | 0 | Apache-2.0 | 2021-03-18T14:20:21 | 2020-06-04T06:20:28 | Python | UTF-8 | Python | false | false | 1,593 | py | """Base classes for Jira collectors."""
from typing import Optional, cast
from collector_utilities.type import URL, Value
from source_model import Entities, Entity
from .issues import JiraIssues
class JiraFieldSumBase(JiraIssues):
"""Base class for collectors that sum a custom Jira field."""
field_parameter = "subclass responsibility"
entity_key = "subclass responsibility"
@classmethod
def _compute_value(cls, entities: Entities) -> Value:
"""Override to sum the field, as specified by the entity key, from the entities."""
return str(round(sum(float(entity[cls.entity_key]) for entity in entities)))
def _create_entity(self, issue: dict, url: URL) -> Entity:
"""Extend to also add the summed field to the entity."""
entity = super()._create_entity(issue, url)
entity[self.entity_key] = str(cast(float, self.__value_of_field_to_sum(issue)))
return entity
def _include_issue(self, issue: dict) -> bool:
"""Override to only include issues that have a sum."""
return self.__value_of_field_to_sum(issue) is not None
def _fields(self) -> str:
"""Extend to also get the field this collector needs to sum."""
return super()._fields() + "," + cast(str, self._parameter(self.field_parameter))
def __value_of_field_to_sum(self, issue: dict) -> Optional[float]:
"""Return the value of the issue field that this collector is to sum."""
value = issue["fields"].get(self._parameter(self.field_parameter))
return value if value is None else float(value)
| [
"noreply@github.com"
] | Erik-Stel.noreply@github.com |
bd8c41077e1be201fa4c44164e0024999fdd435c | 94b9f12b25818aa616e0a57923a633b3c237f78f | /krazapp/views.py | 6fa2679a2a69a97ed936fd2d032af565a56497a0 | [] | no_license | lrodriguezjb/django-models | 933409797fd17a78da513f2d8041a12d5f3a1083 | ed47082d803f428b0c47d008295a8d3e6482022c | refs/heads/master | 2021-09-23T07:47:28.339148 | 2020-01-16T09:53:47 | 2020-01-16T09:53:47 | 234,256,719 | 0 | 0 | null | 2021-09-22T18:34:42 | 2020-01-16T07:03:53 | Python | UTF-8 | Python | false | false | 743 | py | from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from django.views.generic import TemplateView
from django.urls import reverse_lazy
from .models import Post
class HomePage(ListView):
template_name = 'home.html'
model = Post
class PostDetailView(DetailView):
template_name = 'posts_detail.html'
model = Post
class BlogCreateView(CreateView):
model= Post
template_name = 'post_new.html'
fields = ['title','author', 'body']
class BlogUpdateView(UpdateView):
model= Post
template_name = 'post_edit.html'
fields = ['title', 'body']
class BlogDeleteView(DeleteView):
template_name = 'delete_post.html'
model = Post
success_url = reverse_lazy('home') | [
"lrodriguezjb@gmail.com"
] | lrodriguezjb@gmail.com |
8f5c92870e65b7077e08adbdadf003de67456181 | 83575716464710c8cd62a9a1e44f5c415afe6fe4 | /projeto/server_remoto/migrations/0001_initial.py | e4212f050b5507ebfb68dc5b0f8d7a5f1e50ded0 | [] | no_license | lsantos0142/serverremoto | aabd3e2ba5560014431da3590d4d19107d0f2c7e | 4b4f15afb8a7c2ae7ddbdaeac6d65e69d1017efa | refs/heads/main | 2023-06-29T09:04:03.790510 | 2021-07-28T16:41:23 | 2021-07-28T16:41:23 | 384,839,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,687 | py | # Generated by Django 3.2.5 on 2021-07-16 21:13
from django.conf import settings
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
import server_remoto.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='AtualizaServer',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data_atualizacao', models.DateTimeField(verbose_name='Última Atualização')),
],
),
migrations.CreateModel(
name='Imunobiologico',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('imunobiologico', models.CharField(max_length=30)),
('doses', models.IntegerField()),
('dias_prox_dose', models.SmallIntegerField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Paciente',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('CPF', server_remoto.models.EmptyStringToNoneField(blank=True, default=None, max_length=11, null=True, unique=True)),
('CNS', server_remoto.models.EmptyStringToNoneField(blank=True, default=None, max_length=15, null=True, unique=True)),
('nome', models.CharField(max_length=100)),
('nomeMae', models.CharField(max_length=100)),
('nomeSocial', models.CharField(blank=True, max_length=100)),
('dataNascimento', models.DateField()),
('sexo', models.CharField(choices=[('FEMININO', 'FEMININO'), ('MASCULINO', 'MASCULINO'), ('IGNORADO', 'IGNORADO')], max_length=10)),
('raca', models.CharField(choices=[('AMARELA', 'AMARELA'), ('BRANCA', 'BRANCA'), ('INDIGENA', 'INDIGENA'), ('NAO INFORMADA', 'NAO INFORMADA'), ('PARDA', 'PARDA'), ('PRETA', 'PRETA')], max_length=20)),
('telefone', models.IntegerField()),
('gestante', models.BooleanField()),
('puerpera', models.BooleanField()),
('pais', models.CharField(max_length=100)),
('UF', models.CharField(choices=[('', ''), ('AC', 'AC'), ('AL', 'AL'), ('AM', 'AM'), ('AP', 'AP'), ('BA', 'BA'), ('CE', 'CE'), ('DF', 'DF'), ('ES', 'ES'), ('GO', 'GO'), ('MA', 'MA'), ('MT', 'MT'), ('MS', 'MS'), ('MG', 'MG'), ('PA', 'PA'), ('PB', 'PB'), ('PR', 'PR'), ('PE', 'PE'), ('PI', 'PI'), ('RJ', 'RJ'), ('RN', 'RN'), ('RS', 'RS'), ('RO', 'RO'), ('RR', 'RR'), ('SC', 'SC'), ('SP', 'SP'), ('SE', 'SE'), ('TO', 'TO')], max_length=2)),
('municipio', models.CharField(max_length=100)),
('zona', models.CharField(choices=[('RURAL', 'RURAL'), ('URBANA', 'URBANA')], max_length=6)),
('logradouro', models.CharField(max_length=100)),
('numero', models.IntegerField()),
('bairro', models.CharField(max_length=100)),
('complemento', models.CharField(blank=True, max_length=10)),
('email', models.CharField(blank=True, max_length=100)),
],
),
migrations.CreateModel(
name='Perdas',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('estabelecimento', models.CharField(max_length=100)),
('data', models.DateField()),
('lote', models.CharField(max_length=100)),
('falha_equip', models.IntegerField()),
('falha_trans', models.IntegerField()),
('falta_energ', models.IntegerField()),
('frasc_trans', models.IntegerField()),
('imunobiologico', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='server_remoto.imunobiologico', verbose_name='Imunobiológico')),
],
),
migrations.CreateModel(
name='Lote',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lote', models.CharField(max_length=100)),
('validade', models.DateField(null=True, verbose_name='Data de Validade do Lote')),
('imunobiologico', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='server_remoto.imunobiologico', verbose_name='Imunobiológico')),
],
),
migrations.CreateModel(
name='Imunizacao',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comorbidades', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, default=None, max_length=150, verbose_name='Comorbidades'), blank=True, size=None)),
('CRM_medico_resp', models.IntegerField(blank=True, default=None, null=True, verbose_name='CRM médico responsável')),
('num_BPC', models.IntegerField(blank=True, default=None, null=True, verbose_name='Número do BPC')),
('dose', models.CharField(choices=[('UNICA', 'UNICA'), ('1º DOSE', '1º DOSE'), ('2º DOSE', '2º DOSE')], max_length=16, verbose_name='Dose')),
('via_admn', models.CharField(choices=[('EV', 'ENDOVENOSA'), ('ID', 'INTRADERMICA'), ('IM', 'INTRAMUSCULAR'), ('O', 'ORAL'), ('SC', 'SUBCUTANEA')], max_length=20, verbose_name='Via de Administração')),
('local_admn', models.CharField(choices=[('DD', 'DELTOIDE DIREITO'), ('DE', 'DELTOIDE ESQUERDO'), ('G', 'GLUTEO'), ('FL', 'LOCAL DO FERIMENTO'), ('VLD', 'VASTO LATERAL DA COXA DIREITO'), ('VLE', 'VASTO LATERAL DA COXA ESQUERDA'), ('VGD', 'VENTROGLUTEO DIREITO'), ('VGE', 'VENTROGLUTEO ESQUERDO')], max_length=20, verbose_name='Local de Administração')),
('grupo', models.CharField(choices=[('AEROVIARIOS', 'AEROVIARIOS'), ('COMORBIDADE', 'COMORBIDADE'), ('ESTUDO CLINICO', 'ESTUDO CLINICO'), ('IDOSO', 'IDOSO'), ('IDOSO EM ILPI', 'IDOSO EM ILPI'), ('INDIGENAS', 'INDIGENAS'), ('METROVIARIOS/CPTM', 'METROVIARIOS/CPTM'), ('MOTORISTAS E COBRADORES DE ONIBUS', 'MOTORISTAS E COBRADORES DE ONIBUS'), ('PESSOA >= 18 ANOS PORTADORA DE DEFICIENCIA RESIDENTES EM RI', 'PESSOA >= 18 ANOS PORTADORA DE DEFICIENCIA RESIDENTES EM RI'), ('PESSOA COM DEFICIENCIA', 'PESSOA COM DEFICIENCIA'), ('PESSOA COM DEFICIENCIA PERMANENTE SEVERA', 'PESSOA COM DEFICIENCIA PERMANENTE SEVERA'), ('POPULACAO EM GERAL', 'POPULACAO EM GERAL'), ('POPULACAO EM SITUACAO DE RUA', 'POPULACAO EM SITUACAO DE RUA'), ('PORTUARIOS', 'PORTUARIOS'), ('QUILOMBOLA', 'QUILOMBOLA'), ('RIBEIRINHAS', 'RIBEIRINHAS'), ('TRABALHADOR DA EDUCACAO', 'TRABALHADOR DA EDUCACAO'), ('TRABALHADOR DA SEGURANCA PUBLICA', 'TRABALHADOR DA SEGURANCA PUBLICA'), ('TRABALHADOR DE SAUDE', 'TRABALHADOR DE SAUDE')], max_length=100, verbose_name='Grupo de Atendimento')),
('estrategia', models.CharField(choices=[('CAMPANHA INDISCRIMINADA', 'CAMPANHA INDISCRIMINADA')], max_length=100, verbose_name='Estratégia')),
('data_aplic', models.DateField(null=True, verbose_name='Data de Aplicação')),
('data_apraz', models.DateField(blank=True, null=True, verbose_name='Data de Aprazamento')),
('estado_1_dose', models.CharField(blank=True, max_length=100, null=True, verbose_name='Estado Primeira Dose')),
('pais_1_dose', models.CharField(blank=True, max_length=100, null=True, verbose_name='País Primeira Dose')),
('imunobiologico', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='server_remoto.imunobiologico', verbose_name='Imunobiológico')),
('lote', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='server_remoto.lote', verbose_name='Lote')),
('paciente', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='server_remoto.paciente', verbose_name='Paciente')),
('vacinador', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Vacinador')),
],
),
]
| [
"lucasnego0142@gmail.com"
] | lucasnego0142@gmail.com |
9ce382f89143b81d007c0337d557504f92fd6ab6 | a3972cb6ba32abd18b374975f4abd5318bc95f09 | /project/venv/bin/coverage-3.7 | 74c0899d928eae747e8ccb188a682b2769077318 | [] | no_license | ssr03/MiniDelivery | c57bb45e497cab34787473925663ace46dbb6b2d | 659d9757d1f369a6713aa5a66bab2aa5d6381b8e | refs/heads/master | 2020-07-30T15:05:01.401229 | 2019-09-23T11:52:51 | 2019-09-23T11:52:51 | 210,267,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | 7 | #!/Users/b201903146/Desktop/rookiehero/project/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from coverage.cmdline import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"43363127+ssr03@users.noreply.github.com"
] | 43363127+ssr03@users.noreply.github.com |
1dc78851f58a021acca413f2f3ac7d3fae1d14c7 | 521504174d832b66a70692e51c80ed1c65004c2d | /nvpr_examples/skia/third_party/externals/gyp/pylib/gyp/generator/msvs_test.py | ca89a074dcdebb5065b0b65cda3956f16220e40c | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause"
] | permissive | csevenr/NVprSDK | 5d3909b74d890fd0c43ddcdb82e1472fade44598 | 509b75299ccc5a974ce3a98e60b47ea520b0126b | refs/heads/master | 2020-09-08T14:20:35.134543 | 2016-07-07T17:57:33 | 2016-07-07T17:57:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the msvs.py file. """
import gyp.generator.msvs as msvs
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_GetLibraries(self):
self.assertEqual(
msvs._GetLibraries({}),
[])
self.assertEqual(
msvs._GetLibraries({'libraries': []}),
[])
self.assertEqual(
msvs._GetLibraries({'other':'foo', 'libraries': ['a.lib']}),
['a.lib'])
self.assertEqual(
msvs._GetLibraries({'libraries': ['a.lib', 'b.lib', 'c.lib', '-lb.lib',
'-lb.lib', 'd.lib', 'a.lib']}),
['c.lib', 'b.lib', 'd.lib', 'a.lib'])
if __name__ == '__main__':
unittest.main()
| [
"markkilgard@gmail.com"
] | markkilgard@gmail.com |
ac09c44aaf6a06b59c0016054c4618d7088474af | 9e9253efdeb074b3b2212266d0d4c5945c4995fc | /src/obsolete/addons/core-generator/chisel/tools/generate-macros-test | 19d01f7bdd635a21133c1d751459cc0a6f6c6401 | [
"BSD-3-Clause"
] | permissive | pkerichang/hammer | bfa47964c68c8d06074e67f04367903906b46478 | 64b48bd407e30ac7040c2dcc0a3a514f7bf2824e | refs/heads/master | 2020-03-29T01:51:08.462917 | 2018-09-16T18:57:30 | 2018-09-16T19:00:24 | 149,409,875 | 0 | 0 | BSD-3-Clause | 2018-09-19T07:26:54 | 2018-09-19T07:26:54 | null | UTF-8 | Python | false | false | 4,139 | #!/usr/bin/env python3
# Copyright 2017 Edward Wang <edward.c.wang@compdigitec.com>
# -*- coding: utf-8 -*-
#
# generate-macros-test
import json
import os
import unittest
from importlib.machinery import SourceFileLoader
g = SourceFileLoader("generate-macros", os.path.dirname(os.path.realpath(__file__)) + "/generate-macros").load_module()
class TestGenerateMacros(unittest.TestCase):
def test_rw(self):
gen_mem = g.parseLine('name test_1234 depth 4096 width 64 ports rw')
self.assertEqual(gen_mem, g.Mem(name='test_1234', depth=4096, width=64, ports=['rw'], mask_gran=None))
gen_json = g.memToJSON(gen_mem)
correct_json = json.loads(r"""
{
"type": "sram",
"name": "test_1234",
"depth": 4096,
"width": 64,
"ports": [
{
"clock port name": "RW0_clk",
"output port name": "RW0_rdata",
"input port name": "RW0_wdata",
"address port name": "RW0_addr",
"chip enable port name": "RW0_en",
"write enable port name": "RW0_wmode"
}
]
}
""")
self.assertEqual(gen_json, correct_json)
def test_mrw(self):
gen_mem = g.parseLine('name meow888 depth 4096 width 32 ports mrw mask_gran 8')
self.assertEqual(gen_mem, g.Mem(name='meow888', depth=4096, width=32, ports=['mrw'], mask_gran=8))
gen_json = g.memToJSON(gen_mem)
correct_json = json.loads(r"""
{
"type": "sram",
"name": "meow888",
"depth": 4096,
"width": 32,
"ports": [
{
"clock port name": "RW0_clk",
"mask granularity": 8,
"output port name": "RW0_rdata",
"input port name": "RW0_wdata",
"address port name": "RW0_addr",
"mask port name": "RW0_wmask",
"chip enable port name": "RW0_en",
"write enable port name": "RW0_wmode"
}
]
}
""")
self.assertEqual(gen_json, correct_json)
def test_write(self):
gen_mem = g.parseLine('name write_test_234 depth 1024 width 32 ports write')
self.assertEqual(gen_mem, g.Mem(name='write_test_234', depth=1024, width=32, ports=['write'], mask_gran=None))
gen_json = g.memToJSON(gen_mem)
correct_json = json.loads(r"""
{
"type": "sram",
"name": "write_test_234",
"depth": 1024,
"width": 32,
"ports": [
{
"clock port name": "W0_clk",
"input port name": "W0_data",
"address port name": "W0_addr",
"chip enable port name": "W0_en"
}
]
}
""")
self.assertEqual(gen_json, correct_json)
def test_mwrite(self):
gen_mem = g.parseLine('name mwrite_test_234 depth 1024 width 32 ports mwrite mask_gran 8')
self.assertEqual(gen_mem, g.Mem(name='mwrite_test_234', depth=1024, width=32, ports=['mwrite'], mask_gran=8))
gen_json = g.memToJSON(gen_mem)
correct_json = json.loads(r"""
{
"type": "sram",
"name": "mwrite_test_234",
"depth": 1024,
"width": 32,
"ports": [
{
"clock port name": "W0_clk",
"mask granularity": 8,
"input port name": "W0_data",
"address port name": "W0_addr",
"chip enable port name": "W0_en",
"mask port name": "W0_mask"
}
]
}
""")
self.assertEqual(gen_json, correct_json)
def test_read(self):
gen_mem = g.parseLine('name read_test_234 depth 8192 width 128 ports read')
self.assertEqual(gen_mem, g.Mem(name='read_test_234', depth=8192, width=128, ports=['read'], mask_gran=None))
gen_json = g.memToJSON(gen_mem)
correct_json = json.loads(r"""
{
"type": "sram",
"name": "read_test_234",
"depth": 8192,
"width": 128,
"ports": [
{
"clock port name": "R0_clk",
"output port name": "R0_data",
"address port name": "R0_addr",
"chip enable port name": "R0_en"
}
]
}
""")
self.assertEqual(gen_json, correct_json)
if __name__ == '__main__':
unittest.main()
| [
"edwardcwang@users.noreply.github.com"
] | edwardcwang@users.noreply.github.com | |
2fe810690129980a17d5dedc6792c3d76b5a39d5 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startCirq617.py | d86abac76ba4a91bd6d165619a430aabf728eccf | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,098 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=14
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[2])) # number=10
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.Y.on(input_qubit[3])) # number=5
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=6
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=7
c.append(cirq.H.on(input_qubit[1])) # number=11
c.append(cirq.SWAP.on(input_qubit[2],input_qubit[0])) # number=8
c.append(cirq.SWAP.on(input_qubit[2],input_qubit[0])) # number=9
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=12
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=13
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq617.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
a567db236a6266467b6aa31dcae46bf9eff08118 | bf13529b3352809c160aa445e0ce7c7da95259d9 | /rest_framework/filters.py | c3b846aed56583aa12123f8566984e38f832662c | [
"BSD-2-Clause"
] | permissive | gminds/rapidnewsng | 673c404e27fb3e6ae007f9ca2a66a7ab557cc006 | 7528f751f657f29f2da23a1dd160479947f87977 | refs/heads/master | 2021-01-19T07:53:04.746441 | 2015-07-18T07:30:54 | 2015-07-18T07:30:54 | 39,289,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,756 | py | """
Provides generic filtering backends that can be used to filter the results
returned by list views.
"""
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from rest_framework.compat import django_filters, six, guardian, get_model_name
from rest_framework.settings import api_settings
from functools import reduce
import operator
# `x and y or z` silently yields z whenever y is falsy; the conditional
# expression is the safe, idiomatic spelling. Behavior is unchanged here
# (FilterSet is a class, hence truthy, whenever django_filters imported).
FilterSet = django_filters.FilterSet if django_filters else None
class BaseFilterBackend(object):
    """Abstract base class for filter backends.

    Subclasses must override :meth:`filter_queryset`.
    """

    def filter_queryset(self, request, queryset, view):
        """Subclass hook: return *queryset* narrowed according to *request*."""
        raise NotImplementedError(".filter_queryset() must be overridden.")
class DjangoFilterBackend(BaseFilterBackend):
    """
    A filter backend that uses django-filter.
    """
    # FilterSet class used when a view declares `filter_fields` only.
    default_filter_set = FilterSet

    def __init__(self):
        # Fail fast if the optional django-filter dependency is missing.
        assert django_filters, 'Using DjangoFilterBackend, but django-filter is not installed'

    def get_filter_class(self, view, queryset=None):
        """
        Return the django-filters `FilterSet` used to filter the queryset.

        Precedence: an explicit `view.filter_class` wins; otherwise a
        FilterSet is generated on the fly from `view.filter_fields`;
        otherwise None (no filtering).
        """
        filter_class = getattr(view, 'filter_class', None)
        filter_fields = getattr(view, 'filter_fields', None)

        if filter_class:
            filter_model = filter_class.Meta.model

            # Guard against wiring a FilterSet to the wrong queryset.
            assert issubclass(filter_model, queryset.model), \
                'FilterSet model %s does not match queryset model %s' % \
                (filter_model, queryset.model)

            return filter_class

        if filter_fields:
            # Build a throwaway FilterSet bound to this queryset's model.
            class AutoFilterSet(self.default_filter_set):
                class Meta:
                    model = queryset.model
                    fields = filter_fields
                    order_by = True
            return AutoFilterSet

        return None

    def filter_queryset(self, request, queryset, view):
        filter_class = self.get_filter_class(view, queryset)

        if filter_class:
            # Bind the request's query parameters and return the filtered qs.
            return filter_class(request.QUERY_PARAMS, queryset=queryset).qs

        return queryset
class SearchFilter(BaseFilterBackend):
    """Filter backend driven by a ?search=... query parameter."""

    # The URL query parameter used for the search.
    search_param = api_settings.SEARCH_PARAM

    def get_search_terms(self, request):
        """
        Search terms are set by a ?search=... query parameter,
        and may be comma and/or whitespace delimited.
        """
        raw = request.QUERY_PARAMS.get(self.search_param, '')
        return raw.replace(',', ' ').split()

    def construct_search(self, field_name):
        """Translate an optionally-prefixed field name into an ORM lookup."""
        prefix = field_name[:1]
        if prefix == '^':
            return "%s__istartswith" % field_name[1:]
        if prefix == '=':
            return "%s__iexact" % field_name[1:]
        if prefix == '@':
            return "%s__search" % field_name[1:]
        return "%s__icontains" % field_name

    def filter_queryset(self, request, queryset, view):
        search_fields = getattr(view, 'search_fields', None)
        if not search_fields:
            return queryset

        lookups = [self.construct_search(str(field)) for field in search_fields]
        # Each term must match at least one field (OR across fields),
        # and all terms must match (AND across terms, via chained filters).
        for term in self.get_search_terms(request):
            term_queries = [models.Q(**{lookup: term}) for lookup in lookups]
            queryset = queryset.filter(reduce(operator.or_, term_queries))
        return queryset
class OrderingFilter(BaseFilterBackend):
    # The URL query parameter used for the ordering.
    ordering_param = api_settings.ORDERING_PARAM
    # Whitelist of orderable fields; None means "derive from the serializer".
    ordering_fields = None

    def get_ordering(self, request):
        """
        Ordering is set by a comma delimited ?ordering=... query parameter.

        The `ordering` query parameter can be overridden by setting
        the `ordering_param` value on the OrderingFilter or by
        specifying an `ORDERING_PARAM` value in the API settings.
        """
        params = request.QUERY_PARAMS.get(self.ordering_param)
        if params:
            return [param.strip() for param in params.split(',')]
        # Implicitly returns None when the parameter is absent.

    def get_default_ordering(self, view):
        """Return the view's `ordering` attribute, normalized to a tuple."""
        ordering = getattr(view, 'ordering', None)
        if isinstance(ordering, six.string_types):
            return (ordering,)
        return ordering

    def remove_invalid_fields(self, queryset, ordering, view):
        """Drop any requested ordering term that is not an allowed field."""
        valid_fields = getattr(view, 'ordering_fields', self.ordering_fields)

        if valid_fields is None:
            # Default to allowing filtering on serializer fields
            serializer_class = getattr(view, 'serializer_class')
            if serializer_class is None:
                msg = ("Cannot use %s on a view which does not have either a "
                       "'serializer_class' or 'ordering_fields' attribute.")
                raise ImproperlyConfigured(msg % self.__class__.__name__)
            # Readable serializer fields only; honor `source` remapping.
            valid_fields = [
                field.source or field_name
                for field_name, field in serializer_class().fields.items()
                if not getattr(field, 'write_only', False)
            ]
        elif valid_fields == '__all__':
            # View explicitly allows filtering on any model field
            valid_fields = [field.name for field in queryset.model._meta.fields]
            valid_fields += queryset.query.aggregates.keys()

        # `lstrip('-')` accepts both ascending and descending forms.
        return [term for term in ordering if term.lstrip('-') in valid_fields]

    def filter_queryset(self, request, queryset, view):
        ordering = self.get_ordering(request)

        if ordering:
            # Skip any incorrect parameters
            ordering = self.remove_invalid_fields(queryset, ordering, view)

        if not ordering:
            # Use 'ordering' attribute by default
            ordering = self.get_default_ordering(view)

        if ordering:
            return queryset.order_by(*ordering)

        return queryset
class DjangoObjectPermissionsFilter(BaseFilterBackend):
    """
    A filter backend that limits results to those where the requesting user
    has read object level permissions.
    """
    # Template expanded per-model into e.g. 'app.view_model'.
    perm_format = '%(app_label)s.view_%(model_name)s'

    def __init__(self):
        assert guardian, 'Using DjangoObjectPermissionsFilter, but django-guardian is not installed'

    def filter_queryset(self, request, queryset, view):
        model_cls = queryset.model
        permission = self.perm_format % {
            'app_label': model_cls._meta.app_label,
            'model_name': get_model_name(model_cls),
        }
        return guardian.shortcuts.get_objects_for_user(request.user, permission, queryset)
| [
"eddy2cold@yahoo.com"
] | eddy2cold@yahoo.com |
75b7e08950403f8f3d331abc86304d64c6ed25e2 | 314d0b2f0c4687a61e9ee4ecbd5a6823903a2678 | /qa/rpc-tests/maxblocksinflight.py | 5620e927145149b0eb678d51843f9fa6d609c804 | [
"MIT"
] | permissive | pelermu/zaap | e956f6ff2f89e02d86054f70ba32e9b3ad871b6b | 58363ba5c14fc04e4439aa7cc9a18d7870270e43 | refs/heads/master | 2020-03-27T06:36:59.900631 | 2018-08-25T18:38:38 | 2018-08-25T18:38:38 | 146,120,318 | 0 | 0 | MIT | 2018-08-25T18:35:56 | 2018-08-25T18:35:56 | null | UTF-8 | Python | false | false | 3,729 | py | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import logging
'''
In this test we connect to one node over p2p, send it numerous inv's, and
compare the resulting number of getdata requests to a max allowed value. We
test for exceeding 128 blocks in flight, which was the limit an 0.9 client will
reach. [0.10 clients shouldn't request more than 16 from a single peer.]
'''
MAX_REQUESTS = 128
class TestManager(NodeConnCB):
    # set up NodeConnCB callbacks, overriding base class

    def on_getdata(self, conn, message):
        """Count every block getdata the node sends us, keyed by block hash."""
        self.log.debug("got getdata %s" % repr(message))
        # Log the requests
        for inv in message.inv:
            if inv.hash not in self.blockReqCounts:
                self.blockReqCounts[inv.hash] = 0
            self.blockReqCounts[inv.hash] += 1

    def on_close(self, conn):
        # An unexpected disconnect (before run() finishes) fails the test.
        if not self.disconnectOkay:
            raise EarlyDisconnectError(0)

    def __init__(self):
        NodeConnCB.__init__(self)
        self.log = logging.getLogger("BlockRelayTest")

    def add_new_connection(self, connection):
        self.connection = connection
        self.blockReqCounts = {}
        self.disconnectOkay = False

    def run(self):
        """Spam random block invs and verify the node caps its getdata requests."""
        self.connection.rpc.generate(1) # Leave IBD

        numBlocksToGenerate = [8, 16, 128, 1024]
        for count in range(len(numBlocksToGenerate)):
            current_invs = []
            for i in range(numBlocksToGenerate[count]):
                # Random (nonexistent) block hashes; type 2 = MSG_BLOCK.
                current_invs.append(CInv(2, random.randrange(0, 1 << 256)))
                if len(current_invs) >= 50000:
                    # Flush in batches below the protocol's inv-per-message limit.
                    self.connection.send_message(msg_inv(current_invs))
                    current_invs = []
            if len(current_invs) > 0:
                self.connection.send_message(msg_inv(current_invs))

            # Wait and see how many blocks were requested
            time.sleep(2)

            total_requests = 0
            with mininode_lock:
                for key in self.blockReqCounts:
                    total_requests += self.blockReqCounts[key]
                    if self.blockReqCounts[key] > 1:
                        raise AssertionError("Error, test failed: block %064x requested more than once" % key)
            if total_requests > MAX_REQUESTS:
                raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
            print "Round %d: success (total requests: %d)" % (count, total_requests)

        # Mark the teardown disconnect as expected before triggering it.
        self.disconnectOkay = True
        self.connection.disconnect_node()
class MaxBlocksInFlightTest(BitcoinTestFramework):
    def add_options(self, parser):
        # Allow overriding the daemon under test via --testbinary or $zaapD.
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("zaapD", "zaapd"),
                          help="Binary to test max block requests behavior")

    def setup_chain(self):
        print "Initializing test directory "+self.options.tmpdir
        initialize_chain_clean(self.options.tmpdir, 1)

    def setup_network(self):
        # Single node; whitelist localhost so our mininode peer isn't banned.
        self.nodes = start_nodes(1, self.options.tmpdir,
                                 extra_args=[['-debug', '-whitelist=127.0.0.1']],
                                 binary=[self.options.testbinary])

    def run_test(self):
        # Drive the scenario from a mininode p2p connection in another thread.
        test = TestManager()
        test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
        NetworkThread().start() # Start up network handling in another thread
        test.run()
# Script entry point: run the test framework's standard main loop.
if __name__ == '__main__':
    MaxBlocksInFlightTest().main()
| [
"37992703+zaapnetwork@users.noreply.github.com"
] | 37992703+zaapnetwork@users.noreply.github.com |
945da60ea278654f3bc727c1bc13e89b5b69a475 | f65afea25b89cbea0c9d98c69f1f1b67d225ebe5 | /code/user_newt/kdd_20200612/baseline/baseline_0.332.py | bfb29429837c4dae0a83211bb93ccf7027f6c834 | [] | no_license | ness001/KDD2020-Debiasing-Team666 | 7685d03d65050421de22001d00db91e977195ac8 | 538ebb356691887b2a45d0b0356344d7c1ea27db | refs/heads/master | 2022-11-08T02:23:34.253899 | 2020-06-19T08:40:27 | 2020-06-19T08:40:27 | 266,018,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,295 | py | from collections import defaultdict
from tqdm import tqdm
import pandas as pd
import math
import seaborn as sns
import matplotlib.pyplot as plt
def get_sim_item(df_, user_col, item_col, use_iif=False):
    """Build an item-to-item similarity matrix from click sequences.

    Returns (sim_item_corr, user_item_dict):
      sim_item_corr -- {item: {related_item: weight}} co-occurrence weights,
                       popularity-normalized,
      user_item_dict -- {user: [items clicked, in time order]}.

    When use_iif is False the weight is position- and time-decayed;
    otherwise a plain inverse-log-frequency count is used.
    """
    df = df_.copy()
    user_item_ = df.groupby(user_col)[item_col].agg(list).reset_index()
    user_item_dict = dict(zip(user_item_[user_col], user_item_[item_col]))

    user_time_ = df.groupby(user_col)['time'].agg(list).reset_index()  # incorporate the time factor
    user_time_dict = dict(zip(user_time_[user_col], user_time_['time']))

    sim_item = {}
    item_cnt = defaultdict(int)  # number of clicks per item
    for user, items in user_item_dict.items():
        for loc1, item in enumerate(items):
            item_cnt[item] += 1
            sim_item.setdefault(item, {})
            for loc2, relate_item in enumerate(items):
                if item == relate_item:
                    continue
                t1 = user_time_dict[user][loc1]  # click timestamps of the pair
                t2 = user_time_dict[user][loc2]
                sim_item[item].setdefault(relate_item, 0)
                if not use_iif:
                    # 0.7/1.0 = direction penalty, 0.8**gap = position decay,
                    # (1 - dt*10000) = time decay. NOTE(review): assumes
                    # timestamps are normalized so gaps are ~1e-4 — confirm
                    # against the dataset, otherwise this factor can go negative.
                    if loc1 - loc2 > 0:
                        sim_item[item][relate_item] += 1 * 0.7 * (0.8**(loc1-loc2-1)) *(
                            1 - (t1 - t2) * 10000) / math.log(1 + len(items))  # backward: relate_item clicked before item
                    else:
                        sim_item[item][relate_item] += 1 * 1.0 * (0.8**(loc2-loc1-1)) * (
                            1 - (t2 - t1) * 10000) / math.log(1 + len(items))  # forward: relate_item clicked after item
                else:
                    sim_item[item][relate_item] += 1 / math.log(1 + len(items))

    # Down-weight pairs of popular items by both items' click counts.
    # (Shallow copy: the nested dicts are updated in place.)
    sim_item_corr = sim_item.copy()
    for i, related_items in sim_item.items():
        for j, cij in related_items.items():
            sim_item_corr[i][j] = cij / ((item_cnt[i] * item_cnt[j]) ** 0.2)

    return sim_item_corr, user_item_dict
def recommend(sim_item_corr, user_item_dict, user_id, top_k, item_num):
    """Score candidate items for `user_id` from item-item similarities.

    For each item in the user's history (most recent first, decayed by
    0.7**position), take its `top_k` most similar items and accumulate
    their weights; return the `item_num` highest-scoring (item, score)
    pairs, excluding items already interacted with.

    Fix: the original truncated candidates with
    `sorted(sim_item_corr[i].items(), reverse=True)[0:top_k]`, which sorts
    (item, weight) tuples by item id, so top_k kept the largest *ids*
    rather than the most *similar* items. The final ranking sorts by
    weight, making the intent clear; candidates now do the same.
    """
    rank = {}
    interacted_items = user_item_dict[user_id]
    interacted_items = interacted_items[::-1]  # most recent click first
    for loc, i in enumerate(interacted_items):
        # top_k most similar neighbours of item i, by similarity weight.
        for j, wij in sorted(sim_item_corr[i].items(),
                             key=lambda p: p[1], reverse=True)[0:top_k]:
            if j not in interacted_items:
                rank.setdefault(j, 0)
                rank[j] += wij * (0.7 ** loc)  # decay older interactions

    return sorted(rank.items(), key=lambda d: d[1], reverse=True)[:item_num]
# fill user to 50 items
def get_predict(df, pred_col, top_fill):
    """Pad each user's recommendations to exactly 50 items.

    `top_fill` is a comma-separated string of fallback item ids; they are
    appended with strictly negative scores so real recommendations always
    outrank them. Returns one row per user: [user_id, item_0 .. item_49].
    """
    top_fill = [int(t) for t in top_fill.split(',')]
    # Negative, strictly decreasing scores keep the fallback order stable.
    scores = [-1 * i for i in range(1, len(top_fill) + 1)]
    ids = list(df['user_id'].unique())
    fill_df = pd.DataFrame(ids * len(top_fill), columns=['user_id'])
    fill_df.sort_values('user_id', inplace=True)
    fill_df['item_id'] = top_fill * len(ids)
    fill_df[pred_col] = scores * len(ids)
    df = df.append(fill_df)
    df.sort_values(pred_col, ascending=False, inplace=True)
    # keep='first' after the sort keeps the highest score per (user, item).
    df = df.drop_duplicates(subset=['user_id', 'item_id'], keep='first')
    df['rank'] = df.groupby('user_id')[pred_col].rank(method='first', ascending=False)
    df = df[df['rank'] <= 50]
    # Pivot each user's top-50 item ids into 50 columns.
    df = df.groupby('user_id')['item_id'].\
        apply(lambda x: ','.join([str(i) for i in x])).str.split(',',expand=True).reset_index()
    return df
# Driver: accumulate clicks phase by phase, rebuild the similarity matrix,
# score the test users of each phase, then pad with the global top-50.
now_phase = 1
import os
user_path=os.path.expanduser('~')
# NOTE(review): Windows-style relative paths under the home directory.
train_path = os.path.join(user_path, r'kdd\data\underexpose_train')
test_path = os.path.join(user_path, r'kdd\data\underexpose_test')

recom_item = []          # accumulated [user, item, score] triples
online_user_num=0        # total distinct test users across phases
whole_click = pd.DataFrame()
times=[]
for c in tqdm(range(0,now_phase + 1)):
    print('phase:', c)
    click_train = pd.read_csv(train_path + '\\underexpose_train_click-{}.csv'.format(c), header=None,nrows=None,
                              names=['user_id', 'item_id', 'time'])
    click_test = pd.read_csv(test_path + '\\underexpose_test_click-{}.csv'.format(c), header=None,nrows=None,
                             names=['user_id', 'item_id', 'time'])

    user_test=set(click_test['user_id'].values)
    online_user_num+=len(user_test)

    # Clicks accumulate across phases; dedupe keeps the latest occurrence.
    all_click = click_train.append(click_test)
    times.append(all_click['time'].values)
    whole_click = whole_click.append(all_click)
    whole_click = whole_click.drop_duplicates(subset=['user_id', 'item_id', 'time'], keep='last')
    whole_click = whole_click.sort_values('time')

    item_sim_list, user_item = get_sim_item(whole_click, 'user_id', 'item_id', use_iif=False)

    for i in click_test['user_id'].unique():
        rank_item = recommend(item_sim_list, user_item, i, 500, 500)
        # print(len(rank_item))
        for j in rank_item:
            recom_item.append([i, j[0], j[1]])

# find most popular items
top50_click = whole_click['item_id'].value_counts().index[:50].values
top50_click = ','.join([str(i) for i in top50_click])

print('get_predict......................')
recom_df = pd.DataFrame(recom_item, columns=['user_id', 'item_id', 'sim'])
result = get_predict(recom_df, 'sim', top50_click)
# One row per online user: user_id + 50 item columns.
assert result.shape==(online_user_num,51)
print(result.head())
result.to_csv('baseline_2.csv', index=False, header=None)
"newtusst@gmail.com"
] | newtusst@gmail.com |
bbbf84bb64342131a02819a91622ca98bb81be77 | 77286d36c31f312f3a0917e1d617485ae73cb9fd | /build/dynamixel_motor/catkin_generated/pkg.installspace.context.pc.py | b064747d0bd2765f9d66a0f55b641bfbb5661f45 | [] | no_license | minumn/catkin_ws | a746eb8341813a4b5d4762b25c5f1aee6a158592 | 017ae5578a575060261c263fd7cdf025c2c1728e | refs/heads/master | 2020-04-10T16:18:44.266404 | 2018-12-10T10:03:52 | 2018-12-10T10:03:52 | 161,141,244 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context (values baked in at configure time
# from pkg.context.pc.in) — do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "dynamixel_motor"
PROJECT_SPACE_DIR = "/home/ros/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"Ben93@live.dk"
] | Ben93@live.dk |
dd28bccfbbb9e14a20b8c19e5d2019b0f14e74d3 | 3b8300fe00b26ceb7c0cec885e287a31c288d3d4 | /unit4_lesson_01_understanding_modules.py | 88ed3954e708367f16365dfa63868ff08df16a00 | [] | no_license | vamsi-bulusu/Mission-RND-PYTHON-COURSE | 168952c3d1c8c6a009153df9d4a6e7c7f597b24f | bfed6215be6b8310ade8fd373c61580af19cbc01 | refs/heads/master | 2023-03-08T19:09:36.694720 | 2019-01-05T16:32:39 | 2019-01-05T16:32:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,317 | py | __author__ = 'Kalyan'
notes = '''
modules are a abstraction feature which greatly aids in building large applications.
modules are defined in .py file (socket.py, random.py, csv.py ...) and usually contain
a set of function, data and class definitions which provide a specific functionality. This
allows for easy reuse and discovery of functionality. e.g. you can be pretty sure that
socket module exposes functionality related to communication using sockets.
Each of our lesson and assignments is a module too (though they do not expose any useful api).
Reading material:
- http://effbot.org/zone/import-confusion.htm
'''
notes_1 = '''
All these tests uses module1.py to module4.py. Take a look at them before starting on the tests.
'''
#this is a global import, generally you use only these. rarely will you use function level imports, but we are doing that
#here for the sake of testing how import works by limiting the scope of the import to each function.
import sys
import placeholders
from placeholders import *
def test_module_without_import():
    # Referencing a module name before importing it raises NameError.
    try:
        module1.greet("jack")
    except NameError as ae: # what specific exception did you get here, replace ___ with that
        print(ae) # print it out and see
    assert True
def test_module_usage_needs_import():
    # After `import`, the module object is reachable through its name.
    import module1
    assert "module1 says hi to jack" == module1.greet("jack")
def test_module_usage_multiple():
    # Multiple modules can define the same function name without clashing.
    import module1
    import module2
    assert "module1 says hi to jack" == module1.greet("jack")
    assert "module2 says hi to jack" == module2.greet("jack")
def test_module_import_affects_current_namespace():
    # `import` binds the module name only in the scope doing the import:
    # function-level imports live in locals(), module-level in globals().
    import module1

    def inner_func():
        import module2
        assert True == ('module2' in locals())
        return module2.greet("jack")

    assert "module1 says hi to jack" == module1.greet("jack")
    assert "module2 says hi to jack" == inner_func()

    assert False == ('placeholders' in locals())
    assert True == ('placeholders' in globals())

    assert True == ('module1' in locals())
    assert False == ('module1' in globals())

    assert False == ('module2' in locals())
    assert False == ('module2' in globals())
def test_module_type():
    # Modules are instances of the `module` type.
    assert 'module' == type(placeholders).__name__
def test_module_is_an_object():
    # A module is an ordinary object with attributes like __name__/__doc__.
    assert 12 == len(dir(placeholders))
    assert "placeholders" == placeholders.__name__
    assert None == placeholders.__doc__
def test_module_from_import():
    # `from X import name` binds only `name`, not the module itself.
    from module1 import greet
    assert False == ('module1' in locals())
    assert True == ('greet' in locals())
    try:
        module1.greet()
    except NameError:
        pass
    assert "module1 says hi to jack" == greet("jack")
def test_module_why_from_import_is_a_bad_idea():
    # The second `from ... import greet` silently rebinds the first.
    from module1 import greet
    from module2 import greet
    assert "module2 says hi to jack" == greet("jack")
def test_modules_are_cached():
    # Modules are imported once and cached in sys.modules; every import
    # statement afterwards just binds a new name to the same object.
    import module1
    import module1 as new_name

    def inner():
        import module1
        return module1.some_attr

    try:
        inner()
    except AttributeError: # what exception do you get here?
        pass

    # Mutating the cached module object is visible to all importers.
    module1.some_attr = 10
    assert 10 == inner()

    def inner2():
        import module1
        return module1.some_attr

    assert 10 == inner2()

    # sys.modules is the cache dict, keyed by real module name (not alias).
    assert 'dict' == type(sys.modules).__name__
    assert True == (module1 is sys.modules['module1'])
    assert False == ('new_name' in sys.modules)
    assert True == (new_name is module1)
    assert True == (new_name is sys.modules['module1'])
# Snapshot the global namespace before/after each star-import so
# test_module_star_import can diff what each one brought in.
# (Star-imports are illegal inside functions, hence module level.)
s1 = set()
s2 = set()
s3 = set()

s1 = set(dir())
from module3 import *
s2 = set(dir())
from module4 import *
s3 = set(dir())
def test_module_star_import():
    # * imports are not allowed within functions, so we had to do it at global scope
    # module3 has no __all__ (public names only); module4's __all__ controls
    # exactly what is exported, even underscore-prefixed names.
    assert {"m3_func1", "m3_func2"} == (s2 - s1)  # what did module3 import bring in.
    assert {"m4_func1", "_m4_func3"} == (s3 - s2)  # what did module4 import bring in.
three_things_i_learnt = """
- from X import * imports the module X, and creates references in the current namespace to all public objects
defined by that module (that is, everything that doesn’t have a name starting with “_”).
-
-
"""
| [
"noreply@github.com"
] | vamsi-bulusu.noreply@github.com |
c3a7a88175dedd15c4d7732706671b83f424d7b0 | c546beb9adf6f6f9918a8aeeda5317ace5546132 | /project/web/app/utils/models.py | ae1fcb499668ea7e029399c3c7c34372c2990447 | [
"MIT"
] | permissive | jroeland/teapot | fa98e96a650f77af6025d7b23460df4f84ed562d | b62dddc9d2aee13c66fa9a1f7788362078143979 | refs/heads/master | 2021-01-11T01:20:12.909710 | 2016-11-06T18:10:17 | 2016-11-06T18:10:17 | 70,722,585 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | from __future__ import unicode_literals
from django.db import models
# Some of the data to use in the given examples have an id.
# In order not to interfere with django's id system, we will use this field instead.
class Uid(models.Model):
    """Abstract base model adding a short external `uid` alongside Django's pk."""

    class Meta:
        abstract = True  # no table of its own; subclasses inherit the field

    # 4-char unique, indexed external identifier; empty-string default.
    uid = models.CharField(max_length = 4, db_index = True, unique = True, default = "", blank = False, null = False)
"jaime.roeland@asoreco.com"
] | jaime.roeland@asoreco.com |
c6474f1ae4c1ef0f0389dbd7e5c7cd258f6514ac | 42ddde3ae707ea25329f360dea331f8ca5d4b0d8 | /Collections/properties.py | b5aeb71a39f52b9d17a767123888cacc2fb9748b | [] | no_license | MinnuMariyaJoy/luminarpythoncode | 326b4a387b2599ef15d3138a2e1d214682c648f8 | 121a4e85ddb6a69f56b0c0d7830a360a9e427c0c | refs/heads/master | 2023-04-04T06:31:25.606966 | 2021-04-16T09:52:23 | 2021-04-16T09:52:23 | 358,550,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | #How to define?
#Check whether it support heterogenous data
#Dupicated valued allowed or not
#Insertion order preserved or not
#Mutable or imutable
lst=[10,"minnu",10.5,"bigdata",True]
print(lst)
#10 int
#sabir string
#true boolean
#each individual entity is treated as single entity
| [
"minnumariyaj@gmail.com"
] | minnumariyaj@gmail.com |
6d81669daebbc5f5b7097f09b892fe8d52e7f35f | e798446e7ec39c2d6d4005b17c3d1ceb01a29ba5 | /resources/util.py | 833bb5efb9f5d716636f4b496656dc31cabbe34b | [] | no_license | sxgc0477/changanhospital | a7af066f613b897abda96d6465ea5831bf38b98b | 5eeceabcca4be6af76f8eecccb7f7bf3a3de7f95 | refs/heads/master | 2021-05-14T14:51:26.272891 | 2018-01-02T06:31:20 | 2018-01-02T06:31:23 | 115,979,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | import os
import json
msg_files = list(filter(lambda x: x.endswith('.json'), os.listdir('.')))
for f in msg_files:
msg = json.load(open(f, 'r', encoding='utf-8'))
utf8f = f.split('.')[0]+'_utf.json'
print('build utf8 file name[{}]'.format(utf8f))
print("checking the encoding for message[{}]".format(msg))
#json.dumps(msg, open(utf8f, 'w'), ensure_ascii=True) | [
"sxgc0477@163.com"
] | sxgc0477@163.com |
e801da624f1f1e81603ba48969615c97fad89143 | 8ff0a962d8ea988768eb93ec7862b7da19fba7e7 | /euler7.py | 22145f9ec31f51773136928d9a79c6ff274d0c47 | [] | no_license | metodj/Project-Euler | 6c9a69054afe6f2c981ec274c641707a6346eb59 | 974c7e4d73f91f735f60d95de929027bb67c77f8 | refs/heads/master | 2021-01-12T06:35:25.810484 | 2016-12-26T14:48:37 | 2016-12-26T14:48:37 | 77,389,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | def prastevilo(n):
s = 0
for i in range(2,int(n**(1/2))+1):
if n % i == 0:
s += 1
if s == 0:
return True
else:
return False
def euler7(n):
    """Return the n-th prime (1-indexed), scanning odd candidates below 200000."""
    primes = [2]
    primes.extend(cand for cand in range(3, 200000, 2) if prastevilo(cand))
    return primes[n - 1]
def euler7_while(n):
    """Return the n-th prime (1-indexed), growing the list only as far as needed."""
    primes = [2]
    candidate = 3
    while len(primes) < n:
        if prastevilo(candidate):
            primes.append(candidate)
        candidate += 2
    return primes[n - 1]
"noreply@github.com"
] | metodj.noreply@github.com |
b6b5b34de009f8bbe527828a0a8f946d8e017c3a | 4d16f17fee9e21c9a516baa6d739faef2874d588 | /netserverfile.py | a2f5c2133b33e372df0b1e78169b0e1068fade8b | [] | no_license | abdulrafaykhan/Security-with-Python-Code | f7d04b3ea7f0835a85b5f831dd043a2eb6598f70 | 8bac007ed17cc3df1547814c55d4d4ea5efb19a2 | refs/heads/master | 2020-03-18T19:23:16.325678 | 2018-05-28T20:57:23 | 2018-05-28T20:57:23 | 135,151,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | import socket
# Setting the host, port and the size
host = "localhost"
port = 8890
size = 512
# Step 1 (Initializing the socket)
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Step 2 (Binding the socket to the address)
mysock.bind((host, port))
mysock.listen(4)
# Step 3 (Accepting the connection)
c, addr = mysock.accept()
# Step 4 (Receiving the data)
data = c.recv(size)
if data:
fileone = open("buffer.txt", "+w")
print("Connection received from: ", addr[0])
fileone.write(addr[0])
fileone.write(" : ")
fileone.write(data.decode("utf-8"))
fileone.close()
mysock.close() | [
"abdul.rafay.khan10@gmail.com"
] | abdul.rafay.khan10@gmail.com |
5b4df505f5b543a9793b58b055c262aa0ec8aa30 | e68291668b94e45ed96197c118645285a4045f43 | /getclosestpoints.py | 72314b94d3ee3e80a684433f3f7807ca1528b3c2 | [] | no_license | lukasgartmair/strainrate-analysis | 4177c2949f2033a28beef4973c8c658a3c36ccb9 | 2f917fa80c0e30190bdd7ea72c29a864579b9025 | refs/heads/master | 2021-01-10T07:43:55.065336 | 2016-10-11T09:04:03 | 2016-10-11T09:04:03 | 55,695,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 18:33:59 2015
@author: Lukas Gartmair
"""
import numpy as np
def get_clostest_points_to_trend(nopts, ranked_indices, time_corr, strain_plast):
    """Pick the `nopts` best-ranked samples from the time/strain series.

    (Function name keeps the historical 'clostest' typo for API compatibility.)
    """
    top_ranked = ranked_indices[0:nopts]
    picked_times = np.take(time_corr, top_ranked)
    picked_strains = np.take(strain_plast, top_ranked)
    return picked_times, picked_strains
| [
"Lukas_Gartmair@gmx.de"
] | Lukas_Gartmair@gmx.de |
4d846fa812fa03f6115efab9639956da04cf8ac6 | 2bf2659c31be13b41649f8f77ac0777cbdea49d6 | /user/migrations/0001_initial.py | 6fa32f1bc636b9460860ad6e839c0b69e3dab6cc | [] | no_license | akashsaingar/django_project | daa6e911ca97623adc165a09293ecba2ae016e98 | 0df8965035bf3af8f373dc2574a67c1e02d346a7 | refs/heads/main | 2023-03-17T07:01:48.700037 | 2021-03-17T06:16:57 | 2021-03-17T06:16:57 | 348,600,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | # Generated by Django 3.1.7 on 2021-03-09 16:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"akashsaingar387@gmail.com"
] | akashsaingar387@gmail.com |
42f12b01d38f701e9250c98af2dface4db16d8f0 | 947e71b34d21f3c9f5c0a197d91a880f346afa6c | /ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/supervisor_prod.py | c98346ba4b4ed71ee3909d9cf4af9d9e8b7c12f2 | [
"MIT",
"Apache-2.0",
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"OFL-1.1",
"MS-PL",
"AFL-2.1",
"GPL-2.0-only",
"Python-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] | permissive | liuwenru/Apache-Ambari-ZH | 4bc432d4ea7087bb353a6dd97ffda0a85cb0fef0 | 7879810067f1981209b658ceb675ac76e951b07b | refs/heads/master | 2023-01-14T14:43:06.639598 | 2020-07-28T12:06:25 | 2020-07-28T12:06:25 | 223,551,095 | 38 | 44 | Apache-2.0 | 2023-01-02T21:55:10 | 2019-11-23T07:43:49 | Java | UTF-8 | Python | false | false | 2,429 | py | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from storm import storm
from service import service
from supervisord_service import supervisord_service, supervisord_check_status
from resource_management.libraries.script import Script
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import format
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature
class Supervisor(Script):
    """Ambari lifecycle handler for the Storm supervisor component
    (managed through supervisord, with the logviewer as a companion service)."""

    def install(self, env):
        self.install_packages(env)
        self.configure(env)

    def configure(self, env):
        # `params` is an Ambari-generated module; imported lazily so it is
        # evaluated with the current command's environment.
        import params
        env.set_params(params)
        storm()

    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params
        env.set_params(params)
        # Only switch stack package versions when rolling upgrade is supported.
        if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
            stack_select.select_packages(params.version)

    def start(self, env, upgrade_type=None):
        import params
        env.set_params(params)
        self.configure(env)
        supervisord_service("supervisor", action="start")
        service("logviewer", action="start")

    def stop(self, env, upgrade_type=None):
        import params
        env.set_params(params)
        supervisord_service("supervisor", action="stop")
        service("logviewer", action="stop")

    def status(self, env):
        # Raises if the supervisord-managed process is not running.
        supervisord_check_status("supervisor")

    def get_log_folder(self):
        import params
        return params.log_dir

    def get_user(self):
        import params
        return params.storm_user
Supervisor().execute()
| [
"ijarvis@sina.com"
] | ijarvis@sina.com |
2adc5622beeb4801d6ba41c98dbce868e8ea881e | de75304d96e433f67dba3438f2456dd3dbb2ce08 | /scriptsLinAlg/11_equations.ipy | 31ed41d27bed0ea5b433897d4f42ac117ae134cc | [] | no_license | dalerxli/slides_linear-algebra-intro | ef7486a2779d5cd6633222662c629eae0ee59997 | 9bdbafeecd620a13e2c152bc3eb331543a5d7674 | refs/heads/master | 2023-07-14T14:35:24.395828 | 2021-08-10T17:55:04 | 2021-08-10T17:55:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | ipy | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(-5,5, 20)
y1 = 3*x + 5
y2 = 6*x + 7
y3 = 5/2*x - 1
y4 = x/2 + 0.5
y5 = -3/5*x + 8/5
y6 = -4/3*x + 7/3
y7 = 2*x + 5
y8 = 2*x + 5
def graph_equations(x, *ys):
colors = ('red', 'blue', 'orange', 'cyan')
for c,y in enumerate(ys):
plt.plot(x, y, color=colors[c])
plt.xlim(-5,5)
plt.ylim(-10,10)
plt.axhline(y=0, color='gray')
plt.axvline(x=0, color='gray')
plt.show()
graph_equations(x, y1,y2,y3)
graph_equations(x, y4,y5,y6)
graph_equations(x, y7,y8) | [
"torresc.rafael@gmail.com"
] | torresc.rafael@gmail.com |
468b8879c2d1ea37f83ca9cbb14250213709d3ac | 2eb386991d9975f0f8440d90de26e950304ac42f | /DMOJCTF2020/small_aes/solve_small_aes.py | d7538f6002a20d37951be17f9a8a391758a03e82 | [] | no_license | Quintec/CTFs2020 | 2816a66e8a486537c31e5ac25253840bc3a8ffe9 | bdaa327c9f0b0ee16ff95bafcaf65f0df8acd8b9 | refs/heads/master | 2022-12-19T21:39:14.129702 | 2020-10-01T16:49:06 | 2020-10-01T16:49:06 | 281,812,929 | 1 | 0 | null | 2020-10-01T16:49:08 | 2020-07-23T00:37:44 | null | UTF-8 | Python | false | false | 433 | py | from base64 import urlsafe_b64decode, urlsafe_b64encode
from Crypto.Cipher import AES
from itertools import product
enc = b"53rW_RiyUiwXq3PD7E4RHJuzjlHbw4YmG8wNRILXEQdBFiJZlpI2WjD_kNeQAUYG"
enc = urlsafe_b64decode(enc)
for key in product(range(256), repeat=3):
key = bytes(key) * 8
key = urlsafe_b64encode(key)
cipher = AES.new(key, AES.MODE_ECB)
if b"ctf" in cipher.decrypt(enc):
print(cipher.decrypt(enc)) | [
"stanleyzhong8@gmail.com"
] | stanleyzhong8@gmail.com |
fb344228b89fb2bfa1abc22b6952283ef0ffec1d | 4226d6b2d57ab9c3961da8e525bebdf123d4faf3 | /Classes/Stack.py | 47d25e4a3ec3edce690818002b7504c0cbc4ac80 | [] | no_license | jkishbaugh/pyEssentials | 6d279b26edcb68a870e44e7fd31f5f5d3ee937d2 | 99314ebdd9e4bd42ad414e5077c3f043f2d2ce24 | refs/heads/master | 2020-09-25T12:10:59.716082 | 2019-12-12T18:33:51 | 2019-12-12T18:33:51 | 226,002,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | #!/usr/bin/env python3
class Stack:
    """Minimal list-backed LIFO stack that prints its state as it mutates.

    Fixes: pop() probed `self._stack[0]` inside a bare `except:` to detect
    emptiness — the bare except could swallow unrelated errors; a simple
    truthiness check expresses the same thing directly.
    """
    # the three methods your class will need will be

    def __init__(self):
        self._stack = list()

    def push(self, val):
        # NOTE: falsy values (0, '', None, ...) are deliberately ignored,
        # matching the original behavior; the state is echoed either way.
        if val: self._stack.append(val)
        print(self._stack)

    def pop(self):
        # Print and remove the top element; on an empty stack just report.
        if self._stack:
            print(self._stack.pop())
        else:
            print("All done. Pat yourself on the back.")
def main():
    """Exercise the Stack: three pushes, then pop until (past) empty."""
    demo = Stack()
    for item in ("Me First!!!", "No it's my turn.", 78):
        demo.push(item)
    # One pop more than was pushed, to trigger the empty-stack message.
    for _ in range(4):
        demo.pop()


if __name__ == '__main__':
    main()
"root@JKISHBAUGH-XPS.jti.int"
] | root@JKISHBAUGH-XPS.jti.int |
2c708f1f00fdd825f8fcd9eb53d91ea5873169a0 | 59b0ebc4249f20edd0e87dc63784c6e8c138c7fd | /.history/anagrams_20180606002502.py | 12a96c38d517b42578e53a8ef9346ac3c9fc8fa9 | [] | no_license | Los4U/first_python_programs | f397da10be3ef525995f3f220e3b60012a6accaa | c3fc33a38c84abd292cb2e86de63e09434fc7fc4 | refs/heads/master | 2020-03-22T08:09:40.426118 | 2018-07-04T17:17:58 | 2018-07-04T17:17:58 | 139,748,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | import sys
from os import path
# Echo the command-line arguments (the candidate words), one per line.
# (The original initialized `words` to [] and immediately overwrote it.)
words = sys.argv[1:]
for word in words:
    print(word)
"inz.kamil.wos@gmail.com"
] | inz.kamil.wos@gmail.com |
3f653fbe285baa74853e80cd68566f7d277c4087 | 8ca33a01c1c0e9b77c2e44a6a4cd2225abae340e | /test4-5.py | a928041244e0ba1ed4c394fc1f98016578d0f2e0 | [] | no_license | luxiaofeng0112/learn-python | b66afe7c295008b9d17efd6f2a10626009427368 | 38eaf700672918ed8d424296fc78f780347e6464 | refs/heads/master | 2021-01-21T07:30:30.362172 | 2015-05-30T13:43:51 | 2015-05-30T13:43:51 | 33,612,934 | 0 | 0 | null | 2015-04-08T14:45:27 | 2015-04-08T14:45:27 | null | GB18030 | Python | false | false | 4,874 | py | #-*-coding:utf-8-*-
# Nested dictionary (original Chinese comment: "嵌套词典" = nested dictionary).
# Maps each student's name to a record dict whose keys are Chinese:
# '学号' = student ID, '性别' = gender ('男' male / '女' female),
# '年龄' = age, '成绩信息' = grade.
import math
Numberlist={
'白雪萍':{'学号':'1417140301','性别':'女','年龄':18,'成绩信息':80},
'董良':{'学号':'1417140302','性别':'女','年龄':18,'成绩信息':86},
'范佳琪':{'学号':'1417140303','性别':'女','年龄':18,'成绩信息':87},
'郭全威':{'学号':'1417140304','性别':'男','年龄':19,'成绩信息':88},
'金诗琪':{'学号':'1417140305','性别':'女','年龄':18,'成绩信息':92},
'李建':{'学号':'1417140306','性别':'男','年龄':20,'成绩信息':86},
'李奇星':{'学号':'1417140307','性别':'男','年龄':22,'成绩信息':14},
'李思婳':{'学号':'1417140308','性别':'女','年龄':18,'成绩信息':76},
'李易伦':{'学号':'1417140309','性别':'男','年龄':19,'成绩信息':76},
'李哲':{'学号':'1417140310','性别':'女','年龄':18,'成绩信息':88},
'林江':{'学号':'1417140311','性别':'男','年龄':19,'成绩信息':76},
'刘世儀':{'学号':'1417140312','性别':'男','年龄':19,'成绩信息':96},
'刘新':{'学号':'1417140313','性别':'女','年龄':18,'成绩信息':78},
'马艳霞':{'学号':'1417140314','性别':'女','年龄':18,'成绩信息':70},
'缪冬花':{'学号':'1417140315','性别':'女','年龄':18,'成绩信息':69},
'曲友光':{'学号':'1417140316','性别':'男','年龄':19,'成绩信息':77},
'汪进':{'学号':'1417140317','性别':'男','年龄':19,'成绩信息':72},
'王海漫':{'学号':'1417140318','性别':'女','年龄':18,'成绩信息':76},
'王野':{'学号':'1417140319','性别':'女','年龄':18,'成绩信息':77},
'杨朔':{'学号':'1417140320','性别':'女','年龄':18,'成绩信息':70},
'张程':{'学号':'1417140321','性别':'女','年龄':18,'成绩信息':98},
'张硕':{'学号':'1417140322','性别':'男','年龄':19,'成绩信息':77},
'张天翼':{'学号':'1417140323','性别':'女','年龄':20,'成绩信息':89},
'张肖':{'学号':'1417140324','性别':'男','年龄':21,'成绩信息':90},
'张泽正':{'学号':'1417140325','性别':'女','年龄':18,'成绩信息':80},
'章华':{'学号':'1417140326','性别':'女','年龄':18,'成绩信息':86}
}
# Print every student's name, then every student's full record.
# (Python 2 print statements; field keys are Chinese -- see Numberlist above.)
print 'Print all the student name:'
for name in Numberlist.keys():
    print name
print 'Print all the student name, student number, gender, age, grade information'
for name in Numberlist.keys():
    print 'name:' , name , '学号:' , Numberlist[name]['学号'] , '性别:' , Numberlist[name]['性别'] , '年龄:', Numberlist[name]['年龄'] , '成绩信息:' , Numberlist[name]['成绩信息']
# Usage notes (translated from the original Chinese comments);
# 'name' below stands for any name that is a key of Numberlist.
# To add a student:
#   Numberlist['name']={'学号':'14171403xx','性别':'x','年龄':xx,'成绩信息':xx}
# To delete a student:
#   del Numberlist['name']
# To modify a field:
#   Numberlist['name']['<field to modify>'] = '<new value>'
# To look up a field:
#   Numberlist['name']['<field to look up>']
# Parallel lists consumed by the summary statistics below.
# Fixed: the original was missing a comma after '王海漫', so implicit string
# concatenation merged it with '王野' into a single bogus name.
# NOTE(review): rows 5-6 repeat 林江/刘世儀/刘新 and the lists do not line up
# with the 26-entry Numberlist -- the source data itself looks inconsistent.
names=['白雪萍','董良','范佳琪',
       '郭全威','金诗琪','李建',
       '李奇星','李思婳','李易伦',
       '李哲','林江','刘世儀',
       '刘新','林江','刘世儀',
       '刘新','马艳霞','缪冬花',
       '曲友光','汪进','王海漫',
       '王野','杨朔','张程',
       '张硕','张天翼','张肖',
       '张泽正','章华']
grades=[80,86,87,
        88,92,86,
        14,76,76,
        88,76,96,
        78,70,69,
        77,72,76,
        77,70,98,
        77,89,90,
        80,86]
# NOTE(review): ages contains 89, almost certainly a data-entry typo for a
# student age -- confirm against the source records before relying on it.
ages=[18,18,18,
      18,18,20,
      22,18,19,
      18,19,19,
      18,18,18,
      19,19,18,
      18,89,18,
      19,20,21,
      18,18]
import math
# Summary statistics (Python 2 prints). The Chinese labels are:
# 全班人数 = class size, 最大/最小年龄 = max/min age, 平均年龄 = mean age,
# 最高/最低成绩 = max/min grade, 平均成绩 = mean grade.
print '---------------------------------------------------------------------'
print '全班人数',len(names) # class size (length of the names list)
print '---------------------------------------------------------------------'
print '最大年龄',max(ages) # oldest age
print '---------------------------------------------------------------------'
print '最小年龄',min(ages) # youngest age
print '---------------------------------------------------------------------'
print '平均年龄',(1.0*sum(ages)/len(ages)) # mean age; 1.0* forces float division in Python 2
print '---------------------------------------------------------------------'
print '最高成绩',max(grades) # highest grade
print '---------------------------------------------------------------------'
print '最低成绩',min(grades) # lowest grade
print '---------------------------------------------------------------------'
print '平均成绩',1.0*sum(grades)/len(grades) # mean grade
| [
"760454160@qq.com"
] | 760454160@qq.com |
a5dbdec4e80f6f652fcdfaf82244c9d8297733cc | 58b0a9b7c95861b60d7eb039be98cfc3bf655dbc | /tests_scripts/test_trim.py | 13cb572d565317b3d9a1c5b57f30195a4143a478 | [] | no_license | Arigowin/Expert-System | 74fe0c7aacc4b6e09f4df995bdf096e413b0c02e | 2dd8ac62ca832e4b8d31b316479742b06d851745 | refs/heads/master | 2021-03-27T16:05:16.009199 | 2018-04-26T16:20:15 | 2018-04-26T16:20:15 | 122,976,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,190 | py | import sys
from subprocess import Popen, PIPE
print("\n", __file__)
arg = sys.argv
i = 1
if (len(arg) > 1):
i = int(arg[1])
tmp = {'A': "A : True",
'B': "B : False",
'C': "C : True",
'D': "D : True",
'E': "E : True",
'F': "F : False",
'G': "G : False",
'H': "H : Undefined",
'I': "I : False",
'J': "J : False",
'K': "K : False",
'L': "L : False",
'M': "M : False",
'N': "N : False",
'O': "O : False",
'P': "P : False",
'Q': "Q : False",
'R': "R : False",
'S': "S : False",
'T': "T : False",
'U': "U : False",
'V': "V : False",
'W': "W : False",
'X': "X : True",
'Y': "Y : False",
'Z': "Z : False"}
for i in range(i):
process = Popen(["python3", "expert_system.py", "-cdv", "tests/trim.txt"], stdout=PIPE)
(ret, err) = process.communicate()
exit_code = process.wait()
b = False
for k in tmp:
if str(ret).find(tmp[k]) == -1:
print("Diff on", k)
b = True
if b is True:
print(ret)
break
print("%d " % (i + 1), end='', flush=True)
| [
"dolewski@student.42.fr"
] | dolewski@student.42.fr |
a8191a4664159d7c42abbb8661c38209a6b00b63 | 4e1f0ec8e73bb612985576c18c70cbc5321727c6 | /mapreduce/check.py | 1d057ae0cef70e9133cfb68063fdc35f1cb24d45 | [] | permissive | StackVista/sts-agent-integrations-core | 6b7c21185be0ee7e7768b42fb2ac3111cec8b366 | 909344f2a829256c114f261a3a3efbe27e09ba2c | refs/heads/master | 2023-07-07T14:13:39.889101 | 2021-04-30T13:29:06 | 2021-04-30T13:29:06 | 85,045,995 | 3 | 4 | BSD-3-Clause | 2023-06-26T14:53:13 | 2017-03-15T08:11:51 | Python | UTF-8 | Python | false | false | 21,148 | py |
'''
MapReduce Job Metrics
---------------------
mapreduce.job.elapsed_time The elapsed time since the application started (in ms)
mapreduce.job.maps_total The total number of maps
mapreduce.job.maps_completed The number of completed maps
mapreduce.job.reduces_total The total number of reduces
mapreduce.job.reduces_completed The number of completed reduces
mapreduce.job.maps_pending The number of maps still to be run
mapreduce.job.maps_running The number of running maps
mapreduce.job.reduces_pending The number of reduces still to be run
mapreduce.job.reduces_running The number of running reduces
mapreduce.job.new_reduce_attempts The number of new reduce attempts
mapreduce.job.running_reduce_attempts The number of running reduce attempts
mapreduce.job.failed_reduce_attempts The number of failed reduce attempts
mapreduce.job.killed_reduce_attempts The number of killed reduce attempts
mapreduce.job.successful_reduce_attempts The number of successful reduce attempts
mapreduce.job.new_map_attempts The number of new map attempts
mapreduce.job.running_map_attempts The number of running map attempts
mapreduce.job.failed_map_attempts The number of failed map attempts
mapreduce.job.killed_map_attempts The number of killed map attempts
mapreduce.job.successful_map_attempts The number of successful map attempts
MapReduce Job Counter Metrics
-----------------------------
mapreduce.job.counter.reduce_counter_value The counter value of reduce tasks
mapreduce.job.counter.map_counter_value The counter value of map tasks
mapreduce.job.counter.total_counter_value The counter value of all tasks
MapReduce Map Task Metrics
--------------------------
mapreduce.job.map.task.progress The distribution of all map task progresses
MapReduce Reduce Task Metrics
--------------------------
mapreduce.job.reduce.task.progress The distribution of all reduce task progresses
'''
# stdlib
from urlparse import urljoin
from urlparse import urlsplit
from urlparse import urlunsplit
# 3rd party
import requests
from requests.exceptions import Timeout, HTTPError, InvalidURL, ConnectionError
from simplejson import JSONDecodeError
# Project
from checks import AgentCheck
from config import _is_affirmative
# Default Settings
# NOTE(review): the constant name is misspelled (CUSTER vs CLUSTER); left
# as-is because MapReduceCheck.check() references it by this exact name.
DEFAULT_CUSTER_NAME = 'default_cluster'
# Service Check Names
YARN_SERVICE_CHECK = 'mapreduce.resource_manager.can_connect'
MAPREDUCE_SERVICE_CHECK = 'mapreduce.application_master.can_connect'
# URL Paths, relative to the ResourceManager / Application Master base URL
YARN_APPS_PATH = 'ws/v1/cluster/apps'
MAPREDUCE_JOBS_PATH = 'ws/v1/mapreduce/jobs'
# Application type and states to collect from YARN
YARN_APPLICATION_TYPES = 'MAPREDUCE'
YARN_APPLICATION_STATES = 'RUNNING'
# Metric types: how a value is submitted to the agent (see _set_metric)
HISTOGRAM = 'histogram'
INCREMENT = 'increment'
# Metrics to collect: JSON field name -> (metric name, submission type)
MAPREDUCE_JOB_METRICS = {
    'elapsedTime': ('mapreduce.job.elapsed_time', HISTOGRAM),
    'mapsTotal': ('mapreduce.job.maps_total', INCREMENT),
    'mapsCompleted': ('mapreduce.job.maps_completed', INCREMENT),
    'reducesTotal': ('mapreduce.job.reduces_total', INCREMENT),
    'reducesCompleted': ('mapreduce.job.reduces_completed', INCREMENT),
    'mapsPending': ('mapreduce.job.maps_pending', INCREMENT),
    'mapsRunning': ('mapreduce.job.maps_running', INCREMENT),
    'reducesPending': ('mapreduce.job.reduces_pending', INCREMENT),
    'reducesRunning': ('mapreduce.job.reduces_running', INCREMENT),
    'newReduceAttempts': ('mapreduce.job.new_reduce_attempts', INCREMENT),
    'runningReduceAttempts': ('mapreduce.job.running_reduce_attempts', INCREMENT),
    'failedReduceAttempts': ('mapreduce.job.failed_reduce_attempts', INCREMENT),
    'killedReduceAttempts': ('mapreduce.job.killed_reduce_attempts', INCREMENT),
    'successfulReduceAttempts': ('mapreduce.job.successful_reduce_attempts', INCREMENT),
    'newMapAttempts': ('mapreduce.job.new_map_attempts', INCREMENT),
    'runningMapAttempts': ('mapreduce.job.running_map_attempts', INCREMENT),
    'failedMapAttempts': ('mapreduce.job.failed_map_attempts', INCREMENT),
    'killedMapAttempts': ('mapreduce.job.killed_map_attempts', INCREMENT),
    'successfulMapAttempts': ('mapreduce.job.successful_map_attempts', INCREMENT),
}
# Per-counter metrics (reduce / map / total counter values)
MAPREDUCE_JOB_COUNTER_METRICS = {
    'reduceCounterValue': ('mapreduce.job.counter.reduce_counter_value', INCREMENT),
    'mapCounterValue': ('mapreduce.job.counter.map_counter_value', INCREMENT),
    'totalCounterValue': ('mapreduce.job.counter.total_counter_value', INCREMENT),
}
# Per-task metrics, split by task type (MAP vs REDUCE)
MAPREDUCE_MAP_TASK_METRICS = {
    'elapsedTime': ('mapreduce.job.map.task.elapsed_time', HISTOGRAM)
}
MAPREDUCE_REDUCE_TASK_METRICS = {
    'elapsedTime': ('mapreduce.job.reduce.task.elapsed_time', HISTOGRAM)
}
class MapReduceCheck(AgentCheck):
    """Agent check that collects MapReduce job, counter and task metrics.

    It queries the YARN ResourceManager REST API for running MAPREDUCE
    applications, then follows each application's tracking URL to its
    MapReduce Application Master REST API for job/counter/task data.

    NOTE(review): this module targets Python 2 (iteritems(), and
    dict.items()[0] indexing in check()).
    """

    def __init__(self, name, init_config, agentConfig, instances=None):
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        # Parse general (applies-to-every-job) counters
        self.general_counters = self._parse_general_counters(init_config)
        # Parse job specific counters
        self.job_specific_counters = self._parse_job_specific_counters(init_config)

    def check(self, instance):
        """Run one collection cycle for the given instance configuration."""
        # Get properties from conf file
        rm_address = instance.get('resourcemanager_uri')
        if rm_address is None:
            raise Exception('The ResourceManager URL must be specified in the instance configuration')
        collect_task_metrics = _is_affirmative(instance.get('collect_task_metrics', False))
        # Get additional tags from the conf file (deduplicated)
        tags = instance.get('tags', [])
        if tags is None:
            tags = []
        else:
            tags = list(set(tags))
        # Get the cluster name from the conf file, warning on the default
        cluster_name = instance.get('cluster_name')
        if cluster_name is None:
            self.warning("The cluster_name must be specified in the instance configuration, defaulting to '%s'" % (DEFAULT_CUSTER_NAME))
            cluster_name = DEFAULT_CUSTER_NAME
        tags.append('cluster_name:%s' % cluster_name)
        # Get the running MR applications from YARN
        running_apps = self._get_running_app_ids(rm_address)
        # Report success after gathering all metrics from the ResourceManager
        self.service_check(YARN_SERVICE_CHECK,
                           AgentCheck.OK,
                           tags=['url:%s' % rm_address],
                           message='Connection to ResourceManager "%s" was successful' % rm_address)
        # Get the jobs from each application master
        running_jobs = self._mapreduce_job_metrics(running_apps, tags)
        # Get job counter metrics
        self._mapreduce_job_counters_metrics(running_jobs, tags)
        # Get task metrics (optional, off by default)
        if collect_task_metrics:
            self._mapreduce_task_metrics(running_jobs, tags)
        # Report success after gathering all metrics from the Application Master
        if running_jobs:
            # NOTE(review): indexing dict.items() is Python 2 only; an
            # arbitrary job's tracking URL is used for the service check tag.
            job_id, metrics = running_jobs.items()[0]
            am_address = self._get_url_base(metrics['tracking_url'])
            self.service_check(MAPREDUCE_SERVICE_CHECK,
                               AgentCheck.OK,
                               tags=['url:%s' % am_address],
                               message='Connection to ApplicationManager "%s" was successful' % am_address)

    def _parse_general_counters(self, init_config):
        '''
        Parse the "general_counters" section of init_config.

        Return a dictionary mapping counter group names to counter names:
        {
            counter_group_name: [
                counter_name
            ]
        }
        '''
        job_counter = {}
        if init_config.get('general_counters'):
            # Parse the custom metrics
            for counter_group in init_config['general_counters']:
                counter_group_name = counter_group.get('counter_group_name')
                counters = counter_group.get('counters')
                if not counter_group_name:
                    raise Exception('"general_counters" must contain a valid "counter_group_name"')
                if not counters:
                    raise Exception('"general_counters" must contain a list of "counters"')
                # Add the counter_group to the job_counters if it doesn't already exist
                if counter_group_name not in job_counter:
                    job_counter[counter_group_name] = []
                for counter in counters:
                    counter_name = counter.get('counter_name')
                    if not counter_name:
                        raise Exception('At least one "counter_name" should be specified in the list of "counters"')
                    job_counter[counter_group_name].append(counter_name)
        return job_counter

    def _parse_job_specific_counters(self, init_config):
        '''
        Parse the "job_specific_counters" section of init_config.

        Return a dictionary mapping job names to counter groups to counters:
        {
            job_name: {
                counter_group_name: [
                    counter_name
                ]
            }
        }
        '''
        job_counter = {}
        if init_config.get('job_specific_counters'):
            # Parse the custom metrics
            for job in init_config['job_specific_counters']:
                job_name = job.get('job_name')
                metrics = job.get('metrics')
                if not job_name:
                    raise Exception('Counter metrics must have a "job_name"')
                if not metrics:
                    raise Exception('Jobs specified in counter metrics must contain at least one metric')
                # Add the job to the custom metrics if it doesn't already exist
                if job_name not in job_counter:
                    job_counter[job_name] = {}
                for metric in metrics:
                    counter_group_name = metric.get('counter_group_name')
                    counters = metric.get('counters')
                    if not counter_group_name:
                        raise Exception('Each counter metric must contain a valid "counter_group_name"')
                    if not counters:
                        raise Exception('Each counter metric must contain a list of "counters"')
                    # Add the counter group name if it doesn't exist for the current job
                    if counter_group_name not in job_counter[job_name]:
                        job_counter[job_name][counter_group_name] = []
                    for counter in counters:
                        counter_name = counter.get('counter_name')
                        if not counter_name:
                            raise Exception('At least one "counter_name" should be specified in the list of "counters"')
                        job_counter[job_name][counter_group_name].append(counter_name)
        return job_counter

    def _get_running_app_ids(self, rm_address, **kwargs):
        '''
        Return a dictionary of {app_id: (app_name, tracking_url)} for the running MapReduce applications
        '''
        metrics_json = self._rest_request_to_json(rm_address,
                                                  YARN_APPS_PATH,
                                                  YARN_SERVICE_CHECK,
                                                  states=YARN_APPLICATION_STATES,
                                                  applicationTypes=YARN_APPLICATION_TYPES)
        running_apps = {}
        if metrics_json.get('apps'):
            if metrics_json['apps'].get('app') is not None:
                for app_json in metrics_json['apps']['app']:
                    app_id = app_json.get('id')
                    tracking_url = app_json.get('trackingUrl')
                    app_name = app_json.get('name')
                    # Only keep applications with all three fields present
                    if app_id and tracking_url and app_name:
                        running_apps[app_id] = (app_name, tracking_url)
        return running_apps

    def _mapreduce_job_metrics(self, running_apps, addl_tags):
        '''
        Get metrics for each MapReduce job and submit them tagged by
        app_name/user_name/job_name plus *addl_tags*.

        Return a dictionary for each MapReduce job:
        {
            job_id: {
                'job_name': job_name,
                'app_name': app_name,
                'user_name': user_name,
                'tracking_url': tracking_url
            }
        }
        '''
        running_jobs = {}
        for app_id, (app_name, tracking_url) in running_apps.iteritems():
            metrics_json = self._rest_request_to_json(tracking_url,
                                                      MAPREDUCE_JOBS_PATH,
                                                      MAPREDUCE_SERVICE_CHECK)
            if metrics_json.get('jobs'):
                if metrics_json['jobs'].get('job'):
                    for job_json in metrics_json['jobs']['job']:
                        job_id = job_json.get('id')
                        job_name = job_json.get('name')
                        user_name = job_json.get('user')
                        if job_id and job_name and user_name:
                            # Build the structure to hold the information for each job ID
                            running_jobs[str(job_id)] = {'job_name': str(job_name),
                                                         'app_name': str(app_name),
                                                         'user_name': str(user_name),
                                                         'tracking_url': self._join_url_dir(tracking_url, MAPREDUCE_JOBS_PATH, job_id)}
                            tags = ['app_name:' + str(app_name),
                                    'user_name:' + str(user_name),
                                    'job_name:' + str(job_name)]
                            tags.extend(addl_tags)
                            self._set_metrics_from_json(tags, job_json, MAPREDUCE_JOB_METRICS)
        return running_jobs

    def _mapreduce_job_counters_metrics(self, running_jobs, addl_tags):
        '''
        Get custom metrics specified for each counter

        Only counters listed in the general or job-specific configuration
        are submitted; everything else in the REST response is ignored.
        '''
        for job_id, job_metrics in running_jobs.iteritems():
            job_name = job_metrics['job_name']
            # Check if the job_name exists in the custom metrics
            if self.general_counters or (job_name in self.job_specific_counters):
                job_specific_metrics = self.job_specific_counters.get(job_name)
                metrics_json = self._rest_request_to_json(job_metrics['tracking_url'],
                                                          'counters',
                                                          MAPREDUCE_SERVICE_CHECK)
                if metrics_json.get('jobCounters'):
                    if metrics_json['jobCounters'].get('counterGroup'):
                        # Cycle through all the counter groups for this job
                        for counter_group in metrics_json['jobCounters']['counterGroup']:
                            group_name = counter_group.get('counterGroupName')
                            if group_name:
                                counter_metrics = set([])
                                # Add any counters in the job specific metrics
                                if job_specific_metrics and group_name in job_specific_metrics:
                                    counter_metrics = counter_metrics.union(job_specific_metrics[group_name])
                                # Add any counters in the general metrics
                                if group_name in self.general_counters:
                                    counter_metrics = counter_metrics.union(self.general_counters[group_name])
                                if counter_metrics:
                                    # Cycle through all the counters in this counter group
                                    if counter_group.get('counter'):
                                        for counter in counter_group['counter']:
                                            counter_name = counter.get('name')
                                            # Check if the counter name is in the custom metrics for this group name
                                            if counter_name and counter_name in counter_metrics:
                                                tags = ['app_name:' + job_metrics.get('app_name'),
                                                        'user_name:' + job_metrics.get('user_name'),
                                                        'job_name:' + job_name,
                                                        'counter_name:' + str(counter_name).lower()]
                                                tags.extend(addl_tags)
                                                self._set_metrics_from_json(tags,
                                                                            counter,
                                                                            MAPREDUCE_JOB_COUNTER_METRICS)

    def _mapreduce_task_metrics(self, running_jobs, addl_tags):
        '''
        Get metrics for each MapReduce task

        Submits elapsed-time metrics for every MAP and REDUCE task of every
        running job, tagged with the task type.
        '''
        for job_id, job_stats in running_jobs.iteritems():
            metrics_json = self._rest_request_to_json(job_stats['tracking_url'],
                                                      'tasks',
                                                      MAPREDUCE_SERVICE_CHECK)
            if metrics_json.get('tasks'):
                if metrics_json['tasks'].get('task'):
                    for task in metrics_json['tasks']['task']:
                        task_type = task.get('type')
                        if task_type:
                            tags = ['app_name:' + job_stats['app_name'],
                                    'user_name:' + job_stats['user_name'],
                                    'job_name:' + job_stats['job_name'],
                                    'task_type:' + str(task_type).lower()]
                            tags.extend(addl_tags)
                            if task_type == 'MAP':
                                self._set_metrics_from_json(tags, task, MAPREDUCE_MAP_TASK_METRICS)
                            elif task_type == 'REDUCE':
                                self._set_metrics_from_json(tags, task, MAPREDUCE_REDUCE_TASK_METRICS)

    def _set_metrics_from_json(self, tags, metrics_json, metrics):
        '''
        Parse the JSON response and set the metrics

        *metrics* maps a JSON field name to a (metric_name, metric_type)
        pair; fields absent from the response are skipped.
        '''
        for status, (metric_name, metric_type) in metrics.iteritems():
            metric_status = metrics_json.get(status)
            if metric_status is not None:
                self._set_metric(metric_name,
                                 metric_type,
                                 metric_status,
                                 tags)

    def _set_metric(self, metric_name, metric_type, value, tags=None, device_name=None):
        '''
        Set a metric

        Dispatches on *metric_type* (HISTOGRAM or INCREMENT); unknown types
        are logged as errors rather than raised.
        '''
        if metric_type == HISTOGRAM:
            self.histogram(metric_name, value, tags=tags, device_name=device_name)
        elif metric_type == INCREMENT:
            self.increment(metric_name, value, tags=tags, device_name=device_name)
        else:
            self.log.error('Metric type "%s" unknown' % (metric_type))

    def _rest_request_to_json(self, address, object_path, service_name, *args, **kwargs):
        '''
        Query the given URL and return the JSON response

        Extra positional *args* are appended as URL path segments and
        *kwargs* become the query string. On any failure a CRITICAL service
        check is emitted for *service_name* and the exception is re-raised.
        '''
        response_json = None
        service_check_tags = ['url:%s' % self._get_url_base(address)]
        url = address
        if object_path:
            url = self._join_url_dir(url, object_path)
        # Add args to the url
        if args:
            for directory in args:
                url = self._join_url_dir(url, directory)
        self.log.debug('Attempting to connect to "%s"' % url)
        # Add kwargs as arguments
        if kwargs:
            query = '&'.join(['{0}={1}'.format(key, value) for key, value in kwargs.iteritems()])
            url = urljoin(url, '?' + query)
        try:
            response = requests.get(url, timeout=self.default_integration_http_timeout)
            response.raise_for_status()
            response_json = response.json()
        except Timeout as e:
            self.service_check(service_name,
                               AgentCheck.CRITICAL,
                               tags=service_check_tags,
                               message="Request timeout: {0}, {1}".format(url, e))
            raise
        except (HTTPError,
                InvalidURL,
                ConnectionError) as e:
            self.service_check(service_name,
                               AgentCheck.CRITICAL,
                               tags=service_check_tags,
                               message="Request failed: {0}, {1}".format(url, e))
            raise
        except JSONDecodeError as e:
            self.service_check(service_name,
                               AgentCheck.CRITICAL,
                               tags=service_check_tags,
                               message='JSON Parse failed: {0}, {1}'.format(url, e))
            raise
        except ValueError as e:
            self.service_check(service_name,
                               AgentCheck.CRITICAL,
                               tags=service_check_tags,
                               message=str(e))
            raise
        return response_json

    def _join_url_dir(self, url, *args):
        '''
        Join a URL with multiple directories

        Each segment is appended with exactly one slash between parts.
        '''
        for path in args:
            url = url.rstrip('/') + '/'
            url = urljoin(url, path.lstrip('/'))
        return url

    def _get_url_base(self, url):
        '''
        Return the base of a URL (scheme + network location only)
        '''
        s = urlsplit(url)
        return urlunsplit([s.scheme, s.netloc, '', '', ''])
| [
"noreply@github.com"
] | StackVista.noreply@github.com |
bccd3466d630f543aa89df829d9e325db177dfba | 24f826e18caec1197b2371b96ecdefd1be069743 | /Project2/war.py | 3300559b2309fefa26bed4af05b2b5ba6199573f | [] | no_license | harshit2118/my-python | 0096d1b5dcaff92d125e4b499ae88c53df9aacd5 | 51b616196eeb2525f9f7c2bc8d4bae0e57ab1cb9 | refs/heads/master | 2022-12-16T09:31:44.477555 | 2020-09-19T03:36:34 | 2020-09-19T03:36:34 | 286,046,113 | 0 | 0 | null | 2020-08-14T10:41:52 | 2020-08-08T13:14:05 | Python | UTF-8 | Python | false | false | 3,814 | py | '''
Welcome to the card game called War
'''
import random
# Card domain data: the four suits, the thirteen ranks, and each rank's
# numeric strength used for round comparisons (Ace high).
suits=('Hearts','Diamonds','Clubs','Spades')
ranks=('Two','Three','Four','Five','Six','Seven','Eight','Nine','Ten','Jack','Queen','King','Ace')
values={'Two':2,'Three':3,'Four':4,'Five':5,'Six':6,'Seven':7,'Eight':8,'Nine':9,'Ten':10,'Jack':11,'Queen':12,'King':13,'Ace':14}
'''
Class that gives each card its properties: suit, rank, and numeric value.
'''
class Card:
    """One playing card: a suit, a rank, and the rank's numeric value."""

    def __init__(self, suit, rank):
        self.suit = suit
        self.rank = rank
        # Numeric strength, looked up from the module-level `values` table.
        self.value = values[rank]

    def __str__(self):
        return "{} of {}".format(self.rank, self.suit)
'''
Class for the deck of 52 cards: every (suit, rank) card is created
and stored in a list.
'''
class Deck:
    """The full 52-card deck, built from the module-level suits and ranks."""

    def __init__(self):
        # One Card per (suit, rank) combination, in suit-major order --
        # identical ordering to the original nested loops.
        self.all_cards = [Card(suit, rank) for suit in suits for rank in ranks]

    def suffle_deck(self):
        """Shuffle the deck in place. (Misspelled name kept: callers use it.)"""
        random.shuffle(self.all_cards)

    def card_out(self):
        """Deal the top card; on an empty deck return the sentinel string."""
        if not self.all_cards:
            return "All cards are out"
        return self.all_cards.pop()
'''
Class for a player, who can add cards to and remove cards from
their own deck.
'''
class Player:
    """A War player: a name plus an ordered hand of cards.

    Cards are drawn from the front of the hand and winnings are appended
    to the back.
    """

    def __init__(self, name):
        self.name = name
        self.all_cards = []

    def remove_card(self):
        """Draw (and return) the card at the top of the player's hand."""
        return self.all_cards.pop(0)

    def add_card(self, new_cards):
        """Add a single card or a list of cards to the bottom of the hand."""
        # Fixed: the original used `type(new_cards) is type([])`;
        # isinstance is the idiomatic (and subclass-friendly) check.
        if isinstance(new_cards, list):
            # For MULTIPLE CARDS
            self.all_cards.extend(new_cards)
        else:
            # For SINGLE CARD
            self.all_cards.append(new_cards)

    def __str__(self):
        # Fixed: the original compared with `len(...) is 1`, which relies on
        # CPython's small-int caching; equality is the correct comparison.
        if len(self.all_cards) == 1:
            return ('Player {} has {} card left'.format(self.name,len(self.all_cards)))
        return ('Player {} have {} cards left'.format(self.name,len(self.all_cards)))
## Game Setup: two players split a shuffled 52-card deck evenly.
player_one = Player("One")
player_two = Player("Two")
new_deck = Deck()
new_deck.suffle_deck()
for x in range(26):
    player_one.add_card(new_deck.card_out())
    player_two.add_card(new_deck.card_out())
game_on = True
round_num = 0
while game_on:
    round_num += 1
    print("Round {}".format(round_num))
    # A player with no cards left loses immediately.
    # Fixed: the original used `len(...) is 0` (int identity comparison).
    if len(player_one.all_cards) == 0:
        print("Player 1 is out of the cards!!!Player 2 wins")
        game_on = False
        break
    if len(player_two.all_cards) == 0:
        print("Player 2 is out of the cards!!!Player 1 wins")
        game_on = False
        break
    # Start a new round: each player turns over their top card.
    player_one_cards = []
    player_one_cards.append(player_one.remove_card())
    player_two_cards = []
    player_two_cards.append(player_two.remove_card())
    at_war = True
    while at_war:
        # Compare the most recently drawn cards; winner takes all cards
        # currently on the table.
        if player_one_cards[-1].value > player_two_cards[-1].value:
            player_one.add_card(player_one_cards)
            player_one.add_card(player_two_cards)
            at_war = False
        elif player_one_cards[-1].value < player_two_cards[-1].value:
            player_two.add_card(player_one_cards)
            player_two.add_card(player_two_cards)
            at_war = False
        else:
            # Tie -> "war": each player must commit five more cards.
            print("WAR!!!")
            if len(player_one.all_cards) < 5:
                print("Player 1 Unable To Declare War!!!\nPlayer 2 Win!!!")
                game_on = False
                break
            elif len(player_two.all_cards) < 5:
                print("Player 2 Unable To Declare War!!!\nPlayer 1 Win!!!")
                game_on = False
                break
            else:
                for _ in range(5):
                    player_one_cards.append(player_one.remove_card())
                    player_two_cards.append(player_two.remove_card())
"hrshtjoshi238@gmail.com"
] | hrshtjoshi238@gmail.com |
c71b7a1fc8331f274ee3fdb20ad0aab1a4971c27 | 317806a5e8aaa3ae8072414f12376e6fa50858f5 | /modules/IterativeHistogram.py | 17991599c6d391e86437199e25d13c5abd4a2878 | [
"Apache-2.0"
] | permissive | rlorigro/great_lengths | 9c1cbb849ece3747d6783475fb36eb079f16a2db | 796425fe8331f715bed762b591468a16402d4bae | refs/heads/main | 2023-02-02T16:59:07.645551 | 2020-12-11T23:01:36 | 2020-12-11T23:01:36 | 320,099,408 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,100 | py | import math
import numpy
import sys
class IterativeHistogram:
    """Fixed-bin histogram that can be filled one observation at a time.

    Bins are uniform over [start, stop]. By default x == stop falls in the
    last bin (include_upper_edge); values outside [start, stop] are ignored
    unless the corresponding unbounded_*_bin flag makes an edge bin a
    catch-all. Not suitable for bin widths near float precision.
    """

    def __init__(self, start, stop, n_bins, unbounded_upper_bin=False, unbounded_lower_bin=False, include_upper_edge=True):
        """Create n_bins uniform bins spanning [start, stop]."""
        self.start = start
        self.stop = stop
        self.n_bins = n_bins
        self.histogram = numpy.zeros(n_bins)
        self.bin_size = (stop - start)/n_bins
        # Bin edges, n_bins + 1 of them (kept for inspection/plotting).
        self.edges = [start + self.bin_size*i for i in range(n_bins+1)]
        self.unbounded_upper_bin = unbounded_upper_bin
        self.unbounded_lower_bin = unbounded_lower_bin
        self.include_upper_edge = include_upper_edge

    def get_bin(self, x):
        """Return the bin index for x.

        May fall outside [0, n_bins - 1] when x is out of range and the
        edge bins are bounded; update() ignores such indices.
        """
        # Normalize x against the origin and divide by the bin width.
        bin_index = int(math.floor((x - self.start)/self.bin_size))
        # Closed on the right by default: x == stop lands in the last bin.
        if x == self.stop and self.include_upper_edge:
            bin_index = self.n_bins - 1
        # Optionally let the edge bins absorb out-of-range values.
        if self.unbounded_lower_bin and x < self.start:
            bin_index = 0
        if self.unbounded_upper_bin and x > self.stop:
            bin_index = self.n_bins - 1
        return bin_index

    def update(self, x):
        """Add one observation; silently ignore out-of-range values."""
        bin_index = self.get_bin(x)
        if 0 <= bin_index <= (self.n_bins - 1):
            self.histogram[bin_index] += 1

    def get_histogram(self):
        """Return the raw count array (a live reference, not a copy)."""
        return self.histogram

    def get_normalized_histogram(self):
        """Return the histogram scaled to sum to 1 (NaNs if it is empty)."""
        # (Removed an unused `total = sum(...)` local from the original.)
        return self.histogram/numpy.sum(self.histogram)
if __name__ == "__main__":
    # Ad-hoc smoke test covering bounded edges, out-of-range values,
    # shifted/negative ranges, and the unbounded edge-bin options.
    # Inline comments give the 1-based bin each value should land in
    # ("None" means the value is ignored).
    # test the iterative histogram
    iterative_histogram = IterativeHistogram(start=0, stop=10, n_bins=10)
    iterative_histogram.update(0)  # 1
    iterative_histogram.update(-1)  # None
    iterative_histogram.update(10)  # 10
    iterative_histogram.update(9.99999)  # 10
    iterative_histogram.update(10.0001)  # None
    iterative_histogram.update(0.5)  # 1
    iterative_histogram.update(1.5)  # 2
    iterative_histogram.update(1.0)  # 2
    iterative_histogram.update(1.99999)  # 2
    # ^ expect [2,3,0,0,0,0,0,0,0,2]
    print(iterative_histogram.get_histogram())
    iterative_histogram = IterativeHistogram(start=0, stop=1.0, n_bins=10)
    iterative_histogram.update(0)  # 1
    iterative_histogram.update(-0.1)  # None
    iterative_histogram.update(1.0)  # 10
    iterative_histogram.update(0.999999)  # 10
    iterative_histogram.update(1.00001)  # None
    iterative_histogram.update(0.05)  # 1
    iterative_histogram.update(0.15)  # 2
    iterative_histogram.update(0.10)  # 2
    iterative_histogram.update(0.199999)  # 2
    # ^ expect [2,3,0,0,0,0,0,0,0,2]
    print(iterative_histogram.get_histogram())
    iterative_histogram = IterativeHistogram(start=1, stop=2.0, n_bins=10)
    iterative_histogram.update(1 + 0)  # 1
    iterative_histogram.update(1 + -0.1)  # None
    iterative_histogram.update(1 + 1.0)  # 10
    iterative_histogram.update(1 + 0.999999)  # 10
    iterative_histogram.update(1 + 1.00001)  # None
    iterative_histogram.update(1 + 0.05)  # 1
    iterative_histogram.update(1 + 0.15)  # 2
    iterative_histogram.update(1 + 0.10)  # 2
    iterative_histogram.update(1 + 0.199999)  # 2
    # ^ expect [2,3,0,0,0,0,0,0,0,2]
    print(iterative_histogram.get_histogram())
    iterative_histogram = IterativeHistogram(start=-0.5, stop=0.5, n_bins=10)
    iterative_histogram.update(-0.5 + 0)  # 1
    iterative_histogram.update(-0.5 + -0.1)  # None
    iterative_histogram.update(-0.5 + 1.0)  # 10 right edge
    iterative_histogram.update(-0.5 + 0.999999)  # 10
    iterative_histogram.update(-0.5 + 1.00001)  # None
    iterative_histogram.update(-0.5 + 0.05)  # 1
    iterative_histogram.update(-0.5 + 0.15)  # 2
    iterative_histogram.update(-0.5 + 0.10)  # 2 ... in near-edge cases float division may shift left a bin
    iterative_histogram.update(-0.5 + 0.199999)  # 2
    # DON'T USE THIS CLASS FOR BINS WITH SIZE that NEARS FLOAT PRECISION
    # ^ expect [2,3,0,0,0,0,0,0,0,2]
    print(iterative_histogram.get_histogram())
    print(iterative_histogram.get_normalized_histogram())
    print(sum(iterative_histogram.get_normalized_histogram()))
    iterative_histogram = IterativeHistogram(start=0, stop=1.0, n_bins=10, unbounded_lower_bin=True, unbounded_upper_bin=True)
    iterative_histogram.update(0)  # 1
    iterative_histogram.update(-0.1)  # 1
    iterative_histogram.update(1.0)  # 10
    iterative_histogram.update(0.999999)  # 10
    iterative_histogram.update(1.00001)  # 10
    iterative_histogram.update(0.05)  # 1
    iterative_histogram.update(0.15)  # 2
    iterative_histogram.update(0.10)  # 2
    iterative_histogram.update(0.199999)  # 2
    # ^ expect [3,3,0,0,0,0,0,0,0,3]
    print(iterative_histogram.get_histogram())
    print(iterative_histogram.get_normalized_histogram())
    print(sum(iterative_histogram.get_normalized_histogram()))
| [
"rlorigro@ucsc.edu"
] | rlorigro@ucsc.edu |
fa5d1b2335a2a964bbf5336341028d0a2adf3417 | 1cbb86c1c9f6416f78c5ac13260f2a8bd3688b98 | /NLTK_twitter/bot.py | ea8a3219dc0cd60b77b3c94c6b0a618f02b0bea2 | [] | no_license | SrikanthTad/NLTK | 3e56dfe98052fd51ca8c984571b58c79c5fa7556 | 2fde7ee1f2e8ec6e4be5ab1323759df97efbb5d3 | refs/heads/master | 2021-07-14T02:54:22.866931 | 2017-10-19T01:23:05 | 2017-10-19T01:23:05 | 107,481,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | import nltk
from nltk.twitter import Twitter
# access_token = "918706943593549824-Z3gQO9jTS3UMxR2b4d1YP4gRrWstELr"
# access_token_secret = "Q93Y5ocsuLDKVrZIWgDkZTQaXEliO9KcikLHzpEFmpAaN"
# consumer_key = "xoUFitbD6pKfkjLBkF7SVj0iw"
# consumer_secret ="Lpgv9bgi2ns6wppoCyu8tX6JALe6vjAOZbxviEcaMo7S61sa2e"
# oauth_token = "918706943593549824-Z3gQO9jTS3UMxR2b4d1YP4gRrWstELr"
# oauth_token_secret = "Q93Y5ocsuLDKVrZIWgDkZTQaXEliO9KcikLHzpEFmpAaN"
# app_key = "xoUFitbD6pKfkjLBkF7SVj0iw"
# app_secret ="Lpgv9bgi2ns6wppoCyu8tX6JALe6vjAOZbxviEcaMo7S61sa2e"
tw = Twitter()
tw.tweets(keywords = 'cher, python, nlp, soccer, celine dion', limit =10)
| [
"s_tadise@live.concordia.ca"
] | s_tadise@live.concordia.ca |
678d156e92d850e3ef382868cc210d096eb45cb5 | 6f1e3df8a7302e45fde7914b7904ce26993c9373 | /websecurityapp/migrations/0002_auto_20191225_1739.py | 69113067db9deb98e08543f743d762d8c8a3823a | [] | no_license | Pablo-Pino/WebSecurity-632 | 3418941eb8337745e8097fe3d1677048139aae68 | 2a8612a1d73f42c49c8eaa1a28cf8d9e9e73c54a | refs/heads/master | 2023-09-01T04:25:34.849538 | 2020-06-03T22:09:59 | 2020-06-03T22:09:59 | 234,390,980 | 0 | 0 | null | 2021-09-22T19:08:32 | 2020-01-16T19:03:52 | Python | UTF-8 | Python | false | false | 2,394 | py | # Generated by Django 2.0 on 2019-12-25 17:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('websecurityapp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Actividad',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('enlace', models.URLField()),
('descripcion', models.CharField(max_length=1000)),
('borrador', models.BooleanField()),
('vetada', models.BooleanField()),
('motivoVeto', models.CharField(max_length=1000)),
('fechaCreacion', models.DateField()),
('comentable', models.BooleanField()),
('identificador', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Anexo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('anexo', models.URLField()),
],
),
migrations.CreateModel(
name='Usuario',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('telefono', models.CharField(max_length=30)),
('empresaUEquipo', models.CharField(max_length=100)),
('vetado', models.BooleanField()),
('esAdmin', models.BooleanField()),
('django_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.DeleteModel(
name='UserData',
),
migrations.AddField(
model_name='anexo',
name='usuario',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='websecurityapp.Usuario'),
),
migrations.AddField(
model_name='actividad',
name='autor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='websecurityapp.Usuario'),
),
]
| [
"pabpinjim@alum.us.es"
] | pabpinjim@alum.us.es |
0f45c18037529079303a94ec4dc96c95fda477d2 | 265af11e1d8ce06f2c8bbc045638b10a46b72edf | /pygcam/queryFile.py | 5f93b17c68e1b8c1b63fa3701c02ee4b12fb8f38 | [
"MIT"
] | permissive | JGCRI/pygcam | 2174d8b4e1dad446d13fb89d698b747033f5b2d5 | ba069c43698057dffe34c4df1e0b143c5e7fcfd8 | refs/heads/master | 2023-07-27T09:07:31.049076 | 2022-08-16T23:42:30 | 2022-08-16T23:42:30 | 93,781,235 | 25 | 12 | NOASSERTION | 2023-07-06T21:28:09 | 2017-06-08T18:45:35 | Python | UTF-8 | Python | false | false | 5,810 | py | '''
.. Created on: 5/11/16
.. Copyright (c) 2016 Richard Plevin
See the https://opensource.org/licenses/MIT for license details.
'''
from collections import defaultdict
from .config import getParam
from .error import PygcamException
from .utils import getBooleanXML, resourceStream
from .XMLFile import XMLFile
#
# Classes to parse queryFiles and the <queries> element of project.xml
# (see pygcam/etc/queries-schema.xsd). These are in a separate file
# for sharing between query.py and project.py
#
class Query(object):
def __init__(self, node, defaultMap):
self.name = node.get('name')
self.saveAs = node.get('saveAs', self.name)
self.delete = getBooleanXML(node.get('delete', '1'))
self.useDefault = useDefault = getBooleanXML(node.get('useDefault', '1'))
self.states = node.get('states', 'none')
# see if the user provided the attribute, or we defaulted to 1
explicitUseDefault = node.get('useDefault', None) and useDefault
# Create a list of tuples with (mapName, level) where level may be None
rewriters = node.findall('rewriter')
self.rewriters = [(obj.get('name'), obj.get('level')) for obj in rewriters]
# We add the default map in two cases: (i) user specified some rewriters and explicitly
# set useDefault="1", or (ii) there are no rewriters and useDefault has not been set to "0".
if defaultMap and ((rewriters and explicitUseDefault) or (not rewriters and useDefault)):
self.rewriters.append((defaultMap, None))
class QueryFile(object):
def __init__(self, node):
defaultMap = self.defaultMap = node.get('defaultMap', None)
nodes = node.findall('query')
self.queries = [Query(node, defaultMap) for node in nodes]
def queryFilenames(self):
"""
Return the name used to compose the filename for this query, which
may differ from the original query name, e.g., if the same query
needs to be rewritten differently for different purposes.
"""
names = [q.saveAs for q in self.queries]
return names
@classmethod
def parse(cls, filename):
"""
Parse an XML file holding a list of query descriptions.
:param filename: (str) the name of the XML file to read
:return: a QueryFile instance.
"""
xmlFile = XMLFile(filename, schemaPath='etc/queries-schema.xsd', conditionalXML=True)
return cls(xmlFile.tree.getroot())
#
# Classes to parse rewriteSets.xml (see pygcam/etc/rewriteSets-schema.xsd)
#
class Rewrite(object):
def __init__(self, node):
self.From = node.get('from') # 'from' is a keyword...
self.to = node.get('to')
self.byAEZ = getBooleanXML(node.get('byAEZ', '0'))
self.byBasin = getBooleanXML(node.get('byBasin', '0')) # TBD: GCAM5
def __str__(self):
# TBD: Add byBasin for GCAM5
return "<Rewrite from='%s' to='%s' byAEZ='%s'>" % (self.From, self.to, self.byAEZ)
class RewriteSet(object):
def __init__(self, node):
self.name = node.get('name')
self.level = node.get('level')
self.byAEZ = getBooleanXML(node.get('byAEZ', '0'))
self.byBasin = getBooleanXML(node.get('byBasin', '0')) # TBD: GCAM5
self.appendValues = getBooleanXML(node.get('append-values', '0'))
self.rewrites = [Rewrite(x) for x in node.findall('rewrite')]
def __str__(self):
# TBD: Add byBasin for GCAM5
return "<RewriteSet name='%s' level='%s' byAEZ='%s' append-values='%s'>" % \
(self.name, self.level, self.byAEZ, self.appendValues)
def asRegionMap(self):
regionMap = defaultdict(list)
for rewrite in self.rewrites:
regionMap[rewrite.to].append(rewrite.From)
return regionMap
class RewriteSetParser(object):
# store instances by filename to avoid repeated parsing
cache = {}
def __init__(self, node, filename):
rewriteSets = [RewriteSet(x) for x in node.findall('rewriteSet')]
self.rewriteSets = {obj.name : obj for obj in rewriteSets}
self.filename = filename # for error messages only
def getRewriteSet(self, name):
try:
return self.rewriteSets[name]
except KeyError:
raise PygcamException('RewriteSet "%s" not found in file "%s"' % (name, self.filename))
@classmethod
def parse(cls, filename=None):
"""
Parse an XML file holding a list of query result rewrites.
:param filename: (str) the name of the XML file to read, or, if
None, the value of config variable GCAM.XmlSetsFile is used.
:return: a RewriteSetParser instance
"""
filename = filename or getParam('GCAM.RewriteSetsFile')
obj = cls.cache.get(filename)
if obj:
return obj
xmlFile = XMLFile(filename, schemaPath='etc/rewriteSets-schema.xsd')
obj = cls(xmlFile.tree.getroot(), filename)
cls.cache[filename] = obj
return obj
@classmethod
def getRegionMap(cls, rewriteSetName, filename=None):
"""
Lookup a RewriteSet in the given file (or in GCAM.RewriteSetsFile)
and return it as dictionary.
:param rewriteSetName: (str) the name of the set to look up.
:param filename: (str or None) path to rewriteSets.xml file, or if
None, the value of config variable GCAM.RewriteSetsFile is used.
:return: (dict) a dictionary keyed by aggregate region names,
with values being a list of standard GCAM region names comprising
the aggregate.
"""
rewriteParser = cls.parse()
rewriteSet = rewriteParser.getRewriteSet(rewriteSetName)
return rewriteSet.asRegionMap()
| [
"rich@plevin.com"
] | rich@plevin.com |
1ecf6cfdafa1599f4a5b8c796e45d6781e96f6f1 | 3bba622c8a50c9407555d6ff56c48ff8cc596047 | /train_cvae.py | 6477a0e5882ff9e831997af917e291e43f06bd96 | [] | no_license | Easylife247/gan_stability-cvae | 168f12bf14e74a850486d397fde48a7553047ab7 | 562de6dfe3bec5e4288d4ba2cbd4cf4340876f2b | refs/heads/master | 2023-07-03T09:08:35.806162 | 2019-03-21T09:26:44 | 2019-03-21T09:26:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,280 | py | import os
import tensorflow as tf
from absl import flags
import datetime
import utils
gpu = str(utils.choose_gpu())
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = gpu
from eval_model import Evaluator, eval_cvae
from cvae import CVAE
gfile = tf.gfile
train_dir = os.path.dirname(os.path.realpath(__file__))
flags.DEFINE_string('checkpoint_dir', os.path.join(train_dir, '../../data/wiki-art/cvae/checkpoints'),
'Directory, where the data to feed is located.')
flags.DEFINE_integer('batch_size', 32, 'The batch_size for the model.')
flags.DEFINE_integer('shuffle_buffer_size', 10000, 'Number of records to load '
'before shuffling and yielding for consumption. [100000]')
flags.DEFINE_integer('save_summaries_steps', 300, 'Number of seconds between '
'saving summary statistics. [1]') # default 300
flags.DEFINE_integer('save_checkpoint_secs', 1200, 'Number of seconds between '
'saving checkpoints of model. [1200]')
flags.DEFINE_integer('n_steps', 3000000, 'The total number of train steps to take.')
flags.DEFINE_bool('load_model', False, 'Whether to load from existing weights or train new from scratch.')
FLAGS = flags.FLAGS
def main(_):
print('learning_rate ', FLAGS.learning_rate)
print('Adam\'s beta parameter ', FLAGS.optimizer_add)
print('data_dir ', FLAGS.data_dir)
print(FLAGS.batch_size)
print('gf_ef_dim', FLAGS.gf_dim, FLAGS.ef_dim)
print('Use conditional imstance normalization? ', FLAGS.use_cin)
print('Starting the program..')
gfile.MakeDirs(FLAGS.checkpoint_dir)
if FLAGS.load_model:
model_dir = 'cvae_wiki-art_32_128_2019-01-07-16-10-51'
logdir = os.path.join(FLAGS.checkpoint_dir, model_dir)
print('reloading existing model and continuing training.')
else:
now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
model_dir = '%s_%s_%s_%s' % (
'cvae_wiki-art', FLAGS.batch_size, FLAGS.image_size, now)
logdir = os.path.join(FLAGS.checkpoint_dir, model_dir)
print('checkpoint_dir: {}'.format(FLAGS.checkpoint_dir))
print('model_dir: {}'.format(model_dir))
gfile.MakeDirs(logdir)
with tf.Graph().as_default():
# Set up device to use
device = '/gpu:0'
with tf.device(device):
# Instantiate global_step.
global_step = tf.train.create_global_step()
# create model graph
cvae = CVAE(FLAGS, global_step, device)
evaluator = Evaluator(cvae.dec_fcn, FLAGS, device, logdir)
session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
# train
tf.contrib.training.train(cvae.train_op, logdir=logdir, hooks=(
[tf.train.StopAtStepHook(num_steps=300000), evaluator.compute_stats(),
eval_cvae(cvae.dec_fcn, device, FLAGS, logdir, FLAGS.z_g_dim)]),
save_summaries_steps=FLAGS.save_summaries_steps,
save_checkpoint_secs=FLAGS.save_checkpoint_secs, config=session_config)
if __name__ == '__main__':
tf.app.run()
| [
"andreas.blattmann@gmx.de"
] | andreas.blattmann@gmx.de |
82a96289f8c0a87ebbf7f2ec31f4f6361a0e78b7 | ea5becf1b1effff8be726756a1e7d79ff63c4bc9 | /weather/settings.py | 976dd6270bfa519924ce4e6f3c9061258a90b1cf | [] | no_license | Sarthak-tech-coder/Django | 144e8d08f9251347fafb41e7a640271a65293ce6 | 5f401bed69726811e429b603f03707b312c2e084 | refs/heads/main | 2023-03-06T03:45:05.949211 | 2021-02-17T05:47:03 | 2021-02-17T05:47:03 | 327,493,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,024 | py | """
Django settings for weather project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6nu3of#jacy25x-^8lhfwz3=1_4w=2vkj@frflg^)bsye)hi!$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'api.apps.ApiConfig',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'allauth.socialaccount.providers.github',
]
SITE_ID = 1
LOGIN_REDIRECT_URL = '/'
AUTHENTICATION_BACKENDS = [
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'weather.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'weather.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
SOCIALACCOUNT_PROVIDERS = {
'google': {
'APP': {
'client_id': '872887232412-p5tiqcjhgd7acrjpl6a2rd8kfbipjel9.apps.googleusercontent.com',
'secret': 'S3uliSFfs8z73t3ovscIel7E',
'key': ''
}
}
} | [
"sarthak.chowdhry@hotmail.com"
] | sarthak.chowdhry@hotmail.com |
fc6717dc0d7d88f085c033890e77ce583e5ec628 | b1f9706100de7f2b2501403060d9acbc2f102370 | /test/test_diagram.py | 25b62e5bf940feb9e8cdb1ab73180de3570455a2 | [
"Apache-2.0"
] | permissive | NeverMore23/Laky-Earo | 0f70528623acd1b397e789c51fcd5588f3530fe3 | 67fa4f2b45596fc3dcfe1b6a54e12202e62cd860 | refs/heads/master | 2021-12-23T05:17:25.353635 | 2016-05-05T04:28:14 | 2016-05-05T04:28:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,567 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import unittest
from earo.event import Event, Field
from earo.handler import Handler, Emittion, NoEmittion
from earo.mediator import Mediator
from earo.context import Context
from earo.processor import Processor, ProcessFlow
from earo.diagram import Diagram
class TestDiagram(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_active_process_flow(self):
mediator = Mediator()
processor = Processor('.+')
class EventA(Event):
event_a_field = Field(int, 100);
class EventB(Event):
event_b_field = Field(str, 'hello');
class EventC(Event):
event_c_field = Field(float, 1.1);
class EventD(Event):
event_d_field = Field(dict, {'x': 3, 'y': 4});
class EventE(Event):
event_e_field = Field(list, [3, 8, 7]);
def fooA_BC(context, event):
import time
time.sleep(0.5)
return (Emittion(EventB()), NoEmittion(EventC, 'Test No Emmittion EventC'))
def fooA(context, event):
pass
def fooB_D(context, event):
return Emittion(EventD())
def fooC(context, event):
pass
def fooD(context, event):
1 / 0
handler_1 = Handler(EventA, fooA_BC, [EventB, EventC])
handler_2 = Handler(EventA, fooA)
handler_3 = Handler(EventB, fooB_D, [EventD])
handler_4 = Handler(EventC, fooC)
handler_5 = Handler(EventD, fooD)
mediator.register_event_handler(
handler_1,
handler_2,
handler_3,
handler_4,
handler_5
)
context = Context(mediator, EventA(), processor)
context.process()
process_flow = context.process_flow
diagram = Diagram(process_flow=process_flow)
self.assertIsNotNone(diagram.json)
def test_inactive_process_flow(self):
mediator = Mediator()
class EventA(Event):
event_a_field = Field(int, 100);
class EventB(Event):
event_b_field = Field(str, 'hello');
class EventC(Event):
event_c_field = Field(float, 1.1);
class EventD(Event):
event_d_field = Field(dict, {'x': 3, 'y': 4});
def fooBC(context, event):
return (Emittion(EventB()), Emittion(EventC()))
def fooD(context, event):
return Emittion(EventD())
def foo(context, event):
pass
def fooEx(context, event):
1 / 0
handler_1 = Handler(EventA, fooBC, [EventB, EventC])
handler_2 = Handler(EventA, foo)
handler_3 = Handler(EventB, fooD, [EventD])
handler_4 = Handler(EventC, foo)
handler_5 = Handler(EventD, fooEx)
mediator.register_event_handler(
handler_1,
handler_2,
handler_3,
handler_4,
handler_5
)
process_flow = ProcessFlow(mediator, EventA)
diagram = Diagram(process_flow=process_flow)
self.assertIsNotNone(diagram.json)
def test_json(self):
mediator = Mediator()
class EventA(Event):
event_a_field = Field(int, 100);
class EventB(Event):
event_b_field = Field(str, 'hello');
class EventC(Event):
event_c_field = Field(float, 1.1);
class EventD(Event):
event_d_field = Field(dict, {'x': 3, 'y': 4});
def fooBC(context, event):
return (Emittion(EventB()), Emittion(EventC()))
def fooD(context, event):
return Emittion(EventD())
def foo(context, event):
pass
def fooEx(context, event):
1 / 0
handler_1 = Handler(EventA, fooBC, [EventB, EventC])
handler_2 = Handler(EventA, foo)
handler_3 = Handler(EventB, fooD, [EventD])
handler_4 = Handler(EventC, foo)
handler_5 = Handler(EventD, fooEx)
mediator.register_event_handler(
handler_1,
handler_2,
handler_3,
handler_4,
handler_5
)
process_flow = ProcessFlow(mediator, EventA)
diagram_from_process_flow = Diagram(process_flow=process_flow)
json = diagram_from_process_flow.json
diagram_from_json = Diagram(json=json)
self.assertIsNotNone(diagram_from_json.json)
if __name__ == '__main__':
unittest.main()
| [
"463785757@qq.com"
] | 463785757@qq.com |
30d5b2683d2aeb26587f0d76e9a79de50db384e1 | b98ee5f9d69cf0301cbea001815113799db62981 | /WebDashboard/v2/ArtifactInstall_Leave_02_create_service.py | e6add63a170315a6512c919c14651c0242360108 | [] | no_license | LunaticMaestro/MenderArtifactExamples | f3027bf86f1bd8de5622251d5254e25eff2487cd | 44c2c3aefe208a32408297b09004baf64529ae45 | refs/heads/main | 2023-03-11T22:50:49.886935 | 2021-03-01T02:56:02 | 2021-03-01T02:56:02 | 342,505,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | #!/usr/bin/python3
# Create a service file
import os
import sys
import subprocess
serviceWork = \
'''
[Unit]
Description=webdashboard-2.0
After=multi-user.target
Conflicts=getty@tty.service
[Service]
Type=simple
ExecStart=/usr/local/bin/webdashboard
StandardInput=tty-force
[Install]
WantedBy=multi-user.target
'''
if os.geteuid() == 0:
pass
else:
print("We're not root.")
subprocess.call(['sudo', 'python3', *sys.argv])
sys.exit()
serviceName = 'webdashboard'
tmpFileLocation = '/var/local/'
fullFilePath_ext = tmpFileLocation+serviceName+'.service'
with open(fullFilePath_ext, 'w') as file:
file.write(serviceWork)
| [
"deepak@iiitkalyani.ac.in"
] | deepak@iiitkalyani.ac.in |
ae7a99b42394af979aae9d5dd02d5e052a89a2ab | c44e21bb099ca1d745d072363736e9d716a8623d | /Homework/Group_2/Problem_1.py | b0915dd769a32577ea561f4aab178c92b5e88adf | [] | no_license | thaiduy1704/CS112.L21 | 72b8e140d0af3c181ca0484e5b9393b51684a3c6 | 4243068ddd010e80670e341c2ecbe8054f3b12a5 | refs/heads/main | 2023-06-11T13:47:38.706073 | 2021-06-25T06:11:19 | 2021-06-25T06:11:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | a = list(map(int, input().split()))
a.sort()
print(*a, sep = " ") | [
"caohungphuvn@gmail.com"
] | caohungphuvn@gmail.com |
fb110cb92a51eb38e90844363a386c6d5fb0b018 | fbcb873cba61a43b134f01ffb12190fc6f9a8bab | /ota_app/views.py | f9933e0b8e31beaae49ecebe90e7b947c126e3cf | [] | no_license | PiotrFiedoruk/ota_app | 1555a32ae4aafc5641f8a57922c31133518398c9 | 2ab2f1e2a4dbf8564798c24e0d9042030449bd23 | refs/heads/master | 2023-03-30T09:10:24.956336 | 2021-04-08T20:25:17 | 2021-04-08T20:25:17 | 346,031,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,351 | py | from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin
from django.core.paginator import Paginator
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.views import View
import datetime
from django.views.generic.edit import UpdateView, DeleteView, FormView, CreateView
from django.db.models import Avg, Sum, Min
from ota_app.forms import AddUserForm, LoginForm, AddRateplanForm, AddReviewForm
from ota_app.models import Hotel, Room, Rateplan, Price, Reservation, Hotel_owner, Review
from django.contrib.auth.models import Group, User
class MainView(View):
    """Landing page with the hotel search form.

    When a city is submitted, lists hotels in that city that have
    availability in the requested (or default) date range.
    """

    def get(self, request):
        ctx = {}
        if 'city' in request.GET:
            city = request.GET.get('city')
            arrival = request.GET.get('arrival')
            if arrival:
                departure = request.GET.get('departure')
                guests = request.GET.get('guests')
            else:
                # No dates chosen: default to a 3-night stay starting today.
                checkin = datetime.date.today()
                departure = (checkin + datetime.timedelta(days=3)).strftime("%Y-%m-%d")
                arrival = checkin.strftime("%Y-%m-%d")
                guests = 1
            # NOTE(review): this matches hotels with availability on ANY
            # date in the range, not necessarily on every night.
            hotelsearch = Hotel.objects.filter(
                city=city,
                hotel_rooms__room_rateplans__rateplan_prices__date__range=[arrival, departure],
                hotel_rooms__room_rateplans__rateplan_prices__availability__gt=0,
            ).distinct()
            ctx = {
                'hotelsearch': hotelsearch,
                'city': city,
                'arrival': arrival,
                'departure': departure,
                'guests': guests,
            }
        return render(request, 'ota_app/main.html', ctx)
class HotelDetailsView(View):
    """Hotel page: description, guest reviews and — when the visitor has
    picked dates — the rooms bookable for the whole stay."""

    def get(self, request, hid):
        hotel = Hotel.objects.get(id=hid)
        # Guest reviews and their average overall score.
        reviews = Review.objects.filter(hotel_id=hid)
        reviews_avg = reviews.aggregate(Avg('score_overall'))
        ctx = {'hotel': hotel, 'reviews': reviews, 'reviews_avg': reviews_avg}
        if 'arrival' in request.GET:
            arrival = request.GET.get('arrival')
            departure = request.GET.get('departure')
            guests = request.GET.get('guests')
            # Rooms with stock on at least one date in the range ...
            rooms_with_any_stock = hotel.hotel_rooms.filter(
                room_rateplans__rateplan_prices__date__range=[arrival, departure],
                room_rateplans__rateplan_prices__availability__gt=0,
            ).distinct()
            # ... minus rooms sold out on at least one date, leaving the
            # rooms that are available on every night of the stay.
            sold_out_rooms = hotel.hotel_rooms.filter(
                room_rateplans__rateplan_prices__date__range=[arrival, departure],
                room_rateplans__rateplan_prices__availability__lt=1,
            ).distinct()
            available_rooms = rooms_with_any_stock.difference(sold_out_rooms)
            # Cheapest single-occupancy nightly rate per room (annotated
            # as ``avg_price`` because the template expects that name).
            available_rooms_price = hotel.hotel_rooms.filter(
                room_rateplans__rateplan_prices__date__range=[arrival, departure],
                room_rateplans__rateplan_prices__availability__gt=0,
            ).distinct().annotate(avg_price=Min('room_rateplans__rateplan_prices__price_1'))
            ctx.update({
                'available_rooms': available_rooms,
                'available_rooms_price': available_rooms_price,
                'arrival': arrival,
                'departure': departure,
                'guests': guests,
            })
        else:
            ctx.update({'available_rooms': [], 'available_rooms_price': []})
        return render(request, 'ota_app/hotel.html', ctx)
class RoomReserveView(View):
    """Room page listing the rate plans bookable for the selected stay,
    each annotated with its total price for the whole stay."""

    def get(self, request, hid, rid):
        room = Room.objects.get(id=rid)
        if 'arrival' not in request.GET:
            # No dates selected yet: show the room without rate plans.
            return render(request, 'ota_app/room_reserve.html', {'room': room})
        arrival = request.GET.get('arrival')
        departure = request.GET.get('departure')
        guests = request.GET.get('guests')
        # Pick the per-night price column matching the occupancy.
        price_fields = {'1': 'rateplan_prices__price_1',
                        '2': 'rateplan_prices__price_2'}
        if guests not in price_fields:
            raise Exception('Please provide number of guests')
        # Rate plans priced and in stock for every night of the stay;
        # the departure night itself is excluded via __lt.
        available_rateplans = Rateplan.objects.filter(
            room_id=rid,
            rateplan_prices__date__gte=arrival,
            rateplan_prices__date__lt=departure,
            rateplan_prices__availability__gt=0,
        ).annotate(total_price=Sum(price_fields[guests]))
        return render(request, 'ota_app/room_reserve.html', {
            'room': room,
            'available_rateplans': available_rateplans,
            'arrival': arrival,
            'departure': departure,
            'guests': guests,
        })
class ConfirmReservationView(LoginRequiredMixin, View):
    """Create a reservation from the booking form POST and render the
    confirmation page.

    Side effects: persists a ``Reservation`` linked to the chosen rate
    plan, and decrements availability of every nightly ``Price`` row of
    the booked room for the stay (departure night excluded).
    """

    def post(self, request):
        # Booking details submitted from the rate-plan selection form.
        rpid = request.POST.get('rpid')
        arrival = request.POST.get('arrival')
        departure = request.POST.get('departure')
        guests = request.POST.get('guests')
        # Last night of the stay: price/availability rows run from
        # arrival up to (and including) departure - 1 day.
        departure_obj = datetime.datetime.strptime(departure, "%Y-%m-%d")
        departure_dec = (departure_obj - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
        # Objects needed to build the reservation.
        hotel_obj = Hotel.objects.get(hotel_rooms__room_rateplans__in=[rpid])
        guest_obj = User.objects.get(id=request.user.id)
        rateplan = Rateplan.objects.get(id=rpid)
        room = Room.objects.get(room_rateplans__in=[rpid])
        # Total price for the stay.
        # NOTE(review): always sums price_1 (single occupancy) and
        # truncates to int — confirm this is intended for 2-guest stays.
        total_price_query = Price.objects.filter(
            rateplan_id=rateplan, date__range=[arrival, departure_dec])
        total_price = sum(price.price_1 for price in total_price_query)
        # objects.create() already saves; no extra save() call needed.
        new_reservation = Reservation.objects.create(
            hotel=hotel_obj,
            guest=guest_obj,
            price=int(float(total_price)),
            room=room,
            arrival=arrival,
            departure=departure,
            num_of_guests=int(guests),
            status='active',
        )
        # Link the reservation to its rate plan (M2M).
        new_reservation.rateplan.add(rateplan)
        # Decrease availability for the booked nights in one UPDATE.
        # F() makes the decrement atomic in the database, fixing the
        # read-modify-write race of the previous per-row loop.
        Price.objects.filter(
            rateplan_id__room_id=room, date__range=[arrival, departure_dec]
        ).update(availability=F('availability') - 1)
        ctx = {'hotel': hotel_obj, 'room': room, 'rateplan': rateplan,
               'guests': guests, 'arrival': arrival, 'departure': departure,
               'total_price': total_price}
        return render(request, 'ota_app/confirm_reservation.html', ctx)
class HotelDashboardView(PermissionRequiredMixin, View):
    """Owner-facing dashboard: paginated reservations, reviews from the
    last 30 days, and the average nightly price over the next 30 days."""
    permission_required = 'ota_app.view_hotel'

    def get(self, request, hid):
        hotel = Hotel.objects.get(id=hid)
        # Only the owner of this hotel may see its dashboard.
        if hotel.hotel_owner.user.username != request.user.username:
            raise Exception('this property does not belong to you')
        # Reservations, newest first, five per page.
        reservations = Reservation.objects.filter(hotel=hotel).order_by('-created')
        page_obj = Paginator(reservations, 5).get_page(request.GET.get('page'))
        # Reviews posted within the last 30 days, newest first.
        review_cutoff = datetime.date.today() - datetime.timedelta(days=30)
        reviews = Review.objects.filter(
            hotel_id=hid, created__gt=review_cutoff).order_by('-created')
        # Average single-occupancy price over the coming 30 days.
        window_start = datetime.date.today().strftime("%Y-%m-%d")
        window_end = (datetime.date.today() + datetime.timedelta(days=30)).strftime("%Y-%m-%d")
        total_average = Price.objects.filter(
            rateplan_id__room_id__hotel_id=hid,
            date__range=[window_start, window_end],
        ).aggregate(Avg('price_1'))
        ctx = {'hotel': hotel, 'reservations': reservations,
               'total_average': total_average, 'page_obj': page_obj,
               'reviews': reviews}
        return render(request, 'ota_app/dahsboard.html', ctx)
# f
class HotelCreateView(LoginRequiredMixin, CreateView):
    """Register a new hotel and make the current user its owner.

    On success the user is added to the ``hotel_owner_group`` permission
    group, a ``Hotel_owner`` profile is created for them, and they are
    redirected to the new hotel's dashboard.
    """
    model = Hotel
    fields = ['name', 'city', 'street', 'description', 'facilities']
    # Removed the previous ``exclude`` attribute: CreateView ignores it
    # when ``fields`` is set, so it had no effect.

    def get_success_url(self):
        # Fixed: the create URL carries no ``hid`` kwarg, so the former
        # ``self.kwargs['hid']`` lookup would raise KeyError if this were
        # ever called; derive the id from the freshly created hotel.
        return f'/dashboard/{self.object.id}'

    def form_valid(self, form):
        hotel = Hotel(
            name=form.cleaned_data['name'],
            city=form.cleaned_data['city'],
            street=form.cleaned_data['street'],
            description=form.cleaned_data['description'],
            facilities=form.cleaned_data['facilities'],
        )
        # Grant the current user hotel-owner permissions.
        user = self.request.user
        hotel_owner_group = Group.objects.get(name='hotel_owner_group')
        hotel_owner_group.user_set.add(user)
        # Attach an owner profile to the new hotel.
        hotel_owner = Hotel_owner.objects.create(user=user)
        hotel.hotel_owner = hotel_owner
        hotel.save()
        self.object = hotel  # keep CreateView's contract intact
        return redirect('dashboard', hotel.id)
class HotelUpdateView(PermissionRequiredMixin, UpdateView):
    """Edit a hotel's descriptive details (owner only).

    NOTE(review): unlike HotelCreateView, ``street`` is not editable
    here — confirm that is intentional.
    """
    permission_required = ('ota_app.view_hotel', 'ota_app.add_hotel',)
    model = Hotel
    fields = ('name', 'city', 'description', 'facilities')
    template_name_suffix = '_update_form'

    def get_success_url(self):
        # Resolve the dashboard URL by name, consistent with the other
        # update/delete views in this module, instead of hard-coding the
        # path string.
        return reverse_lazy('dashboard', kwargs={'hid': self.kwargs['pk']})
# f
class RoomCreateView(PermissionRequiredMixin, CreateView):
    """Add a new room to a hotel (owner only)."""
    permission_required = ('ota_app.view_room', 'ota_app.add_room',)
    model = Room
    fields = ['name', 'description', 'amenities']

    def get_success_url(self):
        # Fixed: the previous value lacked a leading slash
        # ("dashboard/<hid>"), which would redirect relative to the
        # current URL if ever used.
        return f"/dashboard/{self.kwargs['hid']}"

    def form_valid(self, form):
        hid = self.kwargs['hid']
        hotel = Hotel.objects.get(id=hid)
        room = Room(
            hotel_id=hotel,
            name=form.cleaned_data['name'],
            description=form.cleaned_data['description'],
            amenities=form.cleaned_data['amenities'],
        )
        room.save()
        self.object = room  # keep CreateView's contract intact
        return redirect('dashboard', hid)
class RoomUpdateView(PermissionRequiredMixin, UpdateView):
    """Edit a room's name, description and amenities (owner only)."""
    permission_required = ('ota_app.view_room', 'ota_app.add_room',)
    model = Room
    fields = ['name', 'description', 'amenities']
    template_name_suffix = '_update_form'

    def get_success_url(self):
        # Back to the dashboard of the hotel this room belongs to.
        return f'/dashboard/{self.object.hotel_id_id}'
class RoomDeleteView(PermissionRequiredMixin, DeleteView):
    """Delete a room after confirmation (owner only)."""
    permission_required = ('ota_app.view_hotel', 'ota_app.delete_room')
    model = Room

    def get_success_url(self):
        # Return to the owning hotel's dashboard after deletion.
        return reverse_lazy('dashboard',
                            kwargs={'hid': self.object.hotel_id_id})
class RoomDetailsView(View):
    """Public, read-only page showing the details of one room."""

    def get(self, request, hid, rid):
        context = {'room': Room.objects.get(id=rid)}
        return render(request, 'ota_app/room_details.html', context)
class RateplanCreateView(PermissionRequiredMixin, View):
    """Create a Rateplan for a room and seed one Price row per day.

    GET renders an empty AddRateplanForm; POST validates it, saves the
    Rateplan and pre-creates 365 daily Price rows (availability 0) starting
    today, then redirects to the hotel dashboard.
    """
    permission_required = ('ota_app.view_rateplan', 'ota_app.add_rateplan',)
    model = Rateplan
    fields = ['name']

    def get(self, request, hid, rid):
        form = AddRateplanForm()
        return render(request, 'ota_app/rateplan_form.html', {'form': form})

    def post(self, request, hid, rid):
        form = AddRateplanForm(request.POST)
        if form.is_valid():
            name = form.cleaned_data['name']
            price_1 = form.cleaned_data['price_1']
            price_2 = form.cleaned_data['price_2']
            room_obj = Room.objects.get(id=rid)
            hotel_id = room_obj.hotel_id.id
            new_rateplan = Rateplan(name=name, room_id=room_obj)
            new_rateplan.save()
            # create list of dates: one year ahead, starting today
            date_today = datetime.date.today()
            datelist = [date_today + datetime.timedelta(days=x) for x in range(365)]  # number of initial dates here
            # create default prices for new rateplan for the datelist range:
            for date in datelist:
                Price.objects.create(rateplan_id=new_rateplan, date=date, price_1=price_1,
                                     price_2=price_2, availability=0)
            return redirect('dashboard', hotel_id)
        # Bug fix: the original fell off the end (returned None) when the form
        # was invalid, making Django raise "view didn't return an HttpResponse".
        # Re-render the bound form so validation errors are shown instead.
        return render(request, 'ota_app/rateplan_form.html', {'form': form})
class RateplanUpdateView(PermissionRequiredMixin, UpdateView):
    """Rename an existing Rateplan, then return to the owning hotel's dashboard."""
    permission_required = ('ota_app.view_rateplan', 'ota_app.change_rateplan',)
    model = Rateplan
    fields = ['name']
    template_name_suffix = '_update_form'

    def get_success_url(self):
        # Rateplan -> Room -> Hotel gives us the dashboard to go back to.
        hotel_pk = self.object.room_id.hotel_id_id
        return reverse_lazy('dashboard', kwargs={'hid': hotel_pk})
class RateplanDeleteView(PermissionRequiredMixin, DeleteView):
    """Delete a Rateplan, then show the owning hotel's dashboard."""
    permission_required = ('ota_app.view_hotel', 'ota_app.delete_rateplan',)
    model = Rateplan

    def get_success_url(self):
        # Walk the FK chain Rateplan -> Room -> Hotel for the redirect target.
        hotel_pk = self.object.room_id.hotel_id_id
        return reverse_lazy('dashboard', kwargs={'hid': hotel_pk})
class PriceCreateView(PermissionRequiredMixin, View):  # price calendar
    """Owner-facing price calendar.

    GET builds a 14-day grid of availability/price inputs (as a raw HTML
    string handed to the template); POST reads those inputs back and updates
    the matching Price rows. Input names are keyed by Price id:
    av-<id>, pr1-<id>, pr2-<id>, dt-<id>.
    """
    permission_required = ('ota_app.view_price', 'ota_app.add_price', 'ota_app.change_price')

    def get(self, request, hid):
        hotel = Hotel.objects.get(id=hid)
        # only the hotel's owner may see/edit its price calendar
        if hotel.hotel_owner.user.username != request.user.username:
            raise Exception('this property does not belong to you')
        else:
            # define start and end date for price list. if start date not provided in url, set it to today's date
            if 'date' in request.GET:
                start_date = request.GET.get('date')
                start_date = datetime.datetime.strptime(start_date,
                                                        '%Y-%m-%d').date()
                if start_date < datetime.date.today():  # changing past prices not allowed
                    start_date = datetime.date.today()
            else:
                start_date = datetime.date.today()
            end_date = start_date + datetime.timedelta(days=14)  # set number of calendar days here
            prev_date = start_date - datetime.timedelta(days=14)  # get previous date for navigation links
            if prev_date < datetime.date.today():  # changing past prices not allowed
                prev_date = datetime.date.today()
            # create price form: hand-built HTML, one table per rateplan.
            # `loop` counts rateplans per room so the date header row and the
            # availability inputs are emitted only for the first rateplan
            # (availability is per-room, not per-rateplan).
            form = ""
            for room in hotel.hotel_rooms.all():
                form = form + f"<p><h3>{room.name}</h3></p>"
                loop = 1
                for rateplan in room.room_rateplans.all():
                    form = form + f"<p><strong>{rateplan.name}</strong></p>"
                    form = form + f"<table class='table' style='table-layout: fixed'><tr>"
                    for price in rateplan.rateplan_prices.filter(date__gte=start_date, date__lte=end_date):
                        form = form + "<td>"
                        if loop == 1:
                            form = form + f"{price.date.strftime('%a %d %b')}<br>"
                        form = form + f"<input type='hidden' value='{price.date}' name='dt-{price.id}'><br>"
                        form = form + f"<input type='number' value='{price.availability}' name='av-{price.id}'><br>"
                        form = form + f"<p></p>"
                        form = form + f"<input type='number' value='{price.price_1}' name='pr1-{price.id}'><br>"
                        form = form + f"<input type='number' value='{price.price_2}' name='pr2-{price.id}'><br>"
                        form = form + "</td>"
                    form = form + "</tr></table>"
                    loop += 1
            ctx = {'hotel': hotel, 'start_date': start_date, 'end_date': end_date, 'prev_date': prev_date, 'form': form}
            return render(request, 'ota_app/add_price.html', ctx)

    def post(self, request, hid):
        # NOTE(review): unlike get(), this handler does not verify that the
        # hotel belongs to request.user — confirm whether that is intended.
        hotel = Hotel.objects.get(id=hid)
        if 'date' in request.GET:
            start_date = request.GET.get('date')
            start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d').date()
        else:
            start_date = datetime.datetime.today().date()
        end_date = start_date + datetime.timedelta(days=14)  # set number of calendar days here
        # save dates from form:
        for room in hotel.hotel_rooms.all():
            loop = 1
            for rateplan in room.room_rateplans.all():
                for price in rateplan.rateplan_prices.filter(date__gte=start_date, date__lte=end_date):
                    rateplan_id = rateplan.id
                    date = price.date.strftime('%Y-%m-%d')
                    price_1 = request.POST.get(f"pr1-{price.id}")
                    price_2 = request.POST.get(f"pr2-{price.id}")
                    if loop == 1:
                        # first update availability for room prices on the same date:
                        availability = request.POST.get(f"av-{price.id}")
                        Price.objects.filter(rateplan_id__room_id=room.id).filter(date=date).update(
                            availability=availability)
                        Price.objects.filter(id=price.id).update(rateplan_id=rateplan_id,
                                                                 price_1=price_1,
                                                                 price_2=price_2)
                    else:
                        # then update other fields:
                        Price.objects.filter(id=price.id).update(rateplan_id=rateplan_id,
                                                                 price_1=price_1,
                                                                 price_2=price_2)
                    # NOTE(review): `loop` is incremented inside the *price*
                    # loop here, but inside the *rateplan* loop in get(); as
                    # written only the first date of the first rateplan takes
                    # the availability branch — confirm whether this increment
                    # should sit one level out, after the price loop.
                    loop += 1
        return redirect('create_price', hid)
class PriceUpdateView(PermissionRequiredMixin, View):  # batch update
    """Bulk-edit availability and prices for one rateplan over a date range.

    Empty form fields mean "leave unchanged"; availability applies to every
    rateplan of the room, prices only to the selected rateplan.
    """
    permission_required = ('ota_app.view_price', 'ota_app.add_price', 'ota_app.change_price')

    def get(self, request, hid):
        hotel = Hotel.objects.get(id=hid)
        # only the owner may open the batch-update form
        if hotel.hotel_owner.user.username != request.user.username:
            raise Exception('this property does not belong to you')
        else:
            rateplans = Rateplan.objects.filter(room_id__hotel_id_id=hid)
            return render(request, 'ota_app/price_update_form.html',
                          {'rateplans': rateplans, 'hotel': hotel})

    def post(self, request, hid):
        posted = request.POST
        rateplan_id = posted.get('rateplan_id')
        room = Room.objects.get(room_rateplans__id=rateplan_id)
        availability = posted.get('availability')
        price_1 = posted.get('price_1')
        price_2 = posted.get('price_2')
        # parse the inclusive date range submitted by the form
        first_day = datetime.datetime.strptime(posted.get('date_start'), '%Y-%m-%d').date()
        last_day = datetime.datetime.strptime(posted.get('date_end'), '%Y-%m-%d').date()
        span = (last_day - first_day).days + 1
        for offset in range(span):
            day = first_day + datetime.timedelta(days=offset)
            # availability is shared across all rateplans of the room
            if availability != "":
                Price.objects.filter(rateplan_id__room_id=room.id).filter(date=day).update(
                    availability=availability)
            # prices only change for the selected rateplan
            day_prices = Price.objects.filter(rateplan_id=rateplan_id).filter(date=day)
            if price_1 != "":
                day_prices.update(price_1=price_1)
            if price_2 != "":
                day_prices.update(price_2=price_2)
        return redirect('create_price', hid)
class CreateUserView(FormView):
    """Register a new site user and place them in the 'guest_group'."""
    template_name = 'ota_app/hotelowner_form.html'
    form_class = AddUserForm
    success_url = '/login'

    def form_valid(self, form):
        # create_user() hashes the password and already persists the user,
        # so the original's extra user.save() call was redundant and removed.
        user = User.objects.create_user(
            username=form.cleaned_data['username'],
            first_name=form.cleaned_data['first_name'],
            last_name=form.cleaned_data['last_name'],
            password=form.cleaned_data['password'],
            email=form.cleaned_data['email']
        )
        # every new account starts as a guest
        guest_group = Group.objects.get(name='guest_group')
        guest_group.user_set.add(user)
        return super().form_valid(form)
class LoginView(View):
    """Log a user in.

    GET renders the login form; POST authenticates the submitted credentials
    and, on success, logs the user in and redirects to the main page.
    """

    def get(self, request):
        form = LoginForm()
        return render(request, "ota_app/login.html", {"form": form})

    def post(self, request):
        form = LoginForm(request.POST)
        if form.is_valid():
            username = request.POST.get('login')
            password = request.POST.get('password')
            user = authenticate(username=username, password=password)
            if user:
                login(request, user)
                # Bug fix: redirect after a successful POST (post/redirect/get)
                # instead of rendering main.html at the /login URL, which made
                # a page refresh re-submit the credentials.
                return redirect('main')
            else:
                err = 'wrong username or password. try again'
                return render(request, "ota_app/login.html", {"form": form, 'err': err})
        else:
            err = 'something went wrong'
            return render(request, "ota_app/login.html", {"form": form, 'err': err})
class LogoutView(LoginRequiredMixin, View):
    """Log the current user out and send them back to the landing page."""

    def get(self, request):
        logout(request)
        return redirect('main')
class ProfileView(PermissionRequiredMixin, View):
    """Guest profile page listing the user's reservations, newest first."""
    permission_required = 'ota_app.view_reservation'

    def get(self, request):
        reservations = request.user.guest_reservations.all().order_by('-created')
        return render(request, 'ota_app/profile_view.html',
                      {'reservations': reservations})
class MyHotelsView(PermissionRequiredMixin, View):
    """List every hotel owned by the logged-in hotel owner."""
    permission_required = 'ota_app.view_hotel'

    def get(self, request):
        owned = request.user.hotel_owner.hotels_owned.all()
        return render(request, 'ota_app/my_hotels.html', {'hotels_owned': owned})
class ReservationDetailsView(PermissionRequiredMixin, View):
    """Show one of the logged-in guest's reservations and allow cancelling it."""
    permission_required = 'ota_app.view_reservation'

    def get(self, request, resid):
        reservation = Reservation.objects.get(id=resid)
        user = request.user
        # check if reservation belongs to user:
        if reservation in user.guest_reservations.all():
            ctx = {'reservation': reservation}
            return render(request, 'ota_app/reservation_details.html', ctx)
        else:
            raise Exception('This reservation does not belong to you')

    def post(self, request, resid):
        reservation = Reservation.objects.get(id=resid)
        # Security fix: the original POST handler skipped the ownership check
        # that get() performs, so any user holding the permission could cancel
        # any reservation by id.
        if reservation not in request.user.guest_reservations.all():
            raise Exception('This reservation does not belong to you')
        # change reservation status to 'cancelled'
        reservation.status = 'CLX'
        reservation.save()
        # if reservation cancelled return room availability for the booked dates
        # (the original re-fetched the reservation here; one fetch suffices).
        # NOTE(review): the range includes the departure date — confirm the
        # booking flow decrements availability over the same inclusive span.
        availability_set = Price.objects.filter(
            rateplan_id__room_id=reservation.room,
            date__range=[reservation.arrival, reservation.departure])
        for price in availability_set:
            price.availability = price.availability + 1
            price.save()
        return redirect('profile')
class CreateReviewView(PermissionRequiredMixin, View):
    """Let a guest write a review (scores + text) for a hotel."""
    permission_required = 'ota_app.view_reservation'

    def get(self, request, hid):
        # Bug fix: the original passed the form *class* (AddReviewForm without
        # parentheses); templates need a form instance to render its fields.
        form = AddReviewForm()
        ctx = {'form': form}
        return render(request, 'ota_app/review_form.html', ctx)

    def post(self, request, hid):
        form = AddReviewForm(request.POST)
        hotel = Hotel.objects.get(id=hid)
        guest = request.user
        if form.is_valid():
            review = Review(
                hotel=hotel,
                guest=guest,
                title=form.cleaned_data['title'],
                text=form.cleaned_data['text'],
                score_overall=form.cleaned_data['score_overall'],
                score_location=form.cleaned_data['score_location'],
                score_cleaning=form.cleaned_data['score_cleaning'],
                score_service=form.cleaned_data['score_service']
            )
            review.save()
            return redirect('main')
        else:
            # Bug fix: the original redirected to a malformed relative URL
            # (f'ota_app/add-review/{hotel.id}/') and discarded the validation
            # errors; re-render the bound form so the user sees what failed.
            return render(request, 'ota_app/review_form.html', {'form': form})
class ReviewView(View):
    """Read-only page showing a single review."""

    def get(self, request, revid):
        review = Review.objects.get(id=revid)
        ctx = {'review': review}
        # Bug fix: the template path lacked the 'ota_app/' prefix used by
        # every other view in this file, so Django could not find the file.
        return render(request, 'ota_app/review_details.html', ctx)
class ReservationDetailsHotelView(PermissionRequiredMixin, View):
    """Hotel-side, read-only view of a single reservation."""
    permission_required = 'ota_app.view_reservation'

    def get(self, request, resid):
        ctx = {'reservation': Reservation.objects.get(id=resid)}
        return render(request, 'ota_app/reservation_details_hotel.html', ctx)
| [
"fiedorukpiotr@gmail.com"
] | fiedorukpiotr@gmail.com |
4faee0244fe112e63384a0da03c463c20421d8ee | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03359/s388821277.py | 34bcc557630d40378511ab48229fa37efb5ef00a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | a,b=map(int,input().split())
print(a-1+(b>=a)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
13c0f70eec666fec2dc0c362557d02452697220e | d2c7a5ec3076ba688384e9e41d44f91b966337db | /classification/models.py | 79994eba235b806deb91f50b717cc649c17f82b9 | [
"MIT"
] | permissive | ShamCondor/cavia | 864cd471ec6c4643fe64b5a0baac0cf57a41e9fc | 4cf41f227444969cc45a44c70579c5e5959cd2de | refs/heads/master | 2020-06-06T10:52:39.417276 | 2019-06-14T14:36:28 | 2019-06-14T14:36:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,755 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
class CondConvNet(nn.Module):
    """Convolutional classifier conditioned on task-specific context parameters.

    Four conv blocks (conv -> batchnorm -> [maxpool] -> [FiLM] -> ReLU)
    followed by one fully connected layer. A small vector of context
    parameters can modulate the network via FiLM layers (per-channel scale
    gamma and shift beta predicted from the context) and/or be concatenated
    to the flattened features before the classifier head.

    ``context_in`` is a sequence of five booleans selecting where the context
    enters: after conv1..conv4 (FiLM) and before the fully connected layer.
    """
    def __init__(self,
                 num_classes,
                 num_filters,
                 max_pool,
                 num_context_params,
                 context_in,
                 num_film_hidden_layers,
                 imsize,
                 initialisation,
                 device
                 ):
        """Build the network.

        Args:
            num_classes: size of the output layer.
            num_filters: number of channels in every conv layer.
            max_pool: if True, 2x2 max-pool after each conv (and conv4 keeps
                padding); otherwise conv4 is unpadded.
            num_context_params: length of the context-parameter vector.
            context_in: five booleans — context injection points (see class doc).
            num_film_hidden_layers: 0 or 1 hidden layer inside each FiLM MLP.
            imsize: input image size; only 84 and 28 are supported.
            initialisation: 'standard' (PyTorch default), 'xavier' or 'kaiming'.
            device: torch device all layers are moved to.
        """
        super(CondConvNet, self).__init__()
        self.num_classes = num_classes
        self.num_filters = num_filters
        self.max_pool = max_pool
        self.num_context_params = num_context_params
        self.context_in = context_in
        self.num_film_hidden_layers = num_film_hidden_layers
        self.kernel_size = 3
        # -- shared network --
        stride = 1
        padding = 1
        # RGB input
        self.num_channels = 3
        # conv-layers
        self.conv1 = nn.Conv2d(self.num_channels, self.num_filters, self.kernel_size, stride=stride,
                               padding=padding).to(device)
        self.conv2 = nn.Conv2d(self.num_filters, self.num_filters, self.kernel_size, stride=stride, padding=padding).to(
            device)
        self.conv3 = nn.Conv2d(self.num_filters, self.num_filters, self.kernel_size, stride=stride, padding=padding).to(
            device)
        if not self.max_pool:
            self.conv4 = nn.Conv2d(self.num_filters, self.num_filters, self.kernel_size, stride=stride).to(device)
        else:
            self.conv4 = nn.Conv2d(self.num_filters, self.num_filters, self.kernel_size, stride=stride,
                                   padding=padding).to(device)
        # not using this for now - need to implement using the mini-train-dataset statistics
        # # batch norm
        # track_running_stats=False: batch statistics are used at eval time too.
        self.bn1 = nn.BatchNorm2d(self.num_filters, track_running_stats=False).to(device)
        self.bn2 = nn.BatchNorm2d(self.num_filters, track_running_stats=False).to(device)
        self.bn3 = nn.BatchNorm2d(self.num_filters, track_running_stats=False).to(device)
        self.bn4 = nn.BatchNorm2d(self.num_filters, track_running_stats=False).to(device)
        # initialise weights for the fully connected layer
        # (feature-map size after the conv stack depends on the input size;
        # the context vector is concatenated if context_in[4] is set)
        if imsize == 84:
            self.fc1 = nn.Linear(5 * 5 * self.num_filters + int(context_in[4]) * num_context_params, self.num_classes).to(device)
        elif imsize == 28:
            self.fc1 = nn.Linear(self.num_filters + int(context_in[4]) * num_context_params, self.num_classes).to(device)
        else:
            raise NotImplementedError('Cannot handle image size.')
        # -- additions to enable context parameters at convolutional layers --
        # for each layer where we have context parameters, initialise a FiLM layer
        # (each FiLM layer outputs 2*num_filters values: gamma and beta per channel)
        if self.context_in[0]:
            self.film1 = nn.Linear(self.num_context_params, self.num_filters * 2).to(device)
            if self.num_film_hidden_layers == 1:
                self.film11 = nn.Linear(self.num_filters * 2, self.num_filters * 2).to(device)
        if self.context_in[1]:
            self.film2 = nn.Linear(self.num_context_params, self.num_filters * 2).to(device)
            if self.num_film_hidden_layers == 1:
                self.film22 = nn.Linear(self.num_filters * 2, self.num_filters * 2).to(device)
        if self.context_in[2]:
            self.film3 = nn.Linear(self.num_context_params, self.num_filters * 2).to(device)
            if self.num_film_hidden_layers == 1:
                self.film33 = nn.Linear(self.num_filters * 2, self.num_filters * 2).to(device)
        if self.context_in[3]:
            self.film4 = nn.Linear(self.num_context_params, self.num_filters * 2).to(device)
            if self.num_film_hidden_layers == 1:
                self.film44 = nn.Linear(self.num_filters * 2, self.num_filters * 2).to(device)
        # parameter initialisation (if different than standard pytorch one)
        if initialisation != 'standard':
            self.init_params(initialisation)
        # initialise context parameters
        # NOTE(review): .to(device) returns a copy, so on a non-CPU device this
        # tensor is not a leaf; gradients still flow to it, but confirm this is
        # intended if an optimiser is supposed to update it directly.
        self.context_params = torch.zeros(size=[self.num_context_params], requires_grad=True).to(device)

    def init_params(self, initialisation):
        """Apply 'xavier' or 'kaiming' init to conv, fc and FiLM weights; zero biases.

        NOTE(review): calculate_gain's second argument is meant to be a number
        (e.g. a leaky_relu slope); passing the weight tensor is ignored for
        'relu'/'linear' but looks unintended — confirm.
        """
        # convolutional weights
        if initialisation == 'xavier':
            torch.nn.init.xavier_uniform_(self.conv1.weight, gain=nn.init.calculate_gain('relu', self.conv1.weight))
            torch.nn.init.xavier_uniform_(self.conv2.weight, gain=nn.init.calculate_gain('relu', self.conv2.weight))
            torch.nn.init.xavier_uniform_(self.conv3.weight, gain=nn.init.calculate_gain('relu', self.conv3.weight))
            torch.nn.init.xavier_uniform_(self.conv4.weight, gain=nn.init.calculate_gain('relu', self.conv4.weight))
        elif initialisation == 'kaiming':
            torch.nn.init.kaiming_uniform_(self.conv1.weight, nonlinearity='relu')
            torch.nn.init.kaiming_uniform_(self.conv2.weight, nonlinearity='relu')
            torch.nn.init.kaiming_uniform_(self.conv3.weight, nonlinearity='relu')
            torch.nn.init.kaiming_uniform_(self.conv4.weight, nonlinearity='relu')
        # convolutional bias
        self.conv1.bias.data.fill_(0)
        self.conv2.bias.data.fill_(0)
        self.conv3.bias.data.fill_(0)
        self.conv4.bias.data.fill_(0)
        # fully connected weights at the end
        if initialisation == 'xavier':
            torch.nn.init.xavier_uniform_(self.fc1.weight, gain=nn.init.calculate_gain('linear', self.fc1.weight))
        elif initialisation == 'kaiming':
            torch.nn.init.kaiming_uniform_(self.fc1.weight, nonlinearity='linear')
        # fully connected bias
        self.fc1.bias.data.fill_(0)
        # FiLM layer weights (only the layers that were created in __init__)
        if self.context_in[0] and initialisation == 'xavier':
            torch.nn.init.xavier_uniform_(self.film1.weight, gain=nn.init.calculate_gain('linear', self.film1.weight))
        elif self.context_in[0] and initialisation == 'kaiming':
            torch.nn.init.kaiming_uniform_(self.film1.weight, nonlinearity='linear')
        if self.context_in[1] and initialisation == 'xavier':
            torch.nn.init.xavier_uniform_(self.film2.weight, gain=nn.init.calculate_gain('linear', self.film2.weight))
        elif self.context_in[1] and initialisation == 'kaiming':
            torch.nn.init.kaiming_uniform_(self.film2.weight, nonlinearity='linear')
        if self.context_in[2] and initialisation == 'xavier':
            torch.nn.init.xavier_uniform_(self.film3.weight, gain=nn.init.calculate_gain('linear', self.film3.weight))
        elif self.context_in[2] and initialisation == 'kaiming':
            torch.nn.init.kaiming_uniform_(self.film3.weight, nonlinearity='linear')
        if self.context_in[3] and initialisation == 'xavier':
            torch.nn.init.xavier_uniform_(self.film4.weight, gain=nn.init.calculate_gain('linear', self.film4.weight))
        elif self.context_in[3] and initialisation == 'kaiming':
            torch.nn.init.kaiming_uniform_(self.film4.weight, nonlinearity='linear')

    def reset_context_params(self):
        """Zero the context parameters and re-enable gradient tracking."""
        # detach() drops the old graph; *0 zeroes the values in a new tensor
        self.context_params = self.context_params.detach() * 0
        self.context_params.requires_grad = True

    def forward(self, x):
        """Forward pass: four conv blocks (optionally FiLM-modulated), then fc.

        Args:
            x: input image batch; assumed (batch, 3, imsize, imsize) given the
               conv1 definition — TODO confirm with callers.
        Returns:
            Class logits of shape (batch, num_classes).
        """
        # pass through convolutional layer
        h1 = self.conv1(x)
        # batchnorm
        h1 = self.bn1(h1)
        # do max-pooling (for imagenet)
        if self.max_pool:
            h1 = F.max_pool2d(h1, kernel_size=2)
        # if we have context parameters, adjust conv output using FiLM variables
        if self.context_in[0]:
            # FiLM it: forward through film layer to get scale and shift parameter
            film1 = self.film1(self.context_params)
            if self.num_film_hidden_layers == 1:
                film1 = self.film11(F.relu(film1))
            gamma1 = film1[:self.num_filters].view(1, -1, 1, 1)
            beta1 = film1[self.num_filters:].view(1, -1, 1, 1)
            # transform feature map
            h1 = gamma1 * h1 + beta1
        # pass through ReLu activation function
        h1 = F.relu(h1)
        h2 = self.conv2(h1)
        h2 = self.bn2(h2)
        if self.max_pool:
            h2 = F.max_pool2d(h2, kernel_size=2)
        if self.context_in[1]:
            film2 = self.film2(self.context_params)
            if self.num_film_hidden_layers == 1:
                film2 = self.film22(F.relu(film2))
            gamma2 = film2[:self.num_filters].view(1, -1, 1, 1)
            beta2 = film2[self.num_filters:].view(1, -1, 1, 1)
            h2 = gamma2 * h2 + beta2
        h2 = F.relu(h2)
        h3 = self.conv3(h2)
        h3 = self.bn3(h3)
        if self.max_pool:
            h3 = F.max_pool2d(h3, kernel_size=2)
        if self.context_in[2]:
            film3 = self.film3(self.context_params)
            if self.num_film_hidden_layers == 1:
                film3 = self.film33(F.relu(film3))
            gamma3 = film3[:self.num_filters].view(1, -1, 1, 1)
            beta3 = film3[self.num_filters:].view(1, -1, 1, 1)
            h3 = gamma3 * h3 + beta3
        h3 = F.relu(h3)
        h4 = self.conv4(h3)
        h4 = self.bn4(h4)
        if self.max_pool:
            h4 = F.max_pool2d(h4, kernel_size=2)
        if self.context_in[3]:
            film4 = self.film4(self.context_params)
            if self.num_film_hidden_layers == 1:
                film4 = self.film44(F.relu(film4))
            gamma4 = film4[:self.num_filters].view(1, -1, 1, 1)
            beta4 = film4[self.num_filters:].view(1, -1, 1, 1)
            h4 = gamma4 * h4 + beta4
        h4 = F.relu(h4)
        # flatten
        h4 = h4.view(h4.size(0), -1)
        # optionally concatenate the context vector to the flat features
        if self.context_in[4]:
            h4 = torch.cat((h4, self.context_params.expand(h4.size(0), -1)), dim=1)
        y = self.fc1(h4)
        return y
| [
"lmzintgraf@gmail.com"
] | lmzintgraf@gmail.com |
aec83169140213e426fd7202c172fee72c35bca0 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/explicitli.py | fb881c1eb0c559e45391aa3dfa66eb8fb9bc9545 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 508 | py | ii = [('RogePAV2.py', 1), ('AubePRP2.py', 1), ('FitzRNS3.py', 1), ('WilbRLW2.py', 1), ('SeniNSP.py', 1), ('AinsWRR3.py', 1), ('KiddJAE.py', 2), ('CoolWHM.py', 1), ('BuckWGM.py', 1), ('LyelCPG.py', 1), ('DaltJMA.py', 1), ('WestJIT2.py', 1), ('WadeJEB.py', 1), ('NewmJLP.py', 1), ('WheeJPT.py', 1), ('BentJRP.py', 2), ('StorJCC.py', 2), ('JacoWHI2.py', 1), ('SomeMMH.py', 1), ('MereHHB2.py', 1), ('ClarGE3.py', 1), ('MartHRW2.py', 1), ('EvarJSP.py', 5), ('DwigTHH.py', 4), ('ThomWEC.py', 1), ('BentJDO.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
03a31d223bcea3b4a2122df78349d20dd752d2bf | 3596c1091527f7e542e3081508c98bf4671f8e12 | /sequencer.py | 50787dce74c5027942f20c6606c5ae7f20e16b79 | [] | no_license | clacktronics/solar_py | 6980d976555d3b47576bf131d9bbd091572c2d31 | d52264bc962a12c4e74045833eacb6942cf1a2e7 | refs/heads/master | 2020-07-14T21:41:38.942832 | 2019-10-04T16:34:10 | 2019-10-04T16:34:10 | 205,409,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | class sequencer:
def __init__(self, txt):
self.txt = txt
open('file.txt','r') as fin:
lines = fin.readlines()
for line in lines:
date = file.open(txt)
| [
"ben@clacktronics.co.uk"
] | ben@clacktronics.co.uk |
e1891e2b37b160d413d9e6cdd9d0ab9da1cbc184 | ccea7e01fbcc755c48f39929d663e842c2826ec4 | /src/align_rgb_depth.py | d554b7b2cca21d9574debef99f68cd3cd2598dff | [] | no_license | jakubmuszynski/boris_vision | 7bdb5508b6af3d64c5fd6002255ce11d544c8e35 | 79e0ed8fb993578d536e6f59bf3a5f6869e65b7a | refs/heads/main | 2023-01-11T04:40:08.440882 | 2020-11-23T10:18:37 | 2020-11-23T10:18:37 | 311,644,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,524 | py | #!/usr/bin/python3.6
import pyrealsense2 as rs
import rospy
import numpy as np
from cv_bridge import CvBridge
from sensor_msgs.msg import Image, CameraInfo
# RealSense D435 pipeline: request 640x480 colour (BGR8) and depth (Z16) at 30 fps
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
# Start streaming
pipeline.start(config)
# Align depth frames to the colour stream's viewpoint
align_to = rs.stream.color
align = rs.align(align_to)
# Node init and publisher definition (colour image, aligned depth, camera intrinsics)
rospy.init_node('align_rgb_depth', anonymous = True)
pub_color = rospy.Publisher("rgb_image", Image, queue_size=2)
pub_align = rospy.Publisher("align_depth", Image, queue_size=2)
pub_camera_info = rospy.Publisher("camera_info", CameraInfo, queue_size=2)
rate = rospy.Rate(30) # 30hz
# Build a static CameraInfo message from the colour sensor's intrinsics
profile = pipeline.get_active_profile()
color_profile = rs.video_stream_profile(profile.get_stream(rs.stream.color))
color_intrinsics = color_profile.get_intrinsics()
camera_info = CameraInfo()
camera_info.width = color_intrinsics.width
camera_info.height = color_intrinsics.height
camera_info.distortion_model = 'plumb_bob'
cx = color_intrinsics.ppx
cy = color_intrinsics.ppy
fx = color_intrinsics.fx
fy = color_intrinsics.fy
# Intrinsic matrix K, zero distortion D, identity rectification R, projection P
camera_info.K = [fx, 0, cx, 0, fy, cy, 0, 0, 1]
camera_info.D = [0, 0, 0, 0, 0]
camera_info.R = [1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0]
camera_info.P = [fx, 0, cx, 0, 0, fy, cy, 0, 0, 0, 1.0, 0]
bridge = CvBridge()
rospy.loginfo("align_rgb_depth is running")
# Main loop: grab a frame pair, publish colour + camera info + aligned depth
while not rospy.is_shutdown():
    # Get data from cameras
    frames = pipeline.wait_for_frames()
    color_frame = frames.get_color_frame()
    timestamp_d = frames.get_timestamp()
    # Publish color image
    color_image = np.asanyarray(color_frame.get_data())
    color_message = bridge.cv2_to_imgmsg(color_image, encoding="passthrough")
    pub_color.publish(color_message)
    # Publish camera info
    pub_camera_info.publish(camera_info)
    # Publish depth image aligned to the colour viewpoint
    aligned_frames = align.process(frames)
    aligned_depth_frame = aligned_frames.get_depth_frame()
    align_depth = np.asanyarray(aligned_depth_frame.get_data())
    align_message = bridge.cv2_to_imgmsg(align_depth, encoding="passthrough")
    # timestamp set
    # NOTE(review): the RealSense timestamp is in milliseconds; this ad-hoc
    # scaling (1e8 / 1e5 / *100) does not look like a standard ms->sec/nsec
    # conversion — confirm downstream consumers expect exactly this stamp.
    t1 = (timestamp_d / 100000000)
    t2 = (t1 - int(t1)) * 100000
    time = rospy.Time(secs=int(t2), nsecs = int((t2 - int(t2))*100))
    align_message.header.stamp = time
    pub_align.publish(align_message)
    rate.sleep()
# Stop streaming
pipeline.stop()
| [
"noreply@github.com"
] | jakubmuszynski.noreply@github.com |
123ef459e28f9eeaa190bf0a0361b13d1f6357ce | cfa1ede5e7df5f194f99f5f2c5e9b6cd7813e4d7 | /Trainer/views.py | 47b77f7e8e7b1f48d668e639354ea71bdddee32d | [] | no_license | Angella0/django_schoolproject | be86e31f4c33eed3eac8d189e91205739d511999 | f53eb7ffbf614349c6ae44adf2b1ebc803660556 | refs/heads/master | 2023-09-04T04:42:25.349038 | 2021-10-07T08:11:44 | 2021-10-07T08:11:44 | 380,053,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py | from django.shortcuts import render, redirect
from.models import Trainer
from .forms import TrainerRegistrationForm
from django.shortcuts import render
# Create your views here.
def register_trainer(request):
    """Show the trainer sign-up form; create a Trainer on a valid POST."""
    if request.method != "POST":
        # plain page view: render an empty form
        form = TrainerRegistrationForm()
        return render(request, "register_trainer.html", {"form": form})
    form = TrainerRegistrationForm(request.POST, request.FILES)
    if form.is_valid():
        form.save()
        return redirect('trainer:trainer_list')
    # invalid submission: log the errors and re-render the bound form
    print(form.errors)
    return render(request, "register_trainer.html", {"form": form})
def trainer_list(request):
    """List every registered trainer."""
    context = {"trainers": Trainer.objects.all()}
    return render(request, "trainer_list.html", context)
def trainer_profile(request, id):
    """Display a single trainer's profile page."""
    return render(request, "trainer_profile.html",
                  {"trainer": Trainer.objects.get(id=id)})
def trainer_edit(request, id):
    """Edit an existing trainer via the same ModelForm used at registration."""
    trainer = Trainer.objects.get(id=id)
    if request.method == "POST":
        # Bug fix: also bind request.FILES — the registration view binds it,
        # and without it any file/image field on this ModelForm is silently
        # dropped when editing.
        form = TrainerRegistrationForm(request.POST, request.FILES, instance=trainer)
        if form.is_valid():
            form.save()
            return redirect("trainer_profile", id = trainer.id)
    else:
        form = TrainerRegistrationForm(instance=trainer)
    return render(request, "trainer_edit.html", {"form":form})
| [
"angellasimbwa@gmail.com"
] | angellasimbwa@gmail.com |
c4217d64a9dda3f5f9a9bf5fd0e043fecd4ae026 | aa3dfdb72f07442f90f462a970474e03d30bcd83 | /accountapp/migrations/0001_initial.py | fb929948e6568c767dacb30d922ffd982ff843a8 | [] | no_license | LEEDOWON96/pinterest | 9efd47cee66efd3c506819d657e7062a22956bf6 | 83bf2f233955080389318059e96306d184c78209 | refs/heads/main | 2023-06-27T17:48:40.891791 | 2021-07-28T12:59:23 | 2021-07-28T12:59:23 | 389,906,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | # Generated by Django 3.2.5 on 2021-07-23 14:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); creates the HelloWorld table
    # with an auto primary key and a single 255-char text column.
    # Avoid hand-editing field definitions — generate a new migration instead.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='HelloWorld',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.CharField(max_length=255)),
            ],
        ),
    ]
| [
"solar0t@naver.com"
] | solar0t@naver.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.