blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aa4b45c03c37c440887f601fdd49a108f1479990
|
83c2cfd249e2e3c6fce223c9279e7d99e1596eda
|
/tf_gan.py
|
1631a07f29bd815bd6c822c169bd0e59f810ae1c
|
[
"Apache-2.0"
] |
permissive
|
hackthecrisis21/nist_differential_privacy_synthetic_data_challenge
|
ec5eff7102d65b3fc3406039eed80f587ef40062
|
09e93201e36e20f25bebd82cd68cd4c837789297
|
refs/heads/master
| 2022-02-16T19:31:44.739948
| 2019-05-30T00:55:32
| 2019-05-30T01:19:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,866
|
py
|
"""
Author: Moustafa Alzantot (malzantot@ucla.edu)
All rights reserved.
"""
import sys
import pdb
import math
import numpy as np
import data_utils
import pandas as pd
import json
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder
import tensorflow as tf
from tensorflow.core.framework import summary_pb2
import time
from tensorflow.distributions import Bernoulli, Categorical
from differential_privacy.dp_sgd.dp_optimizer import dp_optimizer
from differential_privacy.dp_sgd.dp_optimizer import sanitizer
from differential_privacy.dp_sgd.dp_optimizer import utils
from differential_privacy.privacy_accountant.tf import accountant
# Command-line configuration via TF1's flag wrapper (argparse-style).
flags = tf.app.flags
# I/O paths
flags.DEFINE_string('input_file', 'input.csv', 'Input file')
flags.DEFINE_string('output_file', 'output.csv', 'output file')
flags.DEFINE_string('meta_file', 'metadata.json', 'metadata file')
# Differential-privacy budget; delta=None triggers the 1/n^2 heuristic in __main__.
flags.DEFINE_float('epsilon', 8.0, 'Target eps')
flags.DEFINE_float('delta', None, 'maximum delta')
# Training parameters
flags.DEFINE_integer('batch_size', 64, 'Batch size')
flags.DEFINE_float('lr', 1e-3, 'learning rate')
flags.DEFINE_integer('num_epochs', 20, 'Number of training epochs')
flags.DEFINE_integer(
    'save_every', 1, 'Save training logs every how many epochs')
flags.DEFINE_float('weight_clip', 0.01, 'weight clipping value')
# Model parameters
flags.DEFINE_integer('z_size', 64, 'Size of input size')
flags.DEFINE_integer('hidden_dim', 1024, 'Size of hidden layer')
# Privacy parameters
flags.DEFINE_bool('with_privacy', False, 'Turn on/off differential privacy')
flags.DEFINE_float('gradient_l2norm_bound', 1.0, 'l2 norm clipping')
# Sampling and model restore
flags.DEFINE_integer('sampling_size', 100000, 'Number of examples to sample')
flags.DEFINE_string('checkpoint', None, 'Checkpoint to restore')
flags.DEFINE_bool('sample', False, 'Perform sampling')
flags.DEFINE_bool('dummy', False,
                  'If True, then test our model using dummy data ')
#########################################################################
# Utility functions for building the WGAN model
#########################################################################
def lrelu(x, alpha=0.01):
    """Leaky ReLU: identity for positive inputs, `alpha`-scaled negatives."""
    return tf.nn.leaky_relu(x, alpha=alpha)
def fully_connected(input_node, output_dim, activation=tf.nn.relu, scope=None):
    """Affine projection followed by an activation.

    Args:
        input_node: 2-D tensor of shape [batch, in_dim].
        output_dim: number of output units.
        activation: callable applied to the affine projection.
        scope: variable-scope name; falls back to 'FC' when not given.

    Returns:
        (z, h): the pre-activation projection and the activated output.
    """
    # BUG FIX: the default used to be the *string* 'None', which is truthy,
    # so the `scope or 'FC'` fallback could never fire and a default-scoped
    # layer was literally named "None". All in-file callers pass a scope
    # explicitly, so this is backward-compatible.
    with tf.variable_scope(scope or 'FC'):
        w = tf.get_variable('w', shape=[input_node.get_shape()[1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable('b', shape=[output_dim],
                            initializer=tf.constant_initializer())
        # Histograms for TensorBoard monitoring of the layer weights.
        tf.summary.histogram('w', w)
        tf.summary.histogram('b', b)
        z = tf.matmul(input_node, w) + b
        h = activation(z)
        return z, h
def critic_f(input_node, hidden_dim):
    """WGAN critic: one leaky-ReLU hidden layer, then an unbounded scalar score."""
    _, hidden = fully_connected(input_node, hidden_dim, lrelu, scope='fc1')
    score, _ = fully_connected(hidden, 1, tf.identity, scope='fc3')
    return score
def generator(input_node, hidden_dim, output_dim):
    """Generator: one leaky-ReLU hidden layer, then a linear output projection."""
    _, hidden = fully_connected(input_node, hidden_dim, lrelu, scope='fc1')
    raw_output, _ = fully_connected(hidden, output_dim, tf.identity, scope='fc3')
    return raw_output
def nist_data_format(output, metadata, columns_list, col_maps):
    """Map raw generator outputs onto per-column activations.

    Column mapping conventions (from col_maps):
      dict of 2 values -> one sigmoid unit (binary categorical)
      dict of k values -> softmax over k units
      'int'            -> raw linear unit
      'int_v'          -> sigmoid presence flag + raw value unit
      'void'           -> column consumes no output units

    Args:
        output: [batch, data_dim] raw generator output.
        metadata: dataset metadata (unused here; kept for interface parity
            with nist_sampling_format).
        columns_list: ordered column names.
        col_maps: per-column mapping spec as described above.

    Returns:
        [batch, data_dim'] tensor of formatted activations.
    """
    with tf.name_scope('nist_format'):
        output_list = []
        cur_idx = 0
        for k in columns_list:
            v = col_maps[k]
            if isinstance(v, dict):
                if len(v) == 2:
                    # Binary categorical: single sigmoid unit.
                    output_list.append(tf.nn.sigmoid(
                        output[:, cur_idx:cur_idx+1]))
                    cur_idx += 1
                else:
                    # Multi-class categorical: softmax over len(v) units.
                    output_list.append(
                        tf.nn.softmax(output[:, cur_idx: cur_idx+len(v)]))
                    cur_idx += len(v)
            elif v == 'int':
                output_list.append(output[:, cur_idx:cur_idx+1])
                cur_idx += 1
            elif v == 'int_v':
                output_list.append(tf.nn.sigmoid(output[:, cur_idx:cur_idx+1]))
                output_list.append(output[:, cur_idx+1:cur_idx+2])
                cur_idx += 2
            elif v == 'void':
                pass
            else:
                # BUG FIX: error message previously read 'ivnalid'.
                raise Exception('invalid mapping for col {}'.format(k))
        return tf.concat(output_list, axis=1)
def nist_sampling_format(output, metadata, columns_list, col_maps):
    """
    Output layer format for generator data plus performing random sampling
    from the output softmax and bernoulli distributions.

    Mirrors nist_data_format's column conventions but draws discrete samples
    for categorical columns instead of returning probabilities.
    """
    with tf.name_scope('nist_sampling_format'):
        output_list = []
        cur_idx = 0
        for k in columns_list:
            v = col_maps[k]
            if isinstance(v, dict):
                if len(v) == 2:
                    # Binary categorical: sample 0/1 from a Bernoulli on the logit.
                    output_list.append(
                        tf.cast(
                            tf.expand_dims(
                                Bernoulli(logits=output[:, cur_idx]).sample(), axis=1), tf.float32)
                    )
                    cur_idx += 1
                else:
                    # Multi-class categorical: sample an index from the logits.
                    output_list.append(
                        tf.cast(tf.expand_dims(
                            Categorical(logits=output[:, cur_idx: cur_idx+len(v)]).sample(), axis=1), tf.float32))
                    cur_idx += len(v)
            elif v == 'int':
                output_list.append(
                    tf.nn.relu(output[:, cur_idx:cur_idx+1]))
                cur_idx += 1
            elif v == 'int_v':
                output_list.append(tf.nn.sigmoid(output[:, cur_idx:cur_idx+1]))
                output_list.append(tf.nn.relu(output[:, cur_idx+1:cur_idx+2]))
                cur_idx += 2
            elif v == 'void':
                pass
            else:
                # BUG FIX: unknown mappings used to be silently skipped, which
                # desynchronizes cur_idx and misaligns every later column.
                # Fail loudly, consistent with nist_data_format.
                raise Exception('invalid mapping for col {}'.format(k))
        return tf.concat(output_list, axis=1)
def sample_dataset(sess, sampling_output, output_fname, columns_list, sampling_size):
    """ Performs sampling to output synthetic data from the generative model.
    Saves the result to output_fname file.

    NOTE(review): this function reads the module-level globals `metadata`,
    `col_maps` and `original_df` that are only bound inside the __main__
    block — it cannot be imported and used standalone.

    Args:
        sess: live tf.Session with trained generator variables.
        sampling_output: sampling graph node (one batch of samples per run).
        output_fname: CSV path to write.
        columns_list: ordered column names for post-processing.
        sampling_size: minimum number of rows to generate.
    """
    sampling_result = []
    num_samples = 0
    # Keep drawing fixed-size batches until we have at least sampling_size rows
    # (the final count may overshoot by up to one batch).
    while num_samples < sampling_size:
        batch_samples = sess.run(sampling_output)
        num_samples += batch_samples.shape[0]
        sampling_result.append(batch_samples)
    sampling_result = np.concatenate(sampling_result, axis=0)
    print(sampling_result.shape)
    # Map sampled network outputs back to the original column encodings.
    final_df = data_utils.postprocess_data(
        sampling_result, metadata, col_maps, columns_list, greedy=False)
    print(final_df.shape)
    final_df = pd.DataFrame(
        data=final_df, columns=original_df.columns, index=None)
    final_df.to_csv(output_fname, index=False)
if __name__ == '__main__':
    FLAGS = flags.FLAGS
    # Reading input data
    original_df, input_data, metadata, col_maps, columns_list = data_utils.preprocess_nist_data(
        FLAGS.input_file, FLAGS.meta_file, subsample=False)
    input_data = input_data.values  # .astype(np.float32)
    data_dim = input_data.shape[1]
    format_fun = nist_data_format
    num_examples = input_data.shape[0]
    print('** Reading input ** ')
    print('-- Read {} rows, {} columns ----'.format(num_examples, data_dim))
    batch_size = FLAGS.batch_size
    print('Batch size = ', batch_size)
    num_batches = math.ceil(num_examples / batch_size)
    # T = total critic steps over training; q = per-batch sampling ratio.
    T = FLAGS.num_epochs * num_batches
    q = float(FLAGS.batch_size) / num_examples
    max_eps = FLAGS.epsilon
    if FLAGS.delta is None:
        # Default delta heuristic: below 1/n^2.
        max_delta = 1.0 / (num_examples**2)
    else:
        max_delta = FLAGS.delta
    print('Privacy budget = ({}, {})'.format(max_eps, max_delta))
    # Decide which privacy accountant to use.
    use_moments_accountant = max_eps > 0.7
    if use_moments_accountant:
        # Larger target epsilon tolerates less noise.
        if max_eps > 5.0:
            sigma = 1.0
        else:
            sigma = 3.0
        eps_per_step = None  # unused for moments accountant
        delta_per_step = None  # unused for moments accountant
        print('Using moments accountant (\sigma = {})'.format(sigma))
    else:
        sigma = None  # unused for amortized accountant
        # bound of eps_per_step from lemma 2.3 in https://arxiv.org/pdf/1405.7085v2.pdf
        eps_per_step = max_eps / (q * math.sqrt(2 * T * math.log(1/max_delta)))
        delta_per_step = max_delta / (T * q)
        print('Using amortized accountant (\eps, \delta)-per step = ({},{})'.format(
            eps_per_step, delta_per_step))
    with tf.name_scope('inputs'):
        x_holder = tf.placeholder(tf.float32, [None, data_dim], 'x')
        z_holder = tf.random_normal(shape=[FLAGS.batch_size, FLAGS.z_size],
                                    dtype=tf.float32, name='z')
        sampling_noise = tf.random_normal([FLAGS.batch_size, FLAGS.z_size],
                                          dtype=tf.float32, name='sample_z')
        eps_holder = tf.placeholder(tf.float32, [], 'eps')
        delta_holder = tf.placeholder(tf.float32, [], 'delta')
    print("Data Dimention: ", data_dim)
    print("X Holder: ", x_holder)
    print("Z Holder: ", z_holder)
    # Generator graph; the sampling path reuses the same variables.
    with tf.variable_scope('generator') as scope:
        gen_output = generator(z_holder, FLAGS.hidden_dim, data_dim)
        print(gen_output)
        gen_output = format_fun(gen_output, metadata, columns_list, col_maps)
        print(gen_output)
        scope.reuse_variables()
        sampling_output = generator(sampling_noise, FLAGS.hidden_dim, data_dim)
        sampling_output = nist_sampling_format(
            sampling_output, metadata, columns_list, col_maps)
        print(sampling_output)
    # Critic graph, applied to both real and generated data with shared weights.
    with tf.variable_scope('critic') as scope:
        critic_real = critic_f(x_holder, FLAGS.hidden_dim)
        scope.reuse_variables()
        critic_fake = critic_f(gen_output, FLAGS.hidden_dim)
    with tf.name_scope('train'):
        global_step = tf.Variable(
            0, dtype=tf.int32, trainable=False, name='global_step')
        # WGAN critic loss: -E[critic(real)] + E[critic(fake)].
        loss_critic_real = - tf.reduce_mean(critic_real)
        loss_critic_fake = tf.reduce_mean(critic_fake)
        loss_critic = loss_critic_real + loss_critic_fake
        critic_vars = [x for x in tf.trainable_variables()
                       if x.name.startswith('critic')]
        if FLAGS.with_privacy:
            # assert FLAGS.sigma > 0, 'Sigma has to be positive when with_privacy=True'
            with tf.name_scope('privacy_accountant'):
                if use_moments_accountant:
                    # Moments accountant introduced in (https://arxiv.org/abs/1607.00133);
                    # implementation from tensorflow/models research/differential_privacy.
                    priv_accountant = accountant.GaussianMomentsAccountant(
                        num_examples)
                else:
                    # AmortizedAccountant tracks privacy spending amortized over
                    # batches via amplification-by-sampling and strong composition
                    # (http://arxiv.org/pdf/1405.7085v2.pdf).
                    priv_accountant = accountant.AmortizedAccountant(
                        num_examples)
            # per-example Gradient l_2 norm bound.
            example_gradient_l2norm_bound = FLAGS.gradient_l2norm_bound / FLAGS.batch_size
            # Gaussian sanitizer: clips per-example gradients, adds Gaussian
            # noise, and notifies the accountant of the spending.
            gaussian_sanitizer = sanitizer.AmortizedGaussianSanitizer(
                priv_accountant,
                [example_gradient_l2norm_bound, True])
            critic_step = dp_optimizer.DPGradientDescentOptimizer(
                FLAGS.lr,
                # (eps, delta) unused parameters for the moments accountant
                [eps_holder, delta_holder],
                gaussian_sanitizer,
                sigma=sigma,
                batches_per_lot=1,
                var_list=critic_vars).minimize((loss_critic_real, loss_critic_fake),
                                               global_step=global_step, var_list=critic_vars)
        else:
            # This is used when we train without privacy.
            critic_step = tf.train.RMSPropOptimizer(FLAGS.lr).minimize(
                loss_critic, var_list=critic_vars)
        # Weight clipping to ensure the critic function is K-Lipschitz as
        # required for WGAN training.
        clip_c = [tf.assign(var, tf.clip_by_value(
            var, -FLAGS.weight_clip, FLAGS.weight_clip)) for var in critic_vars]
        with tf.control_dependencies([critic_step]):
            critic_step = tf.tuple(clip_c)
        # Training step of generator
        generator_vars = [x for x in tf.trainable_variables()
                          if x.name.startswith('generator')]
        loss_generator = -tf.reduce_mean(critic_fake)
        generator_step = tf.train.RMSPropOptimizer(FLAGS.lr).minimize(
            loss_generator, var_list=generator_vars)
    weight_summaries = tf.summary.merge_all()
    tb_c_op = tf.summary.scalar('critic_loss', loss_critic)
    tb_g_op = tf.summary.scalar('generator_loss', loss_generator)
    final_eps = 0.0
    final_delta = 0.0
    critic_iters = 10  # critic updates per generator update (WGAN convention)
    with tf.Session() as sess:
        summary_writer = tf.summary.FileWriter('./logs', sess.graph)
        summary_writer.flush()
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        if FLAGS.checkpoint:
            # Load the model
            saver.restore(sess, FLAGS.checkpoint)
        if FLAGS.sample:
            # Sampling-only mode: generate data and exit without training.
            sample_dataset(sess, sampling_output,
                           FLAGS.output_file, columns_list, FLAGS.sampling_size)
            # NOTE(review): this assert runs *after* sampling, so an absent
            # checkpoint is only reported once sampling already ran on
            # untrained weights — likely intended to come first.
            assert FLAGS.checkpoint is not None, "You must provide a checkpoint."
            sys.exit(0)
        abort_early = False  # Flag that will be changed to True if we exceed the privacy budget
        for e in range(FLAGS.num_epochs):
            if abort_early:
                break
            # One epoch is one full pass over the whole training data
            start_time = time.time()
            # Randomly shuffle the data at the beginning of each epoch
            rand_idxs = np.arange(num_examples)
            np.random.shuffle(rand_idxs)
            idx = 0
            abort_early = False
            while idx < num_batches and not abort_early:
                if idx % 10 == 0:
                    sys.stdout.write('\r{}/{}'.format(idx, num_batches))
                    sys.stdout.flush()
                critic_i = 0
                while critic_i < critic_iters and idx < num_batches and not abort_early:
                    # Train the critic.
                    batch_idxs = rand_idxs[idx*batch_size: (idx+1)*batch_size]
                    batch_xs = input_data[batch_idxs, :]
                    feed_dict = {x_holder: batch_xs,
                                 eps_holder: eps_per_step,
                                 delta_holder: delta_per_step
                                 }
                    _, tb_c = sess.run(
                        [critic_step, tb_c_op], feed_dict=feed_dict)
                    critic_i += 1
                    idx += 1
                if FLAGS.with_privacy:
                    if use_moments_accountant:
                        spent_eps_deltas = priv_accountant.get_privacy_spent(
                            sess, target_deltas=[max_delta])[0]
                    else:
                        spent_eps_deltas = priv_accountant.get_privacy_spent(
                            sess, target_eps=None)[0]
                    # Check whether we exceed the privacy budget
                    if (spent_eps_deltas.spent_delta > max_delta or
                            spent_eps_deltas.spent_eps > max_eps):
                        abort_early = True
                        print(
                            "\n*** Discriminator training exceeded privacy budget, aborting the training of generator ****")
                    else:
                        final_eps = spent_eps_deltas.spent_eps
                        final_delta = spent_eps_deltas.spent_delta
                else:
                    # Training without privacy
                    spent_eps_deltas = accountant.EpsDelta(np.inf, 1)
                # Train the generator
                if not abort_early:
                    # Check for abort_early because we stop updating the
                    # generator once we exceeded the privacy budget.
                    privacy_summary = summary_pb2.Summary(value=[
                        summary_pb2.Summary.Value(tag='eps',
                                                  simple_value=final_eps)])
                    summary_writer.add_summary(privacy_summary, e)
                    _, tb_g = sess.run([generator_step, tb_g_op])
                    if e % FLAGS.save_every == 0 or (e == FLAGS.num_epochs-1):
                        summary_writer.add_summary(tb_g, e)
            end_time = time.time()
            if (e % FLAGS.save_every == 0) or (e == FLAGS.num_epochs-1) or abort_early:
                summary_writer.add_summary(tb_c, e)
                weight_summary_out = sess.run(
                    weight_summaries, feed_dict=feed_dict)
                summary_writer.add_summary(weight_summary_out, e)
            print('\nEpoch {} took {} seconds. Privacy = ({}, {}).'.format(
                e, (end_time-start_time), spent_eps_deltas.spent_eps, spent_eps_deltas.spent_delta))
            summary_writer.flush()
        if FLAGS.with_privacy:
            print('\nTotal (\eps, \delta) privacy loss spent in training = ({}, {})'.format(
                final_eps, final_delta))
        summary_writer.close()
        # Sample synthetic data from the model after training is done.
        sample_dataset(sess, sampling_output,
                       FLAGS.output_file, columns_list, FLAGS.sampling_size)
|
[
"malzantot@ucla.edu"
] |
malzantot@ucla.edu
|
c269d574e4600120c31cf809271f26f9fb1139e2
|
291cdc8de042c272fea74a10169cc3e7b3518dac
|
/console.py
|
0893b1d8f93dfec70c89a432ae6aa18b8f4d27dd
|
[] |
no_license
|
sagarmsraozil/Python-Console
|
64007cba60b5a74b3a8f18fb9feb592e652e50e5
|
82db8833d558b1ed09456fe4f885dc180da5df95
|
refs/heads/master
| 2023-01-08T23:11:36.378184
| 2020-11-06T06:49:32
| 2020-11-06T06:49:32
| 310,515,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 125
|
py
|
import random

# Sample dictionary whose keys we want to collect.
a = {
    'sdas': [],
    'sdasd': [],
    'sdads': [],
}

# IMPROVED: replaced the manual append loop with list(dict.keys());
# dict preserves insertion order, so the result is identical.
keyA = list(a.keys())
print(keyA)
|
[
"sagarcrcoc@gmail.com"
] |
sagarcrcoc@gmail.com
|
91ad79fe802f8441997c7574f787866330f8fdaf
|
7a0334693cd31fe4fdef06324ede0d72c6530c40
|
/event_crud/migrations/0001_initial.py
|
414c9f942def602edac8017b35088131a4404944
|
[] |
no_license
|
lilitotaryan/eventnet-back-end
|
7949668a4108b36a6e1a2f6439d6e966991d64ba
|
5828b1520b8feeb363fdac0b85b08e001572991e
|
refs/heads/main
| 2023-02-18T02:24:45.475978
| 2021-01-22T18:15:42
| 2021-01-22T18:15:42
| 332,027,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,402
|
py
|
# Generated by Django 2.2.5 on 2020-02-26 19:01
import authentication.utils
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration for the event_crud app: creates the Event model.
    # Depends on the swappable user model and on user_crud 0005 so the
    # Address/Category foreign targets exist.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('user_crud', '0005_remove_customuser_phone_number'),
    ]

    operations = [
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(default=None, max_length=100)),
                ('description', models.CharField(default=None, max_length=500)),
                # Default event time comes from the app's own clock helper.
                ('date', models.DateTimeField(default=authentication.utils.get_current_time)),
                ('is_responsible', models.BooleanField(default=False)),
                ('contact_phone_number', models.CharField(default=None, max_length=100, unique=True)),
                # Keep the event if its address is deleted (SET_NULL).
                ('address', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='user_crud.Address')),
                ('categories', models.ManyToManyField(blank=True, to='user_crud.Category')),
                ('users', models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"lilit_otaryan@edu.aua.am"
] |
lilit_otaryan@edu.aua.am
|
adbedc8206330810c70cdc570b3140a5f632e51e
|
f7e5a206c5ca75043b662c8f9905a070b7e37060
|
/cart/views.py
|
54f1f59dc1f21f1a4b7c6b04e842911c7f358e15
|
[] |
no_license
|
sweetmentor/Str4-eCommerce-App
|
4d22945f7b5dc0a40b577b8ed45caf22c9e644d4
|
e50edff9183a207c8e7daff16136059fcb5f9002
|
refs/heads/master
| 2020-03-22T00:26:36.973580
| 2019-01-29T01:13:56
| 2019-01-29T01:13:56
| 139,244,613
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,193
|
py
|
from django.shortcuts import render, redirect, get_object_or_404, HttpResponse
from product.models import Product
from .utils import get_cart_items_and_total
# Create your views here.
def view_cart(request):
    """Render the cart page for the cart stored in the session."""
    session_cart = request.session.get('cart', {})
    return render(request, "cart/cart.html", get_cart_items_and_total(session_cart))
def remove_from_cart(request):
    """Decrement the posted product's quantity in the session cart.

    Removes the line entirely once the quantity reaches zero, then
    redirects back to the cart page.
    """
    id = request.POST['product_id']
    # 404 when the posted id does not match a Product (binding otherwise unused).
    product = get_object_or_404(Product, pk=id)
    cart = request.session.get('cart', {})
    if id in cart:
        # Subtract 1 from the quantity
        cart[id] -= 1
        # If the quantity is now 0, then delete the item
        if cart[id] == 0:
            del cart[id]
    # Persist the (possibly unchanged) cart back to the session.
    request.session['cart'] = cart
    return redirect('view_cart')
def add_to_cart(request):
    """Add one unit of the posted product to the session cart, then redirect home."""
    # Get the product we're adding
    product_id = request.POST['product_id']
    # Raises 404 when the product id is unknown.
    product = get_object_or_404(Product, pk=product_id)
    # Get the current Cart
    cart = request.session.get('cart', {})
    # Update the Cart
    cart[product_id] = cart.get(product_id, 0) + 1
    # Save the Cart back to the session
    request.session['cart'] = cart
    # Redirect somewhere
    return redirect("/")
|
[
"stephenafolabi@gmail.com"
] |
stephenafolabi@gmail.com
|
af5f397e8cb4517e8407a5909c3e2701dcbc7068
|
7dfe9d88e81113c18f3feb11d1dd308483f5949d
|
/run_faces.py
|
5b96539f519fbb4a600dbb2a114840deccb7d2f6
|
[] |
no_license
|
mahavir0/FACE-RECOGNITION-TENSORFLOW-ATTENDENCE
|
63b092c86a4f6bd8a15112ac78984663db0d8fc8
|
730880b982368e799d5e06329b776ee824c06c2b
|
refs/heads/master
| 2020-07-26T08:09:05.374261
| 2019-09-15T12:08:22
| 2019-09-15T12:08:22
| 208,585,446
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,009
|
py
|
import find_faces
import cv2
import sys
import sqlite3
import csv
import json
import pandas as pd
from sklearn import svm
from sklearn.externals import joblib
from PIL import Image,ImageDraw,ImageFont
import time
from identify_face_video import face_video
from identify_face_image import face_image
from sql import *
def convert_date(string):
    """Convert a 'Y-M-D' date string to 'D_M_Y' form.

    Strings that contain no '-' separator are returned unchanged.
    Only the first three '-'-separated fields are used.
    """
    if "-" not in string:
        return string
    parts = string.split('-')
    return "{}_{}_{}".format(parts[2], parts[1], parts[0])
def take_attendence(date, filename, courseId):
    """Recognize faces in a video file and record attendance for a course.

    Delegates recognition to identify_face_video.face_video and persists the
    result via processAttendance. Returns the list of recognized ids.
    """
    ids = face_video(filename)  # Call function from identify_face_video.py
    print("i found array: ", ids)
    print(date)
    print(ids)
    processAttendance(date, courseId, ids)
    return ids
def take_attendence_image(date, filename, courseId):
    """Recognize faces in a still image and record attendance for a course.

    Delegates recognition to identify_face_image.face_image and persists the
    result via processAttendance. Returns the list of recognized ids.
    """
    ids = face_image(filename)  # Call function from identify_face_image.py
    print("i found array: ", ids)
    print(date)
    print(ids)
    processAttendance(date, courseId, ids)
    return ids
|
[
"170010116034@adit.ac.in"
] |
170010116034@adit.ac.in
|
b5694c0362e026890305fda0e8d8abc8c2aa85ab
|
f96318ff6cd12c23643a31c5385480535cc6e9cb
|
/307. Range Sum Query - Mutable/solution.py
|
aaf62ee58362e2dd5dbd82ba6305ec8b495c7845
|
[] |
no_license
|
yz89122/leetcode
|
157156c463078fc2aeb98832a6c00f042cf9c7c4
|
95b03a7f5108dc839452361e1428649c382b8908
|
refs/heads/master
| 2022-03-28T12:46:28.565963
| 2019-12-03T14:24:11
| 2019-12-03T14:24:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,259
|
py
|
class NumArray(object):
    """Mutable range-sum structure backed by an iterative segment tree.

    Layout: tree[n..2n-1] hold the original values (leaves); internal node i
    covers children 2i and 2i+1, with tree[1] as the root. Build is O(n);
    update and sumRange are O(log n).

    FIXED: removed leftover debug prints ('aaa', 'i', 'j') that polluted
    stdout on every query, and stopped update() writing the unused tree[0].
    """

    def __init__(self, nums):
        """
        :type nums: List[int]
        """
        self.n = len(nums)
        self.tree = [0] * self.n + nums
        # Build parents bottom-up from the leaves.
        for i in range(self.n - 1, 0, -1):
            self.tree[i] = self.tree[i * 2] + self.tree[i * 2 + 1]

    def update(self, i, val):
        """Set nums[i] = val and refresh all ancestor sums.

        :type i: int
        :type val: int
        :rtype: None
        """
        i += self.n
        self.tree[i] = val
        # Walk up to the root (index 1), recomputing each parent.
        while i > 1:
            i //= 2
            self.tree[i] = self.tree[2 * i] + self.tree[2 * i + 1]

    def sumRange(self, i, j):
        """Return sum(nums[i..j]) inclusive.

        :type i: int
        :type j: int
        :rtype: int
        """
        i, j, s = i + self.n, j + self.n, 0
        while i <= j:
            if i & 1:
                # i is a right child: its subtree lies partly outside the
                # range, so take the node itself and move right.
                s += self.tree[i]
                i += 1
            if j & 1 == 0:
                # j is a left child: symmetric case on the right edge.
                s += self.tree[j]
                j -= 1
            i //= 2
            j //= 2
        return s
# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# obj.update(i,val)
# param_2 = obj.sumRange(i,j)
|
[
"yanzhen610@gmail.com"
] |
yanzhen610@gmail.com
|
ac60830bcb8ab8d05d3b4a995a1b9e7f2e93a2fa
|
2f2e9cd97d65751757ae0a92e8bb882f3cbc5b5b
|
/665.非递减数列.py
|
69ccfdcba89fb3679104b70233a147b4b5ee3c0d
|
[] |
no_license
|
mqinbin/python_leetcode
|
77f0a75eb29f8d2f9a789958e0120a7df4d0d0d3
|
73e0c81867f38fdf4051d8f58d0d3dc245be081e
|
refs/heads/main
| 2023-03-10T18:27:36.421262
| 2021-02-25T07:24:10
| 2021-02-25T07:24:10
| 314,410,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 594
|
py
|
#
# @lc app=leetcode.cn id=665 lang=python3
#
# [665] 非递减数列
#
# @lc code=start
class Solution:
    def checkPossibility(self, nums: list) -> bool:
        """Return True if nums can become non-decreasing by modifying at
        most one element (LeetCode 665). Mutates nums in place while checking.

        FIXED: the annotation previously used `List[int]` without importing
        typing.List, raising NameError when the module runs outside the
        LeetCode harness; plain `list` keeps the hint without the dependency.
        """
        c = 0  # number of descents repaired so far
        for i in range(len(nums) - 1):
            if nums[i] > nums[i + 1]:
                c += 1
                if i > 0:
                    if nums[i - 1] <= nums[i + 1]:
                        # Lowering nums[i] keeps the prefix non-decreasing.
                        nums[i] = nums[i - 1]
                    else:
                        # Otherwise we must raise nums[i+1] instead.
                        nums[i + 1] = nums[i]
                if c > 1:
                    return False
        return True
# @lc code=end
|
[
"mqinbin@gmail.com"
] |
mqinbin@gmail.com
|
ad240cbc0ee17f741fac4ba96dd63d341d91ab8b
|
f262085a295c6a12b5f4f86b1a36d92e2e254b3d
|
/simulator/lvgl/LVGL.Simulator/lv_examples/src/lv_ex_widgets/lv_ex_dropdown/lv_ex_dropdown_2.py
|
a85d54598d725021dbf1f2d7204ea754294e064d
|
[
"MIT"
] |
permissive
|
fxsheep/8910DM_LVGL
|
a7419a0ec4ff8b0612200b0073759c7beb7fad5e
|
652a91d7ebda374be58fbb927714263234a78698
|
refs/heads/master
| 2023-07-09T16:37:09.962057
| 2021-08-11T07:18:47
| 2021-08-11T07:18:47
| 391,400,407
| 2
| 0
| null | 2021-07-31T15:55:06
| 2021-07-31T15:55:06
| null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
# Build the options string once: one fruit per line, as the dropdown expects.
fruit_options = "\n".join((
    "Apple",
    "Banana",
    "Orange",
    "Melon",
    "Grape",
    "Raspberry",
))

# Create a drop down list on the active screen.
ddlist = lv.dropdown(lv.scr_act())
ddlist.set_options(fruit_options)
ddlist.set_dir(lv.dropdown.DIR.LEFT)
ddlist.set_symbol(None)
ddlist.set_show_selected(False)
ddlist.set_text("Fruits")
# Re-alignment happens automatically when the size changes.
ddlist.align(None, lv.ALIGN.IN_TOP_RIGHT, 0, 20)

# Second dropdown copied from the first, placed lower on the same edge.
ddlist = lv.dropdown(lv.scr_act(), ddlist)
ddlist.align(None, lv.ALIGN.IN_TOP_RIGHT, 0, 100)
|
[
"864876693@qq.com"
] |
864876693@qq.com
|
6126ef9b64f70bf1488569093bd468d09d33d3ec
|
7f338db1b0355937ceeff4084d10a99e2febf8d9
|
/Rotating bars/Rotating bars.py
|
e34427582d5d407736cb9cf9f2846ec541318810
|
[] |
no_license
|
Artengar/Drawbot
|
d98c4cb501f940dd039de47a23a92a2ad148484d
|
fc65a63e197ad7597cb1bd32d0ae703ee71557be
|
refs/heads/master
| 2021-06-10T10:41:22.901281
| 2020-04-09T13:58:02
| 2020-04-09T13:58:02
| 108,984,281
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,141
|
py
|
#----
# Maarten Renckens's Snares animation
# Python script for Drawbot
# For questions, please contact maarten.renckens@artengar.com
#----
import math
#from fractions import Fraction
# Good explanation about sine, cosine and tangent: https://www.youtube.com/watch?v=VBygj7p0HKc

# Input that can be changed:
pageWidth=1000
pageHeight=1000
desiredAmountOfFrames=36
desiredAmountOfVisibleBars=20#With an amount too high, colors will not work well.
halfStrokeLength=600

# Fixed variables:
if desiredAmountOfVisibleBars==0:
    desiredAmountOfVisibleBars=1  # avoid division by zero in the visibility formula
stepsInDegrees=360/desiredAmountOfFrames
frameNumber=0
barNumber=0
#frameDuration(1/60)

# One Drawbot page per animation frame. newPage/translate/fill/rect/stroke/
# strokeWidth/line/width/height/saveImage are Drawbot builtins — this script
# only runs inside the Drawbot app.
while frameNumber < desiredAmountOfFrames:
    newPage(pageWidth,pageHeight)
    # Move the origin to the page center so bars rotate around it.
    translate(pageWidth/2,pageHeight/2)
    fill(1)
    rect(-pageWidth/2, -pageHeight/2, width(), height())
    # Calculate corners and distances.
    # The first bar is straight, but the possible other bars are not.
    thisStepInDegrees=stepsInDegrees*frameNumber-(desiredAmountOfVisibleBars-1)*stepsInDegrees
    print("degree:", thisStepInDegrees)
    # Draw all bars
    while barNumber < desiredAmountOfVisibleBars:
        inBetweenStep=thisStepInDegrees+barNumber*stepsInDegrees
        if inBetweenStep != 90:
            xDistance = (math.tan(math.radians(inBetweenStep)))
        else:
            # tan(90 degrees) is undefined; fall back to a fixed offset.
            xDistance = -halfStrokeLength
        print(barNumber, inBetweenStep)
        #THERE IS A MISTAKE IN CALCULATING THE LINE LENGTH.
        #WHEN HALFSTROKELENGTH IS SMALL, THIS IS VISIBLE…
        # NOTE(review): yDistance is never used below, and radians() wraps the
        # whole product — likely meant math.sin(math.radians(inBetweenStep))*xDistance.
        yDistance = (math.sin(math.radians(inBetweenStep)*xDistance))
        # Draw the line; later bars fade toward white.
        visibility=1-((barNumber+1)*(1/desiredAmountOfVisibleBars))
        print(visibility)
        stroke(visibility, visibility, visibility)
        strokeWidth(125)
        # Determine the angle of the bar from the tangent offset.
        line((xDistance*halfStrokeLength,-halfStrokeLength), (-xDistance*halfStrokeLength,halfStrokeLength))
        # Finish the inner loop.
        barNumber+=1
    # Reset per-frame state for the next frame.
    barNumber=0
    inBetweenStep=0
    frameNumber+=1
#saveImage("~/Github/Drawbot/Rotating_bars.gif")
saveImage("~/Desktop/Rotating_bars.gif")
|
[
"contact@artengar.com"
] |
contact@artengar.com
|
27d214b5b033cb21e812b5568854396b459d8ab9
|
bdd40ea113fdf2f04ef7d61a096a575322928d1d
|
/Rupesh/DjangoTutorial/TOdo/TOdo/Task/migrations/0002_auto_20200219_0600.py
|
56b743b8b63b2342fd7f88303c0256f187fcae5f
|
[] |
no_license
|
rupesh7399/rupesh
|
3eebf924d33790c29636ad59433e10444b74bc2f
|
9b746acf37ab357c147cdada1de5458c5fc64f53
|
refs/heads/master
| 2020-12-22T05:01:29.176696
| 2020-03-03T10:32:36
| 2020-03-03T10:32:36
| 202,111,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
# Generated by Django 2.2 on 2020-02-19 06:00
from django.db import migrations, models
class Migration(migrations.Migration):
    # Changes Task.lastDate to a plain DateField (drops whatever options the
    # field carried in 0001_initial).

    dependencies = [
        ('Task', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='task',
            name='lastDate',
            field=models.DateField(),
        ),
    ]
|
[
"rupesh7399@gmail.com"
] |
rupesh7399@gmail.com
|
93218602b7a30997b8ff0defd0e336e8bd93427c
|
07acf11fadb7fbbf342e1f5005c9197d36b79f56
|
/aliyun-python-sdk-vod/aliyunsdkvod/request/v20170321/DescribeUserAvgTimeByDayRequest.py
|
9a798b69525f04e4ccee97970a5bb855ed2ec093
|
[
"Apache-2.0"
] |
permissive
|
ccflying/aliyun-openapi-python-sdk
|
9ce8d43a39b8fa10a78fdf4f4831befbfc48ad4e
|
2ddb938c366c06526deeec0ec46f8266235e52f6
|
refs/heads/master
| 2020-04-19T09:18:02.597601
| 2019-01-29T05:02:01
| 2019-01-29T05:02:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,822
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeUserAvgTimeByDayRequest(RpcRequest):
    """RPC request for the VOD 'DescribeUserAvgTimeByDay' API (version 2017-03-21).

    Each query parameter gets a getter/setter pair, following the SDK's
    generated-code convention of storing values in the query-param map.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'vod', '2017-03-21', 'DescribeUserAvgTimeByDay', 'vod')

    def get_VideoType(self):
        return self.get_query_params().get('VideoType')

    def set_VideoType(self, VideoType):
        self.add_query_param('VideoType', VideoType)

    def get_SecurityToken(self):
        return self.get_query_params().get('SecurityToken')

    def set_SecurityToken(self, SecurityToken):
        self.add_query_param('SecurityToken', SecurityToken)

    def get_EndTime(self):
        return self.get_query_params().get('EndTime')

    def set_EndTime(self, EndTime):
        self.add_query_param('EndTime', EndTime)

    def get_StartTime(self):
        return self.get_query_params().get('StartTime')

    def set_StartTime(self, StartTime):
        self.add_query_param('StartTime', StartTime)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)
|
[
"haowei.yao@alibaba-inc.com"
] |
haowei.yao@alibaba-inc.com
|
fa4f6aaa05c0415156383cf731bbdd6dd0514bdc
|
56403ba98efb1e961e318b2a24e9fe7ee8ca77db
|
/Lehigh_race_Ver_3_1/scripts/Lehigh_Ver_3.py
|
a90a3f89b146381c1f374d8a6ed1705aa3402ea2
|
[] |
no_license
|
Xiyuan-Zhu/Lehigh_race_Ver_3_0_0
|
013e2d47c6d7c59897f02964c5d840f6b88f5084
|
626d7344ff7d5fb7d8b99de0682e698fc0d94d0d
|
refs/heads/master
| 2022-11-20T08:16:23.867347
| 2020-07-10T01:47:46
| 2020-07-10T01:47:46
| 278,510,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,199
|
py
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import math
import numpy as np
#ROS Imports
import rospy
from sensor_msgs.msg import Image, LaserScan
from ackermann_msgs.msg import AckermannDriveStamped, AckermannDrive
class reactive_follow_gap:
    """ROS node implementing a reactive follow-the-gap driver.

    Subscribes to LaserScan on /scan and publishes AckermannDriveStamped on
    /drive. NOTE(review): the bare `/` divisions used as list indices
    (`len(ranges)/4`) indicate this targets Python 2; under Python 3 they
    yield floats and would need `//` — confirm the intended interpreter.
    """

    def __init__(self):
        # Topics & Subscriptions, Publishers
        lidarscan_topic = '/scan'
        drive_topic = '/drive'
        self.lidar_sub = rospy.Subscriber(lidarscan_topic, LaserScan, self.lidar_callback)
        self.drive_pub = rospy.Publisher(drive_topic, AckermannDriveStamped, queue_size=10)
        #self.show_pub = rospy.Publisher(show_topic, LaserScan, queue_size=10)

    def preprocess_lidar(self, ranges):
        """ Preprocess the LiDAR scan array. Expert implementation includes:
        1.Setting each value to the mean over some window
        2.Rejecting high values (eg. > 3m)
        (Currently it only copies the readings into a mutable list.)
        """
        proc_ranges = [x for x in ranges]
        return proc_ranges

    def find_max_gap(self, free_space_ranges):
        """ Return the start index & end index of the max gap in free_space_ranges
        (not implemented; always returns None and is never called).
        """
        return None

    def find_best_point(self, start_i, end_i, ranges):
        """Start_i & end_i are start and end indicies of max-gap range, respectively
        Return index of best point in ranges
        Naive: Choose the furthest point within ranges and go there
        NOTE(review): start_i/end_i are ignored; only the middle half of the
        scan (the forward arc) is searched.
        """
        depth=0
        best_i=0
        for i in range(len(ranges)/4,len(ranges)*3/4):
            if ranges[i]>depth:
                depth=ranges[i]
                best_i=i
        #print(best_i)
        # Emergency case: anything closer than 0.3 m -> aim straight ahead.
        if min(ranges)<0.3:
            best_i=int(len(ranges)/2)
        return best_i

    def lidar_callback(self, data):
        """ Process each LiDAR scan as per the Follow Gap algorithm & publish an AckermannDriveStamped Message
        """
        ranges = data.ranges
        proc_ranges = self.preprocess_lidar(ranges)
        # Find disparities and eliminate all points inside the 'bubble'
        # (set them to the closer value), scanning the forward arc only.
        dis_threshold=1
        bubble_size=0.5
        i = len(proc_ranges) / 4 # right
        while i < len(proc_ranges) * 3/4:
            if proc_ranges[i+1]-proc_ranges[i] > dis_threshold:
                # Rising disparity: extend the nearer reading forward over
                # the bubble so the car keeps clearance around the edge.
                size=int(bubble_size/(proc_ranges[i]*data.angle_increment))
                for j in range(i+1,i+size):
                    proc_ranges[j]=proc_ranges[i]
                i=i+size
            elif proc_ranges[i]-proc_ranges[i+1]>dis_threshold:
                # Falling disparity: extend the nearer reading backward.
                size=int(bubble_size/(proc_ranges[i+1]*data.angle_increment))
                for j in range(i-size,i+1):
                    proc_ranges[j]=proc_ranges[i+1]
            i=i+1
            # NOTE(review): original indentation of this `i=i+1` was lost in
            # extraction; it is placed at loop level so the scan always
            # advances (in any other placement the smooth-range case never
            # increments i and the loop hangs) — confirm against upstream.
        # Find the best point in the gap
        best_i = self.find_best_point(0, 0, proc_ranges)
        # Publish Drive message; steering is the best point's bearing, damped by 0.6.
        drive_msg = AckermannDriveStamped()
        drive_msg.drive.steering_angle = (best_i*data.angle_increment+data.angle_min)*0.6
        drive_msg.drive.speed = 3
        self.drive_pub.publish(drive_msg)
def main(args):
    """Initialise the FollowGap ROS node and block until shutdown."""
    rospy.init_node("FollowGap_node", anonymous=True)
    follower = reactive_follow_gap()
    rospy.sleep(0.1)
    rospy.spin()


if __name__ == '__main__':
    main(sys.argv)
|
[
"noreply@github.com"
] |
Xiyuan-Zhu.noreply@github.com
|
7d5ff1926da52012ced9e9ebf96936e820b7484e
|
a41d72e8497fbf89e7f663814dde87f4d4138256
|
/lab5/database.py
|
59c97a76ded865bee8ee58ad4fc0ae8df9ec836b
|
[] |
no_license
|
pteszka/concurrent-programming
|
95b7421654c9dd409bfcb5d41a6bfecdf01568df
|
e53f05d8872748c0718f48eb573256543ac6309f
|
refs/heads/main
| 2023-04-24T15:07:18.546816
| 2021-05-05T14:35:13
| 2021-05-05T14:35:13
| 347,689,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
# In-memory "database": id -> common Polish surname, ids 1..20.
_SURNAMES = (
    'Nowak', 'Kowalski', 'Wiśniewski', 'Dąbrowski', 'Lewandowski',
    'Wójcik', 'Kamiński', 'Kowalczyk', 'Zieliński', 'Szymański',
    'Bąk', 'Chmielewski', 'Włodarczyk', 'Borkowski', 'Czarnecki',
    'Sawicki', 'Sokołowski', 'Urbański', 'Kubiak', 'Maciejewski',
)
db = {key: name for key, name in enumerate(_SURNAMES, start=1)}
|
[
"pteszka98@gmail.com"
] |
pteszka98@gmail.com
|
42d2ccd0a08c1520cae02783637eee771aedda4f
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_196/ch31_2020_03_14_15_42_06_957078.py
|
7229a92343174b1d0b472e5e5af883e664d7d8d9
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
def eh_primo(a):
    """Return True when *a* is a prime number, else False.

    BUG FIX: the original did not parse (a statement sat between a
    ``return`` and an ``elif``), referenced an undefined ``x`` and
    rejected every odd number; rewritten as a standard trial-division
    primality test. Numbers below 2 are not prime.
    """
    if a < 2:
        return False
    if a == 2:
        return True
    if a % 2 == 0:
        return False
    d = 3
    # Only odd divisors up to sqrt(a) need checking.
    while d * d <= a:
        if a % d == 0:
            return False
        d += 2
    return True
|
[
"you@example.com"
] |
you@example.com
|
2edda813a68b94ffdf1c3d6201c1cff73d0ddad3
|
aaad70e69d37f92c160c07e4ca03de80becf2c51
|
/filesystem/usr/lib/python3.6/asyncio/base_events.py
|
32b4f0adcd1093409fe44dc22121f8affc046568
|
[] |
no_license
|
OSWatcher/ubuntu-server
|
9b4dcad9ced1bff52ec9cdb4f96d4bdba0ad3bb9
|
17cb333124c8d48cf47bb9cec1b4e1305626b17a
|
refs/heads/master
| 2023-02-10T18:39:43.682708
| 2020-12-26T01:02:54
| 2020-12-26T01:02:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
{
"MIME": "text/plain",
"inode_type": "REG",
"magic_type": "Python script, ASCII text executable",
"mode": "-rw-r--r--",
"sha1": "23f1c44c157099ef5d66c87ba91eb7128afa4867"
}
|
[
"mathieu.tarral@protonmail.com"
] |
mathieu.tarral@protonmail.com
|
1b791df01ad4e5c52889dcf50e1aa2acbc51742e
|
88b860c998d45097b5604e1d8a45ad9ec2040a43
|
/v_charge/wsgi.py
|
1aa8c205b9fb1656c4cb25c03ecd6d468a630a8e
|
[
"MIT"
] |
permissive
|
xujpxm/v_charge
|
ac13fa54f41f04e05aa0b97d9b1c7f2050c9fa8f
|
abb1237eeca066cec435680e38ab6878a8d4ac27
|
refs/heads/master
| 2021-03-27T20:46:22.147595
| 2018-01-29T02:53:14
| 2018-01-29T02:53:14
| 100,686,236
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
"""
WSGI config for v_charge project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "v_charge.settings")
application = get_wsgi_application()
|
[
"329771774@qq.com"
] |
329771774@qq.com
|
ff41a3a285dac60d7350452a24276e372ba98879
|
2ebe6ccf7e731c90dcb9da8adbf22a9053f6addd
|
/sumithack/apps.py
|
5acf235a7e10b009fc73757aead4fcb2c3d829d2
|
[] |
no_license
|
Anurodhyadav/sumit
|
dda251af2d4035e262c5cba3c919190d40c45cd5
|
954499d4960eda012d7dfac17b0f3f590eb5cb01
|
refs/heads/master
| 2022-11-07T05:46:53.938797
| 2020-06-20T03:58:47
| 2020-06-20T03:58:47
| 273,634,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 93
|
py
|
from django.apps import AppConfig
class SumithackConfig(AppConfig):
    """Django application configuration for the ``sumithack`` app."""
    # Dotted module path Django uses to locate and register the app.
    name = 'sumithack'
|
[
"anurodhyadav072@gmail.com"
] |
anurodhyadav072@gmail.com
|
484e4be2de9b3edd790d88d2fe8f05a5f0df47c3
|
bbdfc358fc8c7d9bc5f2837d750dc2ff82ca4e69
|
/training/00_split_folds.py
|
e1af961b4cf4074b3994bd26acc48c06062808bd
|
[] |
no_license
|
MikeMpapa/CityNet
|
70c03b48eb9b0e8837a1074b00a5777a1ee69def
|
d1178f1b008ac2cb94ce9e0b0c4e87bc8c914707
|
refs/heads/master
| 2023-06-14T06:39:27.752096
| 2021-07-11T09:27:47
| 2021-07-11T09:27:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,067
|
py
|
# Python 2 script: split spectrogram files into 6 cross-validation folds
# such that all files from one recording site stay in the same fold.
import os
from collections import Counter
import random
import yaml

# Root data directory, read from the project config.
base = yaml.load(open('../CONFIG.yaml'))['base_dir']
spec_pkl_dir = base + 'extracted/specs/mel/'

# One pickle per recording; the site id is the filename prefix before '_'.
files = [xx for xx in os.listdir(spec_pkl_dir) if xx.endswith('.pkl')]
file_sites = [xx.split('_')[0] for xx in files]
print len(files)
print len(set(file_sites))

site_counts = Counter(file_sites)
print site_counts

num_folds = 6

# Try random seeds until every fold of the site-level shuffle contains at
# least 6 files (sites are never split across folds).
for seed in range(1000):
    print "Seed is ", seed
    sites = sorted(list(set(file_sites)))
    random.seed(seed)
    random.shuffle(sites)
    # Python 2 integer division: fold boundaries are whole site counts.
    fold_size = len(sites) / num_folds
    file_fold_size = len(files) / num_folds
    # manually building the 6 folds (last fold takes the remainder)
    folds = []
    folds.append(sites[:fold_size])
    folds.append(sites[fold_size:2*fold_size])
    folds.append(sites[2*fold_size:3*fold_size])
    folds.append(sites[3*fold_size:4*fold_size])
    folds.append(sites[4*fold_size:5*fold_size])
    folds.append(sites[5*fold_size:])

    wav_folds = []
    passed = True
    for fold in folds:
        # Wav names for this fold, with any '-sceneRect.csv' suffix removed.
        wav_fold_list = [xx.split('-sceneRect.csv')[0]
                         for xx in files
                         if xx.split('_')[0] in fold]
        wav_folds.append(wav_fold_list)
        num_files = sum([site_counts[xx] for xx in fold])
        print num_files
        if num_files < 6:
            passed = False
    if passed: break

# saving the folds to disk
savedir = base + 'splits/'
print "Code commented out to prevent accidently overwriting"
# for idx, (fold, wav_fold) in enumerate(zip(folds, wav_folds)):
#     savepath = savedir + 'fold_sites_%d.yaml' % idx
#     yaml.dump(fold, open(savepath, 'w'), default_flow_style=False)
#     savepath = savedir + 'folds_%d.yaml' % idx
#     yaml.dump(wav_fold, open(savepath, 'w'), default_flow_style=False)
print "Code commented out to prevent accidently overwriting"
# savepath = savedir + 'fold_sites_6.yaml'
# yaml.dump(folds, open(savepath, 'w'), default_flow_style=False)
#
# savepath = savedir + 'folds_6.yaml'
# yaml.dump(wav_folds, open(savepath, 'w'), default_flow_style=False)
|
[
"mdfirman@gmail.com"
] |
mdfirman@gmail.com
|
84722925665ed9d37e7a1d4c38bcecb7563b2890
|
0747279a95b16b477084221cc52b8a5d8d7d96cc
|
/src/main-real.py
|
35716516b58f59660f0aaf2fe0766a7931ccc59b
|
[] |
no_license
|
SijingTu/ideological-embeddings
|
98840015874cd0f02b4624a30dca2df9271dc6f2
|
1d301cd63d7db733c09b5cf2ad4d0f25844f71a8
|
refs/heads/master
| 2023-07-29T05:03:01.630335
| 2021-09-08T14:47:38
| 2021-09-08T14:47:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,094
|
py
|
import barberatfmodel
import baselines
import evals
import tfmodel
import reader
import pandas as pd
import sys
from timeit import default_timer as timer
# Dataset identifiers accepted by main().
TWITTER, REDDIT = 'twitter', 'reddit'
# When False, the plain logistic-regression baselines are skipped for the
# Twitter dataset (they are always run for Reddit).
EXECUTE_FULL_LOGREG_FOR_TWITTER = False
def main(dataset):
    """Run the k-fold evaluation on *dataset* ('twitter' or 'reddit').

    For every fold: a grid of node2vec logistic-regression baselines,
    our model, Barbera's model and (optionally) plain logistic
    regression are trained/evaluated; per-fold ROC plots and an
    aggregate CSV are written under ../data/results/<dataset>/.
    """
    print("Dataset:", dataset)
    assert dataset in {TWITTER, REDDIT}
    if dataset == REDDIT:
        N, K, fold_iterator = reader.generate_reddit_kfolds()
    else:
        N, K, fold_iterator = reader.generate_twitter_kfolds()
    results = pd.DataFrame()
    # Per-algorithm predictions / wall-clock seconds for the current fold.
    label2preds = {}
    label2times = {}
    for fold_index, (train_set, test_set) in enumerate(fold_iterator):
        # node2vec grid: embedding size x return parameter p x in-out
        # parameter q x with/without topic features.
        for dim in ((2*K+1), 128):
            for p in (.5, 1., 2.):
                for q in (.5, 1., 2.):
                    for use_topics in (False, True):
                        threshold = 5 if dataset == TWITTER else 0
                        key = (f"node2vec [dim={dim}, p={p}, q={q}, t={threshold}"
                               + (" + topics" if use_topics else ""))
                        start = timer()
                        pred = baselines.logistic_regression(N, train_set, test_set,
                            use_topics=use_topics,
                            embeddings=baselines.compute_node2vec(
                                N, train_set, dimensions=dim, p=p, q=q, threshold=threshold)
                        )
                        label2times[key] = timer() - start
                        label2preds[key] = pred
                        print(f"Fold {fold_index}, {key}: Done.")
        #### OUR MODEL #######
        model = tfmodel.build_model(N, K)
        start = timer()
        estimated_phi, estimated_theta, _loss_values = tfmodel.optimize(model, train_set,
            decay_epochs=3,
            num_epochs=5)
        label2times['Our model'] = timer() - start
        predictions = tfmodel.test_predictions(model, estimated_phi, estimated_theta, test_set)
        label2preds['Our model'] = predictions
        #### BARBERA'S MODEL #######
        bmodel = barberatfmodel.build_model(N, 1)
        start = timer()
        estimated_phi_b, estimated_theta_b, estimated_alpha_b, estimated_beta_b, _loss_values_b = barberatfmodel.optimize(bmodel, train_set,
            decay_epochs=3,
            num_epochs=5)
        label2times['Barbera model'] = timer() - start
        predictions_barbera = barberatfmodel.test_predictions(bmodel, estimated_phi_b,
            estimated_theta_b, estimated_alpha_b, estimated_beta_b, test_set)
        label2preds['Barbera model'] = predictions_barbera
        # Plain logistic-regression baselines (skipped for Twitter unless
        # EXECUTE_FULL_LOGREG_FOR_TWITTER is set).
        if EXECUTE_FULL_LOGREG_FOR_TWITTER or (dataset != TWITTER):
            start = timer()
            logreg_pred = baselines.logistic_regression(N, train_set, test_set, use_topics=False)
            label2preds['Original information'] = logreg_pred
            label2times['Original information'] = timer() - start
            start = timer()
            logreg_t_pred = baselines.logistic_regression(N, train_set, test_set, use_topics=True)
            label2preds['Original inf. + Topics'] = logreg_t_pred
            label2times['Original inf. + Topics'] = timer() - start
        fold_results, _figure_path = evals.plot_curve(test_set,
            label2preds=label2preds,
            label2times=label2times,
            output_basepath=f"../data/results/{dataset}/roc-{fold_index}-fold.png")
        print(f"Results for fold {fold_index} =======\n", fold_results.to_string(index=False))
        fold_results["Fold"] = fold_index
        results = pd.concat([results, fold_results])
    results.to_csv(f"../data/results/{dataset}/results.csv", index=False)
    print(results.groupby('Algorithm').mean())


if __name__ == '__main__':
    main(sys.argv[1] if sys.argv[1:] else REDDIT)
|
[
"corrado.monti@isi.it"
] |
corrado.monti@isi.it
|
00015c61a48894a48a04743807bb33ea6344e7a9
|
3778ad04248d977a1833dc5115ec6200b47bae8e
|
/post/ru_urls.py
|
10d6e30e275585255cad0457f1257efc33841474
|
[] |
no_license
|
Salamonk653/assosiation
|
7b31fd4b599b573924b564083ac1e2ba9b8330bd
|
dc0684aa638bfefa2db23d54699e2636bce79b9b
|
refs/heads/master
| 2020-08-07T10:32:11.019476
| 2019-11-01T09:26:01
| 2019-11-01T09:26:01
| 213,412,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,249
|
py
|
# -*- coding: utf-8 -*-
# Create your models here.
from django.conf.urls import url
from .ru_views import *
# Russian-language URL routes; every route name carries a 'ru_' prefix so
# templates can reverse the locale-specific view.
urlpatterns = [
    url(r'^$', News.as_view(), name='ru_news_list'),
    url(r'^contact/form/$', contact_form, name='ru_contact_form'),
    url(r'^category/news/$', NewsList.as_view(), name='ru_category'),
    url(r'^category/anonsy/$', AnonsyList.as_view(), name='ru_anonsy'),
    url(r'^category/projects/$', Projects.as_view(), name='ru_projects'),
    url(r'^anonsy/(?P<slug>[\w-]+)/$', AnonsyDetail.as_view(), name='ru_anonsy_detail'),
    url(r'^category/contacts/$', Contacts.as_view(), name='ru_contacts'),
    url(r'^category/katalog/$', Katalog.as_view(), name='ru_katalog'),
    url(r'^category/katalog/(?P<slug>[\w-]+)/$', KatalogDetail.as_view(), name='ru_katalog_detail'),
    url(r'^category/chlenstvo/$', Chlenstvo.as_view(), name='ru_chlenstvo'),
    url(r'^category/onas/$', Onas.as_view(), name='ru_onas'),
    # NOTE: this catch-all category pattern must stay after the literal
    # category/... routes above, or it would shadow them.
    url(r'^category/(?P<slug>[\w-]+)/$', ProjectList.as_view(), name='ru_category_detail'),
    url(r'^article/(?P<slug>[\w-]+)/$', ArticleDetail.as_view(), name='ru_article_detail'),
    url(r'^search/$', Search.as_view(), name='ru_search'),
    url(r'^podrobnee/$', Podrobnee.as_view(), name='ru_podrobnee'),
]
|
[
"salamonk653@gmail.com"
] |
salamonk653@gmail.com
|
b432a24aed84a1483e21a42106c5112670155900
|
32743313138f0ad02b93694b25e480c4fe9c10dc
|
/Prac_01/loops.py
|
5637622e1475d61af879a7a893072a0305af13d4
|
[] |
no_license
|
yoon1812/CP1404_practicals
|
2b855fbc4dd755df4dd8805f8497df77a99a68e3
|
872a3b9a23a33f3eef7f3f164b5d40af77682203
|
refs/heads/master
| 2023-02-17T09:38:15.222947
| 2021-01-13T09:11:49
| 2021-01-13T09:11:49
| 321,995,326
| 0
| 0
| null | 2020-12-17T06:45:19
| 2020-12-16T13:44:43
|
Python
|
UTF-8
|
Python
| false
| false
| 406
|
py
|
# Odd numbers 1..19 on one line.
for i in range(1, 21, 2):
    print(i, end=' ')
print()
# Multiples of ten 0..100 on one line.
for i in range(0, 110, 10):
    print(i, end=' ')
print()
# Countdown 20..1 on one line.
for i in range(20, 0, -1):
    print(i, end=' ')
print()

# A single row of n stars.
number = int(input('Number of stars: '))
for i in range(1, number + 1):
    print("*", end='')

# Star triangle: row i holds i stars.
# BUG FIX: input() returns a str; the original omitted int() here, so
# 'number + 1' raised TypeError before the loops ran.
number = int(input('Number of stars: '))
for i in range(1, number + 1):
    for j in range(1, i + 1):
        print("*", end='')
    print()
|
[
"yoon1812"
] |
yoon1812
|
9ee11bd0601d37a6f4bffe5cbdb8fa671bdafcf2
|
53e0e35f58dc169d5b4e293ff073f46948fbe278
|
/pytorch_projects/fine-grained-image-recognition/Bilinear_pooling/bilinear_attention_pooling.py
|
935f28870467d302135460d0ebde022283d642a3
|
[] |
no_license
|
Hoodythree/DeepLearningCode
|
25097227d512e80b233352ce2cc58b023591b165
|
3e2d30198956c9f01c5fc8f9df4667eef43c51e9
|
refs/heads/master
| 2022-08-30T09:08:09.760487
| 2020-05-17T13:35:05
| 2020-05-17T13:35:05
| 262,494,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,495
|
py
|
import torch
import torch.nn as nn
### Bilinear Attention Pooling
### Bilinear Attention Pooling
class BAP(nn.Module):
    """Bilinear Attention Pooling.

    Pools feature maps against attention maps over the spatial grid,
    then applies signed square-root and L2 normalisation; also returns
    a copy scaled by 100.
    """

    def __init__(self, **kwargs):
        super(BAP, self).__init__()

    def forward(self, feature_maps, attention_maps):
        batch = feature_maps.size(0)
        height = attention_maps.size(2)
        width = attention_maps.size(3)
        # For every (attention channel m, feature channel n) pair, sum the
        # element-wise product over the spatial grid -> (batch, M, N).
        bilinear = torch.einsum('imjk,injk->imn', (attention_maps, feature_maps))
        bilinear = bilinear / float(height * width)
        # Signed square-root compresses magnitudes while keeping signs;
        # 1e-12 guards the gradient of sqrt at zero.
        bilinear = torch.sign(bilinear) * torch.sqrt(torch.abs(bilinear) + 1e-12)
        raw_features = torch.nn.functional.normalize(
            bilinear.view(batch, -1), dim=-1)
        pooling_features = raw_features * 100
        return raw_features, pooling_features
class ResizeCat(nn.Module):
    """Resize two attention maps to the spatial size of a reference map
    and concatenate all three along the channel dimension."""

    def __init__(self, **kwargs):
        super(ResizeCat, self).__init__()

    def forward(self, at1, at3, at5):
        _, _, height, width = at1.size()
        # Bring at3/at5 onto at1's grid before concatenation.
        scaled3 = nn.functional.interpolate(at3, (height, width))
        scaled5 = nn.functional.interpolate(at5, (height, width))
        return torch.cat((at1, scaled3, scaled5), dim=1)
if __name__ == '__main__':
    # Smoke test for ResizeCat (the BAP check is left commented out).
    # a = BAP()
    a = ResizeCat()
    # NOTE(review): torch.Tensor(shape) allocates uninitialised memory, so
    # the values are arbitrary — acceptable for a shape-only check.
    a1 = torch.Tensor(4,3,14,14)
    a3 = torch.Tensor(4,5,12,12)
    a5 = torch.Tensor(4,9,9,9)
    ret = a(a1,a3,a5)
    print(ret.size())
|
[
"luoqiuhongthree@gmail.com"
] |
luoqiuhongthree@gmail.com
|
832cecc66221149f457cd8750d9d2a8e12f7279a
|
825b2f981ef0d10376b1c0ab868b785f7db866d5
|
/recognise_district/recog.py
|
4ff1c2c3419e84a74b6e639a8eea6cfd087bc36e
|
[
"Apache-2.0"
] |
permissive
|
waterblas/ROIBase-lite
|
fb5c7c8e24c021640a8dc9a97ca57d194d31c439
|
935c201c40d82310f4af8a62faf03302cf5d1a5b
|
refs/heads/master
| 2022-10-11T04:35:36.845742
| 2020-01-16T03:43:03
| 2020-01-16T03:43:03
| 234,224,526
| 2
| 0
|
Apache-2.0
| 2022-09-23T22:35:01
| 2020-01-16T03:12:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,484
|
py
|
import os
import logging
from . import detect
try:
    import pkg_resources
    # Resolve packaged data files relative to the installed distribution.
    get_module_res = lambda *res: pkg_resources.resource_stream(__name__,
                                                                os.path.join(*res))
except ImportError:
    # Fallback when setuptools is absent: resolve relative to this source
    # file on disk, opened in binary mode to match resource_stream.
    get_module_res = lambda *res: open(os.path.normpath(os.path.join(
        os.getcwd(), os.path.dirname(__file__), *res)), 'rb')

# Packaged region database — presumably a protobuf file; confirm format.
_region_data = '/data/region.pb2'

# Tunable detection parameters; see init() docstring for their meaning.
default_params = {'penalty_factor': 500,
                  'freq_threshold': 2,
                  'limit': 1,
                  'alpha': 1.4}
def init(log_path=None, debug=False, **kwargs):
    """Initialise logging and build a district detector.

    Args:
        log_path: optional file path; when given, logs are also written there.
        debug: True -> DEBUG log level, False -> INFO.
        **kwargs: overrides for ``default_params``:
            penalty_factor:
                +: raises recall of sparse locations in long texts
                -: lowers it
            freq_threshold:
                for articles mentioning several locations,
                +: recall drops, precision rises; -: the opposite
            limit:
                +: raises the threshold at which judging starts; -: the opposite
            alpha:
                filters fuzzy results; +: demands a higher frequency

    Returns:
        A configured ``detect.DetectDistrict`` instance.
    """
    # log setting
    logger = logging.getLogger(__name__)
    # NOTE(review): handlers are appended on every call, so repeated init()
    # calls duplicate log output — confirm init() is only called once.
    if log_path:
        file_handler = logging.FileHandler(log_path)
        logger.addHandler(file_handler)
    console_handler = logging.StreamHandler()
    logger.addHandler(console_handler)
    level = logging.DEBUG if debug else logging.INFO
    logger.setLevel(level=level)
    # NOTE(review): this mutates the module-level default_params dict, so
    # overrides persist across subsequent calls.
    default_params.update(kwargs)
    detectD = detect.DetectDistrict(default_params, _region_data)
    return detectD
|
[
"dylanliang@tencent.com"
] |
dylanliang@tencent.com
|
e3b3126e6676609e20aa10a8b485b3a059b0fd77
|
8787b2fbb5017b61dcf6075a5261071b403847bf
|
/Programmers/N으로 표현.py
|
21d160641aee1be033211795680b2a0e5c76564b
|
[] |
no_license
|
khw5123/Algorithm
|
a6fe0009e33289813959553c2366d77c93d7b4b9
|
323a829f17a10276ab6f1aec719c496a3e76b974
|
refs/heads/master
| 2023-01-02T00:12:21.848924
| 2020-10-23T06:37:41
| 2020-10-23T06:37:41
| 282,162,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,202
|
py
|
def calc(s, N, number):
    """Evaluate the token character list *s* strictly left to right.

    *s* is a list of single characters: runs of the digit N form operands
    and '+', '-', '*', '/' are operators. Unlike eval() there is no
    operator precedence, and '/' floors (//=). *number* is unused here.
    """
    # Parse the leading run of N-digits as the first operand.
    result = 0
    start = 0
    tmp = ''
    for i in range(len(s)):
        if s[i] != str(N):
            start = i
            result = int(tmp)
            break
        else:
            tmp += s[i]
    tmp = ''
    operator = []  # at most one pending operator at a time
    for i in range(start, len(s)):
        if s[i] == str(N):
            tmp += s[i]
            # Final token: fold the last operand into the result.
            if i == len(s) - 1 and len(operator) != 0:
                if operator[0] == '+':
                    result += int(tmp)
                elif operator[0] == '-':
                    result -= int(tmp)
                elif operator[0] == '*':
                    result *= int(tmp)
                elif operator[0] == '/':
                    result //= int(tmp)
        else:
            # Hit an operator: first apply the pending one (if any) to the
            # operand accumulated in tmp, then queue the new operator.
            if len(operator) == 1:
                if operator[0] == '+':
                    result += int(tmp)
                elif operator[0] == '-':
                    result -= int(tmp)
                elif operator[0] == '*':
                    result *= int(tmp)
                elif operator[0] == '/':
                    result //= int(tmp)
                tmp = ''
                operator.pop()
            operator.append(s[i])
    return result
def solve(s, N, number):
    """Depth-first search over expressions built from runs of digit N and
    '+', '-', '*', '/'; returns the minimum count of N-digits needed to
    reach *number* (9 means "not reachable with fewer than 9 digits").

    *s* is the current token character list and is mutated in place
    (append/pop backtracking).
    """
    answer = 9
    if s.count(str(N)) < 9:
        if s[-1] == str(N):
            # Expression currently ends in a digit, so it is evaluable.
            # Both evaluations are accepted: eval() (precedence, true
            # division) and calc() (left-to-right, floor division).
            if eval(''.join(s)) == number or calc(s, N, number) == number:
                answer = min(answer, s.count(str(N)))
            # Extend the current digit run (e.g. '5' -> '55').
            s.append(str(N))
            answer = min(answer, solve(s, N, number))
            s.pop()
        if s[-1] != '+' and s[-1] != '-' and s[-1] != '*' and s[-1] != '/':
            # Branch on each operator after a digit run.
            s.append('+')
            answer = min(answer, solve(s, N, number))
            s.pop()
            s.append('-')
            answer = min(answer, solve(s, N, number))
            s.pop()
            s.append('*')
            answer = min(answer, solve(s, N, number))
            s.pop()
            s.append('/')
            answer = min(answer, solve(s, N, number))
            s.pop()
        return answer
    return answer
def solution(N, number):
    """Minimum number of digit-N uses to express *number*; -1 if it
    cannot be done with at most 8 digits."""
    best = solve([str(N)], N, number)
    return best if best != 9 else -1
|
[
"5123khw@hknu.ac.kr"
] |
5123khw@hknu.ac.kr
|
122a05dc3115f6ed66c2747d3dc1e78c44cd4955
|
52e0e1ef7675d8bac51899f23b2722e7e7f58992
|
/core/data/base_collector.py
|
972479bf987185887d7e79d61ee4b166286f1b46
|
[
"Apache-2.0"
] |
permissive
|
knowmefly/DI-drive
|
2c8963a04d00aa8b3c3354630b6df9e3e6a6770e
|
ade3c9dadca29530f20ab49b526ba32818ea804b
|
refs/heads/main
| 2023-07-08T14:40:39.625522
| 2021-07-21T15:54:48
| 2021-07-21T15:54:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,420
|
py
|
import copy
from abc import abstractmethod
from typing import Any, Dict
from easydict import EasyDict
from ding.utils import EasyTimer
class BaseCollector(object):
    """Abstract base class for data collectors.

    Holds a merged configuration, an environment and a policy, and
    declares the reset/close/collect interface for subclasses.
    """

    # Per-subclass default configuration, merged by default_config().
    config = dict()

    def __init__(
            self,
            cfg: Dict,
            env: Any = None,
            policy: Any = None,
    ) -> None:
        # A cfg without 'cfg_type' is treated as a partial override of the
        # subclass defaults; a cfg that carries 'cfg_type' is used verbatim.
        if 'cfg_type' not in cfg:
            self._cfg = self.__class__.default_config()
            self._cfg.update(cfg)
        else:
            self._cfg = cfg
        self._end_flag = False
        self._timer = EasyTimer()
        # env/policy are optional at construction time; the property
        # setters below allow late injection.
        if env is not None:
            self.env = env
        if policy is not None:
            self.policy = policy

    @property
    def env(self) -> Any:
        """The environment this collector interacts with."""
        return self._env

    @env.setter
    def env(self, _env: Any) -> None:
        self._env = _env

    @property
    def policy(self) -> Any:
        """The policy used to act in the environment."""
        return self._policy

    @policy.setter
    def policy(self, _policy: Any) -> None:
        self._policy = _policy

    @abstractmethod
    def reset(self) -> Any:
        """Reset collector state; implemented by subclasses."""
        raise NotImplementedError

    @abstractmethod
    def close(self) -> Any:
        """Release resources; implemented by subclasses."""
        raise NotImplementedError

    @abstractmethod
    def collect(self) -> Any:
        """Collect data; implemented by subclasses."""
        raise NotImplementedError

    @classmethod
    def default_config(cls: type) -> EasyDict:
        """Return a deep copy of the class-level config, tagged with the
        subclass name as 'cfg_type'."""
        cfg = EasyDict(cls.config)
        cfg.cfg_type = cls.__name__ + 'Config'
        return copy.deepcopy(cfg)
|
[
"sissure@qq.com"
] |
sissure@qq.com
|
96005790378d6d9083513a1e624cac02aaf72eec
|
f4cdb0ac8b17d3605da61adad468d609b6558fe5
|
/PAM30.py
|
44072aec3910838b3f52cc33df38d171a5305aab
|
[] |
no_license
|
ackdav/PAMIE
|
786e6a0b963b00a71ca30444fa54e66b0e6eefac
|
81e74df135a67770bbb207afc97b1ad4c9f3e970
|
refs/heads/master
| 2022-11-22T04:51:06.962588
| 2019-01-20T10:48:54
| 2019-01-20T10:48:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 81,256
|
py
|
"""
PAMIE Build 3.0a
Based on cPAMIE and PAM.py by RLM
Revised: March 03, 2009
Developers: Robert L. Marchetti
Description: This python class file allow you to write scripts to Automate the Internet Explorer Browser Client.
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Special Thanks to: All the Pamie Users and Developers for their time and effort, Steve M., Drunk Bum, Jeff H.,
Dave K., Henry W., Tom C., Scott W.,Margie M. and all others for there support and contributions.
See !whatsnew.txt for modification history.
"""
import sys
import os
import win32com.client
import win32gui
import time
import re
import random
import string
import datetime
import traceback
class PAMIE:
"""
cPAMIE is an automation object based on the work of PAMIE by RLM
http://pamie.sourceforge.net/
"""
__version__ = "3.0"
    def __init__(self, url=None, time_out=3000):
        """ The class instantiation code. When the object is instantiated you can
            pass a starting URL. If no URL is passed then about:blank, a blank
            page, is brought up.

            parameters:
                [url] - url to navigate to initially
                [timeOut] - maximum polling ticks for wait loops
                            (NOTE(review): docstring said 100 ms increments,
                            but _wait() sleeps 50 ms per tick — confirm)
            returns:
                Nothing
        """
        self.showDebugging = True  # Show debug print lines?
        self.colorHighlight = "#F6F7AD"  # Set to None to turn off highlighting
        self.frameName = None  # The current frame name or index. Nested frames are
                               # supported in the format frame1.frame2.frame3
        self.formName = None  # The current form name or index
        self.busyTuner = 1  # Number of consecutive checks to verify document is no longer busy.
        # Late-bound COM dispatch: IE properties/methods resolve dynamically.
        self._ie = win32com.client.dynamic.Dispatch('InternetExplorer.Application')
        if url:
            self._ie.Navigate(url)
        else:
            self._ie.Navigate('about:blank')
        self._timeOut = time_out
        # Make the browser window and its chrome visible for debugging.
        self._ie.Visible = 1
        self._ie.MenuBar = 1
        self._ie.ToolBar = 1
        self._ie.AddressBar = 1
        self.timer = datetime.datetime.now()
def _docGetReadyState(self, doc):
""" Gets the readyState of a document. This is a seperate function so
the "Access Denied" error that IE throws up every once in a while can
be caught and ignored, without breaking the timing in the wait() functions.
parameters:
doc - The document
returns:
The readyState.
"""
try:
return doc.readyState
except:
return ""
def _frameWait(self, frame=None):
""" Waits for a page to be fully loaded. A completely soundproof method has yet to be found to accomplish
this, but the function works in the majority of instances. The function waits for both the doc busy attribute
to be False and the doc readyState to be 'complete'. It will continue to wait until the maximim timeOut
value has been reached. In addition, the busyTuner can be adjusted to force the function to verify the
specified number of consecutive 'not busy and completed' checks before continuing.
parameters:
[frame] - A frame element.
returns:
True if the wait was successful, else False
"""
readyCount = 0
timeLeft = self._timeOut
try:
if frame:
myFrame = frame
else:
myFrame = self.getFrame(self.frameName)
while readyCount < self.busyTuner and timeLeft > 0:
try:
doc = myFrame.document
except:
continue # if the document never gets itself together this will timeout
if self._ie.Busy == False and self._docGetReadyState(doc) == 'complete':
readyCount += 1
else:
readyCount = 0
time.sleep(0.05)
timeLeft -= 1
except:
(ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
print(sys.exc_info())
traceback.print_exc(ErrorTB)
return False
else:
return True
def _wait(self):
""" Waits for a page to be fully loaded. A completely soundproof method has yet to be found to accomplish
this, but the function works in the majority of instances. The function waits for both the doc busy attribute
to be False and the doc readyState to be 'complete'. It will continue to wait until the maximim timeOut
value has been reached. In addition, the busyTuner can be adjusted to force the function to verify the
specified number of consecutive 'not busy and completed' checks before continuing.
parameters:
None
returns:
True if the wait was successful, else False
"""
readyCount = 0
timeLeft = self._timeOut
try:
while readyCount < self.busyTuner and timeLeft > 0:
try:
doc = self._ie.Document
except:
continue # if the document never gets itself together this will timeout
if self._ie.Busy == False and self._docGetReadyState(doc) == 'complete':
readyCount += 1
else:
readyCount = 0
time.sleep(0.05)
timeLeft -= 1
except:
(ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
print(sys.exc_info())
traceback.print_exc(ErrorTB)
return False
else:
return True
def buttonExists(self, name):
""" Checks to see if a button exists
parameters:
name - The id, name, value or index of the button.
returns:
True if the button is found, else False
"""
myElement = self.getButton(name)
if myElement:
return True
else:
return False
def changeWindow(self, wintext):
""" changeWindow()
changes control to new or existing window
Parms:
wintext - title of window to control
"""
# Grab the POP-UP Window
newWin = self.windowFind(wintext)
# Use Pamie for COM object for POP-UP Window
self._ie = newWin
return self._ie
def checkBoxExists(self, name):
""" Checks to see if a checkbox exists
parameters:
name - The id, name, or value of the button.
returns:
True if the checkbox is found, else False
"""
myElement = self.getCheckBox(name)
if myElement:
return True
else:
return False
def clickButton(self, name):
""" Clicks a button
parameters:
name - The id, name, value or index of the button, or a button element.
returns:
True on success, else False
"""
if isinstance(name, str) or isinstance(name, int):
myButton = self.getButton(name)
else:
myButton = name
return self.clickElement(myButton)
def clickButtonImage(self, name):
""" Click a button of input type "image"
parameters:
name - The id, name, value or index of the button, or a button element.
returns:
True on success, else False
"""
if isinstance(name, str) or isinstance(name, int):
myElements = self.getElementsList("input", "type=image")
foundElement = self.findElement("input", "id;name;value", name, myElements)
else:
foundElement = name
return self.clickElement(foundElement)
def clickElement(self, element):
""" Clicks the passed element
parameters:
element - the element to click
returns:
True on success, else False
"""
try:
if not element:
if self.showDebugging: print("** clickElement() was not passed a valid element")
return False
if self.colorHighlight: element.style.backgroundColor = self.colorHighlight
element.focus()
element.blur()
element.click()
return True
except:
(ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
print(sys.exc_info())
traceback.print_exc(ErrorTB)
return False
def clickHiddenElement(self, element):
""" Clicks the passed element
parameters:
element - the element to click
returns:
True on success, else False
"""
try:
if not element:
if self.showDebugging: print("** clickElement() was not passed a valid element")
return False
if self.colorHighlight: element.style.backgroundColor = self.colorHighlight
element.click()
return True
except:
(ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
print(sys.exc_info())
traceback.print_exc(ErrorTB)
return False
def clickHiddenLink(self, name):
""" Clicks a hidden link.
parameters:
name - The id or innerText of the link
returns:
True on success, else False
"""
if isinstance(name, str) or isinstance(name, int):
myLink = self.getLink(name)
else:
myLink = name
return self.clickHiddenElement(myLink)
def clickImage(self, name):
""" Clicks an image
parameters:
name The id, name, src or index of the image
returns:
True on success, else False
"""
if isinstance(name, str) or isinstance(name, int):
myImage = self.getImage(name)
else:
myImage = name
return self.clickElement(myImage)
def clickLink(self, name):
""" Clicks a link.
parameters:
name - The id or innerText of the link
returns:
True on success, else False
"""
if isinstance(name, str) or isinstance(name, int):
myLink = self.getLink(name)
else:
myLink = name
return self.clickElement(myLink)
    def clickMenu(self, tag, className, controlname, event=None):
        """ Finds an element by tag name, class name and id, highlights it
            and fires an event on it.

            parameters:
                tag - HTML tag name to scan
                className - required className of the element
                controlname - required id of the element
                [event] - event argument forwarded to FireEvent
            returns:
                True when a matching element was found and fired,
                else None (also None on error)
        """
        self._wait()
        try:
            doc = self._ie.Document.getElementsByTagName(tag)
            for element in doc:
                if element is None:
                    break
                if element.className == className:
                    if element.id == controlname:
                        element.style.backgroundColor = "cyan"
                        # NOTE(review): MSHTML fireEvent normally takes an
                        # event name like 'onclick'; this passes
                        # (tag, controlname, event) — confirm intended.
                        element.FireEvent(tag, controlname, event)
                        return True
        except:
            (ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
            print(sys.exc_info())
            traceback.print_exc(ErrorTB)
            return None
def close_window(self, title=None):
try:
self._ie.Close()
return True
except:
return False
def divExists(self, name):
""" Checks to see if a div exists
parameters:
name - The id, name, or index of the button.
returns:
True if the div is found, else False
"""
myElement = self.getDiv(name)
if myElement:
return True
else:
return False
def elementExists(self, tag, att, val):
""" Checks to see if an element exists.
parameters:
tag - The HTML tag name
att - The tag attribute to search for
val - The attribute value to match
returns:
True if the element exists, else False
"""
foundElement = self.findElement(tag, att, val)
if foundElement == None:
return False
else:
return True
def executeJavaScript(self, name):
""" Executes a java script function
parameters:
name - The name of the javascript function
returns:
True on success, else False
"""
self._wait()
try:
doc = self._ie.Document
pw = doc.parentWindow
script = name
print("script"), script
pw.execScript(script)
except:
(ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
print(sys.exc_info())
traceback.print_exc(ErrorTB)
sys.exit(2)
def findElement(self, tag, attributes, val, elementList=None):
    """ The main find function that hunts down an element on the page according
    to the specified parameters. Tries to take into account class
    specified frames or forms.
    parameters:
        tag           - The HTML tag name.
        attributes    - The semi-colon seperated tag attribute to search for.
        val           - The attribute value to match. Regular Expressions
                        can be used by starting the val with an !
        [elementList] - Find the element in the passed list.
    returns:
        The found element, else None
    """
    self._wait()
    atts = attributes.split(";")
    regEx = False
    if isinstance(val, str):
        # A leading "!" switches matching from literal equality to regex.
        if val[0] == "!":
            val = val.replace("!", "", 1)
            myRE = re.compile(val)
            regEx = True
    if elementList:
        if tag:
            # Narrow the supplied list to elements of the wanted tag.
            elements = self.getElementsList(tag, "tagName=" + tag, elementList)
            if isinstance(val, int):  # Do we want the index?
                return elements[val]
    else:
        elements = self.getElementsList(tag)
    for el in elements[:]:
        if regEx:
            for att in atts[:]:
                valText = el.getAttribute(att)
                if valText != None:
                    m = myRE.match(valText)
                    if m:
                        return el
        else:
            for att in atts[:]:
                valText = el.getAttribute(att)
                if valText != None:
                    # Literal comparison; strings are stripped first.
                    if isinstance(valText, str):
                        valText = valText.strip()
                    if valText == val:
                        return el
    if self.showDebugging: print("** findElement() did not find " + tag + "-" + attributes + "-" + str(val))
    return None
def findElementByIndex(self, tag, indexNum, filter=None, elementList=None):
""" Find a specific element based on tag and the index number.
parameters:
tag - The HTML tag name
indexNum - The index number of the element
attributes - The semi-colon seperated tag attribute to search for
val - The attribute value to match
[elementList] - Find the element in the passed list
returns:
The found element
"""
try:
myElements = self.getElementsList(tag, filter=None, elementList=None)
return myElements[indexNum]
except:
(ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
print(sys.exc_info())
traceback.print_exc(ErrorTB)
return None
else:
return None
def findText(self, text):
    """ Searches for text on the Web Page.
    parameters:
        text - text to search for
    returns:
        True when the text is found; otherwise prints a notice and
        returns None (preserved original behavior).
    """
    self._wait()
    pageText = self.outerHTML()
    text_found = pageText.find(text)
    # Bug fix: the original used `is not -1`, an identity comparison that
    # only worked via CPython's small-int cache; compare by value instead.
    if text_found != -1:
        return True
    print("Text %s Not Found!" % (text))
    return None
def fireElementEvent(self, tag, controlName, eventName):
    """Fire *eventName* on the control whose name attribute matches.

    tag         - the HTML tag name
    controlName - the control to act on
    eventName   - the event name to signal
    Returns True on success, False when the control is not found.
    """
    element = self.findElement(tag, "name", controlName)
    if not element:
        if self.showDebugging:
            print("fireEvent() did not find " + controlName + " control.")
        return False
    if self.colorHighlight:
        element.style.backgroundColor = self.colorHighlight
    element.FireEvent(eventName)
    return True
def findWindow(self, title, indexNum=1):
    """ Finds all open IE windows and returns the one whose title matches.
    parameters:
        title      - The window title to find
        [indexNum] - The 1-based index of the matching window to return
                     (for several windows with the same title)
    returns:
        The window's InternetExplorer COM object if found, else None
    """
    # Poll the shell until the window appears or the timeout budget
    # (self._timeOut, decremented by 5 per half-second sleep) runs out.
    thisCount = self._timeOut
    found = False
    while not found:
        # Re-enumerate every shell window each pass; new windows may
        # have opened since the previous iteration.
        shellWnd = win32com.client.Dispatch('Shell.Application')
        wins = shellWnd.Windows()
        winsCount = wins.Count
        indexCnt = 1
        time.sleep(.5)
        thisCount = thisCount - 5
        if thisCount < 1:
            break
        for index in range(winsCount):
            try:
                ieObj = wins.Item(index)
                doc = ieObj.Document
                # Match either the document title or the window's
                # LocationName (used before the page has a title).
                if doc.title == title:
                    if indexCnt == indexNum:
                        return ieObj
                    indexCnt += 1
                elif ieObj.LocationName == title:
                    if indexCnt == indexNum:
                        return ieObj
                    indexCnt += 1
            except:
                # Non-IE shell windows (e.g. Explorer) raise when their
                # Document is touched; skip them.
                pass
    if self.showDebugging: print("** windowFind() did not find the " + title + "-" + str(indexNum) + " window.")
    return None
def formExists(self, name):
    """Report whether a form identified by *name* is present.

    name - the name, id or index of the form
    Returns True when found, else False.
    """
    return True if self.getForm(name) else False
def frameExists(self, name):
    """Report whether a top-level frame named *name* exists.

    name - the id or name of the frame
    Returns True when found, False otherwise or on COM error.
    """
    self._wait()
    try:
        frames = self._ie.Document.frames
        return any(frames[i].name == name for i in range(frames.length))
    except:
        (ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
        print(sys.exc_info())
        traceback.print_exc(ErrorTB)
        return False
def getBodyValue(self, attribute):
    """Return *attribute* of the document body (frame-aware).

    attribute - the name of the attribute to read
    Returns the attribute's value.

    examples:
        val = getBodyValue("id")
    """
    self._wait()
    if self.frameName:
        body = self._ie.Document.frames[self.frameName].Document.body
    else:
        body = self._ie.Document.body
    return self.getElementValue(body, attribute)
def getButton(self, name):
    """ Gets a button.
    parameters:
        name - The id, name, value or index of the button.
    returns:
        The button if found, else None
    """
    # Only submit/button inputs count as buttons.
    myElements = self.getElementsList("input", "type=submit;type=button")
    if isinstance(name, int):
        foundElement = self.findElementByIndex("input", name, None, myElements)
    else:
        foundElement = self.findElement("input", "id;name;value", name, myElements)
    if foundElement == None:
        # Bug fix: str(name) — name may be an int index, and "+" on an int
        # raised TypeError in the debug path (getImage already does this).
        if self.showDebugging: print("** getButton() did not find " + str(name))
        return None
    else:
        if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
        return foundElement
def getButtonValue(self, name, attribute):
    """ Gets the value of an attribute on a button.
    parameters:
        name      - The id, name, value or index of the button, or a button element.
        attribute - The name of the attribute to get the value for
    returns:
        The value of the attribute, or None when the button is not found
    """
    if isinstance(name, str) or isinstance(name, int):
        foundElement = self.getButton(name)
    else:
        foundElement = name
    if foundElement == None:
        # Bug fix: str(name) — name may be an int index.
        if self.showDebugging: print("** getButtonValue() did not find " + str(name))
        return None
    else:
        if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
        return self.getElementValue(foundElement, attribute)
def getButtons(self, filter=None):
    """Return every submit button on the page.

    filter - optional extra "att=val;..." criteria combined with the
             implicit type=submit filter.
    Returns a list of button elements.
    """
    combined = "type=submit"
    if filter:
        combined = combined + ";" + filter
    return self.getElementsList("input", combined)
def getButtonsValue(self, attribute, filter=None):
    """ Gets a list of values for the specified attribute.
    parameters:
        attribute - The name of the attribute to get the value for
        [filter]  - Get only buttons specified by the filter
    returns:
        A list of the specified value of the attribute
    """
    myValues = []
    # Bug fix: the filter argument was previously dropped, so filtering
    # never took effect.
    myButtons = self.getButtons(filter)
    for button in myButtons[:]:
        myValues.append(button.getAttribute(attribute))
    return myValues
def getCheckBox(self, name):
    """Return the checkbox whose id, name or value matches *name*.

    name - the id, name, or value of the checkbox
    Returns the checkbox element, or None when not found.
    """
    boxes = self.getElementsList("input", "type=checkbox")
    found = self.findElement("input", "id;name;value", name, boxes)
    if found == None:
        if self.showDebugging:
            print("** getCheckBox() did not find " + name)
        return None
    if self.colorHighlight:
        found.style.backgroundColor = self.colorHighlight
    return found
def getCheckBoxValue(self, name, attribute):
    """ Gets the value of an attribute on a checkbox.
    parameters:
        name      - The id, name, or value of the checkbox, or a checkbox element.
        attribute - The name of the attribute to get the value for
    returns:
        The value of the attribute, or None when the checkbox is not found
    """
    if isinstance(name, str) or isinstance(name, int):
        foundElement = self.getCheckBox(name)
    else:
        foundElement = name
    if foundElement == None:
        # Bug fix: str(name) — name may be an int.
        if self.showDebugging: print("** getCheckBoxValue() did not find " + str(name))
        return None
    else:
        if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
        return self.getElementValue(foundElement, attribute)
def getCheckBoxes(self, filter=None):
    """Return every checkbox on the page.

    filter - optional extra "att=val;..." criteria combined with the
             implicit type=checkbox filter.
    Returns a list of checkbox elements.
    """
    combined = "type=checkbox"
    if filter:
        combined = combined + ";" + filter
    return self.getElementsList("input", combined)
def getCheckBoxesChecked(self, name):
    """Return the checked checkboxes belonging to group *name*.

    name - the checkbox group name
    Returns a list of checked checkbox elements for the group.
    """
    return self.getCheckBoxes("type=checkbox;checked=True;name=" + name)
def getCheckBoxesValue(self, attribute, filter=None):
    """ Gets the value of an attribute for all the checkboxes.
    parameters:
        attribute - The name of the attribute to get the value for
        [filter]  - Get only checkboxes specified by the filter
    returns:
        A list of the specified value of the attribute
    """
    myValues = []
    # Bug fix: the filter argument was previously dropped.
    myCheckBoxes = self.getCheckBoxes(filter)
    for checkbox in myCheckBoxes[:]:
        myValues.append(checkbox.getAttribute(attribute))
    return myValues
def getConfig(self, cfpath):
    """ Builds an absolute path to a config resource.

    Changes the process working directory to its parent (preserving the
    original side effect) and returns that directory's absolute path with
    *cfpath* appended.

    parameters:
        cfpath - path fragment appended to the resolved directory
    returns:
        The combined path string
    """
    # Bug fix: os.chdir() returns None, and the old code then called
    # os.path.abspath(None), which raised TypeError.  Resolve the new
    # working directory after changing into it instead.
    os.chdir('..')
    path = os.path.abspath(os.curdir)
    # NOTE(review): the original also computed os.path.dirname(sys.argv[0])
    # and discarded it — presumably the intent was the script's parent
    # directory; confirm against callers before changing further.
    return path + cfpath
def getCookie(self):
    """Return the document.cookie string of the current page."""
    self._wait()
    return self._ie.Document.cookie
def getDiv(self, name):
    """ Gets a div.
    parameters:
        name - The id, name, or index of the div
    returns:
        The div if found, else None
    """
    if isinstance(name, int):
        foundElement = self.findElementByIndex("div", name)
    else:
        foundElement = self.findElement("div", "id;name", name)
    if foundElement == None:
        # Bug fix: str(name) — name may be an int index.
        if self.showDebugging: print("** getDiv() did not find " + str(name))
        return None
    else:
        if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
        return foundElement
def getDivValue(self, name, attribute):
    """ Gets the value of an attribute on a div.
    parameters:
        name      - The id, name, or index of the div, or a div element.
        attribute - The name of the attribute to get the value for
    returns:
        The value of the attribute, or None when the div is not found
    """
    if isinstance(name, str) or isinstance(name, int):
        foundElement = self.getDiv(name)
    else:
        foundElement = name
    if foundElement == None:
        # Bug fix: str(name) — name may be an int index.
        if self.showDebugging: print("** getDivValue() did not find " + str(name))
        return None
    else:
        if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
        return self.getElementValue(foundElement, attribute)
def getDivs(self, filter=None):
    """Return every div on the page, optionally narrowed by *filter*."""
    return self.getElementsList("div", filter)
def getDivsValue(self, attribute, filter=None):
    """Collect *attribute* from every div matched by *filter*.

    attribute - the name of the attribute to read
    filter    - optional "att=val;..." criteria
    Returns a list of attribute values.
    """
    return [div.getAttribute(attribute) for div in self.getDivs(filter)]
def getElementChildren(self, element, all=True):
""" Gets a list of children for the specified element
parameters:
element - The element
elementList - The attribute name
[all] - True gets all descendants, False gets direct children only
returns:
The value of the attribute.
"""
try:
count = 0
myElements = []
if all:
elements = element.all
else:
elements = element.childNodes
while count < elements.length:
myElements.append(elements[count])
count += 1
return myElements
except:
(ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
print(sys.exc_info())
traceback.print_exc(ErrorTB)
return None
def getElementParent(self, element):
""" Gets the parent of the passed element.
parameters:
element - The element
returns:
The parent element
"""
try:
return element.parentElement
except:
(ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
print(sys.exc_info())
traceback.print_exc(ErrorTB)
return None
def getElementValue(self, element, attribute):
""" Gets the value of the attribute from the element.
parameters:
element - The element
elementList - The attribute name
returns:
The value of the attribute.
"""
try:
return element.getAttribute(attribute)
except:
(ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
print(sys.exc_info())
traceback.print_exc(ErrorTB)
return None
def getElementsList(self, tag, filter=None, elementList=None):
    """ Collects every element of *tag*, honoring the active frame/form
    context, and optionally narrows the result by an attribute filter.
    parameters:
        tag           - The HTML tag name
        [filter]      - Only return elements that match this filter in format
                        (att1=val1;att2=val2), ie. "type=checkbox;checked=True"
        [elementList] - Filter this pre-fetched list instead of querying the page
    returns:
        A filtered list of the found elements, or None on COM error
    """
    self._wait()
    if elementList:
        allElements = elementList
    else:
        # Resolve the element collection relative to the currently
        # selected frame and/or form, when one is set on the instance.
        if self.frameName:
            myFrame = self.getFrame(self.frameName)
            if self.formName:
                elements = myFrame.Document.forms[self.formName].getElementsByTagName(tag)
            else:
                elements = myFrame.Document.getElementsByTagName(tag)
        else:
            if self.formName:
                elements = self._ie.Document.forms[self.formName].getElementsByTagName(tag)
            else:
                elements = self._ie.Document.getElementsByTagName(tag)
        # Convert the IE COM object to a list
        count = 0
        allElements = []
        while count < elements.length:
            allElements.append(elements[count])
            count += 1
    try:
        if filter:
            myElements = []
            filters = filter.split(";")
            for el in allElements:
                # An element matches when ANY one "att=val" clause matches
                # (case-insensitive, whitespace-stripped comparison).
                match = False
                for f in filters[:]:
                    atts = f.split("=")
                    valText = el.getAttribute(atts[0])
                    if valText != None:
                        valText = str(valText)
                        valText = valText.strip()
                        valText = valText.lower()
                        wantText = atts[1].lower()
                        if valText == wantText:
                            match = True
                if match:
                    myElements.append(el)
        else:
            myElements = allElements
        return myElements
    except:
        (ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
        print(sys.exc_info())
        traceback.print_exc(ErrorTB)
        return None
def getErrorText(self, className):
    """ Gets the Error Text.
    This is a placeholder: the body is intentionally `pass`, and the
    string literal below is a worked example for site-specific
    implementations (tweak the tag/class names to your page).
    parameters:
        className - the CSS class name carrying the error text
    returns:
        Currently always None (not implemented)
    """
    pass
    # The following triple-quoted literal is an inert example, kept as
    # in-source documentation for implementers.
    """
    EXAMPLE Below
    self._wait()
    className = className
    try:
        doc = self._ie.Document.getElementsByTagName("SPAN")
        for element in doc:
            if element is None:break
            if element.className == className :
                element.style.backgroundColor="cyan"
                val = element.innertext
                # stripout any spaces
                val = val.strip()
                return val
    except:
        (ErrorType,ErrorValue,ErrorTB)=sys.exc_info()
        print (sys.exc_info())
        traceback.print_exc(ErrorTB)
        return None
    """
def getForm(self, name=None):
    """ Gets a form.
    parameters:
        [name] - The name, id or index of the form; defaults to self.formName
    returns:
        The form if found, else None
    """
    if name == None: name = self.formName
    if isinstance(name, int):
        foundElement = self.findElementByIndex("form", name)
    else:
        foundElement = self.findElement("form", "id;name", name)
    if foundElement == None:
        # Bug fix: str(name) — name may be an int index or None (when no
        # formName is set), both of which broke the string concatenation.
        if self.showDebugging: print("** getForm() did not find " + str(name))
        return None
    else:
        return foundElement
def getFormControlNames(self, name=None):
    """ Gets a list of control names for a given form.
    parameters:
        [name] - the form name; defaults to self.formName
    returns:
        a list of control names located in the form
    """
    if name == None: name = self.formName
    self._wait()
    d = []
    # Resolve the form through the active frame when one is selected.
    # NOTE(review): both branches index by self.formName, not the local
    # `name` — the parameter is effectively unused after the default
    # fallback; confirm before relying on the parameter.
    if self.frameName:
        self._frameWait()
        thisForm = self._ie.Document.frames[self.frameName].Document.forms[self.formName]
    else:
        thisForm = self._ie.Document.forms[self.formName]
    if thisForm != None:
        for control in thisForm:
            # COM collections can yield a trailing None; stop there.
            if control == None: break  # Some browser bug
            d.append(control.name)
    return d
def getFormValue(self, name, attribute):
    """ Gets the value of an attribute on a form.
    parameters:
        name      - The id, name or index of the form, or a form element;
                    None falls back to self.formName
        attribute - The name of the attribute to get the value for
    returns:
        The value of the attribute, or None when the form is not found
    """
    if name == None: name = self.formName
    if isinstance(name, str) or isinstance(name, int):
        foundElement = self.getForm(name)
    else:
        foundElement = name
    if foundElement == None:
        # Bug fix: str(name) — name may be an int index or None.
        if self.showDebugging: print("** getFormValue() did not find " + str(name))
        return None
    else:
        return self.getElementValue(foundElement, attribute)
def getFormVisibleControlNames(self, name=None):
    """ Gets a list of visible (non-hidden) control names for a given form.
    parameters:
        [name] - the form name; defaults to self.formName
    returns:
        a list of visible control ids (preferred) or names in the form
    """
    if name == None: name = self.formName
    self._wait()
    d = []
    # NOTE(review): both branches index by self.formName, not the local
    # `name` — the parameter is effectively unused after the fallback.
    if self.frameName:
        thisForm = self._ie.Document.frames[self.frameName].Document.forms[self.formName]
    else:
        thisForm = self._ie.Document.forms[self.formName]
    if thisForm != None:
        for control in thisForm:
            # COM collections can yield a trailing None; stop there.
            if control == None: break  # some browser bug
            if control.type != 'hidden':
                # Prefer the id; fall back to the name when id is empty.
                if control.id == None or control.id == '':
                    d.append(control.name)
                else:
                    d.append(control.id)
    return d
def getForms(self, filter=None):
    """Return every form on the page, optionally narrowed by *filter*."""
    return self.getElementsList("form", filter)
def getFormsValue(self, attribute, filter=None):
    """Collect *attribute* from every form matched by *filter*.

    attribute - the name of the attribute to read
    filter    - optional "att=val;..." criteria
    Returns a list of attribute values.
    """
    return [form.getAttribute(attribute) for form in self.getForms(filter)]
def getFrame(self, name):
    """ Gets a frame.
    parameters:
        name - The frame name (dot-separated for nested frames) or an
               int index into the top-level frame collection
    returns:
        The frame element, else None
    """
    self._wait()
    frames = self._ie.Document.frames
    # Bug fix: an int index used to crash on name.split(".") before the
    # isinstance check below was ever reached.
    if isinstance(name, int):
        return frames[name]
    destFrames = name.split(".")
    j = 0
    for destFrame in destFrames:
        j += 1
        for i in range(frames.length):
            fName = frames[i].name
            if fName == destFrame:
                if j == len(destFrames):
                    # Final path component: wait for it to load, then return.
                    myFrame = frames[i]
                    self._frameWait(myFrame)
                    return myFrame
                else:
                    # Descend into the matching frame's own collection.
                    frames = frames[i].document.frames
    return None
def getFrameValue(self, name, attribute):
    """Return the name of frame *name*.

    name      - the name of the frame
    attribute - accepted for interface symmetry only; frames do not
                support getAttribute(), so the frame's name is always
                returned (matching the original behavior).
    Returns the frame name, or None when the frame is not found.
    """
    frame = self.getFrame(name)
    if frame == None:
        if self.showDebugging:
            print("** getFrameValue() did not find " + name)
        return None
    return frame.name  # can't call getElementValue() here
def getFramesValue(self):
    """Return the name of every top-level frame in the document."""
    self._wait()
    frames = self._ie.Document.frames
    # getAttribute() is unavailable on frames, so read .name directly.
    return [frames[i].name for i in range(frames.length)]
def getIE(self):
    """Return the underlying InternetExplorer.Application COM object."""
    return self._ie
def getImage(self, name):
    """Return an image located by id, name, nameProp, src, or index.

    name - the id, name, src, or index of the image
    Returns the image element, or None when not found.
    """
    if isinstance(name, int):
        img = self.findElementByIndex("img", name)
    else:
        img = self.findElement("img", "id;name;nameProp;src", name)
    if img == None:
        if self.showDebugging:
            print("** getImage() did not find " + str(name))
        return None
    if self.colorHighlight:
        img.style.backgroundColor = self.colorHighlight
    return img
def getImageValue(self, name, attribute):
    """ Gets the value of an attribute on an image.
    parameters:
        name      - The id, name, value or index of the image, or image element.
        attribute - The name of the attribute to get the value for
    returns:
        The value of the attribute, or None when the image is not found
    """
    if isinstance(name, str) or isinstance(name, int):
        foundElement = self.getImage(name)
    else:
        foundElement = name
    if foundElement == None:
        # Bug fix: str(name) — name may be an int index (getImage itself
        # already uses str(name) in its debug print).
        if self.showDebugging: print("** getImageValue() did not find " + str(name))
        return None
    else:
        if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
        return self.getElementValue(foundElement, attribute)
def getImages(self, filter=None):
    """Return every image on the page, optionally narrowed by *filter*."""
    return self.getElementsList("img", filter)
def getImagesValue(self, attribute, filter=None):
    """Collect *attribute* from every image matched by *filter*.

    attribute - the name of the attribute to read
    filter    - optional "att=val;..." criteria
    Returns a list of attribute values.
    """
    return [image.getAttribute(attribute) for image in self.getImages(filter)]
def getInputElements(self, filter=None):
    """Return every <input> element, optionally narrowed by *filter*."""
    return self.getElementsList("input", filter)
def getLink(self, name):
    """ Gets a link.
    parameters:
        name - The id, innerText or index of the link
    returns:
        The link if found, else None
    """
    if isinstance(name, int):
        foundElement = self.findElementByIndex("a", name)
    else:
        foundElement = self.findElement("a", "id;innerText", name)
    if foundElement == None:
        # Bug fix: str(name) — name may be an int index.
        if self.showDebugging: print("** getLink() did not find " + str(name))
        return None
    else:
        if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
        return foundElement
def getLinkValue(self, name, attribute):
    """ Gets the value of an attribute on a link.
    parameters:
        name      - The id, innerText or index of the link, or a link element.
        attribute - The name of the attribute to get the value for
    returns:
        The value of the attribute, or None when the link is not found
    """
    if isinstance(name, str) or isinstance(name, int):
        foundElement = self.getLink(name)
    else:
        foundElement = name
    if foundElement == None:
        # Bug fix: str(name) — name may be an int index.
        if self.showDebugging: print("** getLinkValue() did not find " + str(name))
        return None
    else:
        if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
        return self.getElementValue(foundElement, attribute)
def getLinks(self, filter=None):
    """Return every link on the page, optionally narrowed by *filter*."""
    return self.getElementsList("a", filter)
def getLinksValue(self, attribute, filter=None):
    """Collect *attribute* from every link matched by *filter*.

    attribute - the name of the attribute to read
    filter    - optional "att=val;..." criteria
    Returns a list of attribute values.
    """
    return [link.getAttribute(attribute) for link in self.getLinks(filter)]
def getListBox(self, name):
    """ Gets a list box.
    parameters:
        name - The name, id or index of the listbox
    returns:
        The list box if found, else None
    """
    if isinstance(name, int):
        foundElement = self.findElementByIndex("select", name)
    else:
        foundElement = self.findElement("select", "name;id", name)
    if foundElement == None:
        # Bug fix: str(name) — name may be an int index.
        if self.showDebugging: print("** getListBox() did not find " + str(name))
        return None
    else:
        if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
        return foundElement
def getListBoxItemCount(self, name):
    """ Gets a count of the options in a listbox.
    parameters:
        name - The name or id of the list box
    returns:
        The number of options, or None when the listbox is not found
    """
    foundElement = self.findElement("select", "name;id", name)
    if foundElement == None:
        # Bug fix: the debug message previously named getListBoxSelected().
        if self.showDebugging: print("** getListBoxItemCount() did not find " + name)
        return None
    else:
        if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
        # Cleanup: the old code kept an unused myValues list and counted
        # the collection in a while loop; the COM collection exposes its
        # size directly via .length.
        return foundElement.options.length
def getListBoxOptions(self, name):
    """Return the innerText of every option in listbox *name*.

    name - the name or id of the list box
    Returns a list of option texts, or None when the listbox is missing.
    """
    element = self.findElement("select", "name;id", name)
    if element == None:
        if self.showDebugging:
            print("** getListBoxOptions() did not find " + name)
        return None
    if self.colorHighlight:
        element.style.backgroundColor = self.colorHighlight
    options = element.options
    return [options[i].innerText for i in range(options.length)]
def getListBoxSelected(self, name):
    """Return the innerText of each selected option in listbox *name*.

    name - the name or id of the list box
    Returns a list of selected texts, or None when the listbox is missing.
    """
    element = self.findElement("select", "name;id", name)
    if element == None:
        if self.showDebugging:
            print("** getListBoxSelected() did not find " + name)
        return None
    if self.colorHighlight:
        element.style.backgroundColor = self.colorHighlight
    options = element.options
    return [options[i].innerText
            for i in range(options.length)
            if options[i].selected]
def getListBoxValue(self, name, attribute):
    """ Gets the value of an attribute on a listbox.
    parameters:
        name      - The id, name or index of the listbox, or a listbox element.
        attribute - The name of the attribute to get the value for
    returns:
        The value of the attribute, or None when the listbox is not found
    """
    if isinstance(name, str) or isinstance(name, int):
        foundElement = self.getListBox(name)
    else:
        foundElement = name
    if foundElement == None:
        # Bug fix: str(name) — name may be an int index.
        if self.showDebugging: print("** getListBoxValue() did not find " + str(name))
        return None
    else:
        if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
        return self.getElementValue(foundElement, attribute)
def getPageText(self):
    """ Gets the URL, Title and outerHTML.
    parameters:
        None
    returns:
        a string consisting of:
            URL,
            Title
            Body block
        as a string. Unfortunately, IE doesn't give a workable solution to
        saving the complete source so this is as good as it gets until
        someone brighter comes along. This is useful if you want to compare
        against a previous run for QCing purposes.
    """
    self._wait()
    # When a frame is selected, capture that frame's body instead of the
    # top-level document body.
    if self.frameName:
        return '%s\n%s\n%s' % (self._ie.LocationURL,
                               self._ie.LocationName,
                               self._ie.Document.frames[self.frameName].document.body.outerHTML)
    else:
        return '%s\n%s\n%s' % (self._ie.LocationURL,
                               self._ie.LocationName,
                               self._ie.Document.body.outerHTML)
def getRadioButton(self, name):
    """ Gets a radio button by name. If several radio buttons share the
    name, the first one found is returned.
    parameters:
        name - radio button group name or index
    returns:
        The radio button if found, else None
    """
    myElements = self.getElementsList("input", "type=radio")
    if isinstance(name, int):
        foundElement = self.findElementByIndex("input", name, None, myElements)
    else:
        foundElement = self.findElement("input", "name", name, myElements)
    if foundElement == None:
        # Bug fix: str(name) — name may be an int index.
        if self.showDebugging: print("** getRadioButton() did not find " + str(name))
        return None
    else:
        if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
        return foundElement
def getRadioButtonSelected(self, name):
    """Return the value of each checked radio button in group *name*."""
    elements = self.getElementsList("input", "type=radio;checked=True;name=" + name)
    return [el.value for el in elements]
def getRadioButtonValues(self, name):
    """Return the value of each UNchecked radio button in group *name*.

    (The original docstring said "selected", but the filter requests
    checked=False — i.e. the unselected buttons of the group.)
    """
    elements = self.getElementsList("input", "type=radio;checked=False;name=" + name)
    return [el.value for el in elements]
def getRadioButtons(self, filter=None):
    """Return every radio button on the page.

    filter - optional extra "att=val;..." criteria combined with the
             implicit type=radio filter.
    Returns a list of radio button elements.
    """
    combined = "type=radio"
    if filter:
        combined = combined + ";" + filter
    return self.getElementsList("input", combined)
def getTable(self, name):
    """ Gets a table.
    parameters:
        name - The id, name or index of the table
    returns:
        The table if found, else None
    """
    if isinstance(name, int):
        # Bug fix: name was also being passed as findElementByIndex's
        # filter argument, which every other getter leaves as None.
        foundElement = self.findElementByIndex("table", name)
    else:
        foundElement = self.findElement("table", "id;name", name)
    if foundElement == None:
        # Bug fix: str(name) — name may be an int index.
        if self.showDebugging: print("** getTable() did not find " + str(name))
        return None
    else:
        if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
        return foundElement
def getTableData(self, name):
    """ Gets all the cell text from a table.
    parameters:
        name - The id, name or index of the table, or a table element.
    returns:
        a single string with cell texts separated by spaces and rows
        separated by newlines, or None on COM error
    """
    if isinstance(name, str) or isinstance(name, int):
        myTable = self.getTable(name)
    else:
        myTable = name
    myCells = myTable.cells
    try:
        myData = ""
        lastIndex = -1
        for myCell in myCells:
            # cellIndex restarts at 0 on each row; a non-increasing index
            # therefore marks the start of a new row.
            if myCell.cellIndex <= lastIndex: myData += "\n"
            myData += str(myCell.innerText.strip()) + " "
            lastIndex = myCell.cellIndex
        return myData
    except:
        (ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
        print(sys.exc_info())
        traceback.print_exc(ErrorTB)
        return None
def getTableRowIndex(self, name, row):
    """ Gets the index of a row in a table.
    parameters:
        name  - The id, name or index of the table, or a table element
        row[] - The expected cell texts of the row; "*" ignores a cell.
                Cells beyond the length of row[] are ignored.
    returns:
        The 0-based index of the first matching row; False when no row
        matches; None on COM error
    """
    if isinstance(name, str) or isinstance(name, int):
        myTable = self.getTable(name)
    else:
        myTable = name
    try:
        matches = True
        rowIndex = 0
        colIndex = 0
        cIndex = -1
        for myCell in myTable.cells:
            # cellIndex restarts on each row; a non-increasing index marks
            # the start of a new row: report the previous row if it matched.
            if myCell.cellIndex <= cIndex:
                if matches:
                    return rowIndex
                matches = True
                rowIndex += 1
                colIndex = 0
            # Bug fix: guard colIndex — the original raised IndexError when
            # a row had more cells than the supplied pattern.
            if colIndex < len(row) and row[colIndex] != "*":
                if myCell.innerText.strip() != row[colIndex]:
                    matches = False
            cIndex = myCell.cellIndex
            colIndex += 1
        # Bug fix: the final row was never reported — the original fell out
        # of the loop and returned the bare boolean instead of the index.
        if matches:
            return rowIndex
        return False
    except:
        (ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
        print(sys.exc_info())
        traceback.print_exc(ErrorTB)
        return None
def getTableText(self, tableName, rownum, cellnum, frameName=None):
    """ Returns the stripped text of one cell in a table.
    parameters:
        tableName   - name (or index) of the table in the page's table collection
        rownum      - row number
        cellnum     - cell number within the row
        [frameName] - accepted but unused in this implementation
    returns:
        the cell's stripped innerText, or None (implicitly) when the page
        has no tables
    """
    self._wait()
    table = self._ie.Document.getElementsByTagName('table')
    if table.length > 0:
        # Highlight the cell so a watcher can see which one was read.
        table[tableName].rows[rownum].cells[cellnum].style.backgroundColor = 'cyan'
        data = table[tableName].rows[rownum].cells[cellnum].innerText
        # print "Here:",data
        data = data.strip()  # strip off any spaces
        return data
    # except: print "Failed not get the text from the Cell"
    else:
        print("No Table Found")
def getTables(self, filter=None):
    """Return every table on the page, optionally narrowed by *filter*."""
    return self.getElementsList("table", filter)
def getTextArea(self, name):
    """ Gets a text area.
    parameters:
        name - The name, id or index of the textarea
    returns:
        The text area if found, else None
    """
    if isinstance(name, int):
        foundElement = self.findElementByIndex("textarea", name)
    else:
        foundElement = self.findElement("textarea", "name;id", name)
    if foundElement == None:
        # Bug fix: str(name) — name may be an int index.
        if self.showDebugging: print("** getTextArea () did not find " + str(name))
        return None
    else:
        if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
        return foundElement
def getTextAreaValue(self, name, attribute):
    """ Gets the value of an attribute on a textarea.
    parameters:
        name      - The id, name or index of the textarea, or a textarea element.
        attribute - The name of the attribute to get the value for
    returns:
        The value of the attribute, or None when the textarea is not found
    """
    if isinstance(name, str) or isinstance(name, int):
        foundElement = self.getTextArea(name)
    else:
        foundElement = name
    if foundElement == None:
        # Bug fix: str(name) — name may be an int index.
        if self.showDebugging: print("** getTextArea Value() did not find " + str(name))
        return None
    else:
        if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
        return self.getElementValue(foundElement, attribute)
def getTextAreas(self, filter=None):
    """ Gets a list of textareas.
    parameters:
        [filter] - Get only textareas specified by the filter
    returns:
        A list of textareas
    """
    # Bug fix: the filter argument was previously dropped, so filtering
    # never took effect.
    return self.getElementsList("textarea", filter)
def getTextAreasValue(self, attribute, filter=None):
    """Collect *attribute* from every textarea matched by *filter*.

    attribute - the name of the attribute to read
    filter    - optional "att=val;..." criteria
    Returns a list of attribute values.
    """
    return [area.getAttribute(attribute) for area in self.getTextAreas(filter)]
def getTextBox(self, name):
""" Gets a text box.
parameters:
name - The name, id or index of the textbox
returns:
The text area if found.
"""
if isinstance(name, int):
foundElement = self.findElementByIndex("input", name)
else:
foundElement = self.findElement("input", "id;name;value", name)
if foundElement == None:
if self.showDebugging: print("** getTextBox () did not find " + name)
return None
else:
if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
return foundElement
    def getTextBoxValue(self, name, attribute):
        """ Gets the value of an attribute on a textbox
        parameters:
            name - The id, name or index of the textbox, or a textbox element
            attribute - The name of the attribute to get the value for
        returns:
            The value of the attribute, or None if the textbox was not found
        """
        # Strings/ints are lookups; anything else is assumed to already be an element.
        if isinstance(name, str) or isinstance(name, int):
            foundElement = self.getTextBox(name)
        else:
            foundElement = name
        if foundElement == None:
            # NOTE(review): concatenating an int `name` here would raise TypeError.
            if self.showDebugging: print("** getTextBox Value() did not find " + name)
            return None
        else:
            if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
            return self.getElementValue(foundElement, attribute)
def getTextBoxes(self, filter=None):
""" Gets all the textboxes
parameters:
[filter] - Get only textboxes specified by the filter
returns:
A list of textboxes
"""
if filter:
filter = "type=text;" + filter
else:
filter = "type=text"
return self.getElementsList("input", filter)
def getTextBoxesValue(self, attribute, filter=None):
""" Gets a list of values for the specified attribute
parameters:
attribute - The name of the attribute to get the value for
[filter] - Get only textboxes specified by the filter
returns:
A list of the specified value of the attribute
"""
myValues = []
myBoxes = self.getTextBoxes()
for box in myBoxes[:]:
myValues.append(box.getAttribute(attribute))
return myValues
    def goBack(self):
        """
        Navigates backward one item in the history list
        """
        # Wait for any pending navigation before issuing the COM call.
        self._wait()
        self._ie.GoBack()
def imageExists(self, name):
""" Checks to see if a image exists in the HTML document. It does not
check to see if the image actually exists on the server.
parameters:
name - The id, name, src or index of the image.
returns:
True if the image is found, else False
"""
myElement = self.getImage(name)
if myElement:
return True
else:
return False
def linkExists(self, name):
""" Checks to see if a link exists
parameters:
name - The id or innerText of the link.
returns:
True if the link is found, else False
"""
myElement = self.getLink(name)
if myElement:
return True
else:
return False
def listBoxUnSelect(self, name, value):
""" Selects an item in a list box.
parameters:
name - The name or id of the listbox
value - The value of the item to select in the list
returns:
True on success, else False
"""
self._wait()
foundElement = self.findElement("select", "name;id", name)
if foundElement == None:
if self.showDebugging: print("** selectListBox() did not find " + name + "-" + str(value))
return False
else:
for el in foundElement:
if el.text == value:
if self.colorHighlight: el.style.backgroundColor = self.colorHighlight
el.selected = False
# foundElement.FireEvent("onChange")
bResult = True
return True
    def locationName(self):
        """ Gets the location name of the current page. If the resource is an HTML page on the World Wide Web, the name is the title of that page.
        If the resource is a folder or file on the network or local computer, the name is the
        full path of the folder or file in Universal Naming Convention (UNC) format.
        **NOTE** If you have "Hide extensions for known file types" enabled, then of course that is not
        returned.
        parameters:
            None
        returns:
            The name of the location
        """
        # Wait for navigation to settle so the COM property reflects the final page.
        self._wait()
        return self._ie.LocationName
    def locationURL(self):
        """ Gets the URL of the current page
        parameters:
            None
        returns:
            The URL of the page
        """
        # Wait for navigation to settle so the COM property reflects the final page.
        self._wait()
        return self._ie.LocationURL
    def navigate(self, url):
        """ Go to the specified URL.
        parameters:
            url - URL to navigate to
        returns:
            True on success, else False
        """
        try:
            # Block until any in-flight navigation completes before issuing a new one.
            self._wait()
            self._ie.Navigate(url)
            return True
        except:
            # Report the failure but keep the test run alive.
            (ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
            print(sys.exc_info())
            traceback.print_exc(ErrorTB)
            return False
    def outerHTML(self):
        """ Gets the outerHTML of the document body.
        parameters:
            None
        returns:
            a string consisting of:
            Body block as a string (from the active frame when self.frameName
            is set, otherwise from the top-level document).
        """
        self._wait()
        if self.frameName:
            return '%s' % (self._ie.Document.frames[self.frameName].document.body.outerHTML)
        else:
            return '%s' % (self._ie.Document.body.outerHTML)
    def pause(self, string="Click to Continue test"):
        """ Wait for the user to click a button to continue testing.
        parameters:
            [string] = Message to display to user
        returns:
            True when the dialog was shown and dismissed;
            None (implicitly) if the MessageBox call raised.
        """
        self._wait()
        try:
            # Modal dialog blocks until the user clicks OK.
            win32gui.MessageBox(0, string, "Pausing test...", 0)
        except:
            (ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
            print(sys.exc_info())
            traceback.print_exc(ErrorTB)
        else:
            return True
    def quit(self):
        """ Quit the IE browser and close it.
        parameters:
            None
        returns:
            True on success, else False
        """
        # Let pending navigation finish before tearing the browser down.
        self._wait()
        try:
            self._ie.Quit()
        except:
            (ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
            print(sys.exc_info())
            traceback.print_exc(ErrorTB)
            return False
        else:
            return True
def randomDigits(self, length):
""" Creates a string of random digits.
parameters:
length - The length of the number to be created
returns:
The string of random digits
"""
a = "".join([random.choice(string.digits) for _ in range(length)])
count = a.count(a)
count = 0
while count <= length:
return ''.join(a)
def randomString(self, length):
""" Creates a string of random upper and lower case characters
parameters:
length - The length of the string to be created
returns:
The string of random characters
"""
a = "".join([random.choice(string.letters) for _ in range(length)])
count = a.count(a)
count = 0
while count <= length:
return ''.join(a)
    def refresh(self):
        """ Refresh the current page in the broswer
        parameters:
            None
        returns:
            True on success, else False
        """
        # Let pending navigation finish before reloading.
        self._wait()
        try:
            self._ie.Refresh()
        except:
            (ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
            print(sys.exc_info())
            traceback.print_exc(ErrorTB)
            return False
        else:
            return True
    def resize(self, iWidth, iHeight):
        """ Resize the browser window to iWidth x iHeight pixels. """
        self._ie.resizeTo(iWidth, iHeight)
def selectListBox(self, name, value):
""" Selects an item in a list box.
parameters:
name - The name or id of the listbox
value - The value of the item to select in the list
returns:
True on success, else False
"""
self._wait()
foundElement = self.findElement("select", "name;id", name)
if foundElement == None:
if self.showDebugging: print("** selectListBox() did not find " + name + "-" + str(value))
return False
else:
for el in foundElement:
if el.text == value:
if self.colorHighlight: el.style.backgroundColor = self.colorHighlight
el.selected = True
# foundElement.FireEvent("onChange")
bResult = True
return True
    def setCheckBox(self, name, value):
        """ Sets the value of a check box.
        parameters:
            name - The id, name, or value of the checkbox.
            value - 0 for false (not checked)
                    1 for true (checked)
        returns:
            True on success, else False
        """
        # Restrict the search to checkbox inputs, then write the 'checked' attribute.
        myElements = self.getElementsList("input", "type=checkbox")
        return self.setElement("input", "id;name;value", name, "checked", value, None, myElements)
    def setElement(self, tag, att, val, setAtt, setVal, element=None, elementList=None):
        """ Sets the specified attribute of any element
        parameters:
            tag - The HTML tag name
            att - The tag attribute to search for
            val - The attribute value to match
            setAtt - The attribute to set
            setVal - The values you are setting
            [element] - Specify a specific element
            [elementList] - Find the element in the passed list
        returns:
            True on success, else False
        """
        # A directly-supplied element bypasses the lookup.
        if element:
            foundElement = element
        else:
            foundElement = self.findElement(tag, att, val, elementList)
        if foundElement == None:
            if self.showDebugging: print("** setElement() did not find " + tag + "-" + att + "-" + str(val))
            return False
        else:
            try:
                if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
                # focus() then blur() before the write — presumably to trigger
                # the page's focus/blur handlers; confirm against target pages.
                foundElement.focus()
                foundElement.blur()
                foundElement.setAttribute(setAtt, setVal)
                return True
            except:
                (ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
                print(sys.exc_info())
                traceback.print_exc(ErrorTB)
                return False
    def setRadioButton(self, name, value, checked=True):
        """ Sets a Radio Button value
        parameters:
            name - radio button group name
            value - Which item to pick by name
            [checked] - Check the button, True or False
        returns:
            True on success, else False
        """
        # TODO: Find way to get innerText
        # Only radio inputs belonging to the named group are considered.
        myElements = self.getElementsList("input", "type=radio;name=%s" % (name))
        for el in myElements[:]:
            if el.value == value:
                if self.colorHighlight: el.style.backgroundColor = self.colorHighlight
                el.checked = checked
                # Fire the click handler so page scripts react as for a real click.
                el.FireEvent("onClick")
                return True
        if self.showDebugging: print("** setRadioButton() did not find %s" % (name))
        return False
def setTextArea(self, name, value):
""" Sets the text in a textarea.
parameters:
name - The id, name or index of the text area, or a textarea element.
value - The value to set the text area to.
returns:
True on succes, else False
"""
if isinstance(name, str) or isinstance(name, int):
foundElement = self.findElement("textarea", "name;id", name)
else:
foundElement = name
if foundElement == None:
if self.showDebugging: print("** setTextArea() did not find " + name + "-" + str(value))
return False
else:
if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
foundElement.value = value
return True
def setTextBox(self, name, value):
""" Sets the text in a text box.
parameters:
name - The id, name or index of a textbox, or a textbox element.
value - The value to set the textbox to.
returns:
True on succes, else False
"""
if isinstance(name, str) or isinstance(name, int):
foundElement = self.getTextBox(name)
else:
foundElement = name
if foundElement == None:
if self.showDebugging: print("** setTextBox() did not find " + name + "-" + str(value))
return False
else:
if self.colorHighlight: foundElement.style.backgroundColor = self.colorHighlight
foundElement.value = value
return True
    def showAllTableText(self):
        """ Prints, for each table in the document, its 1-based index and the
        trimmed text of the first cell of its first row (debug helper).
        """
        self._wait()
        # Get tags names table
        table = self._ie.Document.getElementsByTagName('table')
        # loop thru all the tables
        for i in range(table.length):
            tablecnt = 0
            # Only row 0 / cell 0 of each table is sampled.
            errortxt = table[i].rows[0].cells[0].innerText
            tablecnt = i + 1
            errortxt = errortxt.strip()
            print("tableNum:%s and Text: %s" % (tablecnt, errortxt))
    def showTableText(self, tableName, rownum, cellnum):
        """ Highlights the given table cell red and prints its innerText.
        parameters:
            tableName - index/key of the table in the document's table collection
            rownum - row index within the table
            cellnum - cell index within the row
        """
        self._wait()
        table = self._ie.Document.getElementsByTagName('table')
        table[tableName].rows[rownum].cells[cellnum].style.backgroundColor = 'red'
        print(table[tableName].rows[rownum].cells[cellnum].innerText)
    def showlinkByIndex(self):
        """ Prints each document link's index and innertext (debug helper). """
        links = self._ie.Document.links.length
        for i in range(links):
            print(i, self._ie.Document.links[i].innertext)
    def startTimer(self):
        """
        Start time for this timer
        """
        # Record wall-clock start; stopTimer() computes the delta from this.
        self.timer = datetime.datetime.now()
    def stop(self):
        """
        Cancels any in process navigation
        """
        self._wait()
        self._ie.Stop()
    def stopTimer(self):
        """
        Stop timer and calc the time difference
        returns:
            A formatted string with the elapsed timedelta and whole seconds.
        """
        # Wait is very important - wait for the doc to complete
        self._wait()
        td = datetime.datetime.now() - self.timer
        # Calc in seconds, days, and microseconds
        # Change to seconds
        seconds = td.seconds + td.days * 24 * 60 * 60
        # return time
        return 'Total time:%s - The time for this script to run was aprox. %s seconds' % (td, seconds)
    def submitForm(self, name=None):
        """ Submits a form. For proper testing you should submit a form as a user
        would, such as clicking the submit button.
        parameters:
            [name] - name of form; defaults to self.formName when omitted
        returns:
            True on success, else False
        """
        try:
            if name == None: name = self.formName
            foundElement = self.findElement("form", "id;name", name)
            if foundElement:
                # Programmatic submit — bypasses the form's submit button handlers.
                foundElement.submit()
                return True
            else:
                if self.showDebugging: print("** submitForm() did not find the " + name + " form")
                return False
        except:
            (ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
            print(sys.exc_info())
            traceback.print_exc(ErrorTB)
            return False
def tableCellExists(self, tableName, cellText):
""" Checks to see if a cell in a table exists
parameters:
tableName - The id, name or index of the table, or a table element.
cellText - The cell text to search for
returns:
True if the table is found, else False
"""
if isinstance(tableName, str) or isinstance(tableName, int):
myTable = self.getTable(tableName)
else:
myTable = tableName
myCells = myTable.cells
try:
myData = ""
for myCell in myCells:
if myCell.innerText.strip() == cellText:
return True
return False
except:
(ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
print(sys.exc_info())
traceback.print_exc(ErrorTB)
return False
def tableExists(self, name):
""" Checks to see if a table exists
parameters:
name - The id or name of the table
returns:
True if the table is found, else False
"""
myElement = self.getTable(name)
if myElement:
return True
else:
return False
def tableRowExists(self, name, row):
""" Checks to see if a row in a table exists
parameters:
Name - The id, name or index of the table, or a table element.
row[] - The row to search for. Use * to ignore cell.
returns:
True if the table is found, else False
"""
if self.getTableRowIndex(name, row):
return True
else:
return False
def textAreaExists(self, name):
""" Checks to see if a textarea exists
parameters:
name - The name, id or index of the textarea
returns:
True if the textarea is found, else False
"""
myElement = self.getTextArea(name)
if myElement:
return True
else:
return False
def textBoxExists(self, name):
""" Checks to see if a textbox exists
parameters:
name - The name or id of the textbox
returns:
True if the textbox is found, else False
"""
myElement = self.getTextBox(name)
if myElement:
return True
else:
return False
def textBoxValue(self, name):
""" Sets the text in a text box.
parameters:
name - The id, name or index of a textbox, or a textbox element.
value - The value to set the textbox to.
returns:
True on succes, else False
"""
if isinstance(name, str) or isinstance(name, int):
foundElement = self.getTextBox(name)
else:
foundElement = name
if foundElement == None:
if self.showDebugging: print("** setTextBox() did not find " + name)
return False
else:
if self.colorHighlight:
foundElement.style.backgroundColor = self.colorHighlight
#foundElement.value
return foundElement.value
    def textFinder(self, text):
        """
        Find text on a page then highlites it. It also returns a tru/false
        parameters:
            text - text to search for
        returns:
            True if the text was found (also selects it and scrolls it into
            view), else False
        """
        self._wait()
        rng = self._ie.Document.body.createTextRange();
        if rng.findText(text.strip()) == True:
            rng.select()
            rng.scrollIntoView()
            return True
        else:
            return False
def writeAttrs(self):
""" WriteScript - Writes out a element attrs.
Parmeters:
frmName - form name
frameName - frame name defaults to none
"""
self._wait()
items = ["input", "select"]
for i in items:
doc = self._ie.Document.getElementsByTagName(i)
for i in range(doc.length):
x = doc[i]
etype = getattr(x, "type")
# Check for Name, ID or value
name = getattr(x, "name", None)
id = getattr(x, "id", None)
value = getattr(x, "value", None)
if etype == "select-one":
print("Type:%s, ID:%s, Value:%s" % (etype, name, value))
elif etype == "select-multiple":
print("Type:%s, ID:%s, Value:%s" % (etype, name, value))
else:
print("Type:%s, ID:%s, Value:%s" % (etype, name, value))
|
[
"ack.dav@gmail.com"
] |
ack.dav@gmail.com
|
39cef36bc0d58167ed758de24878b2696119c184
|
3f6681942d3fdc4b5944349298e434f9ddb24358
|
/binary_neuron/rnn/baseline.py
|
f17896668c22fb3e5503672a5454603292b45eff
|
[] |
no_license
|
danhorvath/BNN
|
b4460fdf70ca5817d136e31f53310ba117f667a8
|
87bdb8803de94d8fbcb9a5439612c3fcf8c184cd
|
refs/heads/master
| 2022-02-17T06:19:14.480886
| 2019-05-28T21:15:24
| 2019-05-28T21:15:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,876
|
py
|
# LSTM for international airline passengers problem with regression framing
import numpy
import matplotlib.pyplot as plt
from pandas import read_csv
import math
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
# convert an array of values into a dataset matrix:
# each sample is `look_back` consecutive values, the target is the next value
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY)
# fix random seed for reproducibility
numpy.random.seed(7)
# load the dataset (second CSV column only: monthly passenger counts)
dataframe = read_csv('../airline_passengers.csv', usecols=[1], engine='python')
dataset = dataframe.values
dataset = dataset.astype('float32')
# normalize the dataset to [0, 1] for the LSTM
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
# split into train and test sets (chronological 67/33 split)
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# reshape input to be [samples, time steps, features]
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
# create and fit the LSTM network
model = tf.keras.Sequential()
model.add(tf.keras.layers.LSTM(4, input_shape=(1, look_back)))
model.add(tf.keras.layers.Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# invert predictions back to original passenger-count scale before scoring
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))
print('Test Score: %.2f RMSE' % (testScore))
# shift train predictions for plotting (align with their time positions)
trainPredictPlot = numpy.empty_like(dataset)
trainPredictPlot[:, :] = numpy.nan
trainPredictPlot[look_back:len(trainPredict) + look_back, :] = trainPredict
# shift test predictions for plotting
testPredictPlot = numpy.empty_like(dataset)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(trainPredict) + (look_back * 2) + 1:len(dataset) - 1, :] = testPredict
# plot baseline and predictions
plt.plot(scaler.inverse_transform(dataset))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
|
[
"dhorvath19@gmail.com"
] |
dhorvath19@gmail.com
|
ff018bb0afaa9350c99b5870f055c3ee688bedf9
|
3ab86d92139335f212dcb880a4047a20cc79d924
|
/library/visual_studio_code_install_extension.py
|
2f96b4173087aa67173ea1d6605be76a5fcb122f
|
[
"MIT"
] |
permissive
|
sosheskaz/ansible-role-visual-studio-code
|
61c6f4bfbf2102b14633cc0f825e9e169ccb92ae
|
e17ecd5bdf956857a00f181e9dd773bf0cc8a54e
|
refs/heads/master
| 2020-04-04T12:51:07.918613
| 2018-10-31T13:03:29
| 2018-10-31T13:03:29
| 155,939,661
| 0
| 0
|
MIT
| 2018-11-03T01:43:35
| 2018-11-03T01:43:34
| null |
UTF-8
|
Python
| false
| false
| 2,688
|
py
|
#!/usr/bin/python
# Make coding more python3-ish
from __future__ import absolute_import, division, print_function
import os
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
def is_extension_installed(module, name):
    """Return True if the VS Code extension *name* is installed (case-insensitive)."""
    rc, out, err = module.run_command(['code', '--list-extensions', name])
    if rc != 0 or err:
        module.fail_json(
            msg='Error querying installed extensions [%s]: %s' % (name,
                                                                  out + err))
    wanted = name.lower()
    # Case-insensitive membership test over the CLI's one-per-line output.
    return any(line.lower() == wanted for line in out.splitlines())
def list_extension_dirs(module):
    """Return the sorted names of directories under ~/.vscode/extensions."""
    base = os.path.expanduser(os.path.join('~', '.vscode', 'extensions'))
    # Directory names include the extension version, so callers can compare
    # snapshots to detect upgrades.
    return sorted(
        entry for entry in os.listdir(base)
        if os.path.isdir(os.path.join(base, entry))
    )
def install_extension(module, name):
    """Install or upgrade a VS Code extension.

    parameters:
        module - the AnsibleModule driving the run
        name - extension identifier (publisher.name)
    returns:
        (changed, action) where action is 'upgrade' or 'install'.
    """
    if is_extension_installed(module, name):
        # Extension directory names contain the version number, so comparing
        # snapshots before/after tells us whether the upgrade did anything.
        before_ext_dirs = list_extension_dirs(module)
        # `code --install-extension <id> --force`: the id must follow the flag
        # directly (the original passed --force in the id slot).
        # Unfortunately `--force` suppresses errors (such as extension not found)
        rc, out, err = module.run_command(
            ['code', '--install-extension', name, '--force'])
        if rc != 0 or err:
            module.fail_json(
                msg='Error while upgrading extension [%s]: %s' % (name,
                                                                  out + err))
        after_ext_dirs = list_extension_dirs(module)
        changed = before_ext_dirs != after_ext_dirs
        return changed, 'upgrade'
    else:
        rc, out, err = module.run_command(
            ['code', '--install-extension', name])
        if rc != 0 or err:
            module.fail_json(
                msg='Error while installing extension [%s]: %s' % (name,
                                                                   out + err))
        changed = 'already installed' not in out
        return changed, 'install'
def run_module():
    """Ansible entry: read the required 'name' arg, install/upgrade the
    extension, and exit with changed-status and a human message."""
    module_args = dict(
        name=dict(type='str', required=True))
    # Check mode is not supported: installing an extension cannot be simulated.
    module = AnsibleModule(argument_spec=module_args,
                           supports_check_mode=False)
    name = module.params['name']
    changed, change = install_extension(module, name)
    if changed:
        if change == 'upgrade':
            msg = '%s was upgraded' % name
        else:
            msg = '%s is now installed' % name
    else:
        msg = '%s is already installed' % name
    module.exit_json(changed=changed, msg=msg)
def main():
    """Console entry point."""
    run_module()
if __name__ == '__main__':
    main()
|
[
"noreply@github.com"
] |
sosheskaz.noreply@github.com
|
6855bfc73dbfcaccd2958b3fcf2df31a7a79e0a6
|
a6308fa4d2a92a1ee15f7dc2c403010108f0daa3
|
/Practice-TCA/Practice-TCA-2/Task-6.py
|
ec5d5b059fd753c53775b18691f5ca008babedbe
|
[] |
no_license
|
IIvanov29/com404
|
57540d326fb532aa6a3cea06ebf6b92df9650638
|
94974add4a5688ba726d80dbcabf5b8a965c583d
|
refs/heads/master
| 2020-08-01T20:24:35.746513
| 2019-11-28T17:04:22
| 2019-11-28T17:04:22
| 211,105,007
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 932
|
py
|
#Functions
def is_league_united(Hero_1,Hero_2):
    """Return True only when Hero_1 is "superman" and Hero_2 is "wonder woman".

    Callers pass names already lowercased (see run()).
    """
    # The comparison already yields a bool; no if/else needed.
    return Hero_1 == "superman" and Hero_2 == "wonder woman"
def decide_plan(Hero_1,Hero_2):
    """Print the plan of action depending on whether the league is united."""
    # is_league_united returns a bool, so test it directly instead of the
    # original's `== True` / `== False` comparisons.
    if is_league_united(Hero_1, Hero_2):
        print("Time to save the world!")
    else:
        print("We must unite the league!")
def run():
    """Prompt for two hero names and either report or act on league unity."""
    print("What is the name of the first Hero?")
    Hero_1 = input().lower()
    print("What is the name of the second Hero?")
    Hero_2 = input().lower()
    # NOTE(review): "optiong" typo in the prompt — fixing it would change runtime output.
    print("Please select an optiong (league/plan)")
    function_option = input().lower()
    if function_option == "league":
        # The first call's result is discarded; only the printed call matters.
        is_league_united(Hero_1,Hero_2)
        print(is_league_united(Hero_1,Hero_2))
    elif function_option == "plan":
        decide_plan(Hero_1,Hero_2)
    else:
        print("Invalid command! Please try again.")
# Entry point: start the interactive prompt.
run()
|
[
"4Ivani51@solent.ac.uk"
] |
4Ivani51@solent.ac.uk
|
6a049dd3ef506c79f51ddce14afacd9ef7f1dce0
|
3c4b1eb96173414492decec1872360bf8b920f71
|
/Chapter 15/mpl_squares.py
|
c130d7338bb1184f8b7afdbad6b09b09e78b51f5
|
[] |
no_license
|
robbiecares/Python_Crash_Course
|
a7b4d97f6be919ae197c2216a883eb316acf6272
|
a481938bbbb0fba737db2b4a6b0420c0b2766d8a
|
refs/heads/master
| 2023-04-13T20:23:48.196930
| 2021-04-23T11:51:11
| 2021-04-23T11:51:11
| 360,775,932
| 0
| 0
| null | 2021-04-23T09:03:46
| 2021-04-23T05:45:18
|
HTML
|
UTF-8
|
Python
| false
| false
| 668
|
py
|
import matplotlib.pyplot as plt
# Scatter plot of x**3 for x in 1..5000, colored by magnitude (PCC ch. 15).
plt.style.available
#input_values = [1,2,3,4,5]
#squares = [1,4,9,16,25]
x_values = range(1,5001)
y_values = [x**3 for x in x_values]
plt.style.use("seaborn")
fig, ax = plt.subplots()
# Color each point by its y value using the Blues colormap.
ax.scatter(x_values, y_values, c=y_values, cmap=plt.cm.Blues, s=10)
#ax.plot(input_values, squares, linewidth = 3)
#Set chart title & label axes
ax.set_title("Cubes", fontsize = 14)
ax.set_xlabel("Value", fontsize = 14)
ax.set_ylabel("Cube of value", fontsize = 14)
#Set size of tick labels
ax.tick_params(axis="both", which = "major", labelsize = 14)
#Set the range for each axis
ax.axis([0,(x_values[-1]),0,y_values[-1]])
plt.show()
#pg. 313
|
[
"robbiecares@gmail.com"
] |
robbiecares@gmail.com
|
2ad7f8907bd282c066e9db3e2553e053f204e9a8
|
a70778e730f6d3e3be04ba449e6ed0a9ff7d7e6d
|
/classifier_5b_rough_fine_tune_from3z.py
|
4e132cf8513dd1cd901bd4a0c5a2f1a6c88b44fc
|
[] |
no_license
|
previtus/two_classes_ml
|
0351e62544cc46f9c09847de641fd84aac94d38b
|
0f780e2e3736e6280dddd25540911d60c9d721d8
|
refs/heads/master
| 2021-05-10T10:05:38.526602
| 2018-08-06T19:59:26
| 2018-08-06T19:59:26
| 118,946,345
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,192
|
py
|
# --- Experiment configuration: dataset selection, class labels, subsampling ---
img_size = None #(20,20)
img_size = (150,150)
epochs_first = 10
epochs_second = 40
batch_size = 16
validation_split = 0.3
RESCALE = 1. / 255 # put data from 0-255 into 0-1
# GET ALL DATA
# define the classes in here directly
from data_handling import LOAD_DATASET, LOAD_DATASET_VAL_LONGER_THR2, sample_random_subset_from_list, y_from_x
from data_handling import load_images_with_keras, convert_labels_to_int, convert_back_from_categorical_data, how_many_are_in_each_category
TRAIN_WITH_LONGER_THAN = 1000
TRAIN_C_balanced = 5000
SPLIT = 0.3 # 70% and 30%
FOLDER = 'chillan_saved_images_square_224_ALL_with_len'
folders = ['data/'+FOLDER+'/LP/', 'data/'+FOLDER+'/TR/', 'data/'+FOLDER+'/VT/']
VAL_ONLY_LONGER_THR2 = 1000
BalancedVal = False
StillBalance10to1to1 = True
X_TRAIN_BAL, X_VAL_FULL = LOAD_DATASET_VAL_LONGER_THR2(
    TRAIN_WITH_LONGER_THAN, TRAIN_C_balanced, SPLIT, FOLDER, folders, VAL_ONLY_LONGER_THR2,
    BalancedVal=BalancedVal,StillBalance10to1to1 = StillBalance10to1to1)
specialname = '__Finetuned'
classes_names = ['LP', 'TR', 'VT']
num_classes = len(classes_names)
labels_texts = classes_names
labels = [0, 1, 2]
DROP=0.2
SUBSET_FOR_TRAIN = 8000
SUBSET_FOR_VAL = 8000
############ Whats bellow doesn't have to be changed dramatically
# Subsample both splits to cap memory/compute, then derive labels from paths.
X_TRAIN_BAL,_ = sample_random_subset_from_list(X_TRAIN_BAL, SUBSET_FOR_TRAIN)
Y_TRAIN_BAL = y_from_x(X_TRAIN_BAL)
X_VAL,_ = sample_random_subset_from_list(X_VAL_FULL, SUBSET_FOR_VAL)
Y_VAL = y_from_x(X_VAL)
# --- Load images, one-hot encode labels, sanity-check class distributions ---
from keras.preprocessing.image import load_img, img_to_array
import numpy as np
import keras
from matplotlib import pyplot as plt
print("Loading image data!")
# X_TRAIN_BAL, Y_TRAIN_BAL
x_train = load_images_with_keras(X_TRAIN_BAL, target_size=img_size)
y_train = convert_labels_to_int(Y_TRAIN_BAL, classes_names, labels)
y_train = keras.utils.to_categorical(y_train, num_classes=num_classes)
# X_VAL, Y_VAL
x_test = load_images_with_keras(X_VAL, target_size=img_size)
y_test = convert_labels_to_int(Y_VAL, classes_names, labels)
y_test = keras.utils.to_categorical(y_test, num_classes=num_classes)
print("x_train:", x_train.shape)
print("y_train:", y_train.shape)#, y_train[0:10])
print("x_test:", x_test.shape)
print("y_test:", y_test.shape)#, y_test[0:10])
print("---")
print("SanityCheck Test dist:")
how_many_are_in_each_category(convert_back_from_categorical_data(y_test))
print("SanityCheck Train dist:")
how_many_are_in_each_category(convert_back_from_categorical_data(y_train))
print("---")
# Scale pixel values into [0, 1] (RESCALE defined in the config section).
x_train *= RESCALE
x_test *= RESCALE
# =============================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ROUGH stage: precompute frozen VGG16 features, train a small classifier head
from keras import optimizers
from keras.applications import VGG16
vgg_conv = VGG16(weights='imagenet', include_top=False, input_shape=(img_size[0], img_size[1], 3))
print("calculating high lvl features...")
# Run the (untrained-head) VGG16 once over all data; features are fixed here.
X_bottleneck_train = vgg_conv.predict(x_train)
X_bottleneck_test = vgg_conv.predict(x_test)
print("X_bottleneck_train:", X_bottleneck_train.shape)
print("y_test:", y_train.shape)#, y_train[0:10])
print("X_bottleneck_test:", X_bottleneck_test.shape)
print("y_test:", y_test.shape)#, y_test[0:10])
print("---")
print("train_data.shape[1:]", X_bottleneck_train.shape[1:])
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
# Small dense head trained on the precomputed bottleneck features.
classifier_model = Sequential()
classifier_model.add(Flatten(input_shape=X_bottleneck_train.shape[1:]))
classifier_model.add(Dense(256, activation='relu'))
classifier_model.add(Dropout(0.5))
classifier_model.add(Dense(num_classes, activation='sigmoid'))
print("FIRST ROUGH MODEL:")
classifier_model.summary()
#classifier_model.compile(loss='categorical_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4),metrics=['accuracy'])
classifier_model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# ==============================================================================
# TRAIN 1
# ==============================================================================
#
history1 = classifier_model.fit(X_bottleneck_train, y_train,
                                batch_size=batch_size,
                                epochs=epochs_first,
                                validation_data=(X_bottleneck_test, y_test),
                                verbose=1)
# Works well, gets us till cca 96% even in 10 epochs (possibly even 5)
# ==============================================================================
# ==============================================================================
# FINE stage: unfreeze the last 4 VGG layers and train end-to-end with the head
# Freeze the layers except the last 4 layers
for layer in vgg_conv.layers[:-4]:
    layer.trainable = False
# Check the trainable status of the individual layers
for layer in vgg_conv.layers:
    print(layer, layer.trainable)
from keras import models
from keras import layers
# Create the model: convolutional base + the head trained in the rough stage
fine_model = models.Sequential()
fine_model.add(vgg_conv)
fine_model.add(classifier_model)
print("SECOND FINE MODEL:")
fine_model.summary()
# Compile the model
# TRY other?
#fine_model.compile(loss='categorical_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4),metrics=['accuracy'])
# clip norm didnt help with loss: nan
#fine_model.compile(loss='categorical_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4, clipnorm=1.),metrics=['accuracy'])
#model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) # default lr lr=0.001
# TRY: low-LR SGD with momentum for fine-tuning
sgd = optimizers.SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
fine_model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])
# ==============================================================================
# TRAIN 2
# ==============================================================================
#
history2 = fine_model.fit(x_train, y_train,
                          batch_size=batch_size,
                          epochs=epochs_second,
                          validation_data=(x_test, y_test),
                          verbose=1)
# Whoops, sudden drop to loss: nan
# ==============================================================================
# REPORT: concatenate both training histories, plot, and save the model
# ==============================================================================
#
#print(history1.history)
#print(history2.history)
split_n = len(history1.history['val_loss'])
# val_loss', 'val_acc', 'loss', 'acc
# Append stage-2 curves onto stage-1 so one plot shows the full run.
history1.history['val_loss'] += history2.history['val_loss']
history1.history['val_acc'] += history2.history['val_acc']
history1.history['loss'] += history2.history['loss']
history1.history['acc'] += history2.history['acc']
from visualize_history import visualize_history
plt = visualize_history(history1.history, show_also='acc', show=False, save=False)
#visualize_history(history2.history, show_also='acc', save=False, save_path='classifier5b_'+str(epochs)+'epochs_')
# Dashed vertical line marks the rough/fine stage boundary.
plt.axvline(x=split_n-0.5, linestyle='dashed', color='black')
filename = 'classifier5b_CHILL_'+str(epochs_first)+'+'+str(epochs_second)+'epochs_'
plt.savefig(filename)
plt.show()
fine_model.save('5b_final_fine_model.h5')
|
[
"previtus@gmail.com"
] |
previtus@gmail.com
|
f040cc2c3bcc0b27174802337d61601ed34c13a6
|
38c1e589388752100c4afcbe0b445bfff033bab2
|
/friend/migrations/0003_auto_20200819_1444.py
|
0bf415da2081910e1e2d42a9465ac80b351f2e6a
|
[] |
no_license
|
ruhullahil/Codingwithmitch-Chat
|
02c83f17fd51329fb3e4c0af74f1890ffd7ac012
|
dd854e6357e98684c3fe7c87da028de1f356030b
|
refs/heads/master
| 2023-01-03T00:38:38.225127
| 2020-10-29T21:09:37
| 2020-10-29T21:09:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
# Generated by Django 2.2.15 on 2020-08-19 21:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Re-declare FriendList.user as a OneToOneField to the user model."""

    # Must run after the previous friend-app migration so the field
    # alteration is applied on top of the schema it produced.
    dependencies = [
        ('friend', '0002_auto_20200819_1443'),
    ]

    operations = [
        # One-to-one link: each user has at most one friend list, and
        # deleting the user cascades to the list (related_name='user').
        migrations.AlterField(
            model_name='friendlist',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"mitch@tabian.ca"
] |
mitch@tabian.ca
|
ae677af6e8fa264d763b3a65dddbae88139e63a9
|
fbe8a9f98d9256d47e51589c6d8fad7da07bbe28
|
/VeterinarioSW/ControlDeVeterinaria/forms.py
|
f83a79aad7e5f8b9bf8a56665a3deaeb6cb6fbc1
|
[] |
no_license
|
BryanBonillaFraire/VeterinariaGPS
|
40d77ecf745f6eef3db56391d95e35a5e144d2bc
|
93f245d6a3a5119a4b0902be4ae3cfc3dbfc11b5
|
refs/heads/master
| 2023-02-13T01:00:19.487220
| 2021-01-17T21:51:14
| 2021-01-17T21:51:14
| 304,478,821
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,097
|
py
|
from django import forms
from .models import Mascota, Propietario, PagoServicio, Cita, Producto, PagoProducto, Proveedor, Comprador, Factura, Cirugias, Vacunas, Otros, Enfermedades, Account
class MascotaForm(forms.ModelForm):
    """ModelForm for Mascota exposing the basic pet fields.

    Every widget is styled with Bootstrap's ``form-control`` class.
    """

    class Meta:
        model = Mascota
        fields = ['nombreMascota', 'especie', 'raza', 'edad', 'propietario']

    def __init__(self, *args, **kwargs):
        super(MascotaForm, self).__init__(*args, **kwargs)
        # Identical styling for every declared field, so apply it in one pass.
        for bound_field in self.fields.values():
            bound_field.widget.attrs['class'] = 'form-control'
class PropietarioForm(forms.ModelForm):
class Meta:
model = Propietario
fields = ['nombrePropietario', 'telefonoMovil', 'telefonoCasa', 'correoElectronico']
def __init__(self, *args, **kwargs):
super(PropietarioForm, self).__init__(*args, **kwargs)
self.fields['nombrePropietario'].widget.attrs['class'] = 'form-control'
self.fields['telefonoMovil'].widget.attrs['class'] = 'form-control'
self.fields['telefonoCasa'].widget.attrs['class'] = 'form-control'
self.fields['correoElectronico'].widget.attrs['class'] = 'form-control'
class PagoServicioForm(forms.ModelForm):
class Meta:
model = PagoServicio
fields = ['propietario', 'servicios', 'costoUnitario', 'montoTotal','montoPagado','formaPago']
def __init__(self, *args, **kwargs):
super(PagoServicioForm, self).__init__(*args, **kwargs)
self.fields['propietario'].widget.attrs['class'] = 'form-control'
self.fields['servicios'].widget.attrs['class'] = 'form-control'
self.fields['costoUnitario'].widget.attrs['class'] = 'form-control'
self.fields['montoTotal'].widget.attrs['class'] = 'form-control'
self.fields['formaPago'].widget.attrs['class'] = 'form-control'
self.fields['montoPagado'].widget.attrs['class'] = 'form-control'
class CitaForm(forms.ModelForm):
    """ModelForm for Cita (appointment): pet, reason, and date."""

    class Meta:
        model = Cita
        fields = ['mascota', 'motivo', 'fecha']

    def __init__(self, *args, **kwargs):
        super(CitaForm, self).__init__(*args, **kwargs)
        # 'fecha' additionally shows an example of the expected input format.
        self.fields['fecha'].widget.attrs['placeholder'] = 'EJ. 2020-11-02 7:00:00'
        for bound_field in self.fields.values():
            bound_field.widget.attrs['class'] = 'form-control'
class ProductoForm(forms.ModelForm):
class Meta:
model = Producto
fields = ['nombre', 'tipo', 'marca', 'especificaciones','costoUnitario']
def __init__(self, *args, **kwargs):
super(ProductoForm, self).__init__(*args, **kwargs)
self.fields['nombre'].widget.attrs['class'] = 'form-control'
self.fields['tipo'].widget.attrs['class'] = 'form-control'
self.fields['marca'].widget.attrs['class'] = 'form-control'
self.fields['especificaciones'].widget.attrs['class'] = 'form-control'
self.fields['costoUnitario'].widget.attrs['class'] = 'form-control'
class CirugiasForm(forms.ModelForm):
class Meta:
model = Cirugias
fields = ['mascota', 'cirugia']
def __init__(self, *args, **kwargs):
super(CirugiasForm, self).__init__(*args, **kwargs)
self.fields['mascota'].widget.attrs['class'] = 'form-control'
self.fields['cirugia'].widget.attrs['class'] = 'form-control'
class VacunasForm(forms.ModelForm):
class Meta:
model = Vacunas
fields = ['mascota', 'vacuna']
def __init__(self, *args, **kwargs):
super(VacunasForm, self).__init__(*args, **kwargs)
self.fields['mascota'].widget.attrs['class'] = 'form-control'
self.fields['vacuna'].widget.attrs['class'] = 'form-control'
class EnfermedadesForm(forms.ModelForm):
class Meta:
model = Enfermedades
fields = ['mascota', 'enfermedad']
def __init__(self, *args, **kwargs):
super(EnfermedadesForm, self).__init__(*args, **kwargs)
self.fields['mascota'].widget.attrs['class'] = 'form-control'
self.fields['enfermedad'].widget.attrs['class'] = 'form-control'
class OtrosForm(forms.ModelForm):
class Meta:
model = Otros
fields = ['mascota', 'otro']
def __init__(self, *args, **kwargs):
super(OtrosForm, self).__init__(*args, **kwargs)
self.fields['mascota'].widget.attrs['class'] = 'form-control'
self.fields['otro'].widget.attrs['class'] = 'form-control'
class PagoProductoForm(forms.ModelForm):
class Meta:
model = PagoProducto
fields = ['producto', 'montoTotal', 'formaPago']
def __init__(self, *args, **kwargs):
super(PagoProductoForm, self).__init__(*args, **kwargs)
self.fields['producto'].widget.attrs['class'] = 'form-control'
self.fields['montoTotal'].widget.attrs['class'] = 'form-control'
self.fields['formaPago'].widget.attrs['class'] = 'form-control'
class ProveedorForm(forms.ModelForm):
class Meta:
model = Proveedor
fields = ['nombre', 'rfc', 'domicilio','cp','poblacion','telefono','correo']
def __init__(self, *args, **kwargs):
super(ProveedorForm, self).__init__(*args, **kwargs)
self.fields['nombre'].widget.attrs['class'] = 'form-control'
self.fields['rfc'].widget.attrs['class'] = 'form-control'
self.fields['domicilio'].widget.attrs['class'] = 'form-control'
self.fields['cp'].widget.attrs['class'] = 'form-control'
self.fields['poblacion'].widget.attrs['class'] = 'form-control'
self.fields['telefono'].widget.attrs['class'] = 'form-control'
self.fields['correo'].widget.attrs['class'] = 'form-control'
class CompradorForm(forms.ModelForm):
class Meta:
model = Comprador
fields = ['nombre', 'rfc', 'domicilio','cp','poblacion','telefono','correo']
def __init__(self, *args, **kwargs):
super(CompradorForm, self).__init__(*args, **kwargs)
self.fields['nombre'].widget.attrs['class'] = 'form-control'
self.fields['rfc'].widget.attrs['class'] = 'form-control'
self.fields['domicilio'].widget.attrs['class'] = 'form-control'
self.fields['cp'].widget.attrs['class'] = 'form-control'
self.fields['poblacion'].widget.attrs['class'] = 'form-control'
self.fields['telefono'].widget.attrs['class'] = 'form-control'
self.fields['correo'].widget.attrs['class'] = 'form-control'
class FacturaForm(forms.ModelForm):
class Meta:
model = Factura
fields = ['proveedor', 'comprador', 'numeroSerie','fechaExpedicion','concepto','costoUnitario','montoTotal','tipoImposito']
def __init__(self, *args, **kwargs):
super(FacturaForm, self).__init__(*args, **kwargs)
self.fields['proveedor'].widget.attrs['class'] = 'form-control'
self.fields['comprador'].widget.attrs['class'] = 'form-control'
self.fields['numeroSerie'].widget.attrs['class'] = 'form-control'
self.fields['fechaExpedicion'].widget.attrs['placeholder'] = 'EJ. 2020-11-02'
self.fields['fechaExpedicion'].widget.attrs['class'] = 'form-control'
self.fields['concepto'].widget.attrs['class'] = 'form-control'
self.fields['costoUnitario'].widget.attrs['class'] = 'form-control'
self.fields['montoTotal'].widget.attrs['class'] = 'form-control'
self.fields['tipoImposito'].widget.attrs['class'] = 'form-control'
class AccountForm(forms.ModelForm):
class Meta:
model = Account
fields = ['username', 'password', 'rol']
def __init__(self, *args, **kwargs):
super(AccountForm, self).__init__(*args, **kwargs)
self.fields['username'].widget.attrs['class'] = 'form-control'
self.fields['password'].widget.attrs['class'] = 'form-control'
self.fields['rol'].widget.attrs['class'] = 'form-control'
|
[
"EvilBBF@gmail.com"
] |
EvilBBF@gmail.com
|
7c3de6ac23a5796d7675e6ed3bf8151de5a1c8c6
|
a6b6294dd573e7a8429f6e1817a0598c7b315c5e
|
/examples/finance_vix.py
|
d0a1e8139e68366c05b1e389003532561c2be261
|
[
"MIT"
] |
permissive
|
openknowledge-archive/datapackage-bigquery-py
|
4bef9c960c4efc9131d4673fab1f999f5ae09221
|
f1d822a1846eac4cfcdfd0f9e94bc27d2458f00b
|
refs/heads/master
| 2021-05-31T09:52:09.884572
| 2016-01-30T16:23:02
| 2016-01-30T16:23:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
from pprint import pprint
sys.path.insert(0, '.')
from examples.base import run
# Fixtures
dataset = 'datapackage'
prefix = 'finance_vix_%s_%s_' % (sys.version_info.major, sys.version_info.minor)
source = 'examples/packages/finance-vix/datapackage.json'
target = 'tmp/packages/finance-vix/datapackage.json'
# Execution
if __name__ == '__main__':
run(dataset, prefix, source, target)
|
[
"roll@post.agency"
] |
roll@post.agency
|
7b2f3ffb266a6b73b251aa0bed91d044d1201bd4
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03162/s990109089.py
|
40f2258c0867493398fd6c13585706e99574813b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
# AtCoder DP contest, problem C ("Vacation"): pick one of three activities per
# day, never repeating yesterday's choice, maximising total happiness.
days = int(input())
gains = [list(map(int, input().split())) for _ in range(days)]

# Rolling DP row: best[j] = max happiness so far if activity j was done today.
best = list(gains[0])
for row in gains[1:]:
    best = [row[j] + max(best[(j + 1) % 3], best[(j + 2) % 3]) for j in range(3)]

print(max(best))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
98e2fd9aca40497bd38d0b356abb49cce8cce88d
|
6be81c38e28732321d4e321c684de833b7b8693c
|
/day_06/line_parser.py
|
06bc6357ab478ee6fd836c1e6912bf4c5cf89572
|
[] |
no_license
|
matyasfodor/advent-of-code-solutions
|
9d8e278afb5b49403fdefba7df06985fdebee213
|
74d942e3e730e4b8363fe082b1026a22169f9a33
|
refs/heads/master
| 2021-01-10T07:25:58.554794
| 2015-12-31T12:33:12
| 2015-12-31T12:33:12
| 47,652,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
import numpy as np

# Instruction codes (part 1 names alias part 2 names; both map to the same op).
INCREASE = TURN_ON = 0
DECREASE = TURN_OFF = 1
INCREASE_BY_TWO = TOGGLE = 2

# Keyword found at the head of an instruction line -> instruction code.
line_part_to_instruction = {
    'on': TURN_ON,
    'off': TURN_OFF,
    'toggle': TOGGLE,
}


class LineParser:
    """Parsed form of one puzzle instruction: an opcode plus a numpy selector."""

    def __init__(self, instruction, selector):
        self.instruction = instruction
        self.selector = selector

    def get_instruction(self):
        return self.instruction

    def get_selector(self):
        return self.selector

    @staticmethod
    def parse_line(line):
        """Parse e.g. ``'turn on 0,0 through 2,2'`` into a LineParser.

        The selector is an ``np.ix_`` open-mesh covering the inclusive
        rectangle between the two coordinate pairs.
        """
        words = line.split(' ')
        # 'turn on' / 'turn off' carry a leading 'turn'; drop it so the
        # keyword ('on'/'off'/'toggle') is always words[0].
        if words[0] == 'turn':
            words = words[1:]
        opcode = line_part_to_instruction[words[0]]
        x0, y0 = (int(part) for part in words[1].split(','))
        # +1 converts the inclusive end coordinate into an exclusive bound.
        x1, y1 = (int(part) + 1 for part in words[3].split(','))
        selector = np.ix_(range(x0, x1), range(y0, y1))
        return LineParser(opcode, selector)
|
[
"matyas.fodor@prezi.com"
] |
matyas.fodor@prezi.com
|
838dcc0ff6a1660d838fe309bd0fbea2ee55a934
|
96facac081446dd7e903523d2514c3bcdebf51c0
|
/src/internal/migrations/0025_secret_extra_view_permissions_and_historicalsecret_extra_view_permissions.py
|
2133b5b4b38428e5f87868afd911b9ebaee6928f
|
[
"MIT"
] |
permissive
|
MAKENTNU/web
|
f49d90cf3e0209edecd9c638bbcd5b01f7c4273a
|
a90ac79f5756721c9a3864658a87fa62633dbc6c
|
refs/heads/main
| 2023-08-09T05:35:21.048504
| 2023-05-10T17:19:40
| 2023-05-10T17:19:40
| 110,425,123
| 12
| 7
|
MIT
| 2023-09-11T16:23:40
| 2017-11-12T10:59:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,741
|
py
|
# Generated by Django 4.1.7 on 2023-03-24 10:16
from django.db import migrations, models
import django.db.models.deletion
import simple_history.models
class Migration(migrations.Migration):
dependencies = [
("auth", "0012_alter_user_first_name_max_length"),
("internal", "0024_historicalmember_and_historicalmember_committees"),
]
operations = [
migrations.AddField(
model_name="secret",
name="extra_view_permissions",
field=models.ManyToManyField(
blank=True,
help_text="Extra permissions that are required for viewing the secret. If a user does not have all the chosen permissions, it will be hidden to them.",
related_name="secrets_with_extra_view_perm",
to="auth.permission",
verbose_name="extra view permissions",
),
),
migrations.CreateModel(
name="HistoricalSecret_extra_view_permissions",
fields=[
(
"id",
models.BigIntegerField(
auto_created=True, blank=True, db_index=True, verbose_name="ID"
),
),
("m2m_history_id", models.AutoField(primary_key=True, serialize=False)),
(
"history",
models.ForeignKey(
db_constraint=False,
on_delete=django.db.models.deletion.DO_NOTHING,
to="internal.historicalsecret",
),
),
(
"permission",
models.ForeignKey(
blank=True,
db_constraint=False,
db_tablespace="",
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="+",
to="auth.permission",
),
),
(
"secret",
models.ForeignKey(
blank=True,
db_constraint=False,
db_tablespace="",
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="+",
to="internal.secret",
),
),
],
options={
"verbose_name": "HistoricalSecret_extra_view_permissions",
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
]
|
[
"6058745+ddabble@users.noreply.github.com"
] |
6058745+ddabble@users.noreply.github.com
|
7d1fe2d018dd144e5f2638caf6996debb07587d3
|
f93e5da9f78d3b8bcdd96d2314dd09c8cbaf2336
|
/FastAPI/main/sql_app/main.py
|
66eb5aa4b1bd7948d8ebe29d956e76b6a981ad75
|
[] |
no_license
|
Bahat159/bahat159.github.io
|
f4f7e12031c86c39e358b0ad9f064facedfd1a88
|
ff2f0d037d8f469b69dcd6b9ec3135eab9dde87e
|
refs/heads/master
| 2023-03-15T19:00:40.125849
| 2023-03-11T20:59:09
| 2023-03-11T20:59:09
| 253,337,084
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,954
|
py
|
from typing import List
import crud, models, schemas
from sqlalchemy.orm import Session
from fastapi import Depends, FastAPI, HTTPException,Request, Response
from database import SessionLocal, engine
models.Base.metadata.create_all(bind=engine)
app = FastAPI()
@app.middleware("http")
async def db_session_middleware(request: Request, call_next):
    # Attach a fresh SQLAlchemy session to request.state for the lifetime of
    # one request, closing it afterwards even if the handler raises.
    # The prebuilt 500 response is only returned when call_next itself raises.
    response = Response("Internal server error", status_code=500)
    try:
        request.state.db = SessionLocal()
        response = await call_next(request)
    finally:
        request.state.db.close()
    return response
# Dependency
def get_db():
    # FastAPI dependency: yield a per-request SQLAlchemy session and
    # guarantee it is closed after the endpoint finishes (or raises).
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
@app.post("/users/", response_model=schemas.User)
def create_user(user: schemas.UserCreate, db: Session = Depends(get_db)):
db_user = crud.get_user_by_email(db, email=user.email)
if db_user:
raise HTTPException(status_code=400, detail="Email already registered")
return crud.create_user(db=db, user=user)
@app.get("/users/", response_model=List[schemas.User])
def read_users(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
users = crud.get_users(db, skip=skip, limit=limit)
return users
@app.get("/users/{user_id}", response_model=schemas.User)
def read_user(user_id: int, db: Session = Depends(get_db)):
db_user = crud.get_user(db, user_id=user_id)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return db_user
@app.post("/users/{user_id}/items/", response_model=schemas.Item)
def create_item_for_user(
user_id: int, item: schemas.ItemCreate, db: Session = Depends(get_db)
):
return crud.create_user_item(db=db, item=item, user_id=user_id)
@app.get("/items/", response_model=List[schemas.Item])
def read_items(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
items = crud.get_items(db, skip=skip, limit=limit)
return items
|
[
"noreply@github.com"
] |
Bahat159.noreply@github.com
|
84b6bcb36e6da3ca1f96cafb6d5fa93b4bfb895f
|
7a69dc9f5a2a1ce43d8f5fe6e9a33e4b6c44f154
|
/pdfsplit.py
|
95e00c1bc995907adcbd99ba2f223465a9a57d86
|
[] |
no_license
|
whxb69/pdf_tools
|
26e21cedb8174aed94a707d54fbc43b2839d4df3
|
f7a2b7dc7a0e87cb23d57604e8049158b1e4aeda
|
refs/heads/master
| 2020-07-06T01:43:17.093634
| 2019-11-23T09:31:37
| 2019-11-23T09:31:37
| 202,849,841
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,253
|
py
|
import PyPDF2
import os
import time
import traceback
start = time.clock()
f = open('outline.ml', 'w', encoding='utf-8')
nameSplit = 'ProblemSolvingAndProgramDesignInC new.pdf'
def getlaveNum(strTmp):
    """Return the 1-based outline depth of an entry: position of the first
    non-tab character plus one (no leading tabs -> depth 1)."""
    for pos, ch in enumerate(strTmp):
        if ch != '\t':
            return pos + 1
def setsub(indexs, level):
    # Recursively walk outline entries and collect the children one level
    # deeper than `level`.  Each entry is a string '<tabs><title>@<page>';
    # the number of leading tabs (see getlaveNum) encodes the nesting depth.
    # Returns a list of [title, page, children] triples (children may be []).
    sub = []
    for i, index in enumerate(indexs):
        clevel = getlaveNum(index)
        if clevel == level + 1:  # exactly one level deeper: direct child, recurse for its subtree
            value, page = index.split('@')
            sub.append([value, page, setsub(indexs[i + 1:], level + 1)])
        elif clevel > level + 1:  # deeper than a direct child: belongs to a child's subtree, skip
            continue
        else:  # same level or shallower: no more children at this level
            break
    return sub
def settree(indexs):
    '''
    :param indexs: list -> every chapter title/page as '<tabs><title>@<page>'
    :return: list -> [title, page, subtree] triples; subtree has the same
             [title, page, subtree] shape and is [] for leaf entries
    '''
    res = []
    for i, index in enumerate(indexs):
        level = getlaveNum(index)
        value, page = index.split('@')
        if level == 1:
            # Top-level chapter: gather its subtree from the following entries.
            subtree = setsub(indexs[i + 1:], level)
            res.append([value, page, subtree])
    return res
def addtag(pdf, filei=None, offset=0):
    """Add an outline (bookmarks) to *pdf* from a matching '.ml' outline file.

    pdf    -- path of the target PDF.
    filei  -- optional outline-file path; defaults to '<pdf basename>.ml'.
    offset -- page offset between outline page numbers and PDF page numbers.

    Returns an (error) message string on failure; on success writes the
    bookmarked PDF under 'result\\<nameResult>\\' (relies on the
    module-level global `nameResult`).
    """
    # print('*起始页为书籍目录第一页在pdf中对应的页码')
    if os.path.splitext(pdf)[1] == '.pdf':
        # Check the current pdf: does it already have an outline, and does a
        # matching outline (.ml) file exist?
        filep = pdf
        title = os.path.splitext(pdf)[0]
        print('当前pdf为 ' + title)
        pdfobj = open(filep, 'rb')
        reader = PyPDF2.PdfFileReader(pdfobj)
        outline = reader.outlines
        if outline:
            return '当前书籍已有目录!'
        # print(outline)
        # Locate and read the outline file.
        if not filei:
            filei = title + '.ml'
            if not os.path.exists(filei):
                return '未找到匹配的目录文件'
        fi = open(filei, 'r', encoding='utf-8')
        indexs = fi.readlines()
        # Build the outline tree.
        tree = settree(indexs)
        # Page offset (previously read interactively):
        # offset = input('请输入起始页:')
        writer = PyPDF2.PdfFileWriter()
        for i in range(0, reader.numPages):
            writer.addPage(reader.getPage(i))
        def addmarks(tree, parent):
            # Recursively add bookmarks; outline pages are 1-based while
            # addBookmark expects 0-based page indexes, hence the -1.
            for value, page, sub in tree:
                cur = writer.addBookmark(value, int(page) + int(offset) - 1, parent)
                if sub != []:
                    addmarks(sub, cur)
        # Add the outline; use a flag so nothing is saved on failure.
        save = 0
        try:
            addmarks(tree, None)
            save = 1
        except:
            # NOTE(review): bare except hides the real error type, and
            # traceback.print_exc() returns None, so this prints 'None'.
            print(traceback.print_exc())
            save = 0
            return (title + ' 失败')
        if save == 1:
            if '.pdf' in title:
                title = title.split('.pdf')[0]
            if 'result\\' in title:
                title = title.replace('result\\', '')
            try:
                # NOTE(review): this removes 'result\<title>_ml.pdf' but the
                # write below targets 'result\<nameResult>\<title>.pdf' --
                # presumably leftover from an earlier naming scheme; confirm.
                if os.path.exists('result\\' + title + '_ml.pdf'):
                    os.remove('result\\' + title + '_ml.pdf')
                # Write the final result.
                with open('result\\' + nameResult + '\\' + title + '.pdf', 'wb') as fout:
                    writer.write(fout)
                print(title + ' 完成')
            except:
                print('请检查文件是否未关闭并重试')
        time.sleep(1)
        pdfobj.close()
def main(pdfdir):
# pdfdir = '(Lecture Notes in Computer Science 3418 _ Theoretical Computer Science and General Issues) Ulrik Brandes, Thomas Erlebach (auth.), Ulrik Brandes, Thomas Erlebach (eds.) - Network Analysis_ Methodologi.pdf'
# file = '怎样解题 new.pdf'
pdfobj = open(pdfdir, 'rb')
reader = PyPDF2.PdfFileReader(pdfobj)
lines = reader.outlines
get_data(reader, lines)
pdfsplit(reader)
def pdfsplit(reader):
# TODO:2.重建pdf文件3.重建同时按目录加书签
idx = outline_split()
for chp in idx:
# 重置目录 将全局页码换为章节页码
oldir = r'outline\\%s.ml' % chp
fo = open(oldir, 'r', encoding='utf-8')
lines = fo.readlines()
fo.close()
title, page = lines[0].split('@')
offset = int(page) - 1
fo = open(oldir, 'w', encoding='utf-8')
for line in lines:
line = newline(offset, line)
print(line)
fo.write(line + '\n')
fo.close()
# 重写pdf
start = int(idx[chp].split('@')[1])
if len(idx[chp].split('@')) < 3:
end = reader.getNumPages() + 1
else:
end = int(idx[chp].split('@')[2]) + 1
pdf_writer = PyPDF2.PdfFileWriter()
for index in range(start, end):
pdf_writer.addPage(reader.getPage(index - 1))
for x in ['<','>' ,'/' ,'\\' ,'|', ':' ,'"' ,'*' ,'?']:
if x in title:
title = title.replace(x,' ')
with open('result\\' + title + '.pdf', 'wb') as outfile:
pdf_writer.write(outfile)
# 添加新目录
pdf = 'result\\' + title + '.pdf' # 目标pdf
ml = 'outline\\' + str(chp) + '.ml' # 目录文件
addtag(pdf, ml)
# 删除中间文件
os.remove('result\\' + title + '.pdf')
def outline_split():
f.close()
fm = open('outline.ml', 'r', encoding='utf-8')
lines = fm.readlines()
idx = {}
temp = []
num = 0
for index, line in enumerate(lines):
if index != (len(lines) - 1):
if '\t' not in lines[index + 1]:
temp.append(line)
title, start = temp[0].split('@')
temp = ''.join(temp)
num += 1
idx[num] = title + '@' + start # 用数字存储标题 有的标题无法存为文件名
fc = open('outline\\' + str(num) + '.ml', 'w', encoding='utf-8')
fc.write(temp)
fc.close()
temp = []
else:
temp.append(line)
else:
temp.append(line)
title, start = temp[0].split('@')
temp = ''.join(temp)
num += 1
idx[num] = title + '@' + start
fc = open('outline\\' + str(num) + '.ml', 'w', encoding='utf-8')
fc.write(temp)
fc.close()
for index, item in enumerate(idx):
if index != len(idx) - 1:
end = idx[index + 2].split('@')[1]
end = int(end) - 1
idx[index + 1] = idx[index + 1].strip() + '@' + str(end)
return idx
def newline(offset, line):
    """Rebase one outline entry: subtract *offset* from its page number."""
    title, page = line.split('@')
    rebased = int(page) - offset
    return '{}@{}'.format(title, rebased)
def get_data(reader, datas, deep=0):
    # Walk PyPDF2's nested outline structure and dump each entry to the
    # module-level file `f` as '<tabs><title>@<page>'.
    # `datas` alternates plain Destination items with nested lists holding
    # the children of the item that precedes them.
    for index, item in enumerate(datas):
        if not isinstance(item, list):  # not a list: an entry with no sub-outline
            if deep > 0 and index > 0 and isinstance(datas[index - 1], list):
                deep -= 1  # just returned from a sub-outline: back up one level
            title = item.title.strip()
            page = reader.getDestinationPageNumber(item) + 1
            tab = '\t' * deep
            # print('%s%s@%d' % (tab, title, page))
            f.write('%s%s@%d\n' % (tab, title, page))
        else:  # a list: the sub-outline of the previous entry
            deep += 1
            get_data(reader, item, deep)
def getResult(strNameSplit):
    """Derive the output-folder name: the text before the first space, or
    before the first dot when the name contains no spaces."""
    separator = ' ' if ' ' in strNameSplit else '.'
    return strNameSplit.split(separator)[0]
if __name__ == '__main__':
nameResult = getResult(nameSplit)
if not os.path.isdir('result'):
os.makedirs('result')
if not os.path.isdir('result\\' + nameResult):
os.makedirs('result\\' + nameResult)
if not os.path.isdir('outline'):
os.makedirs('outline')
main(nameSplit)
elapsed = (time.clock() - start)
print("Time used:", elapsed)
|
[
"noreply@github.com"
] |
whxb69.noreply@github.com
|
e938435982e4bca35a3bbaf1e7c4c35be18545a9
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/182/usersdata/265/105453/submittedfiles/diagonaldominante.py
|
2a3603de9bfd26bb77e24473ed3f5a3d2574df4e
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
def soma(A):
    """Return a list with the sum of each row of the 2-D numpy array ``A``.

    Fixes two defects in the original: ``range (0,A,shape[1],1)`` — a comma
    typo for ``A.shape[1]`` that made ``range`` receive four arguments and
    reference the undefined name ``shape`` — and the inner accumulation
    reading the module-level global ``a`` instead of the parameter ``A``.
    """
    somalinhas = []
    for i in range(0, A.shape[0], 1):
        cont = 0
        for j in range(0, A.shape[1], 1):
            cont = cont + A[i, j]  # was: a[i, j] (read the global, not the arg)
        somalinhas.append(cont)
    return somalinhas
# Read a square matrix interactively, then print it and its row sums.
linhas = int(input('digite a quantidade de linhas: '))
a = np.zeros((linhas, linhas))
for i in range(0, a.shape[0], 1):
    for j in range(0, a.shape[1], 1):
        a[i, j] = float(input('digite os valores da matriz: '))
print(a)
# Fix: the original called the undefined name `diagonal(linhas)`, which
# raised NameError at runtime; `soma` is the only helper defined in this
# file, so print the row sums it computes for the matrix just read.
print(soma(a))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
f2aa22a6ef30dda30a0016764697e8e17c545936
|
c1451d5bd0b9b559b5742a9c7afd671c030606ec
|
/doc/conf.py
|
88b67367c040e2716cdcc9160f9ec15cae6ce00a
|
[] |
no_license
|
frasanz/Furnivall
|
f9e94a026a28277216bb842acb16727bf9616937
|
b537d8fcfa7741e65cfc8046ae8b62005180cafa
|
refs/heads/master
| 2021-01-17T21:52:43.936793
| 2012-09-26T15:42:40
| 2012-09-26T15:42:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,049
|
py
|
# -*- coding: utf-8 -*-
#
# Furnivall documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 21 15:23:01 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.inheritance_diagram' ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['ytemplates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Furnivall'
copyright = u'2011, Ibercivis'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['ystatic']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Furnivalldoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Furnivall.tex', u'Furnivall Documentation',
u'Ibercivis', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'furnivall', u'Furnivall Documentation',
[u'Ibercivis'], 1)
]
|
[
"xayon@xayon.net"
] |
xayon@xayon.net
|
6d15ee1b51af0bfb10e9ece54993a82cb555e0dd
|
bf7e43ddc1f50773bb8558c29363c26bd3344727
|
/BlokusAgentSimple.py
|
dc3518fa64192e47b6ddc7e1d72c930f911d5f8e
|
[] |
no_license
|
bananushka/ai2
|
a1c715b04aef1cd389b7599e875bc44af31c4ccd
|
cae32dc2048fdca2c3e26c7efe398d0503e5f349
|
refs/heads/master
| 2021-03-12T23:02:47.563876
| 2014-01-04T22:58:35
| 2014-01-04T22:58:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
from BlokusGameAgentExample import BlokusGameAgentExample


class BlokusAgentSimple(BlokusGameAgentExample):
    """Game agent that searches to a fixed depth (default 2)."""

    def __init__(self, depth=2):
        # fixedDepth: search depth — presumably consumed by the inherited
        # decision logic in BlokusGameAgentExample; confirm in the base class.
        # NOTE(review): the base-class __init__ is not called; verify the
        # base class tolerates construction without it.
        self.fixedDepth = depth
|
[
"bananushka@gmail.com"
] |
bananushka@gmail.com
|
60adcee8de6d52b0608dd96426bd377901eddd1d
|
2d48377ac0127fb8301dc1754e4fede8e4b5c6ef
|
/showtime.py
|
a71dcb6bc592624bbd843f65dc3407e02c195487
|
[] |
no_license
|
konman1989/cinema
|
eadb41a367ede255c48d6e12d2f4d7932e0595d3
|
27cb1d2ed8a24f1636a92d47f4e90c69bbc443e4
|
refs/heads/master
| 2023-05-25T19:10:12.450532
| 2020-01-14T10:45:54
| 2020-01-14T10:45:54
| 231,745,371
| 0
| 0
| null | 2023-05-22T22:37:07
| 2020-01-04T10:31:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,316
|
py
|
import re
import requests
from bs4 import BeautifulSoup
def fetch_cinema_and_session_id(data: dict) -> list:
"""Getting CinemaID, sessionID from the link provided"""
request = requests.get(data.get('link')).content
soup = BeautifulSoup(request, 'html.parser')
# fetching cinema_id using cinema name
result = soup.find('div', {'data-name': data.get('cinema')})
cinema_id = re.search(r'\d+', str(result)).group()
# fetching data id using date provided
data_selector = ""
for _ in soup.find_all('li'):
if data.get('date') in str(_):
data_selector = _
result = re.search(r'''data-selector="[0-9]*"''', str(data_selector))
data_anchor = re.search(r'\d+', result.group()).group()
# fetching session id and data id
session_data = ''
for _ in soup.find_all('div', {'data-anchor': data_anchor}):
if data.get('time') in str(_) and cinema_id in str(_):
session_data = _
result = re.search(r'\d+-\d+', str(session_data))
session_data = result.group().split('-')
link = f"https://gate.multiplex.ua/site/seats.html?CinemaId=" \
f"{session_data[0]}&SessionId={session_data[1]}&anchor=" \
f"{data_anchor}&back_url={data.get('link')}"
session_data.append(link)
return session_data
|
[
"manoilo.blog@gmail.com"
] |
manoilo.blog@gmail.com
|
8871896d5379ec5750e6fb6433622c846811c30b
|
b8fed8222b41e447cd5ce83513eb4d014c01742b
|
/ad_report_salesadmin/po/po_form.py
|
ae2a831ae88665d254b25eafbddb16d0e61cf761
|
[] |
no_license
|
lajayuhniyarsyah/ERP-Supra
|
e993d8face6e022b6f863d1dff7cb51cda36be8d
|
5a64dbb57ee40070354926700091fb9025c1350c
|
refs/heads/master
| 2021-01-25T22:09:46.306990
| 2017-11-08T05:32:04
| 2017-11-08T05:32:04
| 23,605,825
| 0
| 10
| null | 2017-11-08T05:32:05
| 2014-09-03T03:58:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,035
|
py
|
import time
from report import report_sxw
from osv import osv,fields
from report.render import render
#from ad_num2word_id import num2word
import pooler
#from report_tools import pdf_fill,pdf_merge
from tools.translate import _
import tools
from tools.translate import _
import decimal_precision as dp
#from ad_amount2text_idr import amount_to_text_id
from tools import amount_to_text_en
class po_form(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(invoice_form, self).__init__(cr, uid, name, context=context)
#if self.pool.get('sale.order').browse(cr, uid, context['active_ids'])[0].state <> 'approved':
# raise osv.except_osv(_('Can not Print PO Form !'), _('You can not Print PO Form If State not Approved'))
#
# self.line_no = 0
self.localcontext.update({
'get_object':self._get_object,
# 'time': time,
# 'convert':self.convert,
# 'get_company_address': self._get_company_address,
# #'angka':self.angka,
## 'alamat': self.alamat_npwp,
# 'convert':self.convert,
# 'charge':self.charge,
## 'nourut': self.no_urut,
## 'get_ppn': self.get_ppn,
# 'line_no':self._line_no,
# 'blank_line':self.blank_line,
# 'blank_line_rfq':self.blank_line_rfq,
# 'get_grand_total':self.get_grand_total,
# 'get_internal':self._get_internal,
# 'sum_tax':self._sum_tax,
# 'get_curr2':self.get_curr,
# 'get_invoice':self._get_invoice,
# 'get_curr':self._get_used_currency,
})
def _get_object(self,data):
obj_data=self.pool.get(data['model']).browse(self.cr,self.uid,[data['id']])
# seq=obj_data[0].print_seq
# seq+=1
# obj_data[0].write({'print_seq':seq})
return obj_data
report_sxw.report_sxw('report.po.form', 'purchase.order', 'ad_report_salesadmin/po/po_form.mako', parser=po_form,header=False)
|
[
"lajayuhni@gmail.com"
] |
lajayuhni@gmail.com
|
d073cf0e510babb4c2329508f3b0d549e0cf3cec
|
0bc2a2963cb72c09c0ec0b3e3b10911c7bc31342
|
/examples/manila/script.py
|
a5f6b5d55011b15a3bcca5fbe09c09f48968cb7b
|
[] |
no_license
|
cloudify-cosmo/cloudify-openstack-plugin
|
eb5730d0b75442e6a49069164fde03020dcca1de
|
7d2cd4162897333adcaab4bd83361bbd369fcf17
|
refs/heads/master
| 2023-09-06T09:10:53.372638
| 2023-03-06T15:02:59
| 2023-03-06T15:02:59
| 18,327,738
| 19
| 75
| null | 2023-03-06T15:03:01
| 2014-04-01T11:52:24
|
Python
|
UTF-8
|
Python
| false
| false
| 391
|
py
|
# For development help:
from manilaclient import client
# Fill in with real values.
manila = client.Client(
client_version='2',
username='admin',
password='openstack',
project_name='demo',
auth_url='http://10.11.12.2/identity',
user_domain_name='Default',
project_domain_name='default')
share_networks = manila.share_networks.list()
shares = manila.shares.list()
|
[
"noreply@github.com"
] |
cloudify-cosmo.noreply@github.com
|
23b5b8241c446c7369de7dc25044362f340ba19b
|
28b874b6c03f8b319cf3e8e87301337958f9a0be
|
/tests/test_variable.py
|
1b00227e78aed0c0cfb42a961aec541e5a7717af
|
[] |
no_license
|
thar/sistema_ecuaciones_EACS_Programaci-n-Extrema
|
5e0467666b1c5005491362b4de34dbb1d217efb2
|
76a88533806b0a4fe1870ce1c6eb222de514fa2d
|
refs/heads/master
| 2020-03-19T06:37:43.403631
| 2018-05-27T11:03:40
| 2018-05-27T11:03:40
| 136,040,452
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,284
|
py
|
import unittest
from mock import patch, Mock
from equationsolver.fraction import Fraction
from equationsolver.variable_builder import VariableBuilder
class VariableTestCase(unittest.TestCase):
def testValue(self):
variable = VariableBuilder().value(3.0).build()
self.assertEqual(variable.value, 3.0)
def testMultiply(self):
variable = VariableBuilder().value(3.0).build()
variable.multiply(2.0)
self.assertEqual(variable.value, 6.0)
def testEqualPositive(self):
variable1 = VariableBuilder().name('x').value(3.0).build()
variable2 = VariableBuilder().name('x').value(3.0).build()
self.assertTrue(variable1.equal(variable2))
def testEqualNegativeValue(self):
variable1 = VariableBuilder().name('x').value(3.0).build()
variable2 = VariableBuilder().name('x').value(3.1).build()
self.assertFalse(variable1.equal(variable2))
self.assertNotEqual(variable1, variable2)
def testEqualNegativeName(self):
variable1 = VariableBuilder().name('x').value(3.0).build()
variable2 = VariableBuilder().name('y').value(3.0).build()
self.assertFalse(variable1.equal(variable2))
self.assertNotEqual(variable1, variable2)
def testEqualNegativeValueAndName(self):
variable1 = VariableBuilder().name('x').value(3.0).build()
variable2 = VariableBuilder().name('y').value(3.1).build()
self.assertFalse(variable1.equal(variable2))
self.assertNotEqual(variable1, variable2)
def testEqualWithConstantSameValueNegative(self):
variable1 = VariableBuilder().value(3.0).build()
variable2 = Mock()
variable2.value = 3.0
variable2.dispatch = Mock(side_effect=lambda x: x.visit_constant(variable2))
self.assertFalse(variable1.equal(variable2))
self.assertNotEqual(variable1, variable2)
def testEqualWithConstantDifferentValueNegative(self):
variable1 = VariableBuilder().value(3.0).build()
variable2 = Mock()
variable2.value = 2.0
variable2.dispatch = Mock(side_effect=lambda x: x.visit_constant(variable2))
self.assertFalse(variable1.equal(variable2))
self.assertNotEqual(variable1, variable2)
def testClon(self):
variable1 = VariableBuilder().build()
variable2 = variable1.clon()
self.assertFalse(variable1 is variable2)
self.assertTrue(variable1.equal(variable2))
def testHasNamePositive(self):
variable1 = VariableBuilder().name('x').build()
self.assertTrue(variable1.has_name('x'))
def testHasNameNegative(self):
variable1 = VariableBuilder().name('x').build()
self.assertFalse(variable1.has_name('y'))
def testHasNameSetPositive(self):
variable1 = VariableBuilder().name('x').build()
name_set = ['x', 'y']
self.assertTrue(variable1.has_name_set(name_set))
def testHasNameSetNegative(self):
variable1 = VariableBuilder().name('x').build()
name_set = ['z', 'y']
self.assertFalse(variable1.has_name_set(name_set))
def testStrVariable(self):
self.assertEqual(str(VariableBuilder().name('x').value(Fraction(1, 2)).build()), '+(1/2)*x')
self.assertEqual(str(VariableBuilder().name('x').value(Fraction(-1, 2)).build()), '-(1/2)*x')
self.assertEqual(str(VariableBuilder().name('x').value(Fraction(1, 1)).build()), '+x')
self.assertEqual(str(VariableBuilder().name('x').value(Fraction(-1, 1)).build()), '-x')
self.assertEqual(str(VariableBuilder().name('x').value(Fraction(-4, 2)).build()), '-2*x')
self.assertEqual(str(VariableBuilder().name('x').value(Fraction(4, 2)).build()), '+2*x')
self.assertEqual(str(VariableBuilder().name('x').value(Fraction(0, 2)).build()), '+0*x')
def testReprVariable(self):
self.assertEqual(repr(VariableBuilder().name('x').value(Fraction(1, 2)).build()), 'Variable(\'x\', Fraction(1, 2))')
@patch('equationsolver.term_visitor.TermVisitor')
def testDispatcher(self, TermVisitor):
variable1 = VariableBuilder().build()
term_visitor = TermVisitor()
variable1.dispatch(term_visitor)
self.assertEqual(term_visitor.visit_variable.call_count, 1)
|
[
"miguel.a.j82@gmail.com"
] |
miguel.a.j82@gmail.com
|
248e026a58b25e727390dcc43ea0a4361135df82
|
cc6b33c24069d6934741d1abfee1b8499497535a
|
/products/forms.py
|
f66ce2c2a57e6e7f0be1d30032fe4dbcc119b589
|
[] |
no_license
|
sukubhattu/django-style-guide
|
b85e34418926a7ec0330f802fa2e0b9f4dc82cd8
|
da4552405c6e1c5f445f30c5a69b64a4ed2b6d74
|
refs/heads/master
| 2023-03-15T06:32:22.814081
| 2021-03-29T07:28:24
| 2021-03-29T07:28:24
| 352,405,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
from django import forms
from django.forms import fields, widgets
from .models import Product
class ProductForm(forms.ModelForm):
class Meta:
model = Product
fields = '__all__'
widgets = {
'title': forms.TextInput(
attrs={'class': 'heheBoi', 'placeholder': 'Enter you title'}
),
}
# for django pure forms
class RawProductForm(forms.Form):
title = forms.CharField(
widget=forms.TextInput(
attrs={'class': 'heheBoi', 'placeholder': 'Enter your title'}
)
)
|
[
"sukubhattusandesh8@gmail.com"
] |
sukubhattusandesh8@gmail.com
|
7e1ccc3c0c0d628fe5629e0ba6ef33d3b0101291
|
bf0ecad5f2d9853944e6bbc1ab6160359e9a6803
|
/blog/migrations/0001_initial.py
|
d30bdf5599f3883def76510678e4cb1d43d9f3c0
|
[] |
no_license
|
NiteshPidiparars/icoder-blog-post
|
9addc53a83ec916c421ee16de7b04b8035be5d6b
|
19c5a333faf520b4133a0fa5d5ccf37320ed4181
|
refs/heads/master
| 2023-05-28T01:41:11.114065
| 2021-06-16T07:05:48
| 2021-06-16T07:05:48
| 374,288,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 683
|
py
|
# Generated by Django 3.2.4 on 2021-06-04 06:36
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('sno', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=255)),
('author', models.CharField(max_length=14)),
('slug', models.CharField(max_length=130)),
('timeStamp', models.DateTimeField(blank=True)),
('content', models.TextField()),
],
),
]
|
[
"niteshpidiparas76@gmail.com"
] |
niteshpidiparas76@gmail.com
|
344513f40b84e70156a271a556a0a7afa60bb84b
|
6febc1719503d0f9dbc97f6b1202116370391b10
|
/public_holiday/models/hr_holidays_inherited_model.py
|
fa5c2a57f2e8a69880f076eb808b1dbb72e214ac
|
[] |
no_license
|
arshakil/Odoo-Development
|
5c6a1795cd64a8ebef5abfdf7d6245804594bcd8
|
df37f6e8c2f7d89cdbdb36d0a8fd501ef8bfe563
|
refs/heads/master
| 2022-12-11T05:17:12.123339
| 2020-07-28T07:38:58
| 2020-07-28T07:38:58
| 248,154,189
| 0
| 2
| null | 2022-12-08T03:51:50
| 2020-03-18T06:20:59
|
Python
|
UTF-8
|
Python
| false
| false
| 3,029
|
py
|
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError
from datetime import date, datetime
from datetime import datetime, timedelta
class Hr_Holidays_inherited_Model(models.Model):
_inherit = 'hr.holidays'
public_holiday=fields.Float(string='Public Holiday In Between',compute='check_public_holiday')
@api.model
def create(self, vals):
holiday_status_id=vals['holiday_status_id']
# print ("vals date_from",vals['date_from'])
# print ('state', vals['state'])
# print ('holiday_status_id is called',holiday_status_id)
if vals['type'] == 'remove':
Is_check_hr_holidays_status= self.env['hr.holidays.status'].search([('id','=',holiday_status_id),('exclude_public_holidays','=',True)])
if Is_check_hr_holidays_status:
if vals['date_from'] and vals['date_to']:
count = 0;
start_date = datetime.strptime(vals['date_from'], '%Y-%m-%d %H:%M:%S').date()
end_date = datetime.strptime(vals['date_to'], '%Y-%m-%d %H:%M:%S').date()
range_of_dates = [start_date + timedelta(days=x) for x in range((end_date - start_date).days + 1)]
for public_holiday_date in range_of_dates:
check_public_holidays = self.env['public_holiday.public_holiday'].search([])
for pub_holiday in check_public_holidays:
if str(public_holiday_date)==pub_holiday.start:
count+=1
else:
pass
set_count=vals['number_of_days_temp']-float(count)
if vals['number_of_days_temp']<1:
vals['number_of_days_temp']=0
vals['public_holiday']=0
else:
vals['number_of_days_temp']=set_count
vals['public_holiday'] = float(count)
return super(Hr_Holidays_inherited_Model, self).create(vals)
else:
return super(Hr_Holidays_inherited_Model, self).create(vals)
@api.depends('date_from', 'date_to')
def check_public_holiday(self):
if self.date_from and self.date_to:
count = 0;
start_date = datetime.strptime(self.date_from, '%Y-%m-%d %H:%M:%S').date()
end_date = datetime.strptime(self.date_to, '%Y-%m-%d %H:%M:%S').date()
range_of_dates = [start_date + timedelta(days=x) for x in range((end_date - start_date).days + 1)]
for public_holiday_date in range_of_dates:
check_public_holidays = self.env['public_holiday.public_holiday'].search([])
for pub_holiday in check_public_holidays:
if str(public_holiday_date) == pub_holiday.start:
count += 1
else:
pass
self.public_holiday=count
|
[
"azizur.rahman363410@gmail.com"
] |
azizur.rahman363410@gmail.com
|
dd30c5254405af64ce994ba786c148924ddf521c
|
fd0194543a142c63812352e79c417e54a19d0cd5
|
/Auxiliary_Scripts/Plot_Relocate.py
|
7633b63d02c80b1e30093bd97aeca0eb93c5d1b2
|
[] |
no_license
|
mwilensky768/MJW-MWA
|
2ac85b8f07577e3112c418595bf62902d720c3c2
|
ebda1e273a401c88f014bc698743547ec86a6f35
|
refs/heads/master
| 2021-05-02T00:51:48.591198
| 2021-03-31T22:34:06
| 2021-03-31T22:34:06
| 78,403,875
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
import glob
import shutil
import os
plot_dir = '/Users/mike_e_dubs/MWA/Catalogs/Wenyang_Phase2/data_eva/unflagged/'
target_dir = '/Users/mike_e_dubs/MWA/Catalogs/Wenyang_Phase2/data_eva/frac_diff/'
plots = glob.glob('%s*__INS_frac_diff.png' % (plot_dir))
print(plots)
for plot in plots:
shutil.copy(plot, target_dir)
|
[
"mjw768@uw.edu"
] |
mjw768@uw.edu
|
e8bed198e20aac524cdf4677fc17c963af31386c
|
dccd223528133fe27c809d10533956b796034412
|
/MyMgAcct.py
|
8e28698219ffea8d503fbe7ff6ae93ed1c563270
|
[] |
no_license
|
gggrokthis/Twitter-
|
b22d79c1c7de2d285b48374ef212a1fb2d5c1325
|
f86c5ce365f7454af84baa6e07ba32fdd957a540
|
refs/heads/master
| 2020-03-27T23:11:25.657321
| 2019-07-29T18:30:55
| 2019-07-29T18:30:55
| 147,299,340
| 0
| 0
| null | 2018-09-04T06:59:44
| 2018-09-04T06:37:15
| null |
UTF-8
|
Python
| false
| false
| 6,491
|
py
|
# -*- coding: utf-8 -*-
####2017
#Check for twitter API and other changes if you are looking at this after 2017
#This script will delete tweets identified by tweetid listed in the input file
#from the twitter specified account
#Setup:
# You will need to get the following
# 1. consumer key
# consumer secret token
# access token
# access token secret
# A. To do so, register a twitter application at https://apps.twitter.com
# B. Ensure the application has read AND write access to be able to delete
# C. To use this script register the name of this script as the name of app
# (without the .py part)
# D. It is recommended that you delete the app (and remove its access to
# your profile) when not in use for the sake of security and
# recreate and regenerate the keys and secrets when needed and edit
# the program file as needed (cut and paste will cause minimal errors)
#
#
# 2. A .csv file with at least the 1st 6 fields populated per row
# using the same format and column order as that of the archive file
# generated by Twitter. The easiest way to do so is to use a Twitter
# generated archive file and then edit the file as needed before use
#
# Some necessary edits to use a copy of the twitter archive file:
# A. Remove the header line
# B. Remove any rows for any tweets you do not wish to delete
# C. Trim the file to at most 1000 rows. (Script may handle more rows
# seamlessly which accounts for good error/status reporting but
# I haven’t tested this yet. Never ran into rate limits so far)
# D. Currently the input file name is hardcoded "tweets_to_delete.csv"
# E. Curently the output/logfile name is hardcoded "delstatfile"
# F. You may want to look for strange newlines appearing as column1
# and eliminate them. I use vi/vim and it’s easy to join such lines
# but there are other far better methods available as well.
#
#3. You may want to look at some simple Unix awk and grep scripts I wrote
# to post-process output file to do sanity checks that the desired results
# occurred e.g. Count status ids in both, check for failed messages, use
# some/all failed status ids to manually look up tweet and verify status
# etc. File named “awkex” is checked in.
#
#
# Future: I wrote this quickly to meet my need. If I was trying to do this
# properly the following updates would make sense:
# 1. Use the keys and secrets in a more secure fashion (store in DB
# encrypt or pass as command line parameters etc.)
# 2. Allow the input file name to be a command line parameter
# 3. Allow the error file/output file name to be a command line parameter
# 4. Enforce no size limit on the input file
# 5. Use an awk script wrapper to do all the set up work such as
# generate input files(e.g. by date etc.), version and manage the output files
# 6. For very large file sizes, utilize forked processes / multithreading
# 7. Use scripts to pre-process archive file to remove the strange newlines that
# occasionally cause the csv.reader to pick something other than a status-id
#
# While using the archive file feels like an extra step it makes the code
# run more efficiently for more use cases since
# 1. You get to select which tweets to not delete by removing them from your copy
# of the archive file before executing the code
# 2. There are fewer api calls made as the script doesn't have to first
# read the timeline or otherwise collect the tweetids from the server.
# 3. 1. Above is the only potentially time consuming forced manual step which could
# be semi automated with custom unix scripts run on the .csv archive file as a
# pre-processing step
# 4. You can retain a copy of your archive (pre delete) and have it for posterity
# Note: Archive won’t save your photos and some links may be unusable.
#@requirements:Python 2.5+,Tweepy (http://pypi.python.org/pypi/tweepy/1.7.1)
#@author: Miss Gitanjali GulveSehgal (A.K.A Gigi Sehgal)
#I used oauth_login lines from code samples avaialble online - various authors.
####
#!/usr/bin/python
import csv
import tweepy
import tweepy.error
logfile = open("delstatfile","a")
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_TOKEN = ''
ACCESS_TOKEN_SECRET = ''
def oauth_login(consumer_key, consumer_secret):
####Authenticate with twitter using OAuth
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth_url = auth.get_authorization_url()
auth.set_access_token(ACCESS_TOKEN,ACCESS_TOKEN_SECRET)
return tweepy.API(auth)
def batch_delete(api, del_file):
print "You are about to Delete all selected tweets posted from account @%s . " % api.verify_credentials().screen_name
print "Please confirm all details and only if certain type yes - there is NO UNDO!"
do_delete = raw_input("> ")
errors = 0
if do_delete.lower() == 'yes':
for row in del_file:
print "calling api.destroy_status statusid =", row[0], " created on", row[3], " with text ", row[5]
logfile.write("calling api.destroy_status with statusid = %s created on %s with text %s\n" %(row[0], row[3], row[5]))
try:
api.destroy_status(row[0])
print "Deleted: tweet with statusid = ", row[0]
logfile.write("Deleted: tweet with statusid = %s\n" %(row[0]))
errors = errors
except tweepy.error.TweepError as e:
errors = errors + 1
print "Failed to delete tweet got TweepError with statusid = ", row[0], " created on", row[3], " with text ", row[5]
print "Error code reported : "
print (e.api_code)
logfile.write("Failed to delete: tweet with statusid = %s and error code %d \n" %(row[0], e.api_code))
else:
print "Didn't understand input %s, quitting " % do_delete
errors = errors
return(errors)
mycsvfile = open("tweets_to_delete.csv","r")
csv_reader = csv.reader(mycsvfile, delimiter=',')
print "del_file set to %s " % csv_reader
logfile.write("New batch session started \n")
api = oauth_login(CONSUMER_KEY, CONSUMER_SECRET)
print "Authenticated as: %s" % api.me().screen_name
batch_error = 0
batch_error = batch_delete(api,csv_reader)
if ( batch_error == 0 ) :
print "Deleted all needed tweetids as requested"
else :
print "Found %d errors in bulk delete attempt " % batch_error
|
[
"noreply@github.com"
] |
gggrokthis.noreply@github.com
|
98fe6950dde4c613c1aa84004a4e88ae3fab25b0
|
a937f0263bc95069fa77463cd7b1fcd4c3a83afd
|
/Problem06/main.py
|
a014cb47bcf47d45b3ee5dbc77d37ebc6afd9d8f
|
[] |
no_license
|
Aquarius314/Project-Euler
|
0a1c41e7c9a79c544d4e308eb61ccf391fd615ee
|
ad5b14d81598ce3a9a2d61cde8c007e6d93358b6
|
refs/heads/master
| 2021-01-23T03:42:46.562887
| 2017-06-16T20:48:34
| 2017-06-16T20:48:34
| 86,110,397
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
# The sum of the squares of the first ten natural numbers is 385
# The square of the sum of the first ten natural numbers is 3025
# Difference between them is 2640
# Find the difference between the sum of the squares of the
# first one hundred natural numbers and the square of the sum.
# BRUTE
# Solution is 25164150
squared_sum = 0
sum_squares = 0
for i in range(100):
squared_sum += i+1
sum_squares += (i+1)**2
squared_sum = squared_sum**2
print(squared_sum - sum_squares)
|
[
"jakubwojcik.post@gmail.com"
] |
jakubwojcik.post@gmail.com
|
29c7bbf44bdc8b73d1072eac1e09a2addf835026
|
44d88cbdfbe4af402e0323de5af94b96226e60b9
|
/decotimer/constants.py
|
e6f92b0a113b3412a35c4f1f635a7179c64a19dd
|
[] |
no_license
|
Excp281/decotimer
|
3108f5d498f8257b923560d905c4a1481177a2fa
|
ca9789acba96bbbe88f84e53de52ee8871b012f7
|
refs/heads/master
| 2022-11-26T07:56:24.187094
| 2020-07-24T12:09:57
| 2020-07-24T12:09:57
| 282,195,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24
|
py
|
ENABLED = 0
DISABLED = 1
|
[
"pypcdev@gmail.com"
] |
pypcdev@gmail.com
|
b50adbfdbf95e1fba92be2b60782ae7a2e3c2a03
|
cae00cf2d22208668a9df7b06efbb80e76146fd9
|
/HT3.py
|
8933eff7c63e15563cd09fdb96face129df5a423
|
[] |
no_license
|
Leonel1098/Curso-Basico-Python
|
ea711f2569905ce92eac0624bb3893c8b92dfd82
|
cd8819899fb4d6d297e7aff008511dfaaf964f5b
|
refs/heads/main
| 2023-03-20T09:37:19.420754
| 2021-03-13T03:51:52
| 2021-03-13T03:51:52
| 344,327,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 639
|
py
|
print("Ingrese la contraseña")
contraseña = input()
print("Confirme la contraseña")
contra = input()
if(contraseña == contra.lower() ):
print("Bienvenido al Sistema")
else:
print("Ingrese la contraseña de nuevo")
print("------------------------------------------------------------")
print("Ingrese su nombre")
name=input()
print("Ingrese su genero")
genero=input()
if genero == "M":
if name.lower() < "m":
grupo = "A"
else:
grupo = "B"
else:
if name.lower() >"n":
grupo = "A"
else:
grupo = "B"
print("Su grupo asignado es " +grupo)
|
[
"noreply@github.com"
] |
Leonel1098.noreply@github.com
|
eb61d968ba311fe56e5bfc26939fb22f35adfe4e
|
86896e2d29c44da3a0746ba631a63379005af038
|
/files.py
|
897cb73085571a770cf92581d91c52a312de2401
|
[] |
no_license
|
pjburkeiii/beacons_f16
|
dfc423ef21dbe892b78b0454a9c9f076397fa3a0
|
f5e05dc60d6c07f96b8b1333b20058f32b64eeef
|
refs/heads/master
| 2020-07-29T11:49:29.322360
| 2016-12-07T00:01:07
| 2016-12-07T00:01:07
| 73,669,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,268
|
py
|
import json
import requests
import datetime
from flask import Flask, jsonify, request
app = Flask(__name__)
# YOUR CANVAS KEY
canvasKey = '4511~U5CTjXuSLJKdaAoFTSRv7GeIFd1lGzDYVohzKHeHqUbfW1MTVQ0L1kFX1Fzkjx9Y'
courseID_1 = 36417 #exploration of beacons
#static data
items = { 'b1cfd7baf1349880': 'pi', '0daad9c90f98e60d': 'arduino', '4abe08dd66289027': 'puzzle' }
usersUUID = 'b9407f30f5f8466eaff925556b57fe6d'
users = { 'b9407f30f5f8466eaff925556b57fe6d-13137-13952': 16177, 'b9407f30f5f8466eaff925556b57fe6e-41822-33333': 32159 }
# users: ben, jordan
courses = { 'F3F73797-8720-45A1-9C6A-B105E24D1484~1000~1012': 36417 }
# courses: exploration of beacons
def assoc_itemUUID(uuid):
return items[uuid]
def assoc_courseUUID(uuid, major, minor):
lookupStr = str(uuid) + '~' + str(major) + '~' + str(minor)
if ( lookupStr in courses.keys() ):
return courses[lookupStr]
else:
return None
def assoc_userUUID(uuid, major, minor):
lookupStr = str(uuid) + '-' + str(major) + '-' + str(minor)
if( lookupStr in users.keys() ):
return users[lookupStr]
else:
return None
def get_user_id():
op = {'access_token': canvasKey}
req = requests.get('https://canvas.vt.edu/api/v1/users/self/profile', params=op)
return req.json()['id']
def get_user_name(canvasID):
username = None
op = {'access_token': canvasKey}
req = requests.get('https://canvas.vt.edu/api/v1/courses/' + str(courseID_1) + '/users', params=op)
for i in range(0, len(req.json())):
if(req.json()[i]['id'] == canvasID):
username = req.json()[i]['name']
return username
def get_file_id(courseID, folderPath, fileName):
id = 0
op = {'access_token': canvasKey}
# get folder ID from path
req1 = requests.get( ('https://canvas.vt.edu/api/v1/courses/' + str(courseID) +
'/folders/by_path' + folderPath ), params=op)
# list files in folder
req2 = requests.get( ('https://canvas.vt.edu/api/v1/folders/' +
str(req1.json()[len(req1.json()) - 1]['id']) + '/files' ), params=op)
# get file ID from list
for i in range(0, len(req2.json())):
if(req2.json()[i]['display_name'] == fileName):
id = req2.json()[i]['id']
return id
def get_file_url(fileID):
op = {'access_token': canvasKey}
req = requests.get('https://canvas.vt.edu/api/v1/files/' + str(fileID), params=op)
if( 'url' in req.json().keys() ):
return req.json()['url']
else:
return None
# function looks at the groups for the specified user and returns the groups name (str)
# of the group for the respective course
def get_group_name(courseID, userID):
group = ''
op = {'access_token': canvasKey}
req = requests.get('https://canvas.vt.edu/api/v1/users/' + str(userID) + '/groups/', params=op)
for i in range(0, len(req.json())):
if(req.json()[i]['course_id'] == courseID):
group = req.json()[i]['name']
return group
@app.route('/get_agenda', methods=['GET'])
def return_agenda():
data = {}
# lookup uuid to student id
uuid = request.args.get('uuid')
major = request.args.get('major')
minor = request.args.get('minor')
userID = request.args.get('canvas_id')
print (uuid, major, minor)
print userID
if( (uuid == None or major == None or minor == None) ):
data = {'error': 'invalid parameter set; requires uuid, major, and minor'}
else:
courseID = assoc_courseUUID(uuid, major, minor)
url = get_file_url(get_file_id(courseID, '/demo/', 'syllabus.pdf'))
grp = get_group_name(courseID, userID)
data = { 'url': str(url), 'group_name': str(grp) }
return jsonify(**data)
@app.route('/get_item', methods=['GET'])
def return_item():
data = {}
uuid = request.args.get('uuid')
if( (uuid == None) ):
data = {'error': 'invalid parameter set; requires uuid'}
else:
itemID = assoc_itemUUID(uuid)
# TODO replace these with proper path and files once uploaded
# add front and back
# ***
i_top = get_file_url(get_file_id(courseID_1, ('/' + itemID + '/'), 'top.jpg'))
i_bottom = get_file_url(get_file_id(courseID_1, ('/' + itemID + '/'), 'bottom.jpg'))
i_right = get_file_url(get_file_id(courseID_1, ('/' + itemID + '/'), 'right.jpg'))
i_left = get_file_url(get_file_id(courseID_1, ('/' + itemID + '/'), 'left.jpg'))
i_front = get_file_url(get_file_id(courseID_1, ('/' + itemID + '/'), 'front.jpg'))
i_back = get_file_url(get_file_id(courseID_1, ('/' + itemID + '/'), 'back.jpg'))
data = { 'top': str(i_top), 'bottom': str(i_bottom), 'right': str(i_right),
'left': str(i_left), 'front': str(i_front), 'back': str(i_back) }
return jsonify(**data)
@app.route('/get_user', methods=['GET'])
def return_user():
data = {}
# lookup uuid to student id
uuid = request.args.get('uuid')
major = request.args.get('major')
minor = request.args.get('minor')
if( (uuid == None or major == None or minor == None) ):
data = {'error': 'invalid parameter set; requires uuid, major, and minor'}
elif( assoc_userUUID(uuid, major, minor) == None ):
data = {'error': 'student not found'}
else:
user = assoc_userUUID(uuid, major, minor)
username = get_user_name(user)
data = {'id': user, 'username': username}
return jsonify(**data)
print get_file_url(get_file_id(courseID_1, '/demo/', 'test_file.txt'))
print get_group_name(courseID_1, 'self')
print str(datetime.datetime.now().date())
app.run(host='208.113.133.165', port=5000)
|
[
"p.j.burkeiii@gmail.com"
] |
p.j.burkeiii@gmail.com
|
8a7d55caa81a5c81fa616f5f2ed3c6f0142efd0a
|
69bc23a7baf65b276496d76d02645b5a76cfe083
|
/thu_python_16/program3.py
|
f3332c8639d67bd315206d183acabe04cb6c234d
|
[] |
no_license
|
pylinx64/thu_python_16
|
a12e4ec8f82e6470b496116342b777e0a6676be1
|
617e75618a5a3117ec34278c41dadb38aa39fdb8
|
refs/heads/main
| 2023-04-23T03:17:31.347867
| 2021-05-05T10:09:57
| 2021-05-05T10:09:57
| 336,022,469
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
#x=10
#print(x)
#y=10
#print(x+y)
#print('x+y')
#x=20
#z = 10
#print(x+y+z)
#x='Яготинское'
#k='молоко'
#print(x+' '+k)
#print('Яготинское'+' молоко')
#print(k * 143543543)
#print(11 > 10)
#print(8 > 9)
#print(9 != 9)
#print(9 == 9)
#x = 8
#y = 9
#print(x >= y)
#print('a' == 'a')
#print('с' == 'c')
#print('z' > 'a')
password = input('Введите пароль: ')
if 'abc123' == password:
print('Вход выполнен')
else:
print('Невход выполнен 404')
|
[
"noreply@github.com"
] |
pylinx64.noreply@github.com
|
ddd882154d6482285e5cf75620af0ca019e5af41
|
c00cea43929684d5a584a1d102bf081b3d6cd348
|
/19_rest/app.py
|
e8e8e7f44530311ca626bd7291ddca37a99fb769
|
[] |
no_license
|
amelia-chin/achin10
|
2372baca901308115e5e1598c347988f73f66f6c
|
7b0e68d58720dd9e6d4647e09675fe63ba16a3f0
|
refs/heads/master
| 2023-05-02T22:17:53.499873
| 2021-05-18T01:17:54
| 2021-05-18T01:17:54
| 298,331,511
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 827
|
py
|
"""
Team Eggs and Bagels (Amelia Chin, Ari Schechter)
SoftDev
K19 -- A RESTful Journey Skyward // exploration into REST APIs
2021-04-05
"""
from flask import Flask, render_template
import urllib as url
import json
app = Flask(__name__)
API_KEY = open("key_nasa.txt", "r").read()
@app.route("/")
def index():
# this is the HTTP request for the data
data = url.request.urlopen("https://api.nasa.gov/planetary/apod?api_key=" + API_KEY)
# returns the contents of the webpage
readable = data.read()
# returns the JSON data as a dic
d = json.loads(readable)
# accesses the data in the json with key "url"
img = d["url"]
explanation = d["explanation"]
return(render_template("main.html", img_url = img, exp = explanation))
if __name__ == "__main__":
app.debug = True
app.run()
|
[
"achin10@stuy.edu"
] |
achin10@stuy.edu
|
688324093ad3f13ec5d60a2ed5b09953f0e0d9d5
|
5d5d96e06aa643a8f2e868304e2f9e2e928bcfeb
|
/week10-lab/sorthema.py
|
f0c2d43b590d7391f7f8c42a864135e851ca3069
|
[] |
no_license
|
natdjaja/qbb2019-answers
|
59334397a0b475b8e2f17d4f7e1bbd88ad44fbb2
|
0b154e159aae612fe146aed87ce238209e8cba0b
|
refs/heads/master
| 2022-04-13T17:27:12.896664
| 2020-02-14T19:52:29
| 2020-02-14T19:52:29
| 204,489,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,819
|
py
|
#!/usr/bin/env python3
# Clustering analysis of a hematopoiesis gene-expression table (rows = genes,
# columns = cell types, TSV).  Produces a heatmap, a dendrogram of the cell
# types and a k-means scatter plot, then reports significantly differentially
# expressed genes and the genes co-clustered with a gene of interest.
# Usage: sorthema.py <expression.tsv> <gene_of_interest>

import sys
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pandas as pd
from scipy.cluster.hierarchy import dendrogram, linkage, leaves_list
import seaborn as sns
from sklearn.cluster import KMeans
import scipy.stats as sp  # NOTE(review): rebinds `sp` from scipy to scipy.stats; `sp.ttest_rel` below depends on this shadowing

# Load the expression matrix keyed by gene name.
hema = open(sys.argv[1])
df = pd.read_csv(hema, sep="\t", index_col="gene", header=0)

# Two columns of interest, packaged for the k-means step below.
cfu = df['CFU'].values
poly = df["poly"].values
Data = {'x': cfu, 'y': poly}
#print(Data)
df6 = pd.DataFrame(Data, columns=['x', 'y'])
#print(df6)

# Hierarchical clustering of the genes (rows); leaf order for reordering.
linky = linkage(df, 'single', 'euclidean')
leaf = leaves_list(linky)
#print(leaf)
df2 = df.iloc[leaf]
#print(df2)
# fig, ax = plt.subplots()
# sns.heatmap(df2)

# Hierarchical clustering of the cell types (columns of df = rows of df3).
df3 = df.transpose()
# link2=linkage(df3, 'single', 'euclidean')
link2 = linkage(df3, 'average')
leaf2 = leaves_list(link2)

# Rows and columns reordered into cluster leaf order.
df4 = df.iloc[leaf, :]
df5 = df4.iloc[:, leaf2]  # NOTE(review): computed but unused — the heatmap below plots the unsorted df

fig, ax = plt.subplots()
plt.pcolor(df)
plt.yticks(np.arange(0.5, len(df.index), 1), df.index)
plt.xticks(np.arange(0.5, len(df.columns), 1), df.columns)
fig.savefig("heatmap.png")
plt.close(fig)

# Dendrogram of the cell types, labelled in leaf order.
label_list = ["CFU", "poly", "unk", "int", "mys", "mid"]
labels = np.array(label_list)
sort_label = labels[leaf2]
ax1 = dendrogram(link2, labels=sort_label)
plt.savefig('dendogram.png')
plt.close(fig)
fig.savefig("plot.png")  # NOTE(review): `fig` was already closed above — verify this still saves what was intended
plt.close(fig)

# K-means (5 clusters) of genes in CFU/poly space, coloured by cluster.
kmeans = KMeans(n_clusters=5).fit(df6)
centroids = kmeans.cluster_centers_
fig, ax = plt.subplots()
plt.scatter(df6['x'], df6['y'], c=kmeans.labels_.astype(float), s=50, alpha=0.5)
plt.scatter(centroids[:, 0], centroids[:, 1], c='red', s=50)
fig.savefig("kmeans.png")
plt.close(fig)

# Reload the table for the differential-expression analysis.
df_data = pd.read_csv(sys.argv[1], sep='\t', header=0, index_col=0)
# diff_exp_high = ((df_data['poly'] + df_data['int']/2))/((df_data['mid'] + df_data['int']/2))>= 2
# diff_exp_low = ((df_data['CFU'] + df_data['unk']/2))/((df_data['mid'] + df_data['int']/2)) <= 0.5
#
#
# diff_exp_genes = df_data[diff_exp_high | diff_exp_low]

#CFU and unk
#poly and int
# Candidate genes: early-stage (CFU/unk) mean at least two-fold above or
# below the late-stage (poly/int) mean.
df_data = pd.read_csv(sys.argv[1], sep='\t', header=0, index_col=0)  # NOTE(review): duplicate of the read just above
diff_exp_high = ((df_data['CFU'] + df_data['unk'])/2)/((df_data['poly'] + df_data['int'])/2) >= 2
diff_exp_low = ((df_data['CFU'] + df_data['unk'])/2)/((df_data['poly'] + df_data['int'])/2) <= 0.5
diff_exp_genes = df_data[diff_exp_high | diff_exp_low]

# for gene_name, row in diff_exp_genes.iterrows():
#     sample1 = [row['CFU'], row['unk']]
#     sample2 = [row['poly'], row['int']]
#     print(gene_name, stats.ttest_ind(sample1, sample2).pvalue)

# Per-gene values for the paired t-test below.
cfu1 = list(diff_exp_genes["CFU"].values)
poly1 = list(diff_exp_genes["poly"].values)
int1 = list(diff_exp_genes["int"].values)
unk1 = list(diff_exp_genes["unk"].values)
gene_name1 = list(diff_exp_genes.index.values)
l = len(gene_name1)
# print(l)
# print(gene_name1)

# Paired t-test (early vs late stage) per candidate gene; keep p < 0.05.
sig_de_genes = []
for i in range(l):
    early = [cfu1[i], unk1[i]]
    late = [poly1[i], int1[i]]
    t, p = (sp.ttest_rel(early, late))
    if p < 0.05:
        sig_de_genes.append(gene_name1[i])
print(sig_de_genes)

# Report every gene sharing a k-means cluster with the gene of interest
# (sys.argv[2]).
labels = list(kmeans.labels_)
genes = list(df_data.index.values)
goi_index = genes.index(sys.argv[2])
goi_cluster = labels[goi_index]
related_genes = []
for i, gene in enumerate(genes):
    if labels[i] == goi_cluster:
        related_genes.append(gene)
print(related_genes)

# with open('list_of_genes.txt', 'w') as f:
#     for item in related_genes:
#         f.write("%s," % item)

fig.savefig('plot.png')
# Index= ['aaa', 'bbb', 'ccc', 'ddd', 'eee']
# Cols = ['A', 'B', 'C', 'D']
# df = DataFrame(abs(np.random.randn(5, 4)), index=Index, columns=Cols)
#
# sns.heatmap(df, annot=True)
#
#
#
# # df = np.loadtxt(open(sys.argv[1]), dtype = "str", delimiter = "\t")
# # array = np.loadtxt
#
# data = pd.read_csv(open(sys.argv[1]), delimiter = "\t", header=0)
# print(data)
# gene_array = np.random.rand(10, 12)
# ax = sns.heatmap(gene_array, linewidth=0.5)
# plt.show()
|
[
"nathaliedjaja@gmail.com"
] |
nathaliedjaja@gmail.com
|
d3b6650494506a0738afea264cb7ca7077bc7d2f
|
22b9871352c6aa91b7c028d5ddcf7adaaeac09f9
|
/mbta/__init__.py
|
c7b1be083503a4d879cc25599e2c129b2524f798
|
[
"MIT"
] |
permissive
|
JaredStufft/mbta
|
257e0122029844d27016b419789810eeaf3e2a9b
|
9c497d6718ddfbf8a69de7d3a7e043594d9cf0ef
|
refs/heads/master
| 2022-12-14T20:54:02.893588
| 2019-03-04T17:38:35
| 2019-03-04T17:38:35
| 143,295,378
| 2
| 0
|
MIT
| 2022-12-08T02:20:34
| 2018-08-02T13:01:11
|
Python
|
UTF-8
|
Python
| false
| false
| 15
|
py
|
# Distribution name for the mbta package.
name = 'mbta'
|
[
"jared@stufft.us"
] |
jared@stufft.us
|
b5000ca9d75fbdbc1f3173b2d37b4b01952505f1
|
83d512f770a7af5de96f711b068095aae21f3875
|
/python/ph.py
|
63201f5d05acb02062ee8238a31edc594b7e71fc
|
[] |
no_license
|
MonkeyTaolun/play_around
|
bcdf5172d03bffd527ac7f51426a205c40c75e29
|
39337c447379c21786788636f9af0557954c11d3
|
refs/heads/master
| 2021-01-10T20:04:11.126923
| 2012-02-28T20:28:04
| 2012-02-28T20:28:04
| 2,185,849
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
# Fetch python.org and extract the text of every <td> element with lxml.
# NOTE(review): this is Python 2 code — `urllib.urlopen` does not exist in
# Python 3 (use urllib.request.urlopen there).
import urllib
from lxml import html

f = urllib.urlopen("http://www.python.org")  # Python-2-only API
s = f.read()
tree = html.fromstring(s)
# The list of cell texts is built but discarded — presumably exploratory
# REPL-style code; capture it in a variable if the result is needed.
[td.text for td in tree.xpath("//td")]
#print tree
|
[
"ctcooll@gmail.com"
] |
ctcooll@gmail.com
|
fdd37d7b786f3d9ca71aff75f5399b323327af37
|
d41d0f5a16dea31186d8e862c538489af91c57f9
|
/plot.py
|
d99c57f1882cb1be7fa6a07788cf7d93d949837a
|
[] |
no_license
|
digitalscientists/scribe
|
7c906c2a8a1b663419bc922b242bc9b16be8766a
|
d6c83e36c5e9181387077bdfdb10433b86cb60b1
|
refs/heads/master
| 2021-08-08T07:51:22.709460
| 2017-11-09T23:12:10
| 2017-11-09T23:12:10
| 107,588,462
| 0
| 0
| null | 2017-10-19T19:19:38
| 2017-10-19T19:19:38
| null |
UTF-8
|
Python
| false
| false
| 758
|
py
|
# Plot the pen strokes recorded in a handwriting-capture XML file
# (StrokeSet/Stroke/Point hierarchy) to visually confirm the captured data.
# Usage: plot.py <strokes.xml>
import matplotlib.pyplot as plt
import sys
import os
import xml.etree.ElementTree as ET

fig = plt.figure()
ax = fig.add_subplot(111)

#os.getcwd() + '/' +
file_path = sys.argv[1]
tree = ET.parse(file_path)
root = tree.getroot()
stroke_sets = root.findall('StrokeSet')

# One plotted line per stroke: collect its (x, y) points, then draw.
for stroke_set in stroke_sets:
    for stroke in stroke_set.findall('Stroke'):
        x_points = []
        y_points = []
        for point in stroke.findall('Point'):
            x_points.append(int(point.attrib['x']))
            y_points.append(int(point.attrib['y']))
        p = ax.plot(x_points, y_points)

ax.set_xlabel('x-points')
ax.set_ylabel('y-points')
# Equal aspect and inverted y-axis so the figure matches pen/tablet
# coordinates (origin at the top-left).
plt.gca().set_aspect('equal', adjustable='box')
plt.gca().invert_yaxis()
plt.show()
# plot to see capture data seems to confirm captured data is correct
|
[
"jerry.deng@digitalscientists.com"
] |
jerry.deng@digitalscientists.com
|
cb743f7d80b535f3425bff1576c305628e2398ac
|
454b62b464c9fc280be3e2248eaeb3ea6a6676b1
|
/Tkinter-Asynchronous-Programming/tkinter-after/metodos.py
|
f73ed17ca3553945f9bd5e847c2fb250f64930d6
|
[] |
no_license
|
afhernani/tk-gui-templates
|
2aea9a6cf95a144940656cddd729f1c83cf0ec3a
|
6b23209b7398377f3aa5403ffef0f680741aa469
|
refs/heads/main
| 2023-06-18T23:51:32.369247
| 2021-07-15T19:59:52
| 2021-07-15T19:59:52
| 376,554,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,165
|
py
|
import tkinter as tk
from tkinter import ttk
import time
class DigitalClock(tk.Tk):
    """A Tk window that displays the current local time, refreshed every second."""

    def __init__(self):
        super().__init__()
        # configure the root window
        self.title('Digital Clock')
        self.resizable(0, 0)
        self.geometry('250x80')
        self['bg'] = 'black'
        # red-on-black ttk label style
        self.style = ttk.Style(self)
        self.style.configure(
            'TLabel',
            background='black',
            foreground='red')
        # label holding the time text
        self.label = ttk.Label(
            self,
            text=self.time_string(),
            font=('Digital-7', 40))
        self.label.pack(expand=True)
        # schedule the first refresh in 1 second
        self.label.after(1000, self._update_clock)

    def time_string(self):
        """Return the current local time formatted as HH:MM:SS."""
        return time.strftime('%H:%M:%S')

    def _update_clock(self):
        """Refresh the label and reschedule the next refresh in 1 second.

        BUG FIX: this method was named `update`, which overrides
        tkinter's own Tk.update() (the event-loop pump) — a classic
        tkinter pitfall.  Renaming it restores the inherited Tk.update
        for any caller that expects standard Tk semantics.
        """
        self.label.configure(text=self.time_string())
        # schedule another timer tick
        self.label.after(1000, self._update_clock)


if __name__ == "__main__":
    clock = DigitalClock()
    clock.mainloop()
|
[
"afernani@gmail.com"
] |
afernani@gmail.com
|
dde36e2eae98fd6ebba3dc430abdd47affdd0f65
|
a3e7583b70584f62554c5969a9963ba79afd7ac3
|
/check.py
|
b7630edc09bab8c7a639472f47604386f4a53a32
|
[
"MIT"
] |
permissive
|
foamliu/DeepRankIQA
|
4f677a2fe1912b16cf4bbcc05c8571d46260711f
|
7801cb4ff2c934a9d954ace9ad52600f96396125
|
refs/heads/master
| 2020-09-03T06:02:05.896210
| 2019-11-05T01:30:27
| 2019-11-05T01:30:27
| 219,402,631
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
import os
import pickle

import cv2 as cv
from tqdm import tqdm

from config import data_file, image_folder


def _verify_image(filename):
    """Assert that `filename` under image_folder exists and decodes as an image."""
    fullpath = os.path.join(image_folder, filename)
    img = cv.imread(fullpath)
    assert (img is not None)


if __name__ == "__main__":
    # Samples is a list of dicts with 'before'/'after' image filenames.
    with open(data_file, 'rb') as f:
        samples = pickle.load(f)

    filenames = set()

    for sample in tqdm(samples):
        before = sample['before']
        _verify_image(before)
        filenames.add(before)
        after = sample['after']
        # BUG FIX: the original re-checked `before` here instead of `after`,
        # so a missing/corrupt 'after' image was never detected even though
        # its name was still added to the set.
        _verify_image(after)
        filenames.add(after)

    # Count of distinct image files referenced by the dataset.
    num_samples = len(list(filenames))
    print('num_samples: ' + str(num_samples))
|
[
"liuyang12@focusmedia.cn"
] |
liuyang12@focusmedia.cn
|
753f83705d4b14450b57358743f60359fde6545a
|
4a6a1e1ce876118bb0bda46181c457ad1dbc9320
|
/dataAugment.py
|
31a7c707934bce1e59f965f7b4abce42fc69b71d
|
[] |
no_license
|
MDrance/Filbi-project
|
c9b143e8447516b0a23b12b346b878bd02c60864
|
0729748706b08c82aee0b80224153288f5553576
|
refs/heads/main
| 2023-01-10T03:21:38.350036
| 2020-11-17T17:03:05
| 2020-11-17T17:03:05
| 313,684,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
import glob
import cv2
import os
import random
import shutil
def data_augmentation():
    """Copy every data/*.jpg into ./augmented_data/ and add one augmented
    variant per image: rotated by +/-15 degrees (chosen at random) and
    resized to 250x250, saved with an `_1` filename suffix.

    The target directory is recreated from scratch on every call.
    """
    print("DATA AUGMENTATION ...")
    target = "./augmented_data/"
    # start from a clean output directory
    if os.path.isdir(target):
        shutil.rmtree(target)
    os.mkdir("./augmented_data/")
    for file in glob.glob("data/*.jpg"):
        shutil.copy(file, target)
        base = os.path.basename(file)
        name_file = os.path.splitext(base)
        # BUG FIX: the original called io.imread / rotate / io.imsave, but
        # neither `io` (skimage.io) nor `rotate` was ever imported, so this
        # loop raised NameError.  Reimplemented with cv2, which this module
        # already imports.
        image = cv2.imread(file)
        angle = random.choice([15, -15])
        h, w = image.shape[:2]
        matrix = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1.0)
        rotated = cv2.warpAffine(image, matrix, (w, h))
        resized = cv2.resize(rotated, (250, 250))
        cv2.imwrite(target + name_file[0] + "_1" + name_file[1], resized)
|
[
"martindrance@hotmail.com"
] |
martindrance@hotmail.com
|
053d272a8dd615e354067ac7a039ec23434eea10
|
3e4de43ddfef39f79c9e06e78c7789ded0855001
|
/thola_client/models/check_identify_request.py
|
19d6b5d4362f0bc176207aa734c1a86e8d276bfe
|
[
"BSD-2-Clause"
] |
permissive
|
inexio/thola-client-module-python
|
9721f27b1305b25445c4e2594b134cfd17ed7641
|
f9a6812885738e33b1aed43ca55335b71e3d2b2d
|
refs/heads/main
| 2023-04-30T06:55:25.015950
| 2021-05-19T11:05:26
| 2021-05-19T11:05:26
| 358,176,683
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,563
|
py
|
# coding: utf-8
"""
Thola
REST API for Thola. For more information look at our Github : https://github.com/inexio/thola # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from thola_client.configuration import Configuration
class CheckIdentifyRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'device_data': 'DeviceData',
'expectations': 'Device',
'json_metrics': 'bool',
'model_diff_warning': 'bool',
'model_series_diff_warning': 'bool',
'os_diff_warning': 'bool',
'os_version_diff_warning': 'bool',
'print_performance_data': 'bool',
'serial_number_diff_warning': 'bool',
'timeout': 'int',
'vendor_diff_warning': 'bool'
}
attribute_map = {
'device_data': 'device_data',
'expectations': 'expectations',
'json_metrics': 'json_metrics',
'model_diff_warning': 'model_diff_warning',
'model_series_diff_warning': 'model_series_diff_warning',
'os_diff_warning': 'os_diff_warning',
'os_version_diff_warning': 'os_version_diff_warning',
'print_performance_data': 'print_performance_data',
'serial_number_diff_warning': 'serial_number_diff_warning',
'timeout': 'timeout',
'vendor_diff_warning': 'vendor_diff_warning'
}
def __init__(self, device_data=None, expectations=None, json_metrics=None, model_diff_warning=None, model_series_diff_warning=None, os_diff_warning=None, os_version_diff_warning=None, print_performance_data=None, serial_number_diff_warning=None, timeout=None, vendor_diff_warning=None, _configuration=None): # noqa: E501
"""CheckIdentifyRequest - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._device_data = None
self._expectations = None
self._json_metrics = None
self._model_diff_warning = None
self._model_series_diff_warning = None
self._os_diff_warning = None
self._os_version_diff_warning = None
self._print_performance_data = None
self._serial_number_diff_warning = None
self._timeout = None
self._vendor_diff_warning = None
self.discriminator = None
if device_data is not None:
self.device_data = device_data
if expectations is not None:
self.expectations = expectations
if json_metrics is not None:
self.json_metrics = json_metrics
if model_diff_warning is not None:
self.model_diff_warning = model_diff_warning
if model_series_diff_warning is not None:
self.model_series_diff_warning = model_series_diff_warning
if os_diff_warning is not None:
self.os_diff_warning = os_diff_warning
if os_version_diff_warning is not None:
self.os_version_diff_warning = os_version_diff_warning
if print_performance_data is not None:
self.print_performance_data = print_performance_data
if serial_number_diff_warning is not None:
self.serial_number_diff_warning = serial_number_diff_warning
if timeout is not None:
self.timeout = timeout
if vendor_diff_warning is not None:
self.vendor_diff_warning = vendor_diff_warning
@property
def device_data(self):
"""Gets the device_data of this CheckIdentifyRequest. # noqa: E501
:return: The device_data of this CheckIdentifyRequest. # noqa: E501
:rtype: DeviceData
"""
return self._device_data
@device_data.setter
def device_data(self, device_data):
"""Sets the device_data of this CheckIdentifyRequest.
:param device_data: The device_data of this CheckIdentifyRequest. # noqa: E501
:type: DeviceData
"""
self._device_data = device_data
@property
def expectations(self):
"""Gets the expectations of this CheckIdentifyRequest. # noqa: E501
:return: The expectations of this CheckIdentifyRequest. # noqa: E501
:rtype: Device
"""
return self._expectations
@expectations.setter
def expectations(self, expectations):
"""Sets the expectations of this CheckIdentifyRequest.
:param expectations: The expectations of this CheckIdentifyRequest. # noqa: E501
:type: Device
"""
self._expectations = expectations
@property
def json_metrics(self):
"""Gets the json_metrics of this CheckIdentifyRequest. # noqa: E501
:return: The json_metrics of this CheckIdentifyRequest. # noqa: E501
:rtype: bool
"""
return self._json_metrics
@json_metrics.setter
def json_metrics(self, json_metrics):
"""Sets the json_metrics of this CheckIdentifyRequest.
:param json_metrics: The json_metrics of this CheckIdentifyRequest. # noqa: E501
:type: bool
"""
self._json_metrics = json_metrics
@property
def model_diff_warning(self):
"""Gets the model_diff_warning of this CheckIdentifyRequest. # noqa: E501
:return: The model_diff_warning of this CheckIdentifyRequest. # noqa: E501
:rtype: bool
"""
return self._model_diff_warning
@model_diff_warning.setter
def model_diff_warning(self, model_diff_warning):
"""Sets the model_diff_warning of this CheckIdentifyRequest.
:param model_diff_warning: The model_diff_warning of this CheckIdentifyRequest. # noqa: E501
:type: bool
"""
self._model_diff_warning = model_diff_warning
@property
def model_series_diff_warning(self):
"""Gets the model_series_diff_warning of this CheckIdentifyRequest. # noqa: E501
:return: The model_series_diff_warning of this CheckIdentifyRequest. # noqa: E501
:rtype: bool
"""
return self._model_series_diff_warning
@model_series_diff_warning.setter
def model_series_diff_warning(self, model_series_diff_warning):
"""Sets the model_series_diff_warning of this CheckIdentifyRequest.
:param model_series_diff_warning: The model_series_diff_warning of this CheckIdentifyRequest. # noqa: E501
:type: bool
"""
self._model_series_diff_warning = model_series_diff_warning
@property
def os_diff_warning(self):
"""Gets the os_diff_warning of this CheckIdentifyRequest. # noqa: E501
:return: The os_diff_warning of this CheckIdentifyRequest. # noqa: E501
:rtype: bool
"""
return self._os_diff_warning
@os_diff_warning.setter
def os_diff_warning(self, os_diff_warning):
"""Sets the os_diff_warning of this CheckIdentifyRequest.
:param os_diff_warning: The os_diff_warning of this CheckIdentifyRequest. # noqa: E501
:type: bool
"""
self._os_diff_warning = os_diff_warning
@property
def os_version_diff_warning(self):
"""Gets the os_version_diff_warning of this CheckIdentifyRequest. # noqa: E501
:return: The os_version_diff_warning of this CheckIdentifyRequest. # noqa: E501
:rtype: bool
"""
return self._os_version_diff_warning
@os_version_diff_warning.setter
def os_version_diff_warning(self, os_version_diff_warning):
"""Sets the os_version_diff_warning of this CheckIdentifyRequest.
:param os_version_diff_warning: The os_version_diff_warning of this CheckIdentifyRequest. # noqa: E501
:type: bool
"""
self._os_version_diff_warning = os_version_diff_warning
@property
def print_performance_data(self):
"""Gets the print_performance_data of this CheckIdentifyRequest. # noqa: E501
:return: The print_performance_data of this CheckIdentifyRequest. # noqa: E501
:rtype: bool
"""
return self._print_performance_data
@print_performance_data.setter
def print_performance_data(self, print_performance_data):
"""Sets the print_performance_data of this CheckIdentifyRequest.
:param print_performance_data: The print_performance_data of this CheckIdentifyRequest. # noqa: E501
:type: bool
"""
self._print_performance_data = print_performance_data
@property
def serial_number_diff_warning(self):
"""Gets the serial_number_diff_warning of this CheckIdentifyRequest. # noqa: E501
:return: The serial_number_diff_warning of this CheckIdentifyRequest. # noqa: E501
:rtype: bool
"""
return self._serial_number_diff_warning
@serial_number_diff_warning.setter
def serial_number_diff_warning(self, serial_number_diff_warning):
"""Sets the serial_number_diff_warning of this CheckIdentifyRequest.
:param serial_number_diff_warning: The serial_number_diff_warning of this CheckIdentifyRequest. # noqa: E501
:type: bool
"""
self._serial_number_diff_warning = serial_number_diff_warning
@property
def timeout(self):
"""Gets the timeout of this CheckIdentifyRequest. # noqa: E501
Timeout for the request (0 => no timeout) # noqa: E501
:return: The timeout of this CheckIdentifyRequest. # noqa: E501
:rtype: int
"""
return self._timeout
@timeout.setter
def timeout(self, timeout):
"""Sets the timeout of this CheckIdentifyRequest.
Timeout for the request (0 => no timeout) # noqa: E501
:param timeout: The timeout of this CheckIdentifyRequest. # noqa: E501
:type: int
"""
self._timeout = timeout
@property
def vendor_diff_warning(self):
"""Gets the vendor_diff_warning of this CheckIdentifyRequest. # noqa: E501
:return: The vendor_diff_warning of this CheckIdentifyRequest. # noqa: E501
:rtype: bool
"""
return self._vendor_diff_warning
@vendor_diff_warning.setter
def vendor_diff_warning(self, vendor_diff_warning):
"""Sets the vendor_diff_warning of this CheckIdentifyRequest.
:param vendor_diff_warning: The vendor_diff_warning of this CheckIdentifyRequest. # noqa: E501
:type: bool
"""
self._vendor_diff_warning = vendor_diff_warning
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CheckIdentifyRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CheckIdentifyRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CheckIdentifyRequest):
return True
return self.to_dict() != other.to_dict()
|
[
"philipp.baus.studium@googlemail.com"
] |
philipp.baus.studium@googlemail.com
|
be89afab0a0a8125be3fcc85bbcf78d59a8b7933
|
b7d61afe530f603e1f0d6e269e6cd46a4911c51f
|
/Basic.py
|
f068ba4f0bb1b03ee7a2b952f12279b4a0a7e222
|
[] |
no_license
|
kashiv15/Cache---Performance
|
e98866a11101f3e12b3a04fd3352cb9e1ae7af25
|
99c33c5713b3d010bce1c4dc6f061635adfb95ad
|
refs/heads/master
| 2023-01-20T22:25:25.606853
| 2020-11-29T20:30:11
| 2020-11-29T20:30:11
| 289,562,473
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,098
|
py
|
# gem5 configuration script: a single TimingSimpleCPU system with DDR3
# memory, no caches, running the `merge` microbenchmark in
# syscall-emulation mode.
import m5
from m5.objects import *

system = System()

# 3.4 GHz clock domain with a default voltage domain.
system.clk_domain = SrcClockDomain()
system.clk_domain.clock = '3.4GHz'
system.clk_domain.voltage_domain = VoltageDomain()

# Timing memory mode with 8 GB of address space.
system.mem_mode = 'timing'
system.mem_ranges = [AddrRange('8192MB')]

system.cpu = TimingSimpleCPU()
system.membus = SystemXBar()

# No caches: CPU instruction/data ports connect straight to the memory bus.
system.cpu.icache_port = system.membus.slave
system.cpu.dcache_port = system.membus.slave

# Interrupt controller wiring (PIO plus both interrupt directions).
system.cpu.createInterruptController()
system.cpu.interrupts[0].pio = system.membus.master
system.cpu.interrupts[0].int_master = system.membus.slave
system.cpu.interrupts[0].int_slave = system.membus.master

system.system_port = system.membus.slave

# DDR3-1600 memory controller covering the full address range.
system.mem_ctrl = DDR3_1600_8x8()
system.mem_ctrl.range = system.mem_ranges[0]
system.mem_ctrl.port = system.membus.master

# Workload: the `merge` microbenchmark binary.
process = Process()
process.cmd = ['/home/kashi/Desktop/micro/cs251a-microbench/merge']
#change dir to benchmark
system.cpu.workload = process
system.cpu.createThreads()

# Instantiate and run the simulation to completion.
root = Root(full_system=False, system=system)
m5.instantiate()
print ("Begin")
exit_event = m5.simulate()
print ("Exiting @ tick %i because %s" % (m5.curTick(), exit_event.getCause()))
|
[
"noreply@github.com"
] |
kashiv15.noreply@github.com
|
c5920946fc2c9d9c50f213f3b22487f73946bd0b
|
c4d7ae80706b8ee954e671df83de673b8ab4ec8b
|
/cms34/resources/themes/fields.py
|
0a452c68590f67c767dbc4eb40abf61af8cc23df
|
[] |
no_license
|
motor23/cms34
|
b63c7cf6e493d78f25f7ee09510833570742cd8a
|
229887486bcb12af7ad70d764f05b73acb7eda81
|
refs/heads/master
| 2021-01-17T14:02:24.511669
| 2019-03-06T08:08:50
| 2019-03-06T08:08:50
| 31,367,780
| 0
| 2
| null | 2019-03-06T08:08:51
| 2015-02-26T13:16:30
|
Python
|
UTF-8
|
Python
| false
| false
| 243
|
py
|
# -*- coding: utf8 -*-
from ...mixed import (
XF_StreamSelect,
)
class XF_Themes(XF_StreamSelect):
    # Stream-select form field for attaching one or more Theme objects.
    name = 'themes'          # field name in the form
    label = u'Темы'          # user-visible label ("Themes")
    model = 'Theme'          # name of the target model class
    stream_name = 'themes'   # admin stream the values are selected from
    multiple = True          # allow selecting several themes at once


# Shared, ready-to-use field instance.
xf_themes = XF_Themes()
|
[
"motormen@gmail.com"
] |
motormen@gmail.com
|
3749bb5ed6c01cc9d93af2315596fea184b724b5
|
a4838d7572b91427f62a62928764b3733b3362c8
|
/Chapter11/Exercise11.04/test_11_4.py
|
6630d15301782e70f233a0ead05ea5b75c6affd5
|
[] |
no_license
|
Evgeneom/The-Statistics-and-Calculus-with-Python-Workshop
|
2648cc60bee446e56ffc90da7a00e80e2407c2c7
|
d57ef0f480ba1b418a904572bd18cd15ad8a837c
|
refs/heads/master
| 2023-03-18T23:43:19.971680
| 2021-01-21T12:11:33
| 2021-01-21T12:11:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
# Unit tests for the "Finding the Length of Insulation in a Roll" notebook.
# import_ipynb lets the .ipynb be imported (and executed) as a module, which
# brings r, opposite, spiral and pi into scope via the star import.
import unittest
import sys
import import_ipynb
from Finding_the_Length_of_Insulation_in_a_Roll import *


class Test(unittest.TestCase):
    def test_r(self):
        # Radius after 24*pi of unrolled length, to 1 decimal place.
        self.assertAlmostEqual(r(24*pi), 7.6, 1)

    def test_opposite(self):
        # Chord approximation between two nearby radii, to 4 decimal places.
        self.assertAlmostEqual(opposite(r(0), r(0.0001), 0.0001), 0.0002, 4)

    def test_trap_integral(self):
        # Total spiral length over 23.5 turns, to 1 decimal place.
        self.assertAlmostEqual(spiral(r, 0, 2*pi*23.5), 1107.5, 1)


if __name__ == '__main__':
    unittest.main()
|
[
"noreply@github.com"
] |
Evgeneom.noreply@github.com
|
fbcf5b7d508ad8a33e8e303f73759b7d5782c4e0
|
30a6975de792d613db836346ff758a7c0797d400
|
/lldb/test/API/lang/swift/parseable_interfaces/shared/TestSwiftInterfaceNoDebugInfo.py
|
075df0dd1fb74f689568a004da5f267648814dee
|
[
"NCSA",
"Apache-2.0",
"LLVM-exception"
] |
permissive
|
WYK15/swift-Ollvm11
|
0a2aa1b216c8e3f38829ae16db846039e8de149e
|
b28dba1ebe1186790650c72d5e97d8b46f1bc6e0
|
refs/heads/main
| 2023-06-27T18:14:47.652175
| 2021-06-10T12:47:56
| 2021-06-10T12:47:56
| 367,350,198
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,467
|
py
|
# TestSwiftInterfaceNoDebugInfo.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2019 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# -----------------------------------------------------------------------------
"""
Test that we load and handle swift modules that only have textual
.swiftinterface files -- i.e. no associated .swiftmodule file -- and no debug
info. The module loader should generate the .swiftmodule for any
.swiftinterface it finds unless it is already in the module cache.
"""
import glob
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import os.path
import unittest2
class TestSwiftInterfaceNoDebugInfo(TestBase):
mydir = TestBase.compute_mydir(__file__)
@swiftTest
def test_swift_interface(self):
"""Test that we load and handle modules that only have textual .swiftinterface files"""
self.build()
self.do_test()
@swiftTest
def test_swift_interface_fallback(self):
"""Test that we fall back to load from the .swiftinterface file if the .swiftmodule is invalid"""
self.build()
# Install invalid modules in the build directory first to check we
# still fall back to the .swiftinterface.
modules = ['AA.swiftmodule', 'BB.swiftmodule', 'CC.swiftmodule']
for module in modules:
open(self.getBuildArtifact(module), 'w').close()
self.do_test()
@swiftTest
@skipUnlessPlatform(["macosx"])
def test_prebuilt_cache_location(self):
"""Verify the prebuilt cache path is correct"""
self.build()
log = self.getBuildArtifact("types.log")
self.runCmd('log enable lldb types -f "%s"' % log)
# Set a breakpoint in and launch the main executable so we load the
# ASTContext and log the prebuilt cache path
lldbutil.run_to_source_breakpoint(
self, "break here", lldb.SBFileSpec("main.swift"),
exe_name=self.getBuildArtifact("main"))
# Check the prebuilt cache path in the log output
prefix = 'Using prebuilt Swift module cache path: '
expected_suffix = os.path.join('macosx', 'prebuilt-modules')
found = False
with open(log, "r") as logfile:
for line in logfile:
if prefix in line:
self.assertTrue(line.rstrip().endswith(os.path.sep + expected_suffix), 'unexpected prebuilt cache path: ' + line)
found = True
break
self.assertTrue(found, 'prebuilt cache path log entry not found')
# Check the host toolchain has a prebuilt cache in the same subdirectory of its swift resource directory
prebuilt_path = os.path.join(self.get_toolchain(), 'usr', 'lib', 'swift', expected_suffix)
self.assertTrue(len(os.listdir(prebuilt_path)) > 0)
def get_toolchain(self):
sdkroot = self.get_sdkroot()
# The SDK root is expected to be wihin the Xcode.app/Contents
# directory. Drop the last path component from the sdkroot until we get
# up to that level.
self.assertTrue('{0}Contents{0}'.format(os.path.sep) in sdkroot)
contents = os.path.abspath(sdkroot)
while os.path.split(contents)[1] != 'Contents':
(contents, _) = os.path.split(contents)
# Construct the expected path to the default toolchain from there and
# check it exists.
toolchain = os.path.join(contents, 'Developer', 'Toolchains', 'XcodeDefault.xctoolchain')
self.assertTrue(os.path.exists(toolchain), 'no default toolchain?')
return toolchain
def get_sdkroot(self):
with open(self.getBuildArtifact("sdk-root.txt"), "r") as sdkroot:
return sdkroot.read().rstrip()
def setUp(self):
TestBase.setUp(self)
def do_test(self):
# The custom swift module cache location
swift_mod_cache = self.getBuildArtifact("MCP")
# Clear the swift module cache (populated by the Makefile build)
shutil.rmtree(swift_mod_cache)
self.assertFalse(os.path.isdir(swift_mod_cache),
"module cache should not exist")
# Update the settings to use the custom module cache location
self.runCmd('settings set symbols.clang-modules-cache-path "%s"'
% swift_mod_cache)
target = self.dbg.CreateTarget(self.getBuildArtifact("main"))
self.assertTrue(target, VALID_TARGET)
self.registerSharedLibrariesWithTarget(target, ['AA', 'BB', 'CC'])
# Set a breakpoint in and launch the main executable
lldbutil.run_to_source_breakpoint(
self, "break here", lldb.SBFileSpec("main.swift"),
exe_name=self.getBuildArtifact("main"))
# Check we are able to access the public fields of variables whose
# types are from the .swiftinterface-only dylibs
var = self.frame().FindVariable("x")
lldbutil.check_variable(self, var, False, typename="AA.MyPoint")
child_y = var.GetChildMemberWithName("y") # MyPoint.y is public
lldbutil.check_variable(self, child_y, False, value="0")
# MyPoint.x isn't public, but LLDB can find it through type metadata.
child_x = var.GetChildMemberWithName("x")
self.assertTrue(child_x.IsValid())
# Expression evaluation using types from the .swiftinterface only
# dylibs should work too
lldbutil.check_expression(
self, self.frame(), "y.magnitudeSquared", "404", use_summary=False)
lldbutil.check_expression(
self, self.frame(), "MyPoint(x: 1, y: 2).magnitudeSquared", "5",
use_summary=False)
# Check the swift module cache was populated with the .swiftmodule
# files of the loaded modules
self.assertTrue(os.path.isdir(swift_mod_cache), "module cache exists")
a_modules = glob.glob(os.path.join(swift_mod_cache, 'AA-*.swiftmodule'))
b_modules = glob.glob(os.path.join(swift_mod_cache, 'BB-*.swiftmodule'))
c_modules = glob.glob(os.path.join(swift_mod_cache, 'CC-*.swiftmodule'))
self.assertEqual(len(a_modules), 1)
self.assertEqual(len(b_modules), 1)
self.assertEqual(len(c_modules), 0)
# Update the timestamps of the modules to a time well in the past
for file in a_modules + b_modules:
make_old(file)
# Re-import module A and B
self.runCmd("expr import AA")
self.runCmd("expr import BB")
# Import C for the first time and check we can evaluate expressions
# involving types from it
self.runCmd("expr import CC")
lldbutil.check_expression(
self, self.frame(), "Baz.baz()", "23", use_summary=False)
# Check we still have a single .swiftmodule in the cache for A and B
# and that there is now one for C too
a_modules = glob.glob(os.path.join(swift_mod_cache, 'AA-*.swiftmodule'))
b_modules = glob.glob(os.path.join(swift_mod_cache, 'BB-*.swiftmodule'))
c_modules = glob.glob(os.path.join(swift_mod_cache, 'CC-*.swiftmodule'))
self.assertEqual(len(a_modules), 1,
"unexpected number of swiftmodules for A.swift")
self.assertEqual(len(b_modules), 1,
"unexpected number of swiftmodules for B.swift")
self.assertEqual(len(c_modules), 1,
"unexpected number of swiftmodules for C.swift")
# Make sure the .swiftmodule files of A and B were re-used rather than
# re-generated when they were re-imported
for file in a_modules + b_modules:
self.assertTrue(is_old(file),
"Swiftmodule file was regenerated rather than reused")
OLD_TIMESTAMP = 1390550700  # 2014-01-24T08:05:00+00:00


def make_old(file):
    """Backdate *file*: set both its access and modification time to OLD_TIMESTAMP."""
    stamp = (OLD_TIMESTAMP, OLD_TIMESTAMP)
    os.utime(file, stamp)


def is_old(file):
    """Return True iff *file*'s mtime still matches the timestamp set by make_old."""
    mtime = os.stat(file).st_mtime
    return mtime == OLD_TIMESTAMP
|
[
"wangyankun@ishumei.com"
] |
wangyankun@ishumei.com
|
8e5ea75d19f8f20c402bd0ceccf491afa32e9d65
|
3d0e428be16d5bbabe7bfe57d6a42082ff815a38
|
/train/train_lr.py
|
ca7db0916b7464b4e269ceaf57ebcc76b5baa547
|
[] |
no_license
|
obrown06/sentiment-oracle
|
eeaf75f42cabd65f0e5572b4d960fccd443e0475
|
14bab186ef73d845a7c836bb9b72d87798677486
|
refs/heads/master
| 2018-10-24T07:15:23.617279
| 2018-08-20T23:47:31
| 2018-08-20T23:47:31
| 117,775,007
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,237
|
py
|
import sys
sys.path.insert(0, '../test/')
sys.path.insert(0, '../data/')
sys.path.insert(0, '../classifiers/')
import data_handler
import test_utils
import logistic_regression
import pickle
import numpy as np
print("#################################################################### \n")
print("GENERATING INPUT: LOGISTIC REGRESSION\n")
print("####################################################################\n")
AMAZON_PREFIX = "../pickle/amazon/"
YELP_PREFIX = "../pickle/yelp/balanced/"
RT_PREFIX = "../pickle/rt/balanced/"
PATH_TO_CLASSIFIER = YELP_PREFIX + "lr_classifier.p"
PATH_TO_EXTRACTOR = YELP_PREFIX + "lr_extractor.p"
data_info = {"source" : "YELP",
"path" : "../data/review.json",
"is_balanced" : True,
"n_samples_train" : 50000,
"n_samples_val" : 5000,
"n_samples_test" : 5000,
"class_labels" : [1, 2, 3, 4, 5]
}
classifier_info = {"nfeatures" : 2000,
"ngrams" : 2,
"niterations" : 1000,
"alpha" : 0.1,
"lambda" : 1
}
train_documents, train_labels, val_documents, val_labels, test_documents, test_labels, end_index = data_handler.load_data(data_info["source"], data_info["path"], data_info["n_samples_train"], data_info["n_samples_val"], data_info["n_samples_test"], data_info["class_labels"], is_balanced=data_info["is_balanced"])
print("end_index", end_index)
extractor = data_handler.generate_bag_of_ngrams_extractor(train_documents, classifier_info["nfeatures"], classifier_info["ngrams"])
pickle.dump(extractor, open(PATH_TO_EXTRACTOR, "wb"))
train_input = data_handler.generate_input(train_documents, extractor)
val_input = data_handler.generate_input(val_documents, extractor)
train_label_input = np.array(train_labels)
val_label_input = np.array(val_labels)
print("#################################################################### \n")
print("TRAINING: LOGISTIC REGRESSION\n")
print("####################################################################\n")
lr_classifier = logistic_regression.LogisticRegressionClassifier(data_info, classifier_info)
lr_classifier.train(train_input, train_label_input, "batch")
pickle.dump(lr_classifier, open(PATH_TO_CLASSIFIER, "wb"))
print("#################################################################### \n")
print("VALIDATING: LOGISTIC REGRESSION\n")
print("####################################################################\n")
predictions, actual = lr_classifier.test(val_input, val_label_input)
accuracy, near_accuracy, accurate_polarity = test_utils.multiclass_accuracy(predictions, actual)
precision, recall, specificity, accuracy, auc = test_utils.test_statistics(predictions, actual, pos_label=2)
print("####################################################################\n")
print("RESULTS: \n")
print("Accuracy: ", accuracy)
print("ONLY RELEVANT FOR FINE GRAINED:")
print("Near Accuracy: ", near_accuracy)
print("Accurate Polarity: ", accurate_polarity)
print("ONLY RELEVANT FOR BINARY:")
print("Recall: ", recall)
print("Specificity: ", specificity)
print("Precision: ", precision)
print("AUC: ", auc)
print("####################################################################")
|
[
"nicholas@athenaeg.com"
] |
nicholas@athenaeg.com
|
0982815fa6e2febe3f979f4b6f47031da010b787
|
fa7f72fe2369733f8f97d2324d8e236deb6eab39
|
/Python/Face Detection - OpenCV/F_D_Video_Final.py
|
299614479ea83f6d8a9577cfecd9527584045da7
|
[
"CC0-1.0"
] |
permissive
|
Gulnaz-Tabassum/hacktoberfest2021
|
bd296832f7ff07712b0b4671a8bd841d645abc29
|
ffee073f6efa4090244b55966fd69dde51be12f1
|
refs/heads/master
| 2023-08-17T13:18:17.557965
| 2021-10-08T09:52:19
| 2021-10-08T09:52:19
| 414,930,631
| 2
| 0
|
CC0-1.0
| 2021-10-08T09:52:20
| 2021-10-08T09:47:37
| null |
UTF-8
|
Python
| false
| false
| 1,146
|
py
|
import cv2
face_cascade=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade=cv2.CascadeClassifier('haarcascade_eye.xml')
cap = cv2.VideoCapture(0)
while 1:
ret, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detects faces of different sizes in the input image
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
# To draw a rectangle in a face
cv2.rectangle(img,(x,y),(x+w,y+h),(255,255,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
# Detects eyes of different sizes in the input image
eyes = eye_cascade.detectMultiScale(roi_gray)
#To draw a rectangle in eyes
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,127,255),2)
# Display an image in a window
cv2.imshow('img',img)
# Wait for Esc key to stop
k = cv2.waitKey(30) & 0xff
if k == 27:
break
# Close the window
cap.release()
# De-allocate any associated memory usage
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
Gulnaz-Tabassum.noreply@github.com
|
d9c4a1f54e371582e9a10bf13e6af9007a39e2dd
|
acfdf9a8ebca7d58d420c19e02fb7efb7bd7674b
|
/day-27.py
|
0e67aae2e8b6cab7fe87d9cb527c9ca0f2827643
|
[] |
no_license
|
Kuljeet-123/Hackerrank-30-days-code-with-python
|
9758112a7c622fc37a1818c3868e318ac13b5b99
|
028c43aafe52eb7ac17a16a2ac07cb421820f4d3
|
refs/heads/master
| 2020-05-21T01:03:49.486910
| 2019-09-19T08:14:54
| 2019-09-19T08:14:54
| 185,846,208
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,046
|
py
|
This problem is all about unit testing.
Your company needs a function that meets the following requirements:
For a given array of integers, the function returns the index of the element with the minimum value in the array. If there is more than one element with the minimum value, the returned index should be the smallest one.
If an empty array is passed to the function, it should raise an Exception.
Note: The arrays are indexed from .
A colleague has written that function, and your task is to design separated unit tests, testing if the function behaves correctly. The implementation in Python is listed below (Implementations in other languages can be found in the code template):
def minimum_index(seq):
if len(seq) == 0:
raise ValueError("Cannot get the minimum value index from an empty sequence")
min_idx = 0
for i in range(1, len(seq)):
if a[i] < a[min_idx]:
min_idx = i
return min_idx
Another co-worker has prepared functions that will perform the testing and validate returned results with expectations. Your task is to implement classes that will produce test data and the expected results for the testing functions. More specifically: function get_array() in TestDataEmptyArray class and functions get_array() and get_expected_result() in classes TestDataUniqueValues and TestDataExactlyTwoDifferentMinimums following the below specifications:
get_array() method in class TestDataEmptyArray has to return an empty array.
get_array() method in class TestDataUniqueValues has to return an array of size at least 2 with all unique elements, while method get_expected_result() of this class has to return the expected minimum value index for this array.
get_array() method in class TestDataExactlyTwoDifferentMinimums has to return an array where there are exactly two different minimum values, while method get_expected_result() of this class has to return the expected minimum value index for this array.
Take a look at the code template to see the exact implementation of functions that your colleagues already implemented.
#######################################################################################################################
My Code....................
def minimum_index(seq):
if len(seq) == 0:
raise ValueError("Cannot get the minimum value index from an empty sequence")
min_idx = 0
for i in range(1, len(seq)):
if seq[i] < seq[min_idx]:
min_idx = i
return min_idx
class TestDataEmptyArray(object):
@staticmethod
def get_array():
# complete this function
return []
class TestDataUniqueValues(object):
@staticmethod
def get_array():
# complete this function
return [3, 1, 2]
@staticmethod
def get_expected_result():
# complete this function
return 1
class TestDataExactlyTwoDifferentMinimums(object):
@staticmethod
def get_array():
# complete this function
return [3, 1, 1]
@staticmethod
def get_expected_result():
# complete this function
return 1
def TestWithEmptyArray():
try:
seq = TestDataEmptyArray.get_array()
result = minimum_index(seq)
except ValueError as e:
pass
else:
assert False
def TestWithUniqueValues():
seq = TestDataUniqueValues.get_array()
assert len(seq) >= 2
assert len(list(set(seq))) == len(seq)
expected_result = TestDataUniqueValues.get_expected_result()
result = minimum_index(seq)
assert result == expected_result
def TestiWithExactyTwoDifferentMinimums():
seq = TestDataExactlyTwoDifferentMinimums.get_array()
assert len(seq) >= 2
tmp = sorted(seq)
assert tmp[0] == tmp[1] and (len(tmp) == 2 or tmp[1] < tmp[2])
expected_result = TestDataExactlyTwoDifferentMinimums.get_expected_result()
result = minimum_index(seq)
assert result == expected_result
TestWithEmptyArray()
TestWithUniqueValues()
TestiWithExactyTwoDifferentMinimums()
print("OK")
|
[
"noreply@github.com"
] |
Kuljeet-123.noreply@github.com
|
cc83365f6c3ba8b9e1b461f4f40ee02ce4e5a9d3
|
d0e45ba04838518dd3e86bfecffe9e955c602be2
|
/01_Basic/app.py
|
0229e319f69e47f66d93a2aa4a2e9223556edb75
|
[] |
no_license
|
hieutrinhds/flask
|
97c2ff8e4fe32273d59988bd864c6458e47a2198
|
9cb2c7396885b3f7a2055ea4106352daa8fb4c10
|
refs/heads/master
| 2022-12-16T11:41:10.373542
| 2020-09-08T07:00:25
| 2020-09-08T07:00:25
| 293,274,948
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,041
|
py
|
from flask import Flask, jsonify, request, redirect, url_for, session, render_template, g
import sqlite3
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SECRET_KEY'] = 'Thisisasecret'
def connect_db():
sql = sqlite3.connect('data.db')
sql.row_factory = sqlite3.Row
return sql
def get_db():
if not hasattr(g, 'sqlite3'):
g.sqlite_db = connect_db()
return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
@app.route('/')
def index():
session.pop('name', None)
return "<h1>Hello, World!</h1>"
@app.route('/home', methods=['POST', 'GET'], defaults={'name': "Default"})
@app.route('/home/<string:name>', methods=['POST', 'GET'])
def home(name):
session['name'] = name
db = get_db()
cur = db.execute('select id, name, location from users')
results = cur.fetchall()
return render_template('home.html', name=name, display=False,
mylist=['one', 'two', 'three', 'four'],
listofdicts= [{'name' : 'Zach'}, {'name' : 'Joey'}], results=results)
@app.route('/json')
def json():
if 'name' in session:
name = session['name']
else:
name = 'Notinthesession'
return jsonify({'key': 'value', 'key2': [1, 2, 3], 'name': name})
@app.route('/query')
def query():
name = request.args.get('name')
location = request.args.get('location')
return f'<h1>Hi {name}. You are from {location}!</h1>'
@app.route('/theform', methods=['GET', 'POST'])
def theform():
if request.method == 'GET':
return render_template('form.html')
else:
name = request.form['name']
location = request.form['location']
db = get_db()
db.execute('insert into users (name, location) values (?, ?)', [name, location])
db.commit()
# return f'<h1>Hello {name}. You are from {location}. You are submitted the form successfully!</h1>'
return redirect(url_for('home', name=name, location=location))
'''
@app.route('/process', methods=['POST'])
def process():
name = request.form['name']
location = request.form['location']
return f'<h1>Hello {name}. You are from {location}. You are submitted the form successfully!</h1>'
'''
@app.route('/processjson', methods=['POST'])
def processjson():
data = request.get_json()
name = data['name']
location = data['location']
randomlist = data['randomlist']
return jsonify({"result" : "success", "name" : name, "location" : location, "randomkeyinlist" : randomlist[1]})
@app.route('/viewresults')
def viewresults():
db = get_db()
cur = db.execute('select id, name, location from users')
results = cur.fetchall()
return '<h2>The ID is {0}. The name is {1}. The location is {2}.</h2>'.format(results[1]['id'], results[1]['name'], results[1]['location'])
if __name__ == '__main__':
app.run(debug=True)
|
[
"hieutrinh.au@gmail.com"
] |
hieutrinh.au@gmail.com
|
e7d0dab25ef173459d313189465f2ad65048d5ce
|
37b8eb5e5d2640537d99100358d4b5132eb4e922
|
/plot_OMI_assimilate.py
|
0f0493c8805b9879788376a5172db0b873892a35
|
[] |
no_license
|
YellowruiAccount/Arctic_codes
|
94392ac9bcc1874dd3529d2bd59566c23a2130b2
|
2075efca802c645f99b4f4191a50ea112103c394
|
refs/heads/master
| 2023-05-11T01:18:58.306824
| 2021-05-19T15:06:54
| 2021-05-19T15:06:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,855
|
py
|
#!/usr/bin/env python
"""
NAME:
PURPOSE:
SYNTAX:
EXAMPLE:
MODIFICATIONS:
Blake Sorenson <blake.sorenson@und.edu> - 2018/09/07:
"""
import sys
import glob
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from mpl_toolkits.basemap import Basemap
import matplotlib.colors as color
##import matplotlib.colors as colors
from matplotlib.colors import rgb2hex,Normalize
from matplotlib.cm import ScalarMappable
from matplotlib.colorbar import ColorbarBase
if(len(sys.argv)<2):
print("SYNTAX: python plot_OMI_assimilate.py assimilation_output_file")
sys.exit()
infile = sys.argv[1]
plot_date = infile.strip().split('/')[-1].split('_')[0]
#plot_date = infile.strip().split('/')[-1].split('_')[-1].split('.')[0]
if(plot_date=='flat'):
plot_date = infile.strip().split('/')[-1].split('_')[0]
##LAT[i,j],LON[i,j],AI[i,j],0.5,SZA[i,j],VZA[i,j],RAZ[i,j], \
##ALBEDO[i,j,0],ALBEDO[i,j,1],REFLECTANCE[i,j,0],\
##REFLECTANCE[i,j,1]))
# Set up grid arrays
n_p = 1440
nl = 720
UVAI = np.zeros(shape=(n_p,nl))
#realUVAI = np.zeros(shape=(n_p,nl))
count = np.zeros(shape=(n_p,nl))
#real_count = np.zeros(shape=(n_p,nl))
# Open the input file
with open(infile,'r') as f:
# Read in all lines from the file
flines = f.readlines()
# Use the number at the beginning of the file to determine how large to
# make the data arrays
file_len = len(flines)
# For now, only plot AI, so only extract LAT, LON, and AI from the
# file.
LON = np.zeros(file_len)
LAT = np.zeros(file_len)
baseAI = np.zeros(file_len)
#realAI = np.zeros(file_len)
# Loop over the data file and insert the AI values into the 0.25x0.25 grid
for i, line in enumerate(flines[1:]):
###LAT[i] = float(lines.strip()[0])
###LON[i] = float(lines.strip()[1])
###AI[i] = float(lines.strip()[2])
tlon = float(line.strip().split()[0])
tlat = float(line.strip().split()[1])
tbase_ai = float(line.strip().split()[6])
#tbase_ai = float(line.strip().split()[2])
#treal_ai = float(line.strip().split()[2])
index1 = int(np.floor(tlat*4 + 360.))
index2 = int(np.floor(tlon*4 + 720.))
#index1 = int(np.floor(plotLAT[i,j]*4 + 360.))
#index2 = int(np.floor(plotLON[i,j]*4 + 720.))
if(index1 < 0): index1 = 0
if(index1 > 719): index1 = 719
if(index2 < 0): index2 = 0
if(index2 > 1439): index2 = 1439
UVAI[index2, index1] = (UVAI[index2,index1]*count[index2,index1] + tbase_ai)/(count[index2,index1]+1)
count[index2, index1] = count[index2,index1] + 1
lonmin = -180
lonmax = 180
latmax = 90
latmin = -90
# Set up the polar stereographic projection map
fig1 = plt.figure(figsize=(8,8))
m = Basemap(projection='npstere',boundinglat=60,lon_0=0,resolution='l')
fig = plt.gcf()
m.drawcoastlines()
m.drawparallels(np.arange(-80.,81.,20.))
m.drawmeridians(np.arange(-180.,181.,20.))
#define the lat which will be gridded into 0.25 x 0.25 bins
LATalt = np.arange(-90,90,0.25)
LONalt = np.arange(-180,180,0.25)
# Set up the color bar
cmap = plt.get_cmap('jet')
v_min = 0.000 # AOD
v_max = 1.000
#v_min = -1.000 # AI
#v_max = 3.000
norm = Normalize(vmin=v_min,vmax=v_max)
mapper = ScalarMappable(norm=norm,cmap=cmap)
nodatacolor="black"
# Loop over the grid and plot the data
for ii in range(0,n_p-1):
for jj in range(0,nl-1):
if(count[ii,jj]>0):
colors = mapper.to_rgba(UVAI[ii,jj])
lon0 = LONalt[ii]
lat0 = LATalt[jj]
lat1 = LATalt[jj+1]
lon1 = LONalt[ii+1]
if(lat1>latmin):
y = [lat0,lat1,lat1,lat0]
x = [lon0,lon0,lon1,lon1]
mx, my = m(x,y)
mxy = zip(mx,my)
pair1 = (mx[0],my[0])
pair2 = (mx[1],my[1])
pair3 = (mx[2],my[2])
pair4 = (mx[3],my[3])
# Plot the box on the map using color
color2 = rgb2hex(colors)
poly = Polygon([pair1,pair2,pair3,pair4],facecolor=color2,edgecolor=color2)
plt.gca().add_patch(poly)
plt.title('OMI Assimilated Aerosol Optical Depth '+plot_date)
#plt.title('OMI Assimilated Aerosol Index '+plot_date)
cax = fig.add_axes([0.16,0.075,0.7,0.025])
cb = ColorbarBase(cax,cmap=cmap,norm=norm,orientation='horizontal')
#cb.ax.set_xlabel('Aerosol Optical Depth')
cb.ax.set_xlabel('Aerosol Index')
#out_name = 'omi_assimilate_aod_'+plot_date+'.png'
out_name = 'omi_assimilate_aod_'+plot_date+'_10282019.png'
plt.savefig(out_name)
print('Saved image '+out_name)
|
[
"bsorenson4244@gmail.com"
] |
bsorenson4244@gmail.com
|
2d1ab9f2d3eadfb2a69ad816402402ec65cfb51d
|
d3a1e42bf4c895242229571cb2c013a06f437210
|
/.gitignore/fsdfv.py
|
bc85caf0e780b4b46e519d45b5b11f784d4cf6fb
|
[] |
no_license
|
mohanramofficial/rtfr
|
2b165cf72e6a2f12104027d0f633d3ca1bcdaecb
|
b9983377ef8757e7f1bf7e1fcda319551bcb59a1
|
refs/heads/master
| 2021-04-15T15:19:55.130692
| 2018-03-24T05:01:26
| 2018-03-24T05:01:26
| 126,567,439
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
| ERROR: type should be string, got "https://github.com/mohanramofficial/fgfd/blob/master/.gitignore/gvdhc.py\n\nnum = 407\n\n# take input from the user\n# num = int(input(\"Enter a number: \"))\n\n# prime numbers are greater than 1\nif num > 1:\n # check for factors\n for i in range(2,num):\n if (num % i) == 0:\n print(num,\"is not a prime number\")\n print(i,\"times\",num//i,\"is\",num)\n break\n else:\n print(num,\"is a prime number\")\n \n# if input number is less than\n# or equal to 1, it is not prime\nelse:\n print(num,\"is not a prime number\")\n"
|
[
"noreply@github.com"
] |
mohanramofficial.noreply@github.com
|
403e6aea999f4b8b857e6ebb24ff679f68a607af
|
46e50a1bd93569a0f945b65b6e84c927fb256f53
|
/mtpy/utils/modemMakeModel.py
|
36128ecdce8a38fc29194a3dd6cc3fa86d367988
|
[] |
no_license
|
lucasc15/mt3py
|
d3afed2527b1bc49e4e4cd4ec1d24b5083a5942d
|
734fc551e87cd30261e7d648d79d6d0e5885b85d
|
refs/heads/master
| 2021-01-19T00:35:44.069960
| 2016-07-03T14:03:54
| 2016-07-03T14:03:54
| 62,498,631
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,929
|
py
|
#!/usr/bin/env python
#build ModEM input Model from ModEM data file
import numpy as np
import sys,os
#==============================================================================
#plot model geometry
plot = True
# parameters:
n_xpadding = 10
n_ypadding = 6
#number of vertical padding layers is set to 3 !
#factor with which the padding stretches outside the central rectangle grid
padding_stretch = 1.2
n_layers = 45
#determine minimum block sizes
#used in the inner rectangle - constant widths
dx = 300
dy = 350
#region around stations discretised with these sizes
#outside, the grid steps will be extended exponentially
#the size of padding is determined by the numbers of cells as defined above
#number of trys to shift the grid for getting own cells for each station
n_maximum_gridshifts = 123
#depth of first layer
z0 = 50
#total model depth in meters
model_depth = 200000
#stretching factor for the whole model extension
model_extension_factor = 1
#starting resistivity value for homog. halfspace setup
rho0 = 100.
#define layered/1d model as input
inmodel1d = np.zeros((4,2))
inmodel1d[0] = 0,0.1
inmodel1d[1] = 250,100
inmodel1d[2] = 2000,10
inmodel1d[3] = 4000,1000
#inmodel1d = None
#==============================================================================
#allow rotation of the grid along a known geo electrical strike angle
# X,Y will be rotated to X',Y' with X' along strike
#rotation center is the midpoint of the station loactions
strike = 0.
#NOTE: if strike is set to a value !=0, the locations of the stations have to
#be adapted in the data file in the same way!!!
#==============================================================================
#name of datafile (to be handled as argument later on)
datafile = 'ModEMdata.dat'
#name of output model file
modelfile = 'THE_modelfile.rho'
#==============================================================================
#==============================================================================
#==============================================================================
outstring = ''
outstring += '# ModEM model generated with MTpy - layout read from datafile: {0}\n'.format(datafile)
Fin = open(datafile,'r')
data = Fin.readlines()
Fin.close()
coords = []
#read station coordinates
#start in line after header info, determined by starting character '>'
for dataline in data:
line = dataline.strip().split()
if (len(line) == 0) or line[0].strip()[0] in ['#','>']:
continue
try:
line = dataline.strip().split()
co = (float(line[4]),float(line[5]),float(line[6]))
coords.append(co)
except:
continue
# local, Cartesian coordinates:
coords = np.array(list(set(coords)))
if strike != 0:
original_coords = coords.copy()
cosphi = np.cos(strike/180.*np.pi)
sinphi = np.sin(strike/180.*np.pi)
RotMat = np.matrix(np.array([cosphi,sinphi,-sinphi,cosphi]).reshape(2,2))
center = (np.mean(coords[:,0]),np.mean(coords[:,1]))
rel_coords = coords[:,:2]
rel_coords[:,0] = coords[:,0] - center[0]
rel_coords[:,1] = coords[:,1] - center[1]
rotated_coords = np.dot(RotMat,np.matrix(rel_coords).T).T
rotated_coords[:,0] = rotated_coords[:,0] + center[0]
rotated_coords[:,1] = rotated_coords[:,1] + center[1]
coords[:,:2] = rotated_coords
#reduce grid to 2D - assuming all stations are at the surface
xmin = min(coords[:,0])
xmax = max(coords[:,0])
ymin = min(coords[:,1])
ymax = max(coords[:,1])
x_range = xmax - xmin
y_range = ymax - ymin
n_center_xblocks = int(x_range/dx) + 3
n_center_yblocks = int(y_range/dy) + 3
center_widthX = n_center_xblocks * dx
center_widthY = n_center_yblocks * dy
surplusX = center_widthX - x_range
surplusY = center_widthY - y_range
all_points_in_single_cell = False
n_shifts = 0
x_shifts = 0
y_shifts = 0
while all_points_in_single_cell is False:
#stop after a finite number of steps
if n_shifts > n_maximum_gridshifts:
break
shifting_fraction = np.sqrt(n_maximum_gridshifts) + 1
offset_x = x_shifts * dx/shifting_fraction
offset_y = y_shifts * dy/shifting_fraction
if n_shifts > 0:
print('{0} shift(s): x-offset {1} m - y-offset {2} m'.format(n_shifts,offset_x,offset_y))
center_x0 = xmin - surplusX/2. + offset_x
center_y0 = ymin - surplusY/2. + offset_y
grid_x_points = (np.arange(n_center_xblocks+1) * dx) + center_x0
grid_y_points = (np.arange(n_center_yblocks+1) * dy) + center_y0
station_cells = []
for idx_sta,co in enumerate(coords):
idx_x = np.argmin(np.abs(grid_x_points-co[0]))
if (grid_x_points-co[0])[idx_x] == 0:
# coordinate lies on a node line => need to shift
print('station coordinates lie on cell nodes')
break
#otherwise, shift the index to correspond with the row of blocks, if necessary:
if grid_x_points[idx_x] > co[0] :
idx_x -= 1
idx_y = np.argmin(np.abs(grid_y_points-co[1]))
if (grid_y_points-co[1])[idx_y] == 0:
# coordinate lies on a node line => need to shift
break
#otherwise, shift the index to correspond with the row of blocks, if necessary:
if grid_y_points[idx_y] > co[1] :
idx_y -= 1
#cells enumerated West->East first, then northwards
cell_index = idx_x * n_center_xblocks + idx_y
station_cells.append(cell_index)
if len(set(station_cells)) == len(coords):
all_points_in_single_cell = True
#shift the grid
x_shifts += 1
if x_shifts >= (shifting_fraction - 1):
x_shifts = 0
y_shifts += 1
n_shifts += 1
x_range = np.max(grid_x_points) - np.min(grid_x_points)
y_range = np.max(grid_y_points) - np.min(grid_y_points)
if all_points_in_single_cell < 1:
print('ERROR - cannot build grid having each station in a single cell!\n'\
'change the values for dx,dy or remove stations')
sys.exit()
#Now the inner grid is well distributed over the stations
#add padding to the sides:
grid_x_points = list(grid_x_points)
x_padding_widths = [dx]
for idx_pad in range(n_xpadding):
pad = x_padding_widths[-1] * padding_stretch
x_padding_widths.append(pad)
x_padding_widths.pop(0)
#extend the padding to at least the extent of the regular grid:
pad_ratio = np.sum(x_padding_widths)/(x_range * model_extension_factor)
if pad_ratio < 1:
x_padding_widths = np.array(x_padding_widths)/pad_ratio
#add the padding to the grid
for idx_pad in range(n_xpadding):
grid_x_points.insert(0,grid_x_points[0]-x_padding_widths[idx_pad])
grid_x_points.append(grid_x_points[-1]+x_padding_widths[idx_pad])
grid_y_points = list(grid_y_points)
y_padding_widths = [dy]
for idy_pad in range(n_ypadding):
pad = y_padding_widths[-1] * padding_stretch
y_padding_widths.append(pad)
y_padding_widths.pop(0)
#extend the padding to at least the extent of the regular grid:
pad_ratio = np.sum(y_padding_widths)/(y_range * model_extension_factor)
if pad_ratio < 1:
y_padding_widths = np.array(y_padding_widths)/pad_ratio
#add the padding to the grid
for idy_pad in range(n_ypadding):
grid_y_points.insert(0,grid_y_points[0]-y_padding_widths[idy_pad])
grid_y_points.append(grid_y_points[-1]+y_padding_widths[idy_pad])
xmin_padded = grid_x_points[0]
ymin_padded = grid_y_points[0]
# transfer the block coordinates into block widths
xblocks = []
for idx_x in range(len(grid_x_points)-1):
xblocks.append(grid_x_points[idx_x+1] - grid_x_points[idx_x])
yblocks = []
for idy_y in range(len(grid_y_points)-1):
yblocks.append(grid_y_points[idy_y+1] - grid_y_points[idy_y])
#---------------------------------------------------------------------
n_zpadding = 3
#build block depths:
n_layers_eff = n_layers - 1
#splitted uppermost layer
log_part_thickness = model_depth - (n_layers_eff-1) * z0
depths = np.logspace( np.log10(z0), np.log10(log_part_thickness), n_layers_eff ) + \
np.arange(n_layers_eff) * z0
depths = list(depths)
thicknesses = [z0/2.]
for i, layer in enumerate(depths):
if i == 0 :
t = layer/2.
else:
t = layer - depths[i-1]
thicknesses.append(t)
padding = [thicknesses[-1]*padding_stretch]
for idx_pad in range(n_zpadding-1):
padding.append(padding[-1]*padding_stretch)
total_padding = np.sum(padding)
pad_ratio = total_padding/model_depth
if pad_ratio < 1.5:
padding = list(np.array(padding)/pad_ratio*1.5)
if pad_ratio >2 :
padding = list(np.array(padding)/pad_ratio*2)
thicknesses.extend(padding)
grid_z_points = [0]
for t in thicknesses:
grid_z_points.append(grid_z_points[-1]+t)
#some information for the user:
print('\n\t Model set up - dimensions: {0:.1f}x{1:.1f}x{2:.1f} km^3 ({3}x{4}x{5} cells)\n'.format(
(grid_x_points[-1]-grid_x_points[0])/1000.,(grid_y_points[-1]-grid_y_points[0])/1000.,
depths[-1]/1000.,len(grid_x_points)-1,len(grid_y_points)-1,len(grid_z_points)-1))
outstring += '{0} {1} {2} {3} {4}\n'.format(len(xblocks),len(yblocks),
len(thicknesses), 0,'LOGE')
xstring = ''
for block in xblocks:
xstring += '{0:.3f} '.format(block)
xstring += '\n'
outstring += xstring
ystring = ''
for block in yblocks:
ystring += '{0:.3f} '.format(block)
ystring += '\n'
outstring += ystring
zstring = ''
for block in thicknesses:
zstring += '{0:.3f} '.format(block)
zstring += '\n'
outstring += zstring
for idx_z in range(len(thicknesses)):
z_string = ''
#empty line before each layer:
z_string += '\n'
resistivity = rho0
if inmodel1d is not None:
layertop_depth = grid_z_points[idx_z]
layertop_modelboundary_distance = layertop_depth-inmodel1d[:,0]
layertop_idx = (np.abs(layertop_modelboundary_distance)).argmin()
if layertop_modelboundary_distance[layertop_idx] < 0:
layertop_idx -= 1
resistivity = inmodel1d[layertop_idx,1]
for idx_y in range(len(yblocks)):
y_string = ''
for idx_x in range(len(xblocks)):
x_string = '{0:.5E} '.format(np.log(resistivity))
y_string += x_string
y_string += '\n'
z_string += y_string
outstring += z_string
co_reference = '{0} {1} {2} \n'.format(np.min(grid_x_points),np.min(grid_y_points),0)
outstring += co_reference
outstring += '0 \n'
Fout= open(modelfile,'w')
Fout.write(outstring)
Fout.close()
def plotgrid(stations,grid_x,grid_y,grid_z=None, n_xpadding = None, n_y_padding=None, n_zpadding_layers = None):
ion()
close('all')
equal = True
equal = False
grid_x = [i/1000. for i in grid_x]
grid_y = [i/1000. for i in grid_y]
# Note: X and Y are swapped - mathematical definition used in the plotting functions!!!
#fig = figure(1)
#ax = fig.gca()
fig = figure(figsize=(8, 6))
if grid_z is not None:
colspan = 3
else:
colspan = 4
if equal == True:
ax = subplot2grid((1, 4), (0, 0), colspan=colspan,aspect='equal')
else:
ax = subplot2grid((1, 4), (0, 0), colspan=colspan,aspect='auto')
#ax = subplot(1,2,1)
ax.scatter(stations[:,1]/1000.,stations[:,0]/1000.,c='r')
ax.scatter([ymin_padded/1000.],[xmin_padded/1000.],c='b',marker='x',s=40)
outline_x = [min(grid_x),min(grid_x),max(grid_x),max(grid_x),min(grid_x)]
outline_y = [min(grid_y),max(grid_y),max(grid_y),min(grid_y),min(grid_y)]
ax.plot(outline_y,outline_x,c='r')
if n_xpadding is not None and n_ypadding is not None:
regular_x = [grid_x[n_xpadding],grid_x[n_xpadding],
grid_x[-n_xpadding-1],grid_x[-n_xpadding-1],grid_x[n_xpadding]]
regular_y = [grid_y[n_ypadding],grid_y[-n_ypadding-1],
grid_y[-n_ypadding-1],grid_y[n_ypadding],grid_y[n_ypadding]]
ax.plot(regular_y,regular_x,c='b')
extension_factor = 0.1
x_extent = max(grid_x) - min(grid_x)
x_extension = extension_factor * x_extent
ax.set_ylim([min(grid_x) - x_extension,max(grid_x) + x_extension])
y_extent = max(grid_y) - min(grid_y)
y_extension = extension_factor * y_extent
ax.set_xlim([min(grid_y) - y_extension,max(grid_y) + y_extension])
ax.set_yticks(grid_x, minor=True)
ax.yaxis.grid(False, which='major')
ax.yaxis.grid(True, which='minor',c='g')
ax.set_xticks(grid_y, minor=True)
ax.xaxis.grid(False, which='major')
ax.xaxis.grid(True, which='minor',c='g')
ax.set_xlabel('Easting (Y-coordinate) in km')
ax.set_ylabel('Northing (X-coordinate) in km')
ax.set_title('Model geometry (origin at {0:.1f},{1:.1f})'.format(xmin_padded,ymin_padded))
if equal == True:
ax.set_aspect('equal',adjustable='box')
draw()
if grid_z is not None:
grid_z = [-i/1000. for i in grid_z]
bottom_index = len(grid_z) - n_zpadding_layers -1
if equal == True:
ax2 = subplot2grid((1, 4), (0, 3),aspect='equal')
else:
ax2 = subplot2grid((1, 4), (0, 3),aspect='auto')
#fig2 = figure(2)
#ax2 = fig2.gca()
#ax2 = subplot(1,2,2)
outline_z = [min(grid_z),min(grid_z),max(grid_z),max(grid_z),min(grid_z)]
outline_y = [min(grid_y),max(grid_y),max(grid_y),min(grid_y),min(grid_y)]
plot(outline_y,outline_z,c='r')
plot([min(grid_y),max(grid_y)],[grid_z[bottom_index],grid_z[bottom_index]],c='b')
ax2.axhline(linewidth=2, color='k')
extension_factor = 0.1
z_extent = max(grid_z) - min(grid_z)
z_extension = extension_factor * z_extent
ax2.set_ylim([min(grid_z) - z_extension,max(grid_z) + z_extension])
y_extent = max(grid_y) - min(grid_y)
y_extension = extension_factor * y_extent
ax2.set_xlim([min(grid_y) - y_extension,max(grid_y) + y_extension])
#ax2.set_aspect('equal','datalim')
ax2.set_yticks(grid_z, minor=True)
ax2.yaxis.grid(False, which='major')
ax2.yaxis.grid(True, which='minor',c='k')
ax2.set_xlabel('Easting (Y-coordinate) in km')
ax2.set_ylabel('Depth in km')
ax2.set_title('Model layers')
ax2.set_aspect('equal',adjustable='box')
tight_layout()
show(block=True)
# When plotting was requested, detach from the shell (POSIX only) so the
# interactive plot window stays open after this script finishes.
if plot == True:
    import platform
    if not platform.system().lower().startswith('win'):
        # generate an interactive plot window, which remains open after this
        # script has finished (os.fork is unavailable on Windows, hence the
        # platform check above):
        proc_num = os.fork()
        if proc_num != 0:
            # This is the parent process, that should quit immediately to
            # return to the shell; the child keeps the window alive.
            print("You can kill the plot window with the command \"kill %d\"." % proc_num)
            sys.exit()
    from pylab import *
    plotgrid(coords,grid_x_points,grid_y_points,grid_z_points,n_xpadding,n_ypadding, n_zpadding)
|
[
"currahl@yahoo.ca"
] |
currahl@yahoo.ca
|
2cb15cb8f534bf7210f79940336fca5337a3874d
|
ec405878d93e554338cf6e0b58ba36453a036f90
|
/workflow_package_creator.py
|
6b872fbf7d999e1b1db7999bab4da9be4a7a3162
|
[] |
no_license
|
vadimsmilgin/check_automation
|
f794b5cc2255d46754315776053830292e8e10b7
|
dac3b640012027681a12be83894cb8ee2d595bfc
|
refs/heads/master
| 2022-10-29T16:36:26.608936
| 2020-06-12T10:28:37
| 2020-06-12T10:28:37
| 271,249,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,513
|
py
|
#!/usr/bin/python
import os
import platform
import re
from lxml import etree
from context import Context, Windows, MacOS
from work_with_s_objects import WorkWithSObjects
def create_package_xml():
    """Generate a Salesforce ``package.xml`` deployment manifest.

    Collects every sObject that has lookup fields (via ``WorkWithSObjects``)
    and writes a manifest requesting all Flows (``*`` wildcard) plus the
    Workflow metadata for each of those sObjects. The file is written into
    the automation-files directory resolved from the platform context.

    Side effects: changes the current working directory and writes
    ``package.xml`` there.
    """
    # Pick the platform-specific context (paths differ per OS).
    if platform.system() == 'Windows':
        _context = Context(Windows())
    if platform.system() == 'Darwin':
        _context = Context(MacOS())
    wws = WorkWithSObjects(_context)
    s_object_name = wws.get_lookup_fields()
    keys = s_object_name.keys()
    document = etree.Element("Package")
    document.set("xmlns", "http://soap.sforce.com/2006/04/metadata")
    # Request every Flow via the '*' wildcard member.
    types = etree.Element("types")
    members = etree.SubElement(types, "members")
    members.text = '*'
    name = etree.SubElement(types, "name")
    name.text = "Flow"
    # BUG FIX: the Flow <types> element was built but never attached to the
    # document, so the generated manifest silently omitted all Flows.
    document.append(types)
    # One Workflow <types> entry per sObject that has lookup fields.
    for key in keys:
        types = etree.Element("types")
        document.append(types)
        members = etree.SubElement(types, "members")
        members.text = key
        name = etree.SubElement(types, "name")
        name.text = "Workflow"
    version = etree.SubElement(document, "version")
    version.text = "48.0"
    etree.indent(document, space="    ")
    tree = etree.ElementTree(document)
    # lxml emits the XML declaration with single quotes; normalize to double
    # quotes to match the format Salesforce tooling conventionally produces.
    result = re.sub(
        r'\'',
        '\"',
        etree.tostring(tree.getroot(), encoding="UTF-8", xml_declaration=True, pretty_print=True).decode("utf-8")
    )
    os.chdir(_context.get_automation_files_path())
    with open("package.xml", "w") as package:
        package.write(result)
if __name__ == "__main__":
    create_package_xml()
|
[
"vadimsmilgin64@gmail.com"
] |
vadimsmilgin64@gmail.com
|
2f4b4624a70b3f7b58565625cf554142d9c0c3c4
|
ec8fa3b2436ed17a3e703deec80a414bb5306b08
|
/huxley/python/2045-inversion-sort.py
|
922229aeb5f2619f579e26bceb19a3959748629b
|
[] |
no_license
|
pmba/algorithms
|
3c3f89a0eae9b3144325ec719d1db3109aa707db
|
2c15b1bc18645bc2cc24b7b8b615134066845e6b
|
refs/heads/master
| 2020-03-27T00:41:27.816090
| 2018-10-05T12:05:48
| 2018-10-05T12:05:48
| 145,646,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 853
|
py
|
def inversion(array):
    """Merge-sort *array* in place and return its inversion count."""
    count = 0
    if len(array) > 1:
        mid = len(array) // 2
        lo = array[:mid]
        hi = array[mid:]
        count = inversion(lo) + inversion(hi)
        # Infinity sentinels let the merge run to the end of `array`
        # without per-half bounds checks.
        lo.append(float('Inf'))
        hi.append(float('Inf'))
        i = j = 0
        for k in range(len(array)):
            if lo[i] <= hi[j]:
                array[k] = lo[i]
                i += 1
            else:
                array[k] = hi[j]
                j += 1
                # Every real element still waiting in `lo` forms an
                # inversion with the element just taken from `hi`.
                count += mid - i
    return count
# Driver: read the number of test cases; for each case consume a blank
# separator line, then the array size and one element per line, and print
# that case's inversion count.
cases = int(input())
global inv  # NOTE(review): `global` at module level is a no-op, and `inv` is never read by inversion()
inv = 0
for i in range(cases):
    inv = 0
    input()  # discard the blank separator line between cases
    arr_size = int(input())
    array = [0]*arr_size
    for j in range(arr_size):
        array[j] = int(input())
    print(inversion(array))
|
[
"pmba@ic.ufal.br"
] |
pmba@ic.ufal.br
|
fd23272afb6040f22d495da6f1ccc4cde2e7d91e
|
245a9d9fbfde70c33afe39769d7a93b2d6c3e3ad
|
/map_bn2conv.py
|
2f8cf103de3d33a5dddb7a4050a06d393a4bdf15
|
[] |
no_license
|
wjc852456/pytorch-practice
|
dc9b1cb8d8d85778307e772556a1703f04bd551a
|
dc90620d8aa06d7c16c6c7d0f06e0d2ef16a7741
|
refs/heads/master
| 2020-03-24T21:06:20.389522
| 2018-07-31T12:56:13
| 2018-07-31T12:56:13
| 143,013,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,902
|
py
|
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import sys
import os
sys.path.append(os.path.expanduser('~/pytorch-quant/utee'))
sys.path.append(os.path.expanduser('~/pytorch-mobilenet-v2'))
import quant
import MobileNetV2
def bn2conv(model):
    r"""Fold each BatchNorm2d into the immediately preceding Conv2d.

    Walks ``model`` recursively. Inside every ``nn.Sequential``, a
    ``Conv2d`` that is directly followed by a ``BatchNorm2d`` absorbs the
    normalization into its weight/bias, and the BatchNorm slot is replaced
    by an empty ``nn.Sequential`` (identity). The conv layer must be
    arranged before its bn layer. Fusion uses the BN *running* statistics,
    so the model should be in eval mode for fused outputs to match.
    """
    if isinstance(model, nn.Sequential):
        ikv = enumerate(model._modules.items())
        for i, (k, v) in ikv:
            if isinstance(v, nn.Conv2d):
                # BUG FIX: guard against the conv being the last child
                # (the original raised StopIteration here).
                try:
                    key, bn = next(ikv)[1]
                except StopIteration:
                    break
                if isinstance(bn, nn.BatchNorm2d):
                    # BN(y) = a*y + b, applied to the conv output y.
                    with torch.no_grad():
                        if bn.affine:
                            a = bn.weight / torch.sqrt(bn.running_var + bn.eps)
                            b = -bn.weight * bn.running_mean / torch.sqrt(bn.running_var + bn.eps) + bn.bias
                        else:
                            a = 1.0 / torch.sqrt(bn.running_var + bn.eps)
                            b = -bn.running_mean / torch.sqrt(bn.running_var + bn.eps)
                        new_weight = v.weight * a.reshape(v.out_channels, 1, 1, 1)
                        # BUG FIX: a pre-existing conv bias must be scaled
                        # too: BN(Wx + c) = (aW)x + (a*c + b). The original
                        # discarded the old bias entirely.
                        if v.bias is not None:
                            new_bias = a * v.bias + b
                        else:
                            new_bias = b
                    v.weight = Parameter(new_weight)
                    v.bias = Parameter(new_bias)
                    # Replace the BN slot with an identity so the module
                    # structure (and state-dict keys) stays intact.
                    model._modules[key] = nn.Sequential()
                else:
                    # BUG FIX: the consumed non-BN sibling was previously
                    # skipped entirely; recurse into it as well.
                    bn2conv(bn)
            else:
                bn2conv(v)
    else:
        for k, v in model._modules.items():
            bn2conv(v)
# --- quick sanity check of bn2conv on a tiny Conv2d + BatchNorm2d pair ---
#net = MobileNetV2.MobileNetV2()
#bn2conv(net)
#print(net)
ifmap = 3   # input channels
ofmap = 4   # output channels
conv1 = nn.Conv2d(ifmap, ofmap, 3, padding=1, bias=False)
bn1 = nn.BatchNorm2d(ofmap)
seq1 = nn.Sequential(conv1, bn1).eval()
x = torch.randn(2,3,5,5)
y = seq1(x)   # reference output before fusion
print(seq1)
print('\n')
bn2conv(seq1)
y1 = seq1(x)  # output after the BN has been folded into the conv
print(seq1)
'''
conv2 = conv1
conv2.load_state_dict(conv1.state_dict(),strict=False)
bn2 = quant.BatchNorm2d_new(ofmap)
bn2.load_state_dict(bn1.state_dict())
bn2.set_newparam()
conv2.weight = Parameter(conv2.weight*bn2.a.reshape(ofmap,1,1,1))
conv2.bias = Parameter(bn2.b)
y2 = conv2(x)
#print(y2)
#print(conv2.bias)
'''
|
[
"wangjichen2012@163.com"
] |
wangjichen2012@163.com
|
7cc991b3cfe972ac5fd124fd8d9f3ec6ca838eb5
|
64bffe2a7934c012195dd46ce3873c74c4ed20dc
|
/likes/models.py
|
5ada38830c94576ea38cc13488fbf927c73b27f5
|
[] |
no_license
|
Power2hang/mysite
|
f01c0ca3b074676cbe8911069a943f0deb594f91
|
c37f2dd2c70821cda4fa76c992d4fb6225506662
|
refs/heads/master
| 2022-11-25T19:20:48.874809
| 2020-01-06T01:54:49
| 2020-01-06T01:54:49
| 159,321,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
class LikeCount(models.Model):
    """Aggregate like counter attached to an arbitrary model instance."""
    # Generic foreign key: (content_type, object_id) identifies the liked object.
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
    # Running total of likes for the target object.
    liked_num = models.IntegerField(default=0)
class LikeRecord(models.Model):
    """One user's like of one object (generic relation), with timestamp."""
    # Generic foreign key: (content_type, object_id) identifies the liked object.
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
    # The user who liked; records are removed when the user is deleted.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    liked_time = models.DateTimeField(auto_now_add=True)
|
[
"1247637039@qq.com"
] |
1247637039@qq.com
|
53347068559ac6f5dcd89d0d775da298fbf13504
|
b1a9fa233492aef3d983a3c4c7ca70105b2d1ad2
|
/cw2/twitter_clone/bin/wheel
|
0cd317ec5daad0431dcf072926e33936116afde5
|
[] |
no_license
|
milkos14/nikolaou_andreas_set09103_cw2
|
7f7c9096f18eedb4d9fe0a7fc5bced2259634d07
|
73cbbc9cd636736032deb49732b9527c170adf10
|
refs/heads/master
| 2020-04-07T06:06:32.927181
| 2018-11-27T19:29:19
| 2018-11-27T19:29:19
| 158,122,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
#!/home/40211330/cw2/twitter_clone/bin/python2
# -*- coding: utf-8 -*-
# Auto-generated console-script shim for the `wheel` command inside this
# virtualenv: strip any '-script.pyw'/'.exe' suffix from argv[0] (Windows
# launcher artefacts), then delegate to wheel's CLI entry point.
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"40211330@live.napier.ac.uk"
] |
40211330@live.napier.ac.uk
|
|
fe608fb9ba98f0f1ab8f1ae85fec8b54b345a900
|
d930e060542a569dec217d099e387d396ed3ca8d
|
/our_contacts/models.py
|
7f6037f2da302b3302327698b9ee1a17fd5bbcab
|
[] |
no_license
|
dzhem911/t_kontakta
|
9eab09e6491a3e906b6aab34991b06f9d97ac07e
|
f404517501dddb505c8ad8e4d13a316afdd47190
|
refs/heads/master
| 2022-09-29T14:59:40.026628
| 2020-05-24T15:39:54
| 2020-05-24T15:39:54
| 255,593,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 574
|
py
|
from django.db import models
from tinymce.models import HTMLField
class OurContacts(models.Model):
    """Contact-page entry: a titled rich-text block with image and phone number."""
    title = models.CharField(max_length=100, verbose_name='Заголовок')
    # Rich-text body edited/rendered via django-tinymce.
    description = HTMLField()
    img = models.ImageField(upload_to='media/', verbose_name='Изображение', null=True, blank=True)
    # NOTE(review): max_length=11 suggests a bare Russian-format number
    # (e.g. 8XXXXXXXXXX) — confirm expected formatting with callers.
    phone_number = models.CharField(max_length=11, verbose_name='Номер телефона')
    def __str__(self):
        # Admin/list display uses the entry title.
        return self.title
    class Meta:
        verbose_name = 'Контакты'
        verbose_name_plural = 'Контакты'
[
"dzhemal911@mail.ru"
] |
dzhemal911@mail.ru
|
26deb937ebee71f0637a6fce55598cea3e7c0140
|
a01ec17189abf5333a9fddba771b1c96c335e64b
|
/src/viewer/urls.py
|
4ecee086ea828fe65e388b0d9fd2fea323ba2127
|
[
"MIT"
] |
permissive
|
coderuz/WatchMeCode
|
c9d89e29f19dd114d4449e247f8d94d4b94deadd
|
96e3e20dcbedddd0e4823d90c87cbd644ec268d5
|
refs/heads/master
| 2020-03-30T16:10:22.557016
| 2018-07-17T07:19:29
| 2018-07-17T07:19:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 605
|
py
|
from django.urls import path
from . import views
# URL routes for the viewer app: welcome/show, room editing and display,
# code saving, and session login/logout.
urlpatterns = [
    path('', views.show, name='welcome'), # TODO: Change <-this to welcome view or Register View for Auth Users
    path('edit/<str:room>/', views.edit, name='editable'), # TODO: either this room does not belong to you or does
    path('room/<str:room>/', views.show, name='showroom'), # TODO: render code or give nice 404
    path('save', views.save, name='save'), # TODO: Think good way of safely (from author) saving Room Code
    path('login/', views.do_login, name='login'),
    path('logout/', views.do_logout, name='logout'),
]
|
[
"mitazimjon@gmail.com"
] |
mitazimjon@gmail.com
|
c6454d5ab12e01278e08734c2b1fc8c5cca23a85
|
23ed9e7f9f37204b991a4b8d37cb2a2620959814
|
/FRM_function_update.py
|
c3847b6e212012c4a7ec6ef4ae330e0eda4bb457
|
[] |
no_license
|
Jojanna/geojo_tools
|
263b895d16c40621088bec1601f054ff0dc3a3f7
|
5403b203df036e6ad95f995ece4eee1b3cb60d7b
|
refs/heads/master
| 2023-06-01T18:44:57.820566
| 2021-06-13T20:11:46
| 2021-06-13T20:11:46
| 376,630,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,024
|
py
|
import numpy as np
import pandas as pd
from rppy.fluid import batzle_wang
"""
Contents:
* k_wet
* fluids_calc
* reuss fluids
*rho_dry_calc
"""
def k_wet(k_dry, k_fluid, phiT, k_HS):  # gassmanns
    """Gassmann fluid substitution: saturated-rock bulk modulus.

    k_dry   -- dry-rock bulk modulus
    k_fluid -- pore-fluid bulk modulus
    phiT    -- total porosity (fraction)
    k_HS    -- mineral (matrix) bulk modulus
    """
    dry_term = k_dry / (k_HS - k_dry)
    fluid_term = k_fluid / (phiT * (k_HS - k_fluid))
    gassmann_sum = dry_term + fluid_term
    return gassmann_sum * k_HS / (1 + gassmann_sum)
def fluids_calc(pressure, temp, fl, s, g, api_oil, ratio):
    """Brine and hydrocarbon density/bulk modulus via Batzle-Wang (rppy).

    Parameters
    ----------
    pressure, temp : pressure and temperature in the units batzle_wang expects
    fl : 'oil' or 'gas' — hydrocarbon phase to evaluate
    s : brine salinity; g : gas gravity; api_oil : oil API; ratio : GOR (Rg)

    Returns
    -------
    (rho_brine, k_brine, rho_h, k_h) — densities and bulk moduli (GPa).

    Raises
    ------
    ValueError
        If `fl` is neither 'oil' nor 'gas'.
    """
    from geojo.moduli import k_mu
    w = batzle_wang(pressure, temp, 'brine', S=s, G=g, api=api_oil, Rg=ratio)
    rho_brine = w["rho"]
    vp_brine = w["Vp"]
    # Shear modulus of a fluid is zero, hence Vs = 0.
    k_brine, mu_brine = k_mu(vp_brine, 0, rho_brine)
    if fl == 'oil':
        h = batzle_wang(pressure, temp, 'oil', S=s, G=g, api=api_oil, Rg=ratio)
        rho_h = h["rho"]
        vp_h = h["Vp"]
        k_h, mu_h = k_mu(vp_h, 0, rho_h)
    elif fl == 'gas':
        h = batzle_wang(pressure, temp, 'gas', S=s, G=g, api=api_oil, Rg=ratio)
        rho_h = h["rho"]
        # output from BW is in MPa --> convert to GPa
        k_h = h["K"] / np.array(10 ** 3)
    else:
        # BUG FIX: the original printed a message and called exit(0), which
        # terminated the whole process with a *success* status (and left
        # k_h undefined). Raise so callers can handle the error instead.
        raise ValueError("fl must be 'oil' or 'gas', got %r" % (fl,))
    return rho_brine, k_brine, rho_h, k_h
def reuss_fluids(rho_brine, k_brine, rho_h, k_h, sw_data):
    """Mix brine and hydrocarbon per water-saturation sample.

    Bulk modulus uses the Reuss (harmonic) average; density is the
    arithmetic (volume-weighted) average. Returns (k_f, rho_f) as lists
    aligned with *sw_data*.
    """
    k_f = [1 / (sw / k_brine + (1 - sw) / k_h) for sw in sw_data]
    rho_f = [sw * rho_brine + (1 - sw) * rho_h for sw in sw_data]
    return k_f, rho_f
def rho_dry_calc(rho_data, rho_f_ini, phie_data):
    """Strip the in-situ fluid from bulk density: rho_dry = rho - phi * rho_fluid.

    All three arguments are equal-length sequences; returns a list.
    """
    return [rho_rock - p * rho_f
            for rho_rock, rho_f, p in zip(rho_data, rho_f_ini, phie_data)]
def dry_rock(phie_data, k_sat, k_f, k_ma):
    """Invert Gassmann per sample: pore-space and dry-rock bulk moduli.

    For each (phie, k_sat, k_f, k_ma) tuple computes the pore-space
    stiffness k_pore and the dry-frame modulus k_d. Returns
    (k_pore_data, k_d_data) as lists aligned with the inputs.
    """
    k_pore_data = [
        phie_n / (1 / k_sat_n - 1 / k_ma_n) - 1 / (1 / k_f_n - 1 / k_ma_n)
        for phie_n, k_sat_n, k_f_n, k_ma_n in zip(phie_data, k_sat, k_f, k_ma)
    ]
    k_d_data = [
        1 / (phie_n / k_pore_n + 1 / k_ma_n)
        for phie_n, k_pore_n, k_ma_n in zip(phie_data, k_pore_data, k_ma)
    ]
    return k_pore_data, k_d_data
def fluid_sub_k(k_ma, phie_data, k_pore_data, k_f):
    """Forward Gassmann substitution: saturated modulus per sample.

    Zero-porosity samples fall back to the matrix modulus. Returns a list
    aligned with the inputs.
    """
    k_out = []
    for k_ma_n, phie_n, k_pore_n, k_f_n in zip(k_ma, phie_data, k_pore_data, k_f):
        if phie_n > 0:
            value = 1 / (1 / k_ma_n + phie_n / (k_pore_n + (k_ma_n * k_f_n) / (k_ma_n - k_f_n)))
        else:
            # No pore space: the rock modulus is just the matrix modulus.
            value = 1 / (1 / k_ma_n)
        k_out.append(value)
    return k_out
def multiple_FRM(phie_data, sw_out, k_ma, kphi_set, rho_dry, pressure_out, temp, fl_out, s, g, api_oil, ratio):
    """Fluid-replacement modelling to a new saturation/fluid scenario.

    Computes target fluid properties (Batzle-Wang via fluids_calc), mixes
    them per sample (Reuss modulus / arithmetic density), then substitutes
    into the rock frame using the precomputed pore stiffnesses *kphi_set*.

    Returns (k_f_out, rho_f_out, k_out, rho_out, k_out_norm).
    """
    rho_brine, k_brine, rho_h_out, k_h_out = fluids_calc(pressure_out, temp, fl_out, s, g, api_oil, ratio)
    # Per-sample fluid mix at the new water saturation.
    k_f_out = [1 / (sw_out_n / k_brine + (1 - sw_out_n) / k_h_out) for sw_out_n in sw_out]
    rho_f_out = [sw_out_n * rho_brine + (1 - sw_out_n) * rho_h_out for sw_out_n in sw_out]
    # Gassmann substitution; zero-porosity samples keep the matrix modulus.
    k_out = []
    for k_ma_n, phie_n, k_pore_n, k_f_n in zip(k_ma, phie_data, kphi_set, k_f_out):
        if phie_n > 0:
            k_out.append(1 / (1 / k_ma_n + phie_n / (k_pore_n + (k_ma_n * k_f_n) / (k_ma_n - k_f_n))))
        else:
            k_out.append(1 / (1 / k_ma_n))
    # Re-saturate the dry density with the new fluid.
    rho_out = [rhod + phie * rhof for rhod, phie, rhof in zip(rho_dry, phie_data, rho_f_out)]
    # Matrix-normalized modulus, handy for cross-plotting.
    k_out_norm = [(k / k_ma_n) for k, k_ma_n in zip(k_out, k_ma)]
    return k_f_out, rho_f_out, k_out, rho_out, k_out_norm
|
[
"j.wallis@dunelm.org.uk"
] |
j.wallis@dunelm.org.uk
|
b23143408eae95819c6760c853c06db075ea9987
|
d62e01ee1b50b8228d25736daceae0e822f3a0a0
|
/examples/user_guide/add_tasks.py
|
e323c771d0a918d6b08a4f1fc852134d93c6e40d
|
[
"MIT"
] |
permissive
|
dalg24/radical.entk
|
b6f34ae1b2075f638fbdfd5fc397ea4c0d87cb93
|
4aa68d8de7804e09ca64629035ccda0b79ac0b76
|
refs/heads/master
| 2020-04-03T17:25:37.548618
| 2018-10-16T12:06:30
| 2018-10-16T12:06:30
| 155,444,172
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,916
|
py
|
from radical.entk import Pipeline, Stage, Task, AppManager
import os
# ------------------------------------------------------------------------------
# Set default verbosity
# Set default verbosity
if os.environ.get('RADICAL_ENTK_VERBOSE') == None:
    os.environ['RADICAL_ENTK_REPORT'] = 'True'
# Description of how the RabbitMQ process is accessible
# No need to change/set any variables if you installed RabbitMQ has a system
# process. If you are running RabbitMQ under a docker container or another
# VM, set "RMQ_HOSTNAME" and "RMQ_PORT" in the session where you are running
# this script.
hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
port = os.environ.get('RMQ_PORT', 5672)
if __name__ == '__main__':
    # Create a Pipeline object
    p = Pipeline()
    # Create a Stage object
    s = Stage()
    # Ten independent echo tasks inside the single stage.
    for cnt in range(10):
        # Create a Task object
        t = Task()
        t.name = 'my-task'        # Assign a name to the task (optional, do not use ',' or '_')
        t.executable = ['/bin/echo']   # Assign executable to the task
        t.arguments = ['I am task %s'%cnt]  # Assign arguments for the task executable
        # Add the Task to the Stage
        s.add_tasks(t)
    # Add Stage to the Pipeline
    p.add_stages(s)
    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)
    # Create a dictionary describe four mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {
        'resource': 'local.localhost',
        'walltime': 10,
        'cpus': 1
    }
    # Assign resource request description to the Application Manager
    appman.resource_desc = res_dict
    # Assign the workflow as a set or list of Pipelines to the Application Manager
    # Note: The list order is not guaranteed to be preserved
    appman.workflow = set([p])
    # Run the Application Manager
    appman.run()
|
[
"b.vivek91@gmail.com"
] |
b.vivek91@gmail.com
|
190402e6dd636bf2f4fa9578042f043ce51c8530
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/wtBko8Bc8o8Tmra3q_11.py
|
93df75889df3f71ae56c4f1e24530b81e184ba11
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
def halflife_calculator(mass, hlife, n):
    """Radioactive decay after *n* half-lives of length *hlife* years.

    Returns [remaining mass rounded to 3 decimal places, elapsed years].
    """
    remaining = mass / (2 ** n)
    elapsed_years = hlife * n
    return [round(remaining, 3), elapsed_years]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
d25b847438a89a2ef7907ccbe7bd97416f294e59
|
9edf6efa74b211a50db1767d7cbfbc885942a161
|
/spatialnde/dataguzzler/dg_3d.py
|
1abeb18c444bc7871ffe6d16bc11d4bac8547d64
|
[
"Apache-2.0"
] |
permissive
|
isuthermography/spatialnde
|
42a49118a3f7cd62c27b6d317b60faff0a55b279
|
983de2e68139c33ab5eb8df59ac0aaa84bad096a
|
refs/heads/master
| 2022-05-03T13:00:35.981337
| 2021-06-23T15:26:11
| 2021-06-23T15:26:11
| 197,398,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,623
|
py
|
import copy
import json
#try:
# from cStringIO import StringIO # python 2.x
# pass
#except ImportError:
# from io import StringIO # python 3.x
# pass
from io import BytesIO
import numpy as np
import dg_file as dgf
import dg_metadata as dgm
import dataguzzler as dg
from ..exporters.vrml import VRMLSerialization
from ..exporters.x3d import X3DSerialization
from ..ndeobj import ndepart
def create_x3d_vrml_channel(wfmdict,channame,assembly,coordframe,x3dnamespace=None):
    """Create a dataguzzler waveform that carries 3D geometry as metadata.

    Serializes *assembly* (all its parts) to both VRML97 and X3D in the
    given *coordframe* and stores the two documents in the waveform's
    "VRML97Geom" / "X3DGeom" metadata, plus a '|'-separated "TextureChans"
    list of every '#'-referenced texture channel name used by the parts'
    surface appearances. The waveform's data itself is a 1-element dummy.

    If *wfmdict* is not None the new waveform is also registered in it
    under *channame*. Returns the waveform.
    """
    # Minimal placeholder waveform: the payload lives in the metadata.
    wfm=dg.wfminfo()
    wfm.Name=channame
    wfm.dimlen=np.array((1,),dtype='i8')
    wfm.ndim=1
    wfm.data=np.array((0,),dtype='f')
    wfm.wfmrevision=0
    # Serialize every part to VRML97 into an in-memory buffer.
    VRMLBuf=BytesIO()
    vrmlwriter = VRMLSerialization.tofileorbuffer(VRMLBuf)
    for part in assembly.parts:
        part.VRMLWrite(vrmlwriter,coordframe,UVparameterization=None)
        pass
    vrmlwriter.finish()
    # Same geometry serialized as X3D (optionally with a custom namespace).
    X3DBuf=BytesIO()
    x3dwriter=X3DSerialization.tofileorbuffer(X3DBuf,x3dnamespace=x3dnamespace)
    for part in assembly.parts:
        part.X3DWrite(x3dwriter,coordframe,UVparameterization=None)
        pass
    x3dwriter.finish()
    dgm.AddMetaDatumWI(wfm,dgm.MetaDatum("VRML97Geom",VRMLBuf.getvalue().decode('utf-8')))
    dgm.AddMetaDatumWI(wfm,dgm.MetaDatum("X3DGeom",X3DBuf.getvalue().decode('utf-8')))
    # Collect texture channel names: '#name' URLs reference dataguzzler
    # channels (strip the leading '#').
    TextureNames=set([])
    for part in assembly.parts:
        for surface in part.implpart.surfaces:
            if hasattr(surface.appearance,"texture_url") and surface.appearance.texture_url.startswith("#"):
                TextureNames.add(surface.appearance.texture_url[1:])
                pass
            pass
        pass
    # Add metadata listing the texture names
    dgm.AddMetaDatumWI(wfm,dgm.MetaDatum("TextureChans","|".join(TextureNames)))
    if wfmdict is not None:
        wfmdict[channame]=wfm
        pass
    return wfm
def ndepartparams_from_landmarked3d(l3d_wfmdict,wfmname_list,TexChanPrefix=""):
    """Build ndepart construction parameters from landmarked texture waveforms.

    For each texture channel name in *wfmname_list* (each must start with
    *TexChanPrefix*), reads the waveform's axis metadata (IniVal/Step) to
    compute UV scaling — (lower-left corner, units-per-texcoord) keyed by
    the '#<unprefixed name>' texture URL — and collects 2-D landmark /
    fiducial positions from FIDUCIAL_*/LANDMARK_* *_X/_Y metadata.

    Returns ((landmarkdict2d, landmarkdict3d), implpartparams) suitable as
    ndepartparams; landmarkdict3d is currently always empty.

    Raises ValueError if a channel name lacks the required prefix.
    """
    landmarkdict2d={}
    landmarkdict3d={}
    UVScalingDict={}
    for wfmname in wfmname_list:
        if not(wfmname.startswith(TexChanPrefix)):
            # BUG FIX: the original format string had one %s but two
            # arguments, which raised TypeError instead of the intended
            # ValueError.
            raise ValueError("Error processing texture channel: Texture channel name %s does not start with specified prefix (%s)" % (wfmname,TexChanPrefix))
        unprefixedname=wfmname[len(TexChanPrefix):]
        wfm=l3d_wfmdict[wfmname]
        # Axis metadata: IniVal = first pixel center, Step = pixel pitch.
        IniVal1=dgm.GetMetaDatumWIDbl(wfm,"IniVal1",0.0)
        Step1=dgm.GetMetaDatumWIDbl(wfm,"Step1",1.0)
        IniVal2=dgm.GetMetaDatumWIDbl(wfm,"IniVal2",0.0)
        Step2=dgm.GetMetaDatumWIDbl(wfm,"Step2",1.0)
        # lowerleft defined at corner of pixel, not pixel center
        lowerleft_meaningfulunits=(IniVal1-Step1/2.0,IniVal2-Step2/2.0)
        meaningfulunits_per_texcoord = (Step1*wfm.dimlen[0],Step2*wfm.dimlen[1])
        # index must be a string because params must be JSON-compatible
        texurl = "#"+unprefixedname
        UVScalingDict[texurl]=(lowerleft_meaningfulunits,meaningfulunits_per_texcoord)
        for mdname in wfm.MetaData:
            # "FIDUCIAL_" and "LANDMARK_" are both 9 characters, so the
            # name slice below works for either prefix.
            if (mdname.startswith("FIDUCIAL_") or mdname.startswith("LANDMARK_")) and mdname.endswith("_X"):
                fiducialname=mdname[9:-2]
                fiducialmdname=mdname[:-2]
                # BUG FIX: np.NaN was removed in NumPy 2.0; np.nan is the
                # portable spelling (identical value on all versions).
                landmarkx=dgm.GetMetaDatumWIDbl(wfm,"%s_X" % (fiducialmdname),np.nan)
                landmarky=dgm.GetMetaDatumWIDbl(wfm,"%s_Y" % (fiducialmdname),np.nan)
                landmarkdict2d[fiducialname]=(texurl,landmarkx,
                                              landmarky)
    # Was UV_ScalingParamsBySurfaceNum
    implpartparams = { "UV_ScalingParamsByTexURL": UVScalingDict }
    landmarks_params = (landmarkdict2d,landmarkdict3d)
    ndepartparams = (landmarks_params,implpartparams)
    return ndepartparams
def blank_uv_from_landmarked3d(l3d_wfmdict,wfmname):
    """Replace *wfmname* in *l3d_wfmdict* with a zero-filled 2-D waveform.

    The replacement keeps the original dimensions and a deep copy of the
    original metadata, but its data is all zeros (Fortran order, float32)
    and its revision counter is reset — ready for new content to be added.
    Returns the new waveform.
    """
    # Replace specified waveform with a blank, ready for stuff to be added
    oldwfm=l3d_wfmdict[wfmname]
    l3d_wfmdict[wfmname]=dg.wfminfo()
    l3d_wfmdict[wfmname].Name=wfmname
    l3d_wfmdict[wfmname].ndim=2
    l3d_wfmdict[wfmname].dimlen=np.array(oldwfm.dimlen,dtype='i8')
    l3d_wfmdict[wfmname].wfmrevision=0
    l3d_wfmdict[wfmname].n=np.prod(l3d_wfmdict[wfmname].dimlen)
    # Fortran (column-major) ordering to match dataguzzler conventions.
    l3d_wfmdict[wfmname].data=np.zeros(l3d_wfmdict[wfmname].dimlen,dtype='f',order='F')
    # Deep copy so later metadata edits don't touch the old waveform.
    l3d_wfmdict[wfmname].MetaData = copy.deepcopy(oldwfm.MetaData)
    return l3d_wfmdict[wfmname]
def ndepart_from_dataguzzler_wfm(wfm_3d,wfmdict,objframe):
    """Load an ndepart object from a dataguzzler 3-D geometry waveform.

    Follows "X3DGeomRef" indirections through *wfmdict* until a waveform
    with inline "X3DGeom" metadata is found, accumulating "TexChanPrefix"
    strings along the chain. Builds ndepartparams from the (already
    prefixed) "TextureChans" list and parses the X3D document.

    Returns (ndepart object, accumulated texture-channel prefix).
    Raises ValueError if an X3DGeomRef points at a missing waveform.
    """
    # Really, we shouldn't need wvm_uv but as it is
    # we need to extract ndepartparams from it prior to load (!)
    geomstr=""
    TextureChans=dgm.GetMetaDatumWIStr(wfm_3d,"TextureChans","").split("|") # TextureChans are already prefixed...
    TexChanPrefix=""
    refwfm_3d = wfm_3d
    # Walk the reference chain until inline geometry is found (or the
    # chain ends with neither geometry nor a further reference).
    while len(geomstr)==0:
        geomstr=dgm.GetMetaDatumWIStr(refwfm_3d,"X3DGeom","")
        TexChanPrefix+=dgm.GetMetaDatumWIStr(refwfm_3d,"TexChanPrefix","")
        if len(geomstr)==0:
            geomrefstr=dgm.GetMetaDatumWIStr(refwfm_3d,"X3DGeomRef","")
            if len(geomrefstr)==0:
                break;
            if geomrefstr not in wfmdict:
                raise ValueError("X3DGeomRef metadata refers to nonexisting waveform %s" % (geomrefstr))
            refwfm_3d=wfmdict[geomrefstr]
            pass
        pass
    ndepartparams=ndepartparams_from_landmarked3d(wfmdict,TextureChans,TexChanPrefix)
    x3dbuf=BytesIO(geomstr.encode('utf-8'))
    obj = ndepart.fromx3d(objframe,ndepartparams,x3dbuf)
    return (obj,TexChanPrefix)
|
[
"sdh4@iastate.edu"
] |
sdh4@iastate.edu
|
3310e6775faa200891070352b482f64fce2ea800
|
54abd03baf2424343a56d11deae58516f9620c94
|
/backend/src/api/migrations/0015_auto_20190121_1917.py
|
7aa5536fce00bd5e6854725bda34bb3eef18354d
|
[] |
no_license
|
krskibin/zpp
|
ce590fee81edf5ba1a565b6be302be7aea2e69f1
|
76572557435ba9583d46ae6f841d37d484c7c04e
|
refs/heads/master
| 2021-06-10T23:45:09.280420
| 2019-06-24T13:46:23
| 2019-06-24T13:46:23
| 153,451,428
| 1
| 0
| null | 2021-03-30T10:31:18
| 2018-10-17T12:14:52
|
Python
|
UTF-8
|
Python
| false
| false
| 457
|
py
|
# Generated by Django 2.1.2 on 2019-01-21 19:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine the Image.name field."""

    dependencies = [
        ('api', '0014_auto_20190121_1917'),
    ]

    operations = [
        migrations.AlterField(
            model_name='image',
            name='name',
            # NOTE(review): the default is the literal timestamp captured when
            # makemigrations ran — a typical artefact of using a *call* (e.g.
            # datetime.now()) rather than a callable as the model default;
            # confirm against the model definition.
            field=models.CharField(blank=True, default='2019-01-21 19:17:29.178352', editable=False, max_length=100, null=True),
        ),
    ]
|
[
"noreply@github.com"
] |
krskibin.noreply@github.com
|
478dec05c29f554e8d1effc63ad7264f99e95538
|
c236e0c3b34a81e75acb9591423b6aad9d6a22dd
|
/unitTestRunner.py
|
451a956edaed0cee0386f60f3b60470f1b9a6a7c
|
[] |
no_license
|
abhijeetdtu/heimcharge
|
2cd68c9eaaf5b94206d310c8b8348133b5d4e77b
|
569a9d22916808ba8b67169a2822a91e05a051e9
|
refs/heads/master
| 2021-06-06T02:55:29.913134
| 2019-06-11T03:07:20
| 2019-06-11T03:07:20
| 131,433,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
# Aggregate test runner: star-import the project's test modules so that the
# TestCase subclasses land in this module's namespace, where unittest.main()
# auto-discovers them.
import unittest
from UnitTests.ChartPlotTest import *
from UnitTests.GeoOpsTest import *
from UnitTests.FileOpsTest import *
if __name__ == '__main__':
    # exit=False keeps the interpreter alive after the run (useful when
    # launched from an interactive session).
    unittest.main(exit=False)
|
[
"abhijeetdtu@gmail.com"
] |
abhijeetdtu@gmail.com
|
817651586fec5e60a2d025e0fd50748f7b3c55c8
|
ebcf0d04f6e8545891b3552deff2aa5abd833871
|
/authentication/migrations/0001_initial.py
|
67b8587dff175c204f5ecc17bd713555b3706968
|
[] |
no_license
|
rpascual0812/webpay
|
0d854d2a47cb9cae7c7ba64d989abb309f2f4cfa
|
8c607c24662292a884e1ee31b7424bd7c00908a0
|
refs/heads/master
| 2021-01-10T04:29:37.221743
| 2016-01-03T15:29:21
| 2016-01-03T15:29:21
| 48,950,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,246
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: create the custom Account auth model."""

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Account',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # password/last_login come from AbstractBaseUser conventions.
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
                # NOTE(review): employeeid is declared as an EmailField —
                # looks odd for an employee identifier; confirm intent.
                ('employeeid', models.EmailField(unique=True, max_length=75)),
                ('username', models.CharField(unique=True, max_length=50)),
                ('firstname', models.CharField(max_length=50)),
                ('lastname', models.CharField(max_length=50)),
                ('email', models.CharField(max_length=50)),
                ('is_admin', models.BooleanField(default=False)),
                ('datecreated', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
    ]
|
[
"rafael.pascual@team.acquireap.com"
] |
rafael.pascual@team.acquireap.com
|
44c569b36803775a0c36187b8503777aef16b0ec
|
fa7790c45dbc1ee804011e9dff2d4ff424b9f3d6
|
/Searching and Sorting/Counting sort/Implementation.py
|
63a5b8f34c85215b5c59e2ea21c34fde374752dc
|
[] |
no_license
|
KuroKousuii/Algorithms
|
bcdf75e58e20287e3162ef3302f8051604d7b7d6
|
3bf0250780c9d11c69546ca0da130fbbcada7e40
|
refs/heads/main
| 2023-05-31T07:41:07.399881
| 2021-06-16T09:32:16
| 2021-06-16T09:32:16
| 368,274,761
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,177
|
py
|
# Python program for counting sort
# The main function that sort the given string arr[] in
# alphabetical order
def countSort(arr):
    """Counting sort of a string's characters.

    Returns a new list containing the characters of *arr* in ascending
    order of their code points (codes 0-255 supported); *arr* itself is
    not modified. Runs in O(n + 256) time.
    """
    # The output character array that will have sorted arr
    output = [0 for i in range(len(arr))]
    # Histogram: one slot per possible character code.
    count = [0 for i in range(256)]
    # Store count of each character
    for ch in arr:
        count[ord(ch)] += 1
    # Prefix sums: count[c] becomes the index just past the last slot for
    # code c. BUG FIX: start at 1 — the original started at 0, which added
    # count[-1] (i.e. count[255]) into count[0] and corrupted the result
    # whenever chr(255) appeared in the input.
    for i in range(1, 256):
        count[i] += count[i - 1]
    # Place each character at its slot, consuming the prefix sums.
    for i in range(len(arr)):
        output[count[ord(arr[i])] - 1] = arr[i]
        count[ord(arr[i])] -= 1
    return output
# Driver program to test above function: sort a sample string and print
# the joined result.
arr = "geeksforgeeks"
ans = countSort(arr)
print("Sorted character array is % s" % ("".join(ans)))
|
[
"84732337+KuroKousuii@users.noreply.github.com"
] |
84732337+KuroKousuii@users.noreply.github.com
|
a1440a3c4c685e9974daa780c9e4a8ce9e887701
|
0517a6bb332d35e4a3ccb05ae2f4b692b2b4d297
|
/lesson_5/dz_1.py
|
5f5cecf14250790898099fb6ef8b599db78c5711
|
[] |
no_license
|
VitalyBorin/GB
|
4a404c1a5ce6d54c85b8e87550bf61e215673e56
|
f8e09565c4adc41b88043f88c7185656aaecf44c
|
refs/heads/master
| 2023-02-24T10:24:47.034976
| 2021-01-19T21:51:07
| 2021-01-19T21:51:07
| 328,787,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
# Read lines from stdin and append them to file.txt until the user enters
# an empty line (which terminates the loop and is NOT written).
a = ' '  # non-empty seed so the loop body runs at least once
with open("file.txt", "w") as f:
    while a != '':
        a = input('Введите строку и нажмите Enter для записи строки в файл. При вводе пустой строки запись в файл прекратится')
        # BUG FIX: the original wrote the terminating empty line to the
        # file before the loop condition was re-checked.
        if a != '':
            print(a, file=f)
|
[
"vitaly.borin@gmail.com"
] |
vitaly.borin@gmail.com
|
a4cbfef2228e9ff39aa8907031dc7bb505c0b1da
|
4f72a15d3f922b2a526d6646dec7394212fb0b2d
|
/Foodie/download_data.py
|
52828fed853c742c16d07159394f0b9cd0c140ee
|
[] |
no_license
|
panchenhui/Foodie
|
876c24580b829fda2274662a4ccad3d72a2c41b2
|
d5636d6b38a4cbf0623a577785860b2c64a062a0
|
refs/heads/master
| 2021-03-30T16:25:37.959942
| 2018-04-26T02:39:56
| 2018-04-26T02:39:56
| 120,586,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
from firebase import firebase
class download(object):
    """Fetches the '/business' node from the project's Firebase DB on construction."""
    def __init__(self):
        # NOTE(review): second argument (authentication) is None — assumes
        # the database permits unauthenticated reads; confirm.
        self.firebase1 = firebase.FirebaseApplication('https://inf552-69068.firebaseio.com', None)
        # Entire '/business' subtree, as returned by the REST API.
        self.data = self.firebase1.get('/business', None)
|
[
"pan.558@osu.edu"
] |
pan.558@osu.edu
|
6d951c40f76555e0280a5e182bb2a631fa8b7936
|
a242274ab48b13785ac0a78bbc353c30b6eb7fd2
|
/MyLighthouse/lighthouse/parsers/drcov.py
|
a6321f6a1932f2ecccfff3ff63fafb067151a1ae
|
[
"LicenseRef-scancode-mulanpsl-1.0-en",
"MulanPSL-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
cbwang505/DIYDynamoRIO
|
da48c6a57ba6b21088e4bbd36804131de56f6f36
|
93efa58e802e17115f7623ef4e38a1e2ac856822
|
refs/heads/master
| 2022-07-28T23:58:11.929087
| 2021-08-27T00:45:53
| 2021-08-27T00:46:10
| 323,505,098
| 26
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,625
|
py
|
#!/usr/bin/python
import os
import sys
import mmap
import struct
import re
from ctypes import *
#------------------------------------------------------------------------------
# drcov log parser
#------------------------------------------------------------------------------
class DrcovData(object):
"""
A drcov log parser.
"""
def __init__(self, filepath=None):
    """Parse the drcov log at *filepath* and populate the instance fields.

    NOTE(review): the default filepath=None would fail inside open() during
    parsing — the default appears vestigial; confirm callers always pass a path.
    """
    # original filepath
    self.filepath = filepath
    # drcov header attributes
    self.version = 0
    self.flavor = None
    # drcov module table
    self.module_table_count = 0
    self.module_table_version = 0
    self.modules = []
    # drcov basic block data
    self.bb_table_count = 0
    self.bb_table_is_binary = True
    self.basic_blocks = []
    # parse the given filepath
    self._parse_drcov_file(filepath)
#--------------------------------------------------------------------------
# Public
#--------------------------------------------------------------------------
def get_module(self, module_name, fuzzy=True):
    """
    Get a module by its name.

    Note that this is a 'fuzzy' lookup by default: a case-insensitive
    substring match on the module filename, retried with the extension
    stripped from the requested name. With fuzzy=False, only an exact
    filename match counts. Returns None when nothing matches.
    """
    if fuzzy:
        # first pass: case-insensitive substring match on the filename
        needle = module_name.lower()
        for module in self.modules:
            if needle in module.filename.lower():
                return module
        # second pass: cleave the extension from the requested name
        # (if present) and try again
        if "." in module_name:
            needle = module_name.split(".")[0].lower()
            for module in self.modules:
                if needle in module.filename.lower():
                    return module
    else:
        # strict lookup: exact filename equality
        for module in self.modules:
            if module.filename == module_name:
                return module
    # no matching module exists
    return None
def get_blocks_by_module(self, module_name):
    """
    Extract coverage blocks pertaining to the named module.

    Returns a list of (start, size) tuples for every basic block whose
    module id matches the (fuzzily looked-up) module. Raises ValueError
    if no module matches *module_name*.
    """
    # locate the coverage that matches the given module_name
    module = self.get_module(module_name)
    # if we fail to find a module that matches the given name, bail
    if not module:
        raise ValueError("No coverage for module '%s' in log" % module_name)
    # extract module id for speed
    mod_id = module.id
    # loop through the coverage data and filter out data for only this module
    coverage_blocks = [(bb.start, bb.size) for bb in self.basic_blocks if bb.mod_id == mod_id ]
    # return the filtered coverage blocks
    return coverage_blocks
#--------------------------------------------------------------------------
# Parsing Routines - Top Level
#--------------------------------------------------------------------------
def _parse_drcov_file(self, filepath):
    """
    Parse drcov coverage from the given log file.

    Reads the header, the module table, and the basic-block table in
    sequence; each sub-parser consumes its portion of the filestream.
    """
    # Binary mode: the trailing basic-block table is raw binary data.
    with open(filepath, "rb") as f:
        self._parse_drcov_header(f)
        self._parse_module_table(f)
        self._parse_bb_table(f)
def _parse_drcov_data(self, drcov_data):
    """
    Parse drcov coverage from the given data blob.

    Unimplemented stub: parsing from an in-memory blob (rather than a
    file) is not yet supported.
    """
    pass # TODO/DRCOV
#--------------------------------------------------------------------------
# Parsing Routines - Internals
#--------------------------------------------------------------------------
def _parse_drcov_header(self, f):
    """
    Parse drcov log header from filestream.

    Consumes the first two lines and populates self.version / self.flavor.
    Only version-2 logs are supported.

    NOTE(review): the file is opened in binary mode but the lines are
    split with str separators — this parser appears to target Python 2
    (shebang suggests the same); on Python 3 these operations on bytes
    would raise. Confirm intended interpreter.
    """
    # parse drcov version from log
    #   eg: DRCOV VERSION: 2
    version_line = f.readline().strip()
    self.version = int(version_line.split(":")[1])
    # parse drcov flavor from log
    #   eg: DRCOV FLAVOR: drcov
    flavor_line = f.readline().strip()
    self.flavor = flavor_line.split(":")[1]
    assert self.version == 2, "Only drcov version 2 log files supported"
def _parse_module_table(self, f):
    """
    Parse drcov log module table from filestream.

    The table consists of a header line, an optional 'Columns:' line
    (table version >= 2), and one line per module.
    """
    self._parse_module_table_header(f)
    self._parse_module_table_columns(f)
    self._parse_module_table_modules(f)
def _parse_module_table_header(self, f):
    """
    Parse drcov log module table header from filestream.

    -------------------------------------------------------------------

    Format used in DynamoRIO v6.1.1 through 6.2.0
       eg: 'Module Table: 11'

    Format used in DynamoRIO v7.0.0-RC1 (and hopefully above)
       eg: 'Module Table: version X, count 11'

    Sets self.module_table_version and self.module_table_count.
    """

    # parse module table 'header'
    # eg: Module Table: version 2, count 11
    header_line = f.readline().strip()
    field_name, field_data = header_line.split(": ")
    #assert field_name == "Module Table"

    #
    # NOTE/COMPAT:
    #
    #   DynamoRIO doesn't document their drcov log format, and it has
    #   changed its format at least once during its lifetime.
    #
    #   we just have to try parsing the table header one way to determine
    #   if its the old (say, a 'v1') table, or the new 'v2' table.
    #

    try:
        # seperate 'version X' and 'count Y' from each other ('v2')
        version_data, count_data = field_data.split(", ")

    # failure to unpack indicates this is an 'older, v1' drcov log
    except ValueError:
        self.module_table_count = int(field_data)
        self.module_table_version = 1
        return

    # parse module table version out of 'version X'
    data_name, version = version_data.split(" ")
    #assert data_name == "version"
    self.module_table_version = int(version)
    # only the table layouts this parser knows how to read (see DrcovModule)
    if not self.module_table_version in [2, 3, 4]:
        raise ValueError("Unsupported (new?) drcov log format...")

    # parse module count in table from 'count Y'
    data_name, count = count_data.split(" ")
    #assert data_name == "count"
    self.module_table_count = int(count)
def _parse_module_table_columns(self, f):
    """
    Parse drcov log module table columns from filestream.

    -------------------------------------------------------------------

    DynamoRIO v6.1.1, table version 1:
       eg: (Not present)

    DynamoRIO v7.0.0-RC1, table version 2:
       Windows:
         'Columns: id, base, end, entry, checksum, timestamp, path'
       Mac/Linux:
         'Columns: id, base, end, entry, path'

    DynamoRIO v7.0.17594B, table version 3:
       Windows:
         'Columns: id, containing_id, start, end, entry, checksum, timestamp, path'
       Mac/Linux:
         'Columns: id, containing_id, start, end, entry, path'

    DynamoRIO v7.0.17640, table version 4:
       Windows:
         'Columns: id, containing_id, start, end, entry, offset, checksum, timestamp, path'
       Mac/Linux:
         'Columns: id, containing_id, start, end, entry, offset, path'
    """

    # NOTE/COMPAT: there is no 'Columns' line for the v1 table...
    if self.module_table_version == 1:
        return

    # parse module table 'columns'
    # eg: Columns: id, base, end, entry, checksum, timestamp, path
    column_line = f.readline().strip()
    field_name, field_data = column_line.split(": ")
    #assert field_name == "Columns"

    # seperate column names
    # Windows: id, base, end, entry, checksum, timestamp, path
    # Mac/Linux: id, base, end, entry, path
    #
    # NOTE: the parsed column names are not used anywhere; this line is
    # read only to advance the filestream past the 'Columns:' row
    # (per-version layouts are hard-coded in DrcovModule instead).
    columns = field_data.split(", ")
def _parse_module_table_modules(self, f):
    """
    Parse drcov log modules in the module table from filestream.
    """

    # the table header told us exactly how many rows to expect,
    # so consume precisely that many lines from the stream
    for _ in xrange(self.module_table_count):
        entry = DrcovModule(f.readline().strip(), self.module_table_version)
        self.modules.append(entry)
def _parse_bb_table(self, f):
    """
    Parse drcov log basic block table from filestream.

    The header is parsed first (entry count + ascii/binary detection),
    then the entries themselves.
    """
    self._parse_bb_table_header(f)
    self._parse_bb_table_entries(f)
def _parse_bb_table_header(self, f):
    """
    Parse drcov log basic block table header from filestream.

    Sets self.bb_table_count and self.bb_table_is_binary. The stream is
    left positioned at the start of the table body (we peek ahead to
    detect an ascii table, then seek back).
    """

    # parse basic block table 'header'
    # eg: BB Table: 2792 bbs
    header_line = f.readline().strip()
    field_name, field_data = header_line.split(": ")
    #assert field_name == "BB Table"

    # parse basic block count out of 'X bbs'
    count_data, data_name = field_data.split(" ")
    #assert data_name == "bbs"
    self.bb_table_count = int(count_data)

    # peek at the next few bytes to determine if this is a binary bb table.
    # An ascii bb table will have the line: 'module id, start, size:'
    token = "module id"
    saved_position = f.tell()

    # is this an ascii table?
    if f.read(len(token)) == token:
        self.bb_table_is_binary = False

    # nope! binary table
    else:
        self.bb_table_is_binary = True

    # seek back to the start of the table so the entry parser sees it all
    f.seek(saved_position)
def _parse_bb_table_entries(self, f):
    """
    Parse drcov log basic block table entries from filestream.

    Fills self.basic_blocks with bb_table_count DrcovBasicBlock records,
    either by a raw binary read or by parsing one text row per entry.
    Raises ValueError on a malformed ascii table.
    """

    # allocate the ctypes structure array of basic blocks
    self.basic_blocks = (DrcovBasicBlock * self.bb_table_count)()

    if self.bb_table_is_binary:
        # read the packed on-disk records directly into the ctypes array
        f.readinto(self.basic_blocks)
    else: # let's parse the text records
        # ascii tables start with a fixed column-description line
        text_entry = f.readline().strip()
        if text_entry != "module id, start, size:":
            raise ValueError("Invalid BB header: %r" % text_entry)
        # eg: module[  0]: 0x0000000000001090,   8
        pattern = re.compile(r"^module\[\s*(?P<mod>[0-9]+)\]\:\s*(?P<start>0x[0-9a-f]+)\,\s*(?P<size>[0-9]+)$")
        for basic_block in self.basic_blocks:
            text_entry = f.readline().strip()
            match = pattern.match(text_entry)
            if not match:
                raise ValueError("Invalid BB entry: %r" % text_entry)
            # offsets are hex, sizes and module ids are decimal
            basic_block.start = int(match.group("start"), 16)
            basic_block.size = int(match.group("size"), 10)
            basic_block.mod_id = int(match.group("mod"), 10)
#------------------------------------------------------------------------------
# drcov module parser
#------------------------------------------------------------------------------
class DrcovModule(object):
    """
    Parser & wrapper for module details as found in a drcov coverage log.

    A 'module' in this context is a .EXE, .DLL, ELF, MachO, etc.

    The module table layout differs between drcov releases; `version`
    selects which per-version column layout to parse (the layouts are
    documented in _parse_module_table_columns).
    """
    def __init__(self, module_data, version):
        self.id = 0
        self.base = 0
        self.end = 0
        self.size = 0
        self.entry = 0
        self.checksum = 0       # populated from Windows logs only
        self.timestamp = 0      # populated from Windows logs only
        self.path = ""
        self.filename = ""
        self.containing_id = 0

        # parse the module
        self._parse_module(module_data, version)

    @property
    def start(self):
        """
        Compatability alias for the module base.

        DrCov table version 2 --> 3 changed this paramter name base --> start.
        """
        return self.base

    def _parse_module(self, module_line, version):
        """
        Parse a module table entry, dispatching on the table version.
        """
        data = module_line.split(", ")

        # NOTE/COMPAT: each table version carries a different column set
        if version == 1:
            self._parse_module_v1(data)
        elif version == 2:
            self._parse_module_v2(data)
        elif version == 3:
            self._parse_module_v3(data)
        elif version == 4:
            self._parse_module_v4(data)
        else:
            raise ValueError("Unknown module format (v%u)" % version)

    def _parse_module_v1(self, data):
        """
        Parse a module table v1 entry: id, size, path.
        """
        self.id = int(data[0])
        self.size = int(data[1])
        self.path = str(data[2])
        self.filename = os.path.basename(self.path)

    def _parse_module_v2(self, data):
        """
        Parse a module table v2 entry:
        id, base, end, entry[, checksum, timestamp], path  (7 fields on Windows)
        """
        self.id = int(data[0])
        self.base = int(data[1], 16)
        self.end = int(data[2], 16)
        self.entry = int(data[3], 16)
        if len(data) == 7: # Windows Only
            self.checksum = int(data[4], 16)
            self.timestamp = int(data[5], 16)
        self.path = str(data[-1])
        self.size = self.end - self.base
        self.filename = os.path.basename(self.path)

    def _parse_module_v3(self, data):
        """
        Parse a module table v3 entry:
        id, containing_id, start, end, entry[, checksum, timestamp], path
        (8 fields on Windows, 6 on Mac/Linux)
        """
        self.id = int(data[0])
        self.containing_id = int(data[1])
        self.base = int(data[2], 16)
        self.end = int(data[3], 16)
        self.entry = int(data[4], 16)
        # FIX: a v3 Windows row has 8 fields, not 7 -- with the old
        # `len(data) == 7` check, checksum/timestamp were never parsed.
        if len(data) == 8: # Windows Only
            self.checksum = int(data[5], 16)
            self.timestamp = int(data[6], 16)
        self.path = str(data[-1])
        self.size = self.end - self.base
        self.filename = os.path.basename(self.path)

    def _parse_module_v4(self, data):
        """
        Parse a module table v4 entry:
        id, containing_id, start, end, entry, offset[, checksum, timestamp], path
        (9 fields on Windows, 7 on Mac/Linux)
        """
        self.id = int(data[0])
        self.containing_id = int(data[1])
        self.base = int(data[2], 16)
        self.end = int(data[3], 16)
        self.entry = int(data[4], 16)
        self.offset = int(data[5], 16)
        # FIX: a v4 Windows row has 9 fields -- the old `len(data) == 7`
        # check actually matched the *Mac/Linux* layout and then tried to
        # parse the path field as a hex checksum, raising ValueError.
        if len(data) == 9: # Windows Only
            self.checksum = int(data[6], 16)
            self.timestamp = int(data[7], 16)
        self.path = str(data[-1])
        self.size = self.end - self.base
        self.filename = os.path.basename(self.path)
#------------------------------------------------------------------------------
# drcov basic block parser
#------------------------------------------------------------------------------
class DrcovBasicBlock(Structure):
    """
    Parser & wrapper for basic block details as found in a drcov coverage log.

    NOTE:
      Based off the C structure as used by drcov -

        /* Data structure for the coverage info itself */
        typedef struct _bb_entry_t {
            uint   start;   /* offset of bb start from the image base */
            ushort size;
            ushort mod_id;
        } bb_entry_t;
    """
    # match the on-disk record layout exactly (packed, no alignment
    # padding) so binary bb tables can be read straight into an array
    # of these structures via f.readinto()
    _pack_ = 1
    _fields_ = [
        ('start', c_uint32),    # offset of the bb from its module base
        ('size', c_uint16),     # length of the bb in bytes
        ('mod_id', c_uint16)    # index into the module table
    ]
#------------------------------------------------------------------------------
# Command Line Testing
#------------------------------------------------------------------------------
if __name__ == "__main__":
    argc = len(sys.argv)
    argv = sys.argv

    # base usage (Python 2 script: note the statement-form prints)
    if argc < 2:
        print "usage: %s <coverage filename>" % os.path.basename(sys.argv[0])
        sys.exit()

    # attempt file parse, then dump every covered basic block's start offset
    x = DrcovData(argv[1])
    for bb in x.basic_blocks:
        print "0x%08x" % bb.start
|
[
"cbwang505@hotmail.com"
] |
cbwang505@hotmail.com
|
f55af3d8a9ae98e223e9c99bf7e61ae465ce3918
|
2b70516185e8b102cf1a90d730bb068461fb7af8
|
/book/migrations/0018_auto_20191225_0718.py
|
a65a4fa4830fa4550943a9b6ef10abdbf81c0643
|
[] |
no_license
|
KeeperNight/Roronoa
|
c4eb083e6e2c7311b0aca8e699df01dfd1028ae6
|
ffff84a4c95483a742806314004678002401fafc
|
refs/heads/master
| 2020-11-29T21:23:33.841854
| 2020-01-03T19:00:44
| 2020-01-03T19:00:44
| 230,218,681
| 0
| 0
| null | 2020-01-03T19:00:46
| 2019-12-26T07:39:30
|
HTML
|
UTF-8
|
Python
| false
| false
| 549
|
py
|
# Generated by Django 3.0.1 on 2019-12-25 15:18
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: removes Collection.book_name and adds a
    # ManyToMany 'collection' relation on Book pointing at Collection.

    dependencies = [
        ('book', '0017_auto_20191225_0716'),
    ]

    operations = [
        # drop the old field from Collection
        migrations.RemoveField(
            model_name='collection',
            name='book_name',
        ),
        # relate books to collections many-to-many instead
        migrations.AddField(
            model_name='book',
            name='collection',
            field=models.ManyToManyField(related_name='coll', to='book.Collection'),
        ),
    ]
|
[
"noreply@github.com"
] |
KeeperNight.noreply@github.com
|
008880df49eaa648acea8a9abf9ffaa149112098
|
85c0813d837b0e0f189020a52348db1deffb0b11
|
/public/db/coupon_db.py
|
80daaa213acc609f25eb27d5f2237e1696469652
|
[] |
no_license
|
reb00t2018/flask-reptiles
|
2d49fb27e718e305a7127e05047d865a1e7a6157
|
ac3832340219f922e3b9451c2e2b1e18773938c1
|
refs/heads/master
| 2020-07-03T03:05:56.991764
| 2019-06-11T01:19:14
| 2019-06-11T01:19:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,051
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'Apple'
from public.db.participle_db import DataBase_PD
class CouponDB(DataBase_PD):
    """Data-access helpers for the goods (coupon) and scraper-IP tables."""

    def __init__(self):
        super(CouponDB, self).__init__()

    def save_coupon(self, coupon):
        """
        Save one goods record to the database.

        Returns the result of execute() on insert, or False when a record
        with the same title already exists.
        """
        # FIX: the statement used to be wrapped in stray parentheses
        # ("(insert into ... )"), which is not valid SQL.
        insert_sql = """
            insert into goods_goods(category_id,second_id,first_id,title, price, url, pic, brand,goods_desc,add_time)
            values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
        """
        old_coupon = self.is_has_by_name(coupon.title)
        insert_data = (
            coupon.category_id, coupon.second_id, coupon.first_id, coupon.title, coupon.price, coupon.url, coupon.pic,
            coupon.brand, coupon.goods_desc, coupon.add_time
        )
        if not old_coupon:
            return self.execute(insert_sql, insert_data)
        else:
            return False

    def is_has_by_name(self, title):
        """
        Check whether a goods record with this title already exists.
        """
        sql = """
            select 1 from goods_goods where title = %s
        """
        # FIX: (title) is just a parenthesized string, not a tuple;
        # DB-API parameter sequences must be passed as (title,).
        return self.find_execute(sql, (title,))

    def save_ip(self, ip, time):
        """Record a fetched proxy IP with its timestamp."""
        insert_sql = """
            insert into goods_getip(ip,add_time) values (%s,%s)
        """
        return self.execute(insert_sql, (ip, time))

    def count_ip(self):
        """Return the number of stored proxy IPs."""
        select_sql = """
            select count(*) from goods_getip
        """
        return self.find_execute(select_sql)

    def delete_ip(self, getip):
        """Delete the proxy-IP row with the given primary key."""
        # FIX: parameterized instead of str.format -- the other methods
        # already pass parameters to execute(), and interpolating the id
        # into the SQL string invites injection.
        delete_sql = """
            DELETE FROM goods_getip WHERE id = %s
        """
        return self.execute(delete_sql, (getip,))

    def sumip(self):
        """Return all stored proxy-IP rows."""
        select_sql = """
            select * from goods_getip
        """
        return self.find_execute(select_sql, fetchone=False)
|
[
"1991585851@qq.com"
] |
1991585851@qq.com
|
f60ea6fc3ab345db095269b7fab77201af8d0c01
|
81da50f44ac78f99ab78125d36a7d508e07bea63
|
/old_approach/selection.py
|
7f3c4f51b05e29faf195f988240fc4812d3822ef
|
[] |
no_license
|
pnovais/granada_2016
|
ebb859370921489e95dafcf21d58757f87ff53f5
|
11ad3d44b4f50606608fd89f8e295949818ffd3e
|
refs/heads/master
| 2021-03-22T04:36:25.768426
| 2016-12-02T16:42:21
| 2016-12-02T16:42:21
| 70,143,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,648
|
py
|
import pandas as pd
import numpy as np
import time
import sql as sql
from sys import exit
from astropy import units as u
from astropy.coordinates import SkyCoord

ini = time.time()

# Load the CALIFA master list and keep only the columns used below.
# NOTE(review): DataFrame.ix is deprecated/removed in modern pandas; kept
# here because the script targets an older pandas -- migrate to .loc when
# the environment is upgraded.
dfs = pd.read_csv('califa_master_list_rgb_2012.txt')
df = dfs.ix[:,('#CALIFA_ID','ned_name','ra','dec','Mr','u-r','objID','fiberMag_r','petroMag_u','petroMag_g','petroMag_r','petroMag_i','petroMag_z','petroRad_r','petroR50_r','petroR90_r','z','re')]

df2 = pd.read_table('Paty_at_flux__yx/mapas.txt', delim_whitespace = True)
df2.columns = ['at-flux', 'gal_num']

dfm = pd.read_table('morphological_types_califa.txt', delim_whitespace = True)
dfm.columns = ['index', 'gal_num', 'type']

# Count how many galaxies of the map list appear in the master list.
n = 0
for i in range(len(df2)):
    for j in range(len(df)):
        if (df2['gal_num'][i] == df['#CALIFA_ID'][j]):
            n = n + 1

# NOTE(review): these column copies align rows positionally -- they assume
# df, df2 and dfm share the same row order; confirm against the inputs.
df2['ned_name'] = df['ned_name']
df2['u-r'] = df['u-r']
df2['re'] = df['re']
df2['type_m'] = dfm['type']
#print(n)

df2.to_csv('califa_gal_properts.csv')

# Split the catalogue into three morphological-type groups.
dfg1 = df2.ix[(df2['type_m'] <= 0)]
dfg2 = df2.ix[(df2['type_m'] > 0) & (df2['type_m'] <= 3)]
dfg3 = df2.ix[(df2['type_m'] > 3)]

dfg1.to_csv('califa_group1.csv')
# FIX: the original wrote df3/df4 here, which are undefined (NameError);
# the frames actually built above are dfg2 and dfg3.
dfg2.to_csv('califa_group2.csv')
dfg3.to_csv('califa_group3.csv')

fim = time.time()
time_proc = fim - ini
print('')
print('tempo de processamento: %fs' % time_proc)
|
[
"patynovais@gmail.com"
] |
patynovais@gmail.com
|
810d67acdeb4bd7d114d76c3722eb2212d1920c3
|
debb70d11887c636ad90c5cabb01e0933e66d642
|
/Thys/GUI Stuff/v3/Mayflower_v3.py
|
01038acdfa2196c6cd9004bf31a695b4210dd149
|
[] |
no_license
|
james-barrett-44/COMP2100FinalProject
|
68a4bc31c618d3d2d5d75bd3ac581c6a39a25e19
|
36572c7bc7f0c8bbfdc2d48a1f04a647f59dd065
|
refs/heads/master
| 2020-08-24T20:11:46.533634
| 2019-12-12T22:08:09
| 2019-12-12T22:08:09
| 216,897,441
| 0
| 0
| null | 2019-11-23T03:05:48
| 2019-10-22T19:49:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,974
|
py
|
import tkinter as tk
import socket
from threading import Thread
"""
The class setup and frame switching is copied from https://www.youtube.com/watch?v=jBUpjijYtCk
"""
def get_own_ip():
    """Best-effort discovery of this machine's outbound IP address.

    Based on https://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib?rq=1
    Connecting a UDP socket sends no packets, but it makes the OS choose
    the outgoing interface, whose address we then read back.

    Returns:
        The detected address, or '127.0.0.1' when no route is available.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # doesn't even have to be reachable
        s.connect(('10.255.255.255', 1))
        ip = s.getsockname()[0]
        print(ip)
    except OSError:
        # FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; socket failures raise OSError.
        ip = '127.0.0.1'
    finally:
        s.close()
    return ip
# UI fonts and the network receive buffer size used throughout the app
DEFAULT_FONT = ("Veranda", 10)
DEFAULT_FONT_LARGE = ("Veranda", 35)
BUFSIZ = 1024
class Mayflower(tk.Tk):
    """Root window: stacks all page frames and raises one at a time."""

    def __init__(self, *arg, **kwargs):
        tk.Tk.__init__(self, *arg, **kwargs)
        # one container holds every page frame stacked in the same grid
        # cell; show_frame() raises the requested page to the top
        container = tk.Frame(self)
        container.pack(side="top", fill="both", expand=True)
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)
        self.frames = {}
        # instantiate every page up front, keyed by its class
        for F in (StartPage, MainPage):
            frame = F(container, self)
            self.frames[F] = frame
            frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame(StartPage)

    def show_frame(self, cont):
        """Raise the page frame registered under class `cont`."""
        frame = self.frames[cont]
        frame.tkraise()
class StartPage(tk.Frame):
    """Welcome screen with a Start button leading to MainPage."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        l_welcome = tk.Label(self, text="Welcome to the Mayflower", font=DEFAULT_FONT_LARGE)
        l_subtitle = tk.Label(self, text="The #1 Peer-2-Peer File Sharing Application", font=DEFAULT_FONT)
        l_welcome.pack()
        l_subtitle.pack()
        # the controller is the Mayflower root; switch pages through it
        b_start = tk.Button(self, text="Start", command=lambda: controller.show_frame(MainPage))
        b_start.pack()
class MainPage(tk.Frame):
    """Main screen; also kicks off the listening socket via main()."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        # NOTE(review): main() joins the accept-loop thread, which never
        # returns, so constructing this page appears to block the Tk event
        # loop -- confirm whether the join was intended.
        main()
        l_mainlabel = tk.Label(self, text="Main window")
        l_mainlabel.pack()
        b_back_to_start = tk.Button(self, text="Back to Welcome Page", command=lambda: controller.show_frame(StartPage))
        b_back_to_start.pack()
# Module-level listening socket: bound to this host's address at import time.
own_ip = str(get_own_ip())
app_port = 1234
ip_and_port = (own_ip, app_port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# allow quick restarts without 'address already in use' errors
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(ip_and_port)
sock.listen(5)
def accept_incoming_connections():
    """Accept peers forever, handing each connection to its own thread."""
    while True:
        peer, client_address = sock.accept()
        # FIX: Thread() takes `target=`, not `targets=`; the original
        # raised TypeError on the first accepted connection.
        Thread(target=handle_peer, args=(peer,)).start()
def handle_peer(peer):
    """Read the peer's first message and print it."""
    # assumes the peer's greeting is UTF-8 decodable -- TODO confirm
    name = peer.recv(BUFSIZ).decode()
    print(name)
def main():
    """Start the accept loop and wait for it to finish."""
    print("Waiting for connection...")
    t = Thread(target=accept_incoming_connections)
    t.start()
    # NOTE(review): the accept loop runs forever, so this join never
    # returns and sock.close() below is unreachable in practice.
    t.join()
    sock.close()
if __name__ == '__main__':
    # build and run the GUI event loop
    app = Mayflower()
    app.title("The P2P Mayflower v3")
    app.geometry("910x620")
    app.mainloop()
|
[
"thys.nadema@gmail.com"
] |
thys.nadema@gmail.com
|
01e347f7f20a1e1ee957aee5bb968aca54863c14
|
0d09fb71910f1d9d625c7573f2f1f6e98bc3a3a4
|
/myblog/myblog/urls.py
|
39204b4f81e0f6e201bf79e07a635f99a99d8775
|
[] |
no_license
|
jayhchoi/django-blog-project
|
0e28037885b9662839d98c4d8258be20e5405f68
|
5e6548b55a0b64908381e23588b8475f260b544f
|
refs/heads/master
| 2020-03-30T18:52:16.628981
| 2018-10-05T11:30:13
| 2018-10-05T11:30:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 876
|
py
|
"""myblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from .views import IndexView
urlpatterns = [
    path('admin/', admin.site.urls),              # Django admin site
    path('', IndexView.as_view(), name='index'),  # site landing page
    path('blog/', include('blog.urls')),          # delegate /blog/ to the blog app
]
|
[
"33884497+jaychoi1619@users.noreply.github.com"
] |
33884497+jaychoi1619@users.noreply.github.com
|
60b6c78bf5554244f344ae127983fba5b3b0324c
|
8de775551031c80337cdd978f1cfa6368ffba03d
|
/437_pathsumIII.py
|
a0b91d553c1205dc9d30de639b792412cb15a74e
|
[] |
no_license
|
evilwarlock/leetcode
|
6ba531ae5dbe818c9ba87b5122c66d2cc8e2729e
|
819855b2d388e3ddd77f072d3108af7b4b902047
|
refs/heads/master
| 2022-11-30T03:58:41.199365
| 2020-08-18T18:07:07
| 2020-08-18T18:07:07
| 260,012,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
class Solution:
    """LeetCode 437 (Path Sum III): count downward paths summing to a target.

    Uses the prefix-sum technique: while walking root-to-leaf we remember
    how many ancestor prefixes could complete a path ending at the current
    node. Runs in O(nodes) time and O(height + distinct prefixes) space.
    """

    def dfs(self, root, sum, root_sum):
        """Walk the tree, tallying target-sum paths that end at each node.

        `root_sum` is the running root-to-parent prefix sum. self.count
        maps (ancestor_prefix + sum) -> occurrences, so a hit at key
        `root_sum` means some ancestor prefix p satisfies
        root_sum - p == sum.
        """
        if not root:
            return None
        root_sum += root.val
        self.result += self.count[root_sum]
        # record this node's prefix for its descendants
        self.count[root_sum + sum] += 1
        self.dfs(root.left, sum, root_sum)
        self.dfs(root.right, sum, root_sum)
        # backtrack so sibling subtrees don't see this prefix
        self.count[root_sum + sum] -= 1

    def pathSum(self, root, sum):
        """Return the number of downward paths whose node values total `sum`."""
        # FIX: the file never imported defaultdict, so pathSum raised
        # NameError; import locally since the file has no import section.
        from collections import defaultdict
        self.result, self.count = 0, defaultdict(int)
        # the empty prefix: paths that start at the root of a subtree
        self.count[sum] = 1
        self.dfs(root, sum, 0)
        return self.result
|
[
"ceaxyz@gmail.com"
] |
ceaxyz@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.