| blob_id (string, 40) | directory_id (string, 40) | path (string, 2-616) | content_id (string, 40) | detected_licenses (list, 0-69) | license_type (2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, 2-10.3M) | authors (list, 1-1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
13049b593f0288083aea364de83482a8fa777145
|
71cb249dc2f2c915a850f382d6db87b07d869d43
|
/src/test_vessel12.py
|
4a893181e688c180ab6cce7ccdf663c65feff64b
|
[] |
no_license
|
Hannah-Xue/vessel-flow-consistency-ssl
|
e22f26a65f026a0819faf40f7ef82ac0b1e8ffa8
|
4e53f4dfc9f17e0b873192259afc1d10d4422838
|
refs/heads/master
| 2023-07-12T17:12:06.059726
| 2021-08-17T15:11:27
| 2021-08-17T15:11:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,646
|
py
|
'''
Test script (analogous to `test_tubetk.py`): loads the trained model and the test data loader, then saves the output vesselness patches.
These patches can be stitched together and used for evaluation/visualization.
This script is for the VESSEL12 dataset.
'''
import argparse
import numpy as np
import torch
from tqdm import tqdm
import data_loader.data_loaders as module_data
import model.loss as module_loss
from matplotlib import pyplot as plt
import pickle as pkl
import model.metric as module_metric
import model.model as module_arch
from scipy.ndimage import gaussian_filter
from parse_config import ConfigParser
from utils import dir2flow_2d, v2vesselness, v2transpose_vesselness, overlay, overlay_quiver
from utils.util import *
from model.loss import v2_avg
from time import time
def to_device(data, device):
for k, v in data.items():
data[k] = v.to(device)
return data
def smooth(ves, s=1):
    # ves = [B, C, H, W, D]
smoothves = ves * 0
B, C, H, W, D = ves.shape
for b in range(B):
for c in range(C):
if isinstance(ves, torch.Tensor):
sm = gaussian_filter(ves[b, c].data.numpy(), sigma=s)
smoothves[b, c] = torch.Tensor(sm)
else:
smoothves[b, c] = gaussian_filter(ves[b, c], sigma=s)
return smoothves
def main(config, args):
logger = config.get_logger('test')
    training = bool(args.train)
trainstr = "train" if args.train != 0 else "test"
# Keep track of time
times = []
data_loader = getattr(module_data, config['data_loader']['type'])(
config['data_loader']['args']['data_dir'],
batch_size=args.batch_size,
shuffle=False,
validation_split=0.0,
training=training,
pad=args.pad,
num_workers=2,
)
## Vesselness function (3d versions)
if config.config['loss'] == 'vessel_loss_2d_sq':
vesselfunc = v2_sq_vesselness
elif config.config['loss'] == 'vessel_loss_3d':
vesselfunc = v13d_sq_vesselness_test
elif config.config['loss'] == 'vessel_loss_3d_bifurc':
vesselfunc = v13d_sq_jointvesselness
elif config.config['loss'] == 'vessel_loss_3dmax':
vesselfunc = v13d_sqmax_vesselness
else:
assert False, 'Unknown loss function {}'.format(config['loss'])
# print(vesselfunc)
if 'max' in config['loss']:
s = 1
else:
s = 0.7
print("Smoothness {}".format(s))
# More information
parallel_scale = config.config['loss_args'].get('parallel_scale', 2)
sv_range = config.config['loss_args'].get('sv_range')
# build model architecture
model = config.init_obj('arch', module_arch)
model.eval()
logger.info(model)
# get function handles of loss and metrics
loss_fn = getattr(module_loss, config['loss'])
metric_fns = [getattr(module_metric, met) for met in config['metrics']]
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logger.info('Loading checkpoint: {} ...'.format(config.resume))
checkpoint = torch.load(config.resume, map_location=device)
state_dict = checkpoint['state_dict']
if config['n_gpu'] > 1:
model = torch.nn.DataParallel(model)
# get nsamples
nsample = config['loss_args'].get('num_samples_template', 12)
# Load model
    try:
        model.load_state_dict(state_dict)
    except Exception:
        # the checkpoint may have been saved without the DataParallel wrapper
        model.module.load_state_dict(state_dict)
print("Using vessel function: {}".format(vesselfunc))
# prepare model for testing
model = model.to(device)
model.eval()
total_loss = 0.0
total_metrics = torch.zeros(len(metric_fns))
print("Dataset has size: {}".format(len(data_loader.dataset)))
pad = args.pad
with torch.no_grad():
# Store all stuff here
vesselness = dict(imgid=None, img=None, count=0)
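        # 'img' accumulates patch values per voxel and 'count' tracks how many
        # patches cover each voxel, so overlapping patches are averaged at save time.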
for j, data in enumerate(tqdm(data_loader)):
## Get output from model
data = to_device(data, device)
output = model(data)
## Get vessels
vessel_type = config.get('vessel_type', 'light')
mask = data.get('mask')
if mask is not None:
mask = mask.cpu()
## Change this for different vesselness modes
t1 = time()
ves = vesselfunc(data['image'], output, nsample=nsample, vtype=vessel_type, mask=mask, is_crosscorr=args.crosscorr, parallel_scale=parallel_scale, sv_range=sv_range)
t2 = time()
ves = ves.data.cpu().numpy()
# If non-zero padding, then take the extra padding out from all sides (minimizes border artifacts)
if args.pad > 0:
ves = ves[..., pad:-pad, pad:-pad, pad:-pad]
ves = smooth(ves, s)
times.append(t2 - t1)
# From this vesselness, use it to add to current image
B = ves.shape[0]
for i in tqdm(range(B)):
imgid = data['imgid'][i]
shape = list(data['shape'][i])
# New image, save old image and create new templates
if imgid != vesselness['imgid']:
# Save it first
v_id = vesselness['imgid']
v_img = vesselness['img']
v_cnt = np.maximum(1, vesselness['count'])
# Save this image if it exists
if v_id is not None:
ves_img = v_img / v_cnt
zerocount = np.where(v_cnt == 0)
                        if zerocount[0].size > 0:  # warn if some location has zero count
print("{} index has zero count in location {}".format(v_id, zerocount))
outputfile = '/ocean/projects/asc170022p/rohit33/VESSEL12output/ours_{}.npy'.format(v_id)
print(v_cnt.min(), v_cnt.max())
with open(outputfile, 'wb') as fi:
np.save(fi, ves_img)
print("Saved to {}".format(outputfile))
# Create new images
vesselness['imgid'] = imgid
vesselness['img'] = np.zeros(shape)
vesselness['count'] = np.zeros(shape)
# Save patch to new image
#img = data['image'][i, 0].cpu().data.numpy() # [P, P, P]
img = ves[i, 0]
_, H, W, D = data['startcoord'][i].cpu().data.numpy()
pH, pW, pD = img.shape
# Add to patch
vesselness['img'][H:H+pH, W:W+pW, D:D+pD] = img + vesselness['img'][H:H+pH, W:W+pW, D:D+pD]
vesselness['count'][H:H+pH, W:W+pW, D:D+pD] = 1 + vesselness['count'][H:H+pH, W:W+pW, D:D+pD]
# The last image wont be saved, save it here
v_id = vesselness['imgid']
v_img = vesselness['img']
v_cnt = np.maximum(1, vesselness['count'])
ves_img = v_img / v_cnt
zerocount = np.where(v_cnt == 0)
        if zerocount[0].size > 0:  # warn if some location has zero count
print("{} index has zero count in location {}".format(v_id, zerocount))
outputfile = '/ocean/projects/asc170022p/rohit33/VESSEL12output/ours_{}.npy'.format(v_id)
print(v_cnt.min(), v_cnt.max())
with open(outputfile, 'wb') as fi:
np.save(fi, ves_img)
print("Saved to {}".format(outputfile))
# Print time stats
meantime = np.mean(times)
stdtime = np.std(times)
print("Time for ours: {} +- {} sec".format(meantime, stdtime))
if __name__ == '__main__':
args = argparse.ArgumentParser(description='PyTorch Template')
args.add_argument('-c', '--config', default=None, type=str,
help='config file path (default: None)')
args.add_argument('-r', '--resume', default=None, type=str,
help='path to latest checkpoint (default: None)')
args.add_argument('-d', '--device', default=None, type=str,
help='indices of GPUs to enable (default: all)')
args.add_argument('--run_id', default='test')
args.add_argument('--crosscorr', default=0, type=int)
args.add_argument('--dataset', default="", type=str)
args.add_argument('--train', default=0, type=int)
args.add_argument('--patientIDs', default=None, type=str, nargs='+')
args.add_argument('--batch_size', default=512, type=int,)
args.add_argument('--pad', type=int, default=0)
config = ConfigParser.from_args(args)
args = args.parse_args()
main(config, args)
|
[
"rohit.rango@gmail.com"
] |
rohit.rango@gmail.com
|
db4f4ad08c06f2ec5e0a0db469b583da732d894c
|
97629345947f80b16848af1d33b37c5185cca651
|
/LESSON-6/zadacha2.py
|
778a964b303763692f165982aaece1e0a4526ff0
|
[] |
no_license
|
xemnes5/py-course
|
2f9ec17975e66ba7080a5acd366ce02573e1b430
|
fcc1f02e6d0e4c4104b2d2d42418cd1a46fae914
|
refs/heads/main
| 2023-05-04T19:43:26.287387
| 2021-05-25T12:36:18
| 2021-05-25T12:36:18
| 323,301,473
| 0
| 0
| null | 2021-05-25T12:36:19
| 2020-12-21T10:23:14
|
Python
|
UTF-8
|
Python
| false
| false
| 385
|
py
|
class Road:
def __init__(self,length,width):
self._length = length
self._width = width
    def mass(self):
        # 25 is presumably the asphalt mass in kg per m^2 per cm of thickness,
        # and 5 the thickness in cm
        return self._length * self._width * 25 * 5

a = Road(10, 100)
b = Road(20, 200)
print('Asphalt mass of the first instance =', a.mass())
print('Asphalt mass of the second instance =', b.mass())
|
[
"razielxem@gmail.com"
] |
razielxem@gmail.com
|
9e9c2b70ff0e481265bd61c79da9fa108683cf57
|
fab9db25f60728f7055e0c92887430d55538c1cd
|
/Project3/wc.py
|
3aa4adf43206db20c675b0e4762a1f4c499ef091
|
[] |
no_license
|
ErlendAbrahamsen/INF3331
|
e877b369c12cdb7cc5e5eab6120d4389f98e50fe
|
f6621c193bc07d202726db8bb26743f6238d5009
|
refs/heads/master
| 2020-12-19T15:18:30.858747
| 2020-01-23T16:48:16
| 2020-01-23T16:48:16
| 235,771,254
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,168
|
py
|
#3.1
import sys
import os
def wc(file):
    """
    Takes a filename in the same directory as wc.py.
    Returns a tuple with the number of
    lines, words, characters, and the filename of the input file.
    """
    with open(str(file), "r") as infile:
        words = []
        lines = 0
        for line in infile:
            lines += 1
            words += line.split()
    char = 0
    for word in words:
        char += len(word)
    return lines, len(words), char, str(file)
def execute():
"""
    Read command-line args and execute the front end of the program.
"""
#word count all compatible files in directory
if str(sys.argv[1]) == "*":
for file in os.listdir():
try:
print(wc(file))
            except Exception:
print("WARNING: Could not read %s properly" % file)
#word count all python files in directory
elif str(sys.argv[1]) == "*.py":
for file in os.listdir():
if str(file)[-3:] == ".py":
print(wc(str(file)))
#word count input file in directory
else:
file = str(sys.argv[1])
print(wc(file))
if __name__ == '__main__':
execute()
|
[
"erlend_abrahamsen@hotmail.com"
] |
erlend_abrahamsen@hotmail.com
|
1e6560752ac05e68875474c1eb9b1afe9b16284f
|
5e6543d360396d02d2e9ae052a88997fde4af61d
|
/lingvo/tasks/car/summary.py
|
9ce333cb2b5a81653e0bf11c4023d5857fa89f57
|
[
"Apache-2.0"
] |
permissive
|
saswat0/lingvo
|
58b0c044485fe89b33aca22a1c01e81d3be45c8a
|
3d0418a896c21240bb75ce1dc4c906da5a6dc757
|
refs/heads/master
| 2022-12-06T19:24:08.679519
| 2020-09-02T14:30:29
| 2020-09-02T14:30:29
| 279,816,053
| 2
| 0
|
Apache-2.0
| 2020-09-02T14:30:30
| 2020-07-15T08:49:42
| null |
UTF-8
|
Python
| false
| false
| 20,896
|
py
|
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to help with plotting summaries.
These functions try to take care in ensuring that transformations on points and
headings are consistent. For example, when points are transformed from the
standard x/y plane (+x going right, +y going up) to an alternate formulation
such as 'flipped axes', the heading must also be updated to account for the
change in axis directions.
"""
import collections
import math
from lingvo import compat as tf
from lingvo.core import plot
from lingvo.tasks.car import transform_util
import matplotlib.patheffects as path_effects
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
# Source code for keys can be found at:
# https://github.com/python-pillow/Pillow/blob/080bfd3ee1412b401d520fe26c51e2f5515e3a65/src/PIL/ImageColor.py#L163
def _PILColorList():
pil_color_list = sorted(ImageColor.colormap.keys())
pil_color_list.remove('black')
return pil_color_list
PIL_COLOR_LIST = _PILColorList()
def ExtractRunIds(run_segments):
"""Extract the RunIds from the run_segments feature field.
Args:
run_segments: a string Tensor of shape [batch, 1] containing a text proto.
See `SummaryTest.testExtractRunIds` for an example.
Returns:
A string Tensor of shape [batch], containing the extracted run id.
"""
run_segments = tf.convert_to_tensor(run_segments)[:, 0]
return tf.strings.regex_replace(run_segments,
r'[^:]+: "(.+)"\n[^:]+: (\d+)(.|\n)*',
r'\1_\2')
def CameraImageSummary(frontal_images, run_segment_strings, figsize=(6, 4)):
"""Write frontal_images as tf.Summaries.
Args:
frontal_images: Float tensor of frontal camera images: Shape: [batch,
height, width, depth]. Expected aspect ratio of 3:2 for visualization.
run_segment_strings: Tensor of strings: Shape: [batch, 1]. The associated
RunSegment proto for the batch.
figsize: Tuple indicating size of camera image. Default is (6, 4)
indicating a 3:2 aspect ratio for visualization.
"""
# Parse the run segment strings to extract the run segment info.
run_segment_ids = ExtractRunIds(run_segment_strings)
def DrawCameraImage(fig, axes, frontal_image, run_segment_id):
"""Draw camera image for image summary."""
plot.AddImage(
fig=fig,
axes=axes,
data=frontal_image / 256.,
show_colorbar=False,
suppress_xticks=True,
suppress_yticks=True)
txt = axes.text(
x=0.5,
y=0.01,
s=run_segment_id,
color='blue',
fontsize=14,
transform=axes.transAxes,
horizontalalignment='center')
txt.set_path_effects([
path_effects.Stroke(linewidth=3, foreground='lightblue'),
path_effects.Normal()
])
with plot.MatplotlibFigureSummary(
'examples', figsize=figsize, max_outputs=10) as fig:
# Plot raw frontal image samples for each example.
fig.AddSubplot([frontal_images, run_segment_ids], DrawCameraImage)
def _CarToImageTransform():
return transform_util.MakeCarToImageTransform(
pixels_per_meter=10., image_ref_x=250, image_ref_y=750, flip_axes=True)
def DrawTopDown(lasers):
"""Draw the laser points in the top down car view."""
# For every laser point, convert the point to a top down image.
# Lasers is a [r, b, s*6] tensor where b is the batch size.
# Transpose back to batch major
lasers = np.transpose(lasers, [1, 0, 2])
# Reshape data to get all points in the spin.
lasers = np.reshape(lasers, [np.shape(lasers)[0], -1])
car_to_image_transform = _CarToImageTransform()
# Create an empty image of the appropriate size.
batch_size = min(8, np.shape(lasers)[0])
images = np.zeros(shape=(batch_size, 1000, 500, 3), dtype=np.uint8)
# TODO(vrv): Slice the lasers into [b, x, y, z] matrix
# and then do a batch_matmul on all points at once.
max_npoints = np.shape(lasers)[1] // 6
for b in range(batch_size):
for i in range(max_npoints):
index = i * 6
x = lasers[b][index]
y = lasers[b][index + 1]
z = lasers[b][index + 2]
# TODO(vrv): Use lasers_padding to filter out invalid points.
# For now, just assume that all zeros means that the point
# shouldn't be drawn.
if x == 0 and y == 0 and z == 0:
continue
tx, ty, _ = transform_util.TransformPoint(car_to_image_transform, x, y, z)
# Point outside image.
if tx < 0 or ty < 0 or tx >= images.shape[2] or ty >= images.shape[1]:
continue
# Fill in that point in the image
images[b, int(ty), int(tx), :] = (255, 255, 255)
return images
def MakeRectangle(l, w, theta, offset=(0, 0)):
"""Make rotated rectangle."""
c, s = math.cos(theta), math.sin(theta)
coordinates = [(l / 2.0, w / 2.0), (l / 2.0, -w / 2.0), (-l / 2.0, -w / 2.0),
(-l / 2.0, w / 2.0)]
return [(c * x - s * y + offset[0], s * x + c * y + offset[1])
for (x, y) in coordinates]
def DrawHeadingTriangle(draw, x, y, heading, color, scale=25):
"""Draw a triangle indicating `heading` at `x`, `y` with `color`."""
ch = scale * math.cos(heading)
sh = scale * math.sin(heading)
lead_point = (x - ch, y - sh)
anchor_1 = (x - sh / 3, y + ch / 3)
anchor_2 = (x + sh / 3, y - ch / 3)
draw.line([anchor_1, lead_point, anchor_2], fill=color, width=4)
def DrawCircle(draw, x, y, fill, outline, circle_size=5):
"""Draw a circle at `x`, `y`."""
draw.ellipse(
(x - circle_size, y - circle_size, x + circle_size, y + circle_size),
fill=fill,
outline=outline)
# TODO(vrv): Support multiple display strings.
def DrawBoundingBoxOnImage(image,
box,
display_str,
color='red',
thickness=4,
text_loc='BOTTOM'):
"""Draw bounding box on the input image."""
original_image = image
image = Image.fromarray(np.uint8(original_image)).convert('RGB')
draw = ImageDraw.Draw(image)
center_x, center_y, width, height, heading = box
box2d = transform_util.Box2D(center_x, center_y, width, height, heading)
corners = list(box2d.corners.reshape(-1, 2))
points = [tuple(c) for c in corners]
points += [points[0]]
draw.line(points, fill=color, width=thickness)
# Draw heading.
max_dim = max(width, height) / 2.
end_heading_point = (center_x + max_dim * math.cos(heading),
center_y + max_dim * math.sin(heading))
start_heading_point = ((end_heading_point[0] - center_x) / 2 + center_x,
(end_heading_point[1] - center_y) / 2 + center_y)
heading_points = [start_heading_point, end_heading_point]
draw.line(heading_points, fill=color, width=thickness)
# Compute extremes so we can anchor the labels to them.
xs = [x[0] for x in points]
ys = [x[1] for x in points]
left = np.min(xs)
bottom = np.min(ys)
top = np.max(ys)
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
if text_loc == 'TOP':
text_bottom = top
else:
text_bottom = bottom + text_height
draw.rectangle([(left, text_bottom - text_height - 2 * margin),
(left + text_width, text_bottom)],
fill=color)
draw.text((left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
np.copyto(original_image, np.array(image))
def VisualizeBoxes(image,
boxes,
classes,
scores,
class_id_to_name,
min_score_thresh=.25,
line_thickness=4,
groundtruth_box_visualization_color='black',
skip_scores=False,
skip_labels=False,
text_loc='TOP'):
"""Visualize boxes on top down image."""
box_to_display_str_map = collections.defaultdict(str)
box_to_color_map = collections.defaultdict(str)
num_boxes = boxes.shape[0]
for i in range(num_boxes):
if scores is not None and scores[i] < min_score_thresh:
continue
box = tuple(boxes[i].tolist())
display_str = ''
if not skip_labels:
if classes[i] in class_id_to_name:
class_name = class_id_to_name[classes[i]]
display_str = str(class_name)
else:
display_str = 'N/A'
if not skip_scores:
if not display_str:
display_str = '{}%'.format(int(100 * scores[i]))
else:
display_str = '{}: {}%'.format(display_str, int(100 * scores[i]))
box_to_display_str_map[box] = display_str
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
box_to_color_map[box] = PIL_COLOR_LIST[classes[i] % len(PIL_COLOR_LIST)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
DrawBoundingBoxOnImage(
image,
box,
color=color,
thickness=line_thickness,
display_str=box_to_display_str_map[box],
text_loc=text_loc)
return image
def TransformBBoxesToTopDown(bboxes, car_to_image_transform=None):
"""Convert bounding boxes from car coordinates to top down pixel coordinates.
Args:
bboxes: A (batch, nbboxes, 4 or 5) np.float32 tensor containing bounding box
xywhh in car coordinates (smooth adjusted by car pose).
car_to_image_transform: An optional Transform object. If None, this will be
created using _CarToImageTransform.
Returns:
np.array of shape (batch, nbboxes) containing the bounding boxes in top down
image space.
"""
if car_to_image_transform is None:
car_to_image_transform = _CarToImageTransform()
batch_size = np.shape(bboxes)[0]
nbboxes = np.shape(bboxes)[1]
transformed_boxes = np.zeros_like(bboxes)
for batch_id in range(batch_size):
for box_id in range(nbboxes):
# TODO(vrv): When we predict heading, we should assert
# that the length of bbox_data is 5.
bbox_data = np.squeeze(bboxes[batch_id, box_id, :])
x, y, width, height = (bbox_data[0], bbox_data[1], bbox_data[2],
bbox_data[3])
if len(bbox_data) == 5:
heading = bbox_data[4]
else:
heading = 0.0
# Skip boxes that cannot be visualized.
if width <= 0 or height <= 0:
continue
if any([np.isnan(c) or np.isinf(c) for c in bbox_data]):
continue
# Bounding boxes are in car coordinates (smooth adjusted by car pose).
# Transform from car coordinates to new coordinates.
bbox_car = transform_util.Box2D(x, y, width, height, heading)
bbox_transformed = bbox_car.Apply(car_to_image_transform)
bbox_values = bbox_transformed.AsNumpy()
if len(bbox_data) == 4:
bbox_values = bbox_values[:-1]
transformed_boxes[batch_id, box_id, :] = bbox_values
return transformed_boxes
def DrawBBoxesOnImages(images, bboxes, box_weights, labels, class_id_to_name,
groundtruth):
"""Draw ground truth boxes on top down image.
Args:
images: A 4D uint8 array (batch, height, width, depth) of images to draw on
top of.
bboxes: A (batch, nbboxes, 4 or 5) np.float32 tensor containing bounding box
xywhh to draw specified in top down pixel values.
box_weights: A (batch, nbboxes) float matrix indicating the predicted score
of the box. If the score is 0.0, no box is drawn.
labels: A (batch, nbboxes) integer matrix indicating the true or predicted
label indices.
class_id_to_name: Dictionary mapping from class id to name.
groundtruth: Boolean indicating whether bounding boxes are ground truth.
Returns:
'images' with the bboxes drawn on top.
"""
# Assert 4d shape.
assert len(np.shape(images)) == 4
if np.shape(images)[3] == 1:
# Convert from grayscale to RGB.
images = np.tile(images, (1, 1, 1, 3))
# Assert channel dimension is 3 dimensional.
assert np.shape(images)[3] == 3
batch_size = np.shape(images)[0]
nbboxes = np.shape(bboxes)[1]
for batch_id in range(batch_size):
image = images[batch_id, :, :, :]
# Draw a box for each box and label if weights is 1.
transformed_boxes = []
box_scores = []
label_ids = []
for box_id in range(nbboxes):
box_weight = box_weights[batch_id, box_id]
# If there is no box to draw, continue.
if box_weight == 0.0:
continue
# TODO(vrv): When we predict heading, we should assert
# that the length of bbox_data is 5.
bbox_data = np.squeeze(bboxes[batch_id, box_id, :])
if len(bbox_data) == 5:
x, y, width, length, heading = bbox_data
else:
x, y, width, length = bbox_data
heading = 0.0
# Check whether we can draw the box.
bbox = transform_util.Box2D(x, y, width, length, heading)
ymin, xmin, ymax, xmax = bbox.Extrema()
if ymin == 0 and xmin == 0 and ymax == 0 and xmax == 0:
continue
# TODO(vrv): Support drawing boxes on the edge of the
# image.
if (xmin < 0 or ymin < 0 or xmax >= image.shape[1] or
ymax >= image.shape[0]):
continue
# We can draw a box on the image, so fill in the score, the boxes,
# and the label.
transformed_boxes.append([x, y, width, length, heading])
box_scores.append(box_weight)
label_ids.append(labels[batch_id, box_id])
if transformed_boxes:
transformed_boxes = np.stack(transformed_boxes, axis=0)
scores = None if groundtruth else np.array(box_scores)
text_loc = 'TOP' if groundtruth else 'BOTTOM'
VisualizeBoxes(
image=image,
boxes=transformed_boxes,
classes=label_ids,
scores=scores,
class_id_to_name=class_id_to_name,
groundtruth_box_visualization_color='cyan',
skip_scores=groundtruth,
skip_labels=False,
text_loc=text_loc)
return images
def DrawTrajectory(image, bboxes, masks, labels, is_groundtruth):
"""Draw the trajectory of bounding boxes on 'image'.
Args:
image: The uint8 image array to draw on. Assumes [1000, 500, 3] input with
RGB value ranges.
bboxes: A [num_steps, num_objects, 5] float array containing the bounding
box information over a sequence of steps. bboxes are expected to be in
car coordinates.
masks: A [num_steps, num_objects] integer array indicating whether the
corresponding bbox entry in bboxes is present (1 = present).
labels: A [num_steps, num_objects] integer label indicating which class is
being predicted. Used for colorizing based on labels.
is_groundtruth: True if the scene is the groundtruth vs. the predicted.
Returns:
The updated image array.
"""
image = Image.fromarray(np.uint8(image)).convert('RGB')
draw = ImageDraw.Draw(image)
try:
font = ImageFont.truetype('arial.ttf', 20)
except IOError:
font = ImageFont.load_default()
pixels_per_meter = 10.
image_ref_x = 250.
image_ref_y = 750.
car_to_image_transform = transform_util.MakeCarToImageTransform(
pixels_per_meter=pixels_per_meter,
image_ref_x=image_ref_x,
image_ref_y=image_ref_y,
flip_axes=True)
# Iterate over each object and produce the series of visualized trajectories
# over time.
for object_idx in range(bboxes.shape[1]):
# Annotate the box with the class type
label = labels[0, object_idx]
# Choose a label_consistent color.
color = PIL_COLOR_LIST[label % len(PIL_COLOR_LIST)]
# Make predictions white.
if not is_groundtruth:
color = 'white'
# Convert string color name to RGB so we can manipulate it.
color_rgb = ImageColor.getrgb(color)
# For each sample, extract the data, transform to image coordinates, and
# store in centroids.
centroids = []
for time in range(bboxes.shape[0]):
if masks[time, object_idx] == 0:
continue
center_x, center_y, width, height, heading = bboxes[time, object_idx, :]
# Compute the new heading.
heading = transform_util.TransformHeading(car_to_image_transform, heading)
# Transform from car to image coords.
x, y, _ = transform_util.TransformPoint(car_to_image_transform, center_x,
center_y, 0.0)
# Hack to scale from meters to pixels.
width *= pixels_per_meter
height *= pixels_per_meter
# Collect the centroids of all of the points.
centroids.append((x, y, heading))
# Draw the groundtruth bounding box at the first timestep.
if is_groundtruth and time == 0:
# Draw a rectangle
rect = MakeRectangle(height, width, heading, offset=(x, y))
rect += [rect[0]]
draw.line(rect, fill=color_rgb, width=4)
delta = 20
# Annotate the box with the object index
draw.text((x + delta, y + delta),
str(object_idx),
fill='white',
font=font)
# Draw a callout
draw.line([(x, y), (x + delta, y + delta)], fill='white', width=1)
# Extract the point pairs from centroids and draw a line through them.
point_pairs = []
for (x, y, heading) in centroids:
point_pairs.append((x, y))
if point_pairs:
draw.line(point_pairs, width=4, fill=color_rgb)
# Draw the centroids.
triangle_color_rgb = color_rgb
for i, (x, y, heading) in enumerate(centroids):
if i == 0:
# Draw the heading for the first timestep.
scale = 25 if is_groundtruth else 15
DrawHeadingTriangle(draw, x, y, heading, triangle_color_rgb, scale)
else:
# Draw a circle for the centroids of other timesteps.
outline_color = color_rgb
circle_size = 5 if is_groundtruth else 3
DrawCircle(
draw,
x,
y,
fill=triangle_color_rgb,
outline=outline_color,
circle_size=circle_size)
# Desaturate the color with every timestep.
increment = 45 # Allow this to be modified?
triangle_color_rgb = (triangle_color_rgb[0] - increment,
triangle_color_rgb[1] - increment,
triangle_color_rgb[2] - increment)
return np.array(image)
def GetTrajectoryComparison(gt_bboxes, gt_masks, gt_labels, pred_bboxes,
pred_masks, pred_labels):
"""Draw a trajectory comparison of groundtruth and predicted.
Args:
gt_bboxes: A [batch_size, num_steps, num_objects, 5] float array containing
the bounding box information over a sequence of steps.
gt_masks: A [batch_size, num_steps, num_objects] integer array indicating
whether the corresponding bbox entry in bboxes is present (1 = present).
gt_labels: A [batch_size, num_steps, num_objects] integer label indicating
which class is being predicted. Used for colorizing based on labels.
pred_bboxes: A [batch_size, num_steps, num_objects, 5] float array
containing the bounding box information over a sequence of steps.
pred_masks: A [batch_size, num_steps, num_objects] integer array indicating
whether the corresponding bbox entry in bboxes is present (1 = present).
pred_labels: A [batch_size, num_steps, num_objects] integer label indicating
which class is being predicted. Used for colorizing based on labels.
Returns:
images: A np.uint8 images array that can be displayed of size
[batch_size, 1000, 500, 3].
"""
batch_size = gt_bboxes.shape[0]
images = np.zeros([batch_size, 1000, 500, 3], dtype=np.uint8)
for b in range(batch_size):
images[b] = DrawTrajectory(
images[b], gt_bboxes[b], gt_masks[b], gt_labels[b], is_groundtruth=True)
images[b] = DrawTrajectory(
images[b],
pred_bboxes[b],
pred_masks[b],
pred_labels[b],
is_groundtruth=False)
return images
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
f0ff2dc7df8b5c4a3a3b557baab81a65b1bd2364
|
d295ccee70e2fbc054c6fda5cc26e468aae3d35c
|
/flask_api.py
|
5ef671edc3d816994bc105ac7aa8dbde1b210ab4
|
[] |
no_license
|
jrehm135/FlaskAPI
|
9bb87f5f50625420c99bb2aa2c2a4acace545310
|
e0dd71d0abbe73ccbadae48bb165b848c809a80b
|
refs/heads/main
| 2023-03-03T12:00:02.203781
| 2021-02-06T18:05:36
| 2021-02-06T18:05:36
| 336,566,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,035
|
py
|
#!flask/bin/python
import json
import datetime
from flask import Flask, request, render_template
from flask_cors import CORS, cross_origin
import socket
import os
# try/except necessary because os.uname is unavailable on Windows
try:
    # we still need to verify it is a Pi; this check is good enough
if os.uname()[4][:3] == 'arm':
import rpi_gpio
print('RPI')
except AttributeError:
print('Windows Machine')
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
startTime = None  # set by the first start event
ip_address = socket.gethostbyname(socket.gethostname())
class Servo():
pass
@app.route('/')
def index():
data = {'ip_address': ip_address}
return render_template("arm_page.html", data=data)
@app.route('/move_arm', methods=['POST'])
@cross_origin()
def moveArm():
print('moving arm')
servoVal = 0
timeElapsed = 0
    payload = json.loads(request.get_data().decode('UTF-8'))
servoName = payload["servo"]
mServo = Servo()
if servoName == "open_claw":
mServo.servoVal = 1
mServo.dutyCycle = 10
elif servoName == "close_claw":
mServo.servoVal = 1
mServo.dutyCycle = 5
elif servoName == "rotate_left":
mServo.servoVal = 8
mServo.dutyCycle = 10
elif servoName == "rotate_right":
mServo.servoVal = 8
mServo.dutyCycle = 5
elif servoName == "up_icon":
mServo.servoVal = 2
mServo.dutyCycle = 10
elif servoName == "down_icon":
mServo.servoVal = 2
mServo.dutyCycle = 5
    startEvent = payload["startEvent"]
    global startTime
    if startEvent:
        startTime = datetime.datetime.now()
    elif startTime is not None:  # guard against a stop event arriving before any start
        timeElapsed = datetime.datetime.now() - startTime
        requestTime = str(timeElapsed.seconds) + ':' + str(timeElapsed.microseconds)
        print(requestTime)
mServo.servoNum = rpi_gpio.select_servos(mServo.servoVal)
rpi_gpio.pwm_move(mServo)
return "Moved arm!"
if __name__ == '__main__':
app.run(debug=True)
|
[
"jrehm135@vt.edu"
] |
jrehm135@vt.edu
|
00da9939c27958074895b16dd9e8063ceb42c7aa
|
a291db0ebaae1aae0bf07f1e6add194f5c7f7e6b
|
/Capítulo 4: Adivianzas con números aleatorios y bucles/while.py
|
7157a6a66c02b2985867eb7776bcbc064c021634
|
[
"MIT"
] |
permissive
|
osmandi/programarcadegames
|
ca8ca4aa45a9a064f6aadf36e885fd52a395892f
|
314b2a958baf23ff4a393a624338ea22e70ad7d5
|
refs/heads/master
| 2021-09-05T13:54:13.179125
| 2018-01-28T12:20:24
| 2018-01-28T12:20:24
| 119,255,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
# Use a for loop to print the numbers 0 through 9
for i in range(10):
    print(i)

# Use a while loop to print the numbers 0 through 9
i = 0
while i < 10:
    print(i)
    i = i + 1

# Don't use the range function with while
# Incrementing with while
i = 0
while i < 10:
    print(i)
    i += 1

# Doesn't exit until the user asks to
salir = "n"
while salir == "n":
    salir = input("Do you want to quit? y/n ")
|
[
"osmandi.github@gmail.com"
] |
osmandi.github@gmail.com
|
3ce9ad452d63f22bf6d22946107f6744261a6daa
|
2ecded5f9985e4a0301e6adc6e3ffa0f80dab17d
|
/pythondemo/robot_library/reference/old_code/Library6/Page_Add_Company.py
|
ff1685dfc2729e6b3b59bb6f3b8ba5074d9c2dda
|
[] |
no_license
|
KayCheung/letscode
|
c443d7993afd0a7e92b23885afec19c4fabbbf86
|
8932638b7b7cd8248b6af5ff526c22e5e247197a
|
refs/heads/master
| 2021-01-01T06:30:29.322504
| 2017-04-28T08:44:47
| 2017-04-28T08:45:57
| 97,443,846
| 1
| 0
| null | 2017-07-17T06:46:13
| 2017-07-17T06:46:13
| null |
UTF-8
|
Python
| false
| false
| 2,699
|
py
|
__author__ = 'g705360'
from Page import Page
from robot.api import logger
from Call_Oracle import Call_Oracle
import time
class Page_Add_Company():
def __init__(self):
self.page = Page()
self.company_name = None
self.carrier_id = None
def add_company(self, driver, company_name, type=None):
ext_url = r"/servlet/AddNewCompanyProfile?action=add_company_profile"
full_url = self.page.get_base_url(driver) + ext_url
time.sleep(10)
logger.info("Go directly to add company page")
driver.get(full_url)
self.page.find_element_by_xpath(driver, "//input[@tabindex='1']").send_keys(company_name)
        if type is not None:
            option = "//option[@value='%s']" % type
            self.page.find_element_by_xpath(driver, option).click()
self.page.find_element_by_tag_name(driver, "button").click()
logger.info("Save company info")
result_title = self.page.find_element_by_xpath(driver, "//strong")
logger.info(result_title)
        # Get the carrier_id for the new company
        self.carrier_id = self.page.find_element_by_xpath(driver, "//input[@name='carrieruid']").get_attribute("value")
    def get_new_carrier_id(self):
        if self.carrier_id is None:
            logger.warn("Sorry, you need to add a company first to get the new carrier_id")
return False
else:
return self.carrier_id
    def set_new_company_permission(self, driver, permission_group_label, permission_label, element_type='checkbox'):
#click on the "Update Permission" button on the page
logger.info("To update company permissions....")
self.page.find_element_by_xpath("//input[@type='submit']").click()
#get permission id, which will be used to find the label in permission change page
db_conn = Call_Oracle()
db_conn.login_to_xroad6()
perm_group_id = db_conn.get_permission_id_from_label(permission_group_label, 'group', element_type)
perm_id = db_conn.get_permission_id_from_label(permission_label, 'company_permission', element_type)
db_conn.close_connection()
perm_group_tag = perm_group_id + '_span'
perm_id_tag = perm_id
perm_group_xpath = "//span[@id='%s']"%perm_group_tag
per_id_xpath = "//input[@id='%s']"%perm_id_tag
self.page.find_element_by_xpath(driver, perm_group_xpath).click()
        self.page.find_element_by_xpath(driver, per_id_xpath).click()
def save_company_permission(self, driver):
logger.info("To save permissions....")
time.sleep(5)
self.page.find_element_by_xpath(driver, "//input[@type='button']").click()
|
[
"libing2@tuniu.com"
] |
libing2@tuniu.com
|
6a26088a8ae1b4926fb06b5752628650e72e72d1
|
a85f4cc285131af1fede46c43f984f32c8e5f5b1
|
/gita-api/api.py
|
13aa1bd59b69ccbe44e5a6d76172a459e64c63f3
|
[] |
no_license
|
AxiomSamarth/kubernetes-101
|
9bf83cb4e6e4a44e781253bfdc4ee3e79ba3b05c
|
62aead505c6af50ebd061db977ca6207976f3e7e
|
refs/heads/master
| 2022-11-10T06:20:13.101040
| 2020-06-29T12:28:41
| 2020-06-29T12:28:41
| 275,803,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,841
|
py
|
import os
import utils
import uuid
from populate_db import populate_db
from flask import Flask, jsonify, request, redirect, session
app = Flask(__name__)
app.secret_key = os.environ['secret_key']
@app.route('/', methods=['GET'])
def index():
return {'message': 'Welcome to Gita API'}
@app.route('/login/', methods=['POST'])
def login():
    credentials = request.json
    if credentials['username'] and credentials['password']:
        result, status_code = utils.login(credentials)
        if status_code == 200:
            session['id'] = str(uuid.uuid4())
            return jsonify({'id': session['id']})
        return result, status_code
    return {'error_message': 'username and password are required'}, 400
@app.route('/signup/', methods=['POST'])
def signup():
    credentials = request.get_json()
    if credentials['username'] and credentials['password']:
        result = utils.signup(credentials)
        return result
    return {'error_message': 'username and password are required'}, 400
@app.route('/chapter/<chapter_number>/', methods=['GET'])
def get_chapter(chapter_number):
session_id = request.headers.get('id', None)
if session_id == session.get('id'):
return utils.get_verse_from_db(chapter_number)
return {'error_message': 'Session not found. Please login'}
@app.route('/chapter/<chapter_number>/verse/<verse_number>/', methods=['GET'])
def get_verse(chapter_number, verse_number):
session_id = request.headers.get('id', None)
if session_id == session.get('id'):
return utils.get_verse_from_db(chapter_number, verse_number)
return {'error_message': 'Session not found. Please login'}
@app.route('/logout/', methods=['POST'])
def logout():
session_id = request.headers.get('id')
if session_id == session.get('id'):
del session['id']
return {}, 200
return {'error_message': 'Invalid Session'}, 401
if __name__ == '__main__':
populate_db()
app.run(host='0.0.0.0', port=8080)
|
[
"deyagondsamarth@gmail.com"
] |
deyagondsamarth@gmail.com
|
7a4cde7d038ec75fbbd432077d9b44e040e561a9
|
e3001008fc1d201f4380040d4a498a92109665a5
|
/opencv_color_detection.py
|
28b9d61bc7afe59fe951956e901404ff57b3696d
|
[] |
no_license
|
RafaelBorges-code/color_detection_opencv
|
dfc1c7d3782b36981702f2c47abdc88bb7f08316
|
55945a81bf2c5ec9b01fba673d8ec844a6e5e315
|
refs/heads/main
| 2023-02-12T23:10:03.974050
| 2021-01-15T14:37:14
| 2021-01-15T14:37:14
| 329,923,077
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,627
|
py
|
# Rafael Borges (inspired by Murtaza's code)
import cv2
import numpy as np
def empty(a):
pass
cv2.namedWindow("TrackBars")
cv2.resizeWindow("TrackBars", 300, 300)
cv2.createTrackbar("Hue Min", "TrackBars", 81, 179,
empty) # nome da barra, Nome da tela onde a barra vai ficar, minvalue, maxvalue, função a ser chamada sempre que o valor mudar
cv2.createTrackbar("Hue Max", "TrackBars", 108, 179, empty)
cv2.createTrackbar("Sat Min", "TrackBars", 18, 255, empty)
cv2.createTrackbar("Sat Max", "TrackBars", 180, 255, empty)
cv2.createTrackbar("Val Min", "TrackBars", 111, 255, empty)
cv2.createTrackbar("Val Max", "TrackBars", 217, 255, empty)
while True:
img = cv2.imread("eu.png")
imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h_min = cv2.getTrackbarPos("Hue Min", "TrackBars") # what trackbar from what window
h_max = cv2.getTrackbarPos("Hue Max", "TrackBars")
s_min = cv2.getTrackbarPos("Sat Min", "TrackBars")
s_max = cv2.getTrackbarPos("Sat Max", "TrackBars")
v_min = cv2.getTrackbarPos("Val Min", "TrackBars")
v_max = cv2.getTrackbarPos("Val Max", "TrackBars")
lower = np.array([h_min, s_min, v_min])
upper = np.array([h_max, s_max, v_max])
mask = cv2.inRange(imgHSV, lower, upper)
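    # pixels whose HSV values fall inside [lower, upper] become white in the mask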
    img_results = cv2.bitwise_and(img, img,
                                  mask=mask)  # destination image, image the mask will be applied to, and the mask
    cv2.imshow("me", img)
    cv2.imshow("me in another color", imgHSV)
cv2.imshow("mask", mask)
cv2.imshow("Result mask", img_results)
cv2.waitKey(1)
|
[
"noreply@github.com"
] |
RafaelBorges-code.noreply@github.com
|
d7984b9e313d82b39181e07670559bc8af497925
|
6a462bfca1f1ef66500fcbe4f79a94401bf16301
|
/manager/migrations/0007_auto_20201207_0116.py
|
29b4defbc83f4f677fc661114dff7a0df4089e0b
|
[] |
no_license
|
vmysoulinflames/django-project
|
d834887533d9231f096c903462b66e454e30ee73
|
021177248d1735cc5213024b4f5c3a459720def2
|
refs/heads/master
| 2023-01-28T04:02:31.889820
| 2020-12-08T13:21:26
| 2020-12-08T13:21:26
| 318,991,593
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
# Generated by Django 3.1.4 on 2020-12-06 22:16
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('manager', '0006_book_authors'),
]
operations = [
migrations.AlterField(
model_name='book',
name='authors',
field=models.ManyToManyField(related_name='books', to=settings.AUTH_USER_MODEL),
),
]
|
[
"vmysoulinflames@yahoo.com"
] |
vmysoulinflames@yahoo.com
|
6e8a66df2f863e166b8b0e9beec03850e9668c32
|
f4a1ec0f1b376809e920ed6ed4c08490acb1e071
|
/venv/bin/django-admin.py
|
51f3f1b1c48d8ed9bb151787db7bc1df83186e1e
|
[] |
no_license
|
Peterbamidele/ChatApp-Appication
|
ac6bfd6be56f4962d5601b20034c669c7c5aa10a
|
4cf7d97f7dc3f8c6101ab328c728d4844da096d0
|
refs/heads/main
| 2023-05-08T07:14:16.091013
| 2021-05-23T09:00:56
| 2021-05-23T09:00:56
| 369,144,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 697
|
py
|
#!/home/peter/Django Project/PenxtaChatProject/venv/bin/python
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
|
[
"peterbamidele@gmail.com"
] |
peterbamidele@gmail.com
|
a3ada1d32a3f2d0f05b7beb828cfee81fe291f88
|
0b055bb267b33b8267e85415efd14e10fbbf4351
|
/Assignment/TC_04.py
|
e624536d120d5c504b920fa9505e4172da85939d
|
[] |
no_license
|
zebratesting/Python
|
08b2e6aba4a920bbb815447c0214595624b681d9
|
1d1bdbb70796d76a8518d7ebff118acf91293063
|
refs/heads/main
| 2023-08-23T03:45:00.918318
| 2021-10-11T14:01:37
| 2021-10-11T14:01:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
from selenium import webdriver
from selenium.webdriver.common.by import By
driver=webdriver.Firefox(executable_path="/work/Drivers/drivers/geckodriver.exe")
driver.get("https://www.amazon.in/")
ele=driver.find_elements(By.XPATH,"//div[@id='nav-xshop-container']/div/a")
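# print the text of every top-nav link found under the nav-xshop container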
for e in ele:
    print(e.text)
driver.quit()  # close the browser when done
|
[
"zebratesting5@gmail.com"
] |
zebratesting5@gmail.com
|
5a3257fd2c4116fc8c812fdee02ee42aa77ee27c
|
dd91996c41b3fa9c9cb5e37df330fd1c36248739
|
/qr_app/app/config.py
|
8edcecda6d4e432b826287c0fb34f662a4c220a2
|
[] |
no_license
|
muniter/qr_generator
|
a555b61f7d659d032a0062c821099939ec30d30a
|
6a2be2cd42fe3aaf186410cc90b391ecb3fa9b7e
|
refs/heads/master
| 2020-07-03T15:36:44.920661
| 2020-05-18T20:38:30
| 2020-05-18T20:38:30
| 201,954,723
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 152
|
py
|
import os
class Config(object):
    SECRET_KEY = os.environ.get('SECRET_KEY') or \
        'golf-alfa-bravo-Kilo-uniform-victor-tango-uniform-sierra'
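# Typical (assumed) Flask usage elsewhere in the app: app.config.from_object(Config)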
|
[
"graulopezjavier@gmail.com"
] |
graulopezjavier@gmail.com
|
8d317324f6e2e10f963c88094da32576c28faebf
|
1af28b45d10fc3fdf268c424598d30cd726046f3
|
/Word2Vec Models Proj /test_google_word2vec_model.py
|
f6cc0f26076a0e6e8becd864df96eb84003ac502
|
[
"MIT"
] |
permissive
|
sadipgiri/Computational-Linguistics-NLP
|
00ac420d817f6529320accfd1f4c711e563733d9
|
044f5c3a0d513cb75edf4f8ef33c3d599ee078ab
|
refs/heads/master
| 2022-01-24T07:59:04.266697
| 2019-07-19T21:43:37
| 2019-07-19T21:43:37
| 197,802,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,512
|
py
|
#!/usr/bin/env python3
'''
Word-analogy python3 program that solves analogies such as "dog is to cat as puppy is to ___" using the Google Word2Vec model.
Author: Sadip Giri (sadipgiri@bennington.edu)
Date: 26th May, 2019
'''
import numpy as np
import os
import sys
from gensim.models import KeyedVectors
words_vector_file = sys.argv[1]
input_directory = sys.argv[2]
output_directory = sys.argv[3]
evaluation_file = sys.argv[4]
# loading the pre-trained Google word2vec model passed on the command line
# (e.g. GoogleNews-vectors-negative300.bin)
model = KeyedVectors.load_word2vec_format(fname=words_vector_file, binary=True)
def compare_and_return_fourth_vector(word1, word2, word3):
    '''
    Solve the analogy "word1 is to word2 as word3 is to ___": the answer
    vector is word2 - word1 + word3, i.e.
    most_similar(positive=[word2, word3], negative=[word1]).
    Returns 'none' if any input word is out of vocabulary.
    '''
    word1 = word1.lower()
    word2 = word2.lower()
    word3 = word3.lower()
    list_of_words = list(model.wv.vocab)
    if (word1 not in list_of_words) or (word2 not in list_of_words) or (word3 not in list_of_words):
        return 'none'
    return model.most_similar(positive=[word2, word3], negative=[word1], topn=1)[0][0]  # a list of (word, probability) tuples
def read_write_format(input_dir, output_dir, evaluation_file):
'''
Task:
- Create new analogy test files out of input_test (GoogleTestFile)
- including evaluation file within it (accuracy of each file; plus, total accuracy!)
Approach:
- List all test files (only .txt) e.g. .txt files in GoogleTestFile
    - Loop through all lines (2 pairs / 4 words) and compare the 4th test word with the predicted fourth word
- Finally write fourth predicted words in all new Output file for each test files
- At the same time, write evaluation files in each output dir according to given format
- including: accuracy of each file and total accuracy of that output dir type.
'''
# track total accuracy:
total_correct_guesses = 0
total_guesses = 0
# to finally write evaluation file:
eval_write_format = []
# list all .txt files from given input test dir e.g. GoogleTestFile
input_files = [file for file in os.listdir(input_dir) if file.endswith('.txt')]
for file in input_files:
print('Loading: {0}'.format(file))
# to track accuracy of each file
temp_correctly_guessed = 0
temp_write_format = []
# read each file's lines
with open('{0}/{1}'.format(input_dir, file), 'r') as f:
lines = f.read().splitlines()
temp_total_words = len(lines)
# for each line with 4 words: find predicted fourth word and compare with existing 4th word
# -> using above compare function
for line in lines:
words = line.split()
fourth_word = compare_and_return_fourth_vector(words[0], words[1], words[2])
if fourth_word == words[3].lower():
temp_correctly_guessed += 1
            temp_write_format.append(' '.join(words[:3] + [fourth_word + '\n']))  # could use .capitalize() to follow case consistency!!
        # finally, write 3 words + 4th predicted word to the specified output dir
with open('{0}/{1}'.format(output_dir,file), 'w') as write_file:
write_file.write(''.join(temp_write_format))
print('Done: {0}'.format(file))
eval_write_format.append(file + '\n')
eval_write_format.append('ACCURACY: {0}% ({1}/{2})\n'.format(temp_correctly_guessed/temp_total_words * 100, temp_correctly_guessed, temp_total_words))
total_correct_guesses += temp_correctly_guessed
total_guesses += temp_total_words
# finally include total accuracy in evaluation file:
eval_write_format.append('Total accuracy: {0}% ({1}/{2})'.format(total_correct_guesses/total_guesses * 100, total_correct_guesses, total_guesses))
with open(evaluation_file, 'w') as eval_file:
eval_file.write(''.join(eval_write_format))
return 'Hurray Done!'
# finally: execute everything at once:
read_write_format(input_directory, output_directory, evaluation_file)
'''
Techniques/Roadblocks:
    - Learned how to use a pre-trained word-to-vector model.
    - Also used gensim's builtin word-similarity functionality, which is much faster than our own techniques.
    - Was able to play with binary as well as text word-vector file formats for faster computations.
    - Adapted Project 5's GoogleTestSet read/write functionality.
    - I wasn't able to fully test Google's large word-to-vector model because of time and memory complexity.
'''
|
[
"sadipgiri@bennington.edu"
] |
sadipgiri@bennington.edu
|
4cdab0e1ea80ccb7824f0df429903529ccb98868
|
49cb74338813557baa91973087c3dfb8e74936c6
|
/sovrin/test/upgrade/test_node_upgrade.py
|
0c365d850f564d97c82fa16431357cb5c62592f8
|
[
"Apache-2.0"
] |
permissive
|
pombredanne/old-sovrin
|
a0e1b607880888f9f0e8379cdd00f9726b051121
|
d4e705054b7252c62fea00114060035c6eb314a4
|
refs/heads/master
| 2021-03-16T08:59:57.817420
| 2017-07-13T21:33:09
| 2017-07-13T21:33:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,076
|
py
|
import os
import pytest
import sovrin
from plenum.common.eventually import eventually
from plenum.common.txn import TXN_TYPE
from plenum.common.types import OPERATION, f
from sovrin.common.txn import NODE_UPGRADE
from sovrin.test.upgrade.helper import bumpedVersion
oldVersion = sovrin.__metadata__.__version_info__
oldVersionStr = sovrin.__metadata__.__version__
# Increasing the version of code
sovrin.__metadata__.__version__ = bumpedVersion()
sovrin.__metadata__.__version_info__ = tuple(int(v) for v in
sovrin.__metadata__.__version__.split('.'))
from sovrin.test.conftest import tdirWithPoolTxns, poolTxnNodeNames
whitelist = ['unable to send message']
# TODO: Implement a client in node
@pytest.fixture(scope="module")
def tdirWithPoolTxns(tdirWithPoolTxns, poolTxnNodeNames, tconf):
# For each node, adding a file with old version number which makes the node
# think that an upgrade has been performed
for nm in poolTxnNodeNames:
path = os.path.join(tdirWithPoolTxns, tconf.nodeDataDir, nm)
os.makedirs(path)
with open(os.path.join(path, tconf.lastRunVersionFile), 'w') as f:
f.write(oldVersionStr)
return tdirWithPoolTxns
@pytest.fixture(scope="module")
def upgradeSentToAllNodes(looper, nodeSet, nodeIds):
def check():
for node in nodeSet:
assert len(node.configLedger) == len(nodeSet)
ids = set()
for txn in node.configLedger.getAllTxn().values():
assert txn[TXN_TYPE] == NODE_UPGRADE
ids.add(txn[f.IDENTIFIER.nm])
ids.add(node.id)
assert ids == set(nodeIds)
looper.run(eventually(check, retryWait=1, timeout=5))
def testNodeDetectsUpgradeDone(looper, nodeSet):
def check():
for node in nodeSet:
assert node.upgrader.hasCodeBeenUpgraded == \
sovrin.__metadata__.__version__
looper.run(eventually(check, retryWait=1, timeout=5))
def testSendNodeUpgradeToAllNodes(upgradeSentToAllNodes):
pass
|
[
"lovesh.harchandani@evernym.com"
] |
lovesh.harchandani@evernym.com
|
c3e7a5101a691117f06d2a82ecb47ea2bea05f2e
|
ac21fba3d354e02fd25c95dee9bc625004759318
|
/01-basics/main.py
|
05e3df99dc4e1b7f07e300d14a34c6333e28832b
|
[] |
no_license
|
cech-matej/kivy
|
cabff87479cf9d9e311188a51bd68220d7ca3550
|
6076f24ed108fe90c1864002346c0963378ebc58
|
refs/heads/main
| 2023-03-05T18:32:11.758198
| 2021-02-14T18:37:02
| 2021-02-14T18:37:02
| 338,873,030
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
import kivy
from kivy.app import App
from kivy.uix.label import Label
class MyApp(App):
def build(self):
return Label(text='asdf')
if __name__ == "__main__":
MyApp().run()
|
[
"noreply@github.com"
] |
cech-matej.noreply@github.com
|
155963742e5dc66ddc928eace781c92d74428577
|
0ecb2a901ab86b5500770a4fdf960ecdb3cc6c74
|
/MLBook/12 Evolutionary/greedyKnapsack.py
|
41f6317b156bacc3d922fe4abb3aefb9c8ef2d67
|
[
"MIT"
] |
permissive
|
AjayKrP/Machine-Learning
|
ad69da80234522d7dfe56a32688e2744bf1b4ea8
|
d039e99bbe09845174a845786099cfdd9a6913cb
|
refs/heads/master
| 2021-08-30T09:22:14.381515
| 2017-12-17T07:29:33
| 2017-12-17T07:29:33
| 109,667,127
| 0
| 0
|
MIT
| 2017-12-17T07:31:41
| 2017-11-06T08:24:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
# Code from Chapter 12 of Machine Learning: An Algorithmic Perspective
# by Stephen Marsland (http://seat.massey.ac.nz/personal/s.r.marsland/MLBook.html)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2008
# A greedy algorithm to solve the Knapsack problem
from numpy import *
def greedy():
maxSize = 500
sizes = array([109.60,125.48,52.16,195.55,58.67,61.87,92.95,93.14,155.05,110.89,13.34,132.49,194.03,121.29,179.33,139.02,198.78,192.57,81.66,128.90])
sizes.sort()
    newSizes = sizes[::-1]  # reverse into descending order over all items
space = maxSize
while len(newSizes)>0 and space>newSizes[-1]:
# Pick largest item that will fit
item = where(space>newSizes)[0][0]
        print(newSizes[item])
space = space-newSizes[item]
newSizes = concatenate((newSizes[:item],newSizes[item+1:]))
print "Size = ",maxSize-space
greedy()
|
[
"kajay5080@gmail.com"
] |
kajay5080@gmail.com
|
705faeff683c613a8cb968dbed458fd80b4b37ce
|
2206dfbf3822e50135aea020bffa071eaf959c91
|
/MyLexer.py
|
9807840db2e88ef9a80a5a9a30f21b006bb5e775
|
[] |
no_license
|
XinyaoTian/compilation
|
6e24f775632f9fc6128088171e3c6013e5505a30
|
4b9d2d16104bd5d946990b0e309d8a03b250e138
|
refs/heads/master
| 2020-03-16T20:51:47.921012
| 2018-06-19T01:09:11
| 2018-06-19T01:09:11
| 132,975,514
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,149
|
py
|
# -*- encoding:utf-8 -*-
# Compiler-principles course lab: lexical analysis
# written by : XinyaoTian
# at : May/10th/2018
# run with : python 3.6.1 without any packages
import logging
logging.basicConfig(level=logging.DEBUG)
from func_pack import hex_to_dec
from func_pack import oct_to_dec
# import PLY, the flex/yacc library for Python
from ply import lex
# Define the token types we may need.
tokens = [
    "IDENTIFIER",  # identifier (variable name)
    "REAL10",      # decimal fraction
    "REAL8",       # octal fraction
    "REAL16",      # hexadecimal fraction
    "INT16",       # hexadecimal integer
    "INT8",        # octal integer
    "INT10",       # decimal integer
    # "SYMBOL"   # symbols
    # "KEYWORD"  # keywords
]
# symbols = [
#     'lparent'
# ]
#
# tokens = tokens + symbols
# Reserved-word types: lex's usual `reserved` dict picks out the keywords.
# Must be used together with IDENTIFIER.
reserved = {
    'if': 'IF',
    'then': 'THEN',
    'else': 'ELSE',
    'while': 'WHILE',
    'do': 'DO'
}
# Add the reserved words to tokens
tokens = tokens + list(reserved.values())
# Note! Lexing priority follows the order in which the rule functions are
# defined: the earlier a rule is defined, the higher its priority.
# E.g. to tell = and == apart, the rule for == must be defined before =.
# hexadecimal real type
def t_REAL16(t):
    r'0[x|X][1-9a-f][0-9a-f]*\.[0-9a-f]+|0[x|X]0+\.[0-9a-f]+'
    # matches hexadecimal fractions with a non-zero integer part
    # and hexadecimal fractions with a zero integer part
    t.value = hex_to_dec(t.value)
    t.value = float(t.value)
    return t
# octal real type
def t_REAL8(t):
    r'0[1-7][0-7]*\.[0-7]+|00+\.[0-7]+'
    # matches octal fractions with a non-zero integer part
    # and octal fractions with a zero integer part
    t.value = oct_to_dec(t.value)
    t.value = float(t.value)
    return t
# decimal real type
def t_REAL10(t):
    r'[1-9][\d]*\.[\d]+|0\.[\d]+'
    # matches decimal fractions with a non-zero integer part
    # and decimal fractions with a zero integer part
    t.value = float(t.value)
    return t
# hexadecimal integer
def t_INT16(t):
    r'0[x|X][0-9a-f][0-9a-f]*'
    t.value = hex_to_dec(t.value)
    t.value = int(t.value)
    return t

# octal integer
def t_INT8(t):
    r'0[0-7]+|000*'
    t.value = oct_to_dec(t.value)
    t.value = int(t.value)
    return t

# decimal integer
def t_INT10(t):
    r'[1-9][\d]*|0'
    t.value = int(t.value)
    return t
# keyword matching (handled through `reserved` instead)
# def t_KEYWORD(t):
#     r'if|then|else|while|do'
#     t.type = t.type
#     t.value = t.value
#     return t

# Use a regex to match every form an IDENTIFIER can take
def t_IDENTIFIER(t):
    r'[A-Za-z][A-Za-z0-9]*[.|_][A-Za-z0-9]+|[A-Za-z][A-Za-z0-9]*'
    # identifiers starting with a letter and made of letters and digits,
    # optionally containing . or _
    t.type = reserved.get(t.value, 'IDENTIFIER')  # check for reserved words first:
    # dict.get() rewrites t.type for reserved words and keeps IDENTIFIER otherwise
    if t.type != 'IDENTIFIER':  # when t.type is not IDENTIFIER
        t.value = t.value
    return t
# Use the built-in literals, together with the rules defined below, to recognize symbols
literals = ['+', '-', '*', '/', '=', '(', ')', ';', '<', '>']
def t_add(t):
r'\+'
t.type = '+'
t.value = '+'
return t
def t_sub(t):
r'-'
t.type = '-'
t.value = '-'
return t
def t_mult(t):
r'\*'
t.type = '*'
t.value = '*'
return t
def t_div(t):
r'\/'
t.type = '/'
t.value = '/'
return t
def t_equal(t):
r'='
t.type = '='
t.value = '='
return t
def t_lparent(t):
r'\('
t.type = '('
t.value = '('
return t
def t_rparent(t):
r'\)'
t.type = ')'
t.value = ')'
return t
def t_semi(t):
r';'
t.type = ';'
t.value = ';'
return t
def t_lts(t):
r'<'
t.type = '<'
t.value = '<'
return t
def t_gts(t):
r'>'
t.type = '>'
t.value = '>'
return t
# A rule with the ignore_ prefix discards the matched token, e.g. spaces ' ' and tabs '\t':
# it matches away characters such as \s and \t that play no part in lexing
t_ignore_SPACE = (
    r'[\s]+'
)
# line-number tracking
def t_newline(t):
    r'\n+'
    t.lexer.lineno += len(t.value)

# error handling
def t_error(t):
    logging.debug("Illegal character '%s'" % t.value[0] + ' in Line ' + str(t.lineno))
    # t.lexer.skip(1)  # skip the offending character and keep going
    raise TypeError("Unknown text '%s'" % (t.value,) + '. Please check your syntax.')
#---------------
# lex.lex()
#---------------
# # Build the lexer
lexer = lex.lex()
# lexer.code = None
# lexer.true = None
# lexer.false = None
# lexer.next = None
# lexer.begin = None
# lexer.place = None
# lexer.value = None
# lexer.name = None
# Build the lexer
# lexer = lex.lex()
#
# with open('./lab1_testData.txt','r') as f:
# data = f.read()
#
# # 将测试用数据传入lex
# lexer.input(data)
#
# # 切分次 Tokenize
# while True:
# tok = lex.token()
# if not tok:
# break # no more input
# print (repr(tok.type),repr(tok.value))
# lex.input("CH3COOH")
# for tok in iter(lex.token, None):
# print repr(tok.type), repr(tok.value)
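# A minimal driver sketch (assumption: hex_to_dec/oct_to_dec in func_pack
# return the decimal string for the given literal; the sample input below
# only uses tokens defined above):
if __name__ == '__main__':
    lexer.input("if counter1 = 0x1f.8 then counter1 = 017 else counter1 = 42")
    for tok in iter(lexer.token, None):
        print(repr(tok.type), repr(tok.value))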
|
[
"31195026+XinyaoTian@users.noreply.github.com"
] |
31195026+XinyaoTian@users.noreply.github.com
|
54cf8fa431ac0ab108f7f4bf8786d1f6f8ca70a3
|
b657f54555330af28ef9f22ee935bfed697a91f0
|
/Exercicios Loop/exercicio 49 - secao 06.py
|
48a38454ab83c0d012d335aa1057b612acbca913
|
[
"MIT"
] |
permissive
|
cristinamais/exercicios_python
|
cecadd29a5f610b30ee929b94b00a4491d90d116
|
8a09b0b68ffaa62d13afb952998e890a79667c7e
|
refs/heads/master
| 2021-06-12T18:58:35.595609
| 2020-04-09T14:01:43
| 2020-04-09T14:01:43
| 254,380,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,125
|
py
|
"""
49 - O funcionário chamado Carlos tem um colega chamado João que recebe um salário que equivale a um terço do seu salário. Carlos gosta de fazer
aplicações na caderneta de poupança e vai aplicar seu salário integralmente nela, pois está rendendo 2% ao mês. João aplicará seu salário
integralmente no fundo de renda fixa, que está rendendo 5% ao mês. Construa um programa que deverá calcular e mostrar a quantidade de meses
necessários para que o valor pertencente a João iguale ou ultrapasse o valor pertencente a Carlos. Teste com outros valores para as taxas.
"""
salarioCarlos = int(input('Digite o valor do salário de Carlos: '))
salarioJoao = salarioCarlos // 3
print(f'Salário de João: {salarioJoao}')
renda1 = 0.02
renda2 = 0.05
cont = 0
while salarioJoao <= salarioCarlos:
salarioCarlos = salarioCarlos + (salarioCarlos * renda1)
salarioJoao = salarioJoao + (salarioJoao * renda2)
cont += 1
print(f"Carlos's salary + investment: {salarioCarlos:.2f}\nJoão's salary + investment: {salarioJoao:.2f}")
print(f'João took {cont} months to equal or overtake Carlos.')
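# Closed-form cross-check (a sketch, assuming João starts with exactly one
# third of Carlos's salary): João catches up once ((1+renda2)/(1+renda1))**n >= 3.
import math
estimate = math.ceil(math.log(3) / math.log((1 + renda2) / (1 + renda1)))
print(f'Closed-form estimate: {estimate} months')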
|
[
"cristinamais2@gmail.com"
] |
cristinamais2@gmail.com
|
bf6825d8634e68270cecd6c3c8ce95dbfbe1286f
|
673e829dda9583c8dd2ac8d958ba1dc304bffeaf
|
/data/multilingual/Latn.BEM/Serif_8/pdf_to_json_test_Latn.BEM_Serif_8.py
|
0a1128ee06141690674f2f7b59296f84324fb32f
|
[
"BSD-3-Clause"
] |
permissive
|
antoinecarme/pdf_to_json_tests
|
58bab9f6ba263531e69f793233ddc4d33b783b7e
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
refs/heads/master
| 2021-01-26T08:41:47.327804
| 2020-02-27T15:54:48
| 2020-02-27T15:54:48
| 243,359,934
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.BEM/Serif_8/udhr_Latn.BEM_Serif_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
a4c94ed077075a01ef12f176fbccff19ef24a0fc
|
a39eabf8a11d43a2031a89d75cebd0bf62feeb52
|
/100skills/dictgen.py
|
03cf132a2e798129a274f03cda2be51d2a1e1a63
|
[] |
no_license
|
nirajkvinit/pyprac
|
8e7875075916d584a682fc20a54b61b53c6cdeac
|
9697ea1079b84b684951fcfba0c96c77ae8af16c
|
refs/heads/master
| 2021-01-19T22:10:25.830122
| 2020-01-14T15:43:12
| 2020-01-14T15:43:12
| 88,765,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
'''
With a given number n, write a program to generate a dictionary that
contains (i, i*i) such that i is a number between 1 and n (both included),
and then the program should print the dictionary.
'''
def dictgen():
    n = int(input("Enter a number: "))
    d = dict()
    for i in range(1, n+1):
        d[i] = i * i  # (i, i*i), as the spec requires
    return d
if __name__ == "__main__":
print(dictgen())
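# A non-interactive variant for testing (a sketch; avoids input()):
def dictgen_n(n):
    return {i: i * i for i in range(1, n + 1)}

assert dictgen_n(3) == {1: 1, 2: 4, 3: 9}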
|
[
"niraj.kumar@msr-it.com"
] |
niraj.kumar@msr-it.com
|
f1e8e1d0418ac6f393c84e99558d8991fa587578
|
2ea4cb0bea60e1cf58d8d4adcd5e71c0fc507032
|
/iot-wearable/receiver.py
|
f2819860f60137f7826ea22d8152e803e8bcd507
|
[] |
no_license
|
SkylerLiu0422/danger-tracker
|
7090195ef6f750fdc8470932b5b425f42bed300f
|
a60b8c6c159e43b7011582e51db7de15919056ae
|
refs/heads/main
| 2023-07-09T19:50:56.874683
| 2021-08-08T18:34:57
| 2021-08-08T18:34:57
| 392,039,362
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 978
|
py
|
import pika
import pickle
import time
import dbtools
from wearable_entity import WearableDevice
def receive_priority(key):
conn = pika.BlockingConnection(pika.ConnectionParameters(host="34.146.240.199"))
channel = conn.channel()
max_priority = 10
channel.queue_declare(queue=key, arguments={"x-max-priority": max_priority})
db = dbtools.get_database()
def callback(ch, method, properties, body):
device = pickle.loads(body)
body_status = device.body_status
send_status = dbtools.write_indicator(db, device)
print(device.uid, time.strftime("%Y-%m-%d %H:%M:%S", device.time), device.body_status, send_status)
channel.basic_ack(delivery_tag=method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(
queue=key,
on_message_callback=callback,
auto_ack=False
)
channel.start_consuming()
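# A matching publisher sketch (an assumption: same host and priority ceiling
# as the consumer above; a WearableDevice instance is pickled and sent with
# an explicit per-message priority):
def publish_priority(key, device, priority=5):
    conn = pika.BlockingConnection(pika.ConnectionParameters(host="34.146.240.199"))
    channel = conn.channel()
    channel.queue_declare(queue=key, arguments={"x-max-priority": 10})
    channel.basic_publish(
        exchange="",
        routing_key=key,
        body=pickle.dumps(device),
        properties=pika.BasicProperties(priority=priority),
    )
    conn.close()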
receive_priority("beta")
|
[
"noreply@github.com"
] |
SkylerLiu0422.noreply@github.com
|
b39cb21349c3d5c74ef15687f80ae2891e44bd5b
|
395b531dbc076ee10d033985e7c7ddc7cb721336
|
/main.py
|
0a73dd970bff035c23569d9e42aec46380d39d11
|
[
"MIT"
] |
permissive
|
jimbunny/cartoonVideo
|
652af5618788e11ecece555d7ace1167ffb9aab1
|
c3b947625c71dc7de62e75c91077b6c34819ac84
|
refs/heads/master
| 2020-06-20T13:43:46.575973
| 2019-07-23T09:52:40
| 2019-07-23T09:52:40
| 197,140,390
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,057
|
py
|
from combineVideo.download_video import parser_video
from combineVideo.video_to_picture import video_picture
from combineVideo.read_video_info import read_video_info
from combineVideo.clear_picture import clear_picture
from combineVideo.combine_bk import combine_bk
from combineVideo.combine_video import combine_video
url = 'http://v.douyin.com/By1ten/'
download_url = 'http://videotool.com.cn/video/loadNative?videoUrl='  # download endpoint for the resolved video
download_file = "D:\\pythonpractice\\download\\videoplayback.mp4"  # where the downloaded video is saved
from_dir = "D:\\pythonpractice\\from2\\"  # where frames extracted from the video are saved
to_dir = "D:\\pythonpractice\\to2\\"  # where cartoon-rendered frames are saved
clear_dir = "D:\\pythonpractice\\clear_to2\\"  # where sharpened frames are saved
bk = "D:\\pythonpractice\\bk.jpg"  # background image used for replacement
pathIn = 'D:\\pythonpractice\\test2\\test_output\\'  # frames used to assemble the final animation
pathOut = 'D:\\pythonpractice\\video_new.avi'  # the final output video
fps = 23.976023976023978
if __name__ == '__main__':
url = 'http://v.douyin.com/B71E4e/'
    # download the video with the watermark removed
    parser_video(url, download_url, download_file)
    # split the video into individual frames
    video_picture(download_file, from_dir)
    # read the video metadata
    read_video_info(download_file)
    # sharpen the frames
    clear_picture(from_dir, clear_dir)
    # replace the photo background
    combine_bk(bk, clear_dir, to_dir)
    # cartoonize the frames (run externally):
    # https://github.com/Yijunmaverick/CartoonGAN-Test-Pytorch-Torch
    # python test.py --input_dir /root/CartoonGAN-Test-Pytorch-Torch/test_img --style Hosoda --gpu -1
    # assemble the processed frames into a video
    combine_video(pathIn, pathOut, fps)
    # audio/video muxing with ffmpeg, for example:
# ffmpeg -i test.mp3 -ss 00:00:00 -t 00:00:06 -acodec copy test2.mp3
# ffmpeg - i video.avi - i test2.mp3 - c copy video2.avi
# ffmpeg -i output.mp4 -c:a aac -b:a 160k output2.mp4
# ffmpeg -i video.avi -i test2.mp3 -c:v copy -c:a aac -strict experimental output.mp4
# ffmpeg -i 3.mp4 -vn -y -acodec copy 3.aac/m4a
|
[
"jim_yu@skyline.com"
] |
jim_yu@skyline.com
|
dd381424dbde3fe74ab47959adfb0bcf9c694d2d
|
344f8d42822e5c0f5ee86f2a15183f2f10a4e0c4
|
/Assignments/A08/tempCodeRunnerFile.py
|
8b64777d052ab299e2ae4ac5982e8cb82a3eb42c
|
[] |
no_license
|
ShaunJPartridge/4663-Cryptography-Partridge
|
ca912110042fd45b4e3f57dea2b0a77dd84188b0
|
6e397e60b79f8a6cf3b8af0dbd426637f6feed94
|
refs/heads/master
| 2023-02-01T23:13:16.649655
| 2020-12-17T20:06:54
| 2020-12-17T20:06:54
| 290,341,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 40
|
py
|
http://localhost:8080/frequency?letter=e
|
[
"spartridgeb15@gmail.com"
] |
spartridgeb15@gmail.com
|
ae52c0868ccb420d0d41bf3d1e99ef20af646bd4
|
2e7fa13a40dafa81c5852b7a9d70555c45814574
|
/DSTools/BCT/network.py
|
c8cd27560b8033a5558dfabee7f2f9bf8612ff37
|
[] |
no_license
|
Ziaeemehr/miscellaneous
|
5768c6f5a2fe76468faed4283a3572a44ccd0239
|
43a62aaa28c577b09f605a135818a2dacc75d67c
|
refs/heads/master
| 2021-07-24T02:43:51.032849
| 2020-09-23T06:17:17
| 2020-09-23T06:17:17
| 217,556,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,265
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import pylab as pl
# pl.switch_backend('agg')
import igraph
# import community
import networkx as nx
from copy import copy
from sys import exit
# np.set_printoptions(threshold=np.nan)
adrsf = '../data/'
class make_graph:
    ''' make different graphs and return their adjacency matrices
    as a 1-dimensional double vector in the STL library '''
def __init__(self):
self.G = 0
#---------------------------------------------------------------#
def convert_to_1D_vector(self, G):
        ''' Flatten the adjacency matrix of G into an STL DoubleVector
        (NOTE: relies on the SddeSolver SWIG module, which is not imported here). '''
N = self.N
A = nx.to_numpy_matrix(G)
A = A.reshape(N*N).tolist()[0]
M = SddeSolver.DoubleVector(N*N)
for i in range(N*N):
M[i] = A[i]
return M
def print_adjacency_matrix(self, G):
'''print adjacency matrix of graph on the screen '''
M = nx.to_numpy_matrix(G)
M = np.array(M)
for row in M:
for val in row:
print '%.0f'%val,
print
#---------------------------------------------------------------#
def complete_graph(self, N, plot_adj=False):
''' returns all to all graph '''
self.N = N
self.G = nx.complete_graph(N)
M = nx.to_numpy_matrix(self.G)
if plot_adj:
self.imshow_plot(M, "con")
return np.asarray(M)
#---------------------------------------------------------------#
def complete_weighted(self, N, clusters, weights, d, plot_adj=False):
        '''
        Return a complete weighted graph; weights are distributed over clusters
        of the same size.
        weights : list of length 2 -- the weights of edges inside and between clusters, respectively
        clusters : list of cluster lengths
        d : delay for all nodes
        '''
self.N = N
# I use delay as weight
self.modular_graph(N, 1, 1, clusters, weights[0],weights[1])
M = self.D
if plot_adj:
self.imshow_plot(np.asarray(M).reshape(N,N), "con")
self.D = self.complete_graph(N) * d
return M
#---------------------------------------------------------------#
def complete_hmn_weighted(self, n_M0, level, weights, delay,
plot_adj=False, seed=124):
'''
return a complete weighted graph, weights distribute in a hierarchical
modular form.
Single delay for every node.
'''
self.hierarchical_modular_graph(n_M0, level, 1, 1, 1, weights,
plot_adj=False, seed=124)
N = self.N
M = nx.to_numpy_matrix(self.G, weight='weight')
if plot_adj:
self.imshow_plot(M, "con")
self.D = (delay,)*N*N
return tuple(np.asarray(M).reshape(-1))
#---------------------------------------------------------------#
def erdos_renyi_graph(self, N, p, seed=123, directed=False, plot_adj=False):
''' returns Erdos Renyi graph '''
self.N = N
self.G = nx.erdos_renyi_graph(N,p,seed=seed, directed=False)
M = nx.to_numpy_matrix(self.G)
if plot_adj:
self.imshow_plot(M, "con")
return tuple(np.asarray(M).reshape(-1))
#---------------------------------------------------------------#
def modular_graph(self, N, pIn, pOut, lengths, dIn, dOut, plot_adj=False):
        ''' Returns a modular network with:
        N : number of nodes
        pIn : connectivity of nodes inside clusters
        pOut : connectivity of nodes between clusters
        n_cluster : number of clusters in the graph
        dIn : delay between nodes inside the clusters
        dOut : delay between nodes outside the clusters
        '''
self.N = N
M = np.zeros((N,N))
D = np.zeros((N,N))
n_cluster = len(lengths)
for i in range(N):
for j in range(i+1,N):
r = np.random.rand()
if r < pOut :
M[i,j] = M[j,i] = 1.0;
D[i,j] = D[j,i] = dOut
# empty inside the clusters
s = 0;
for k in range(n_cluster):
if k > 0:
s += lengths[k-1]
for i in range(s,(lengths[k]+s)):
for j in range(i+1,(lengths[k]+s)):
M[i,j] = M[j,i] = 0.0
D[i,j] = D[j,i] = 0.0
# fill inside the clusters
s = 0;
for k in range(n_cluster):
if k > 0:
s += lengths[k-1]
for i in range(s,(lengths[k]+s)):
for j in range(i+1,(lengths[k]+s)):
r = np.random.rand()
if r < pIn:
M[i,j] = M[j,i] = 1.0
D[i,j] = D[j,i] = dIn
# print delay matrix
def print_delay_matrix():
ofi = open("delay.txt","w")
for i in range(N):
for j in range(N):
ofi.write("%2.0f"%D[i,j])
ofi.write("\n")
ofi.close()
self.G = nx.from_numpy_matrix(M)
self.D = D
if plot_adj:
self.imshow_plot(M, "con")
# self.print_adjacency_matrix(self.G)
return M
#---------------------------------------------------------------#
def hierarchical_modular_graph(self, n_M0, level, prob0, prob, alpha,
delays, plot_adj=False, seed=125 ):
'''
n_M0 : size of module at level 1
s : number of levels
n_modules : number of modules
N : number of nodes
ps : probability of conection in each level
level one is 1 and the others determine with prob function
delays: delay in each level as a list
'''
        def probability(l, a=1, p=0.25):
            if l == 0:
                from sys import exit
                print "level should be an integer > 0", l
                exit(0)
            else:
                return a * p**l
s = level
n_modules = int(2**(s-1)) #number of modules
N = int(n_modules*n_M0) #number of nodes
self.N = N
# M0 = nx.complete_graph(n_M0)
M0 = nx.erdos_renyi_graph(n_M0, prob0, seed=seed)
for e in nx.edges(M0):
M0.add_edge(*e, weight=delays[0]) #delays act in weight attribute
ps = [prob0]+[probability(i,alpha,prob) for i in range(1,s)]
for l in range(1,s):
if l==1:
M_pre = [M0] * n_modules
else:
M_pre = copy(M_next)
M_next = []
k = 0
for ii in range(n_modules/(2**l)):
step = 2**(l-1)
tG = nx.convert_node_labels_to_integers(M_pre[k+1], step*n_M0)
tG1 = nx.compose(M_pre[k],tG)
edge = 0
effort = 1
''' make sure that connected modules are not isolated '''
while edge<1:
# print "effort ", effort
effort +=1
for i in range(len(tG1)):
for j in range(i+1,len(tG1)):
if (i<step*n_M0) & (j>step*n_M0-1) & (np.random.rand()<ps[l]):
tG1.add_edge(i,j,weight=delays[l])
edge += 1
M_next.append(tG1)
k += 2
self.G = M_next[0]
if plot_adj:
M = nx.to_numpy_matrix(self.G,weight=None)
self.imshow_plot(M, "con")
# self.print_adjacency_matrix(self.G)
D = nx.to_numpy_matrix(self.G, weight='weight')
self.D = np.asarray(D)
M = nx.to_numpy_matrix(self.G,weight=None)
return np.asarray(M)
#---------------------------------------------------------------#
def from_adjacency_matrix_graph(self,filename, plot_adj=False):
''' makes a graph from adjacency matrix in filename
and return 1D double vector in stl '''
A = np.genfromtxt(filename)
self.N = len(A)
if plot_adj:
self.imshow_plot(A,"con")
return tuple(A.reshape(-1))
#---------------------------------------------------------------#
def imshow_plot(self,data,name):
from mpl_toolkits.axes_grid1 import make_axes_locatable
fig = pl.figure(140,figsize=(6,6))
pl.clf()
ax = pl.subplot(111)
im = ax.imshow(data,interpolation='nearest', cmap='afmhot') # , cmap=pl.cm.ocean
ax.invert_yaxis()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
pl.colorbar(im, cax=cax)
pl.savefig(adrsf+name+".png")
# pl.close()
#---------------------------------------------------------------#
def multilevel(self, data):
conn_indices = np.where(data)
weights = data[conn_indices]
edges = zip(*conn_indices)
G = igraph.Graph(edges=edges, directed=False)
G.es['weight'] = weights
comm = G.community_multilevel(weights=weights, return_levels=False)
return comm
#---------------------------------------------------------------#
def read_ctx(self, filename, delays, plot_adj=False):
def check_symmetric(a, tol=1e-8):
return np.allclose(a, a.T, atol=tol)
data = np.genfromtxt(filename)
data = data/float(np.max(data))
        if not check_symmetric(data):
            data = np.maximum(data, data.transpose())
        print "Matrix is symmetric: %r" % check_symmetric(data)
N = data.shape[0]
self.N = N
comm = self.multilevel(data)
n_comm = max(comm.membership) + 1
lengths = [len(comm[i]) for i in range(n_comm)]
self.cluster_lengths = lengths
print 'There are %d communities. '% n_comm
#new indices of nodes-------------------------------------------
newindices = []
for i in range(n_comm):
newindices += comm[i]
# print newindices
# --------------------------------------------------------------
M = np.zeros((N,N)) # ordered connection matrix
for i in range(N):
for j in range(N):
M[i, j] = data[newindices[i], newindices[j]]
self.G = nx.Graph()
# whole matrix
for i in range(N):
for j in range(N):
if M[i,j]!=0:
self.G.add_edge(i,j, weight=M[i,j], delay=delays[1])
# in clusters
s = 0;
for k in range(n_comm):
if k > 0:
s += lengths[k-1]
for i in range(s,(lengths[k]+s)):
for j in range(i+1,(lengths[k]+s)):
if M[i,j] != 0:
self.G.add_edge(i, j, weight=M[i,j], delay=delays[0])
if plot_adj:
# M = nx.to_numpy_matrix(self.G,weight=None)
A = nx.attr_matrix(self.G, edge_attr='delay', rc_order=range(N))
self.imshow_plot(A,'0_Delay')
B = nx.attr_matrix(self.G, edge_attr='weight', rc_order=range(N))
self.imshow_plot(B,'0_Weight')
self.D = nx.attr_matrix(self.G, edge_attr='delay', rc_order=range(N))
M_weight = nx.attr_matrix(self.G, edge_attr='weight', rc_order=range(N))
self.D = tuple(np.asarray(self.D).reshape(-1))
return tuple(np.asarray(M_weight).reshape(-1))
if __name__ == "__main__":
seed = 125
np.random.seed(seed)
gr = make_graph()
Adj = gr.hierarchical_modular_graph(25, 3, 0.9, 0.25, 1,
[2, 4.3, 5.7], plot_adj=True, seed=seed)
Dij = gr.D
print gr.N
np.savetxt(adrsf+"C.txt", Adj, fmt="%15.6f")
np.savetxt(adrsf+"D.txt", Dij, fmt="%15.6f")
# for row in Adj:
# for val in row:
# print '%.0f'%val,
# print
# print "\n\n"
# for row in Dij:
# for val in row:
# print '%.0f'%val,
# print
|
[
"a.ziaeemehr@gmail.com"
] |
a.ziaeemehr@gmail.com
|
a15aa5a5dc22d142e6f20c444d00b7e18ade8d57
|
9f2252f2337aec93d453c5f8344390ce140c73a8
|
/scripts/test/60sec.py
|
31082bb98016609b4b1ff981b900d662019446d3
|
[
"MIT"
] |
permissive
|
jrthornham2/eve-pi
|
051845f20d117eb930490e2341ecad134f5d3d0c
|
4ff14d3bd023d03312a67397d0bcdf1dec0afada
|
refs/heads/master
| 2020-08-07T09:01:43.118902
| 2019-09-11T10:02:32
| 2019-09-11T10:02:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 816
|
py
|
""" Vishhvaan's Test Script """
import time
import RPi.GPIO as GPIO
# setup the GPIO pins to control the pumps
P_drug_pins = [20]
P_nut_pins = [24]
P_waste_pins = [25]
#P_LED_pins = [21]
#P_fan_pins = [26]
pin_list = P_drug_pins + P_nut_pins + P_waste_pins  # one flat list of every pin
GPIO.setmode(GPIO.BCM)
for pin in pin_list:
GPIO.setup(pin, GPIO.OUT)
class Morbidostat():
def __init__(self):
GPIO.output(P_drug_pins,1)
GPIO.output(P_nut_pins,1)
GPIO.output(P_waste_pins,1)
# GPIO.output(P_LED_pins,1)
# GPIO.output(P_fan_pins,1)
time.sleep(10)
GPIO.output(P_drug_pins,0)
GPIO.output(P_nut_pins,0)
GPIO.output(P_waste_pins,0)
# GPIO.output(P_LED_pins,0)
# GPIO.output(P_fan_pins,0)
print("Done!")
GPIO.cleanup()
Morbidostat()
|
[
"vishhvaan@think.ccf.org"
] |
vishhvaan@think.ccf.org
|
ee26726bc7979d76556736dead676303b2adfeb5
|
1023b62b11b7d5a5b63c33bdd66afe692cc18724
|
/moveToS3.py
|
a546a4c8c03a81d3f61d7c2e8a1323f134e7ea8b
|
[] |
no_license
|
tomatoTomahto/finance
|
57bf41455aa8b1e4e926216febf811f10ab810fc
|
728143801e5410cc369705a54ce74b9a4e8a9e2c
|
refs/heads/master
| 2019-07-08T08:33:39.774840
| 2018-02-11T04:23:59
| 2018-02-11T04:23:59
| 88,646,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
# Move financial data collected to S3 bucket
!aws s3 mv data/historicPrices/ s3://sgupta-s3/finance/data/historicPrices/ --recursive --exclude "*" --include "*.csv"
!aws s3 mv data/historicArticles/ s3://sgupta-s3/finance/data/historicArticles/ --recursive --exclude "*" --include "*.json"
!aws s3 mv data/historicActions/ s3://sgupta-s3/finance/data/historicActions/ --recursive --exclude "*" --include "*.csv"
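# Plain-Python equivalent (a sketch, assuming the AWS CLI is on PATH; the
# '!' prefix above only works inside IPython/Jupyter). Commented out so the
# moves are not executed twice:
# import subprocess
# for src, dst, pattern in [
#     ("data/historicPrices/", "s3://sgupta-s3/finance/data/historicPrices/", "*.csv"),
#     ("data/historicArticles/", "s3://sgupta-s3/finance/data/historicArticles/", "*.json"),
#     ("data/historicActions/", "s3://sgupta-s3/finance/data/historicActions/", "*.csv"),
# ]:
#     subprocess.run(["aws", "s3", "mv", src, dst, "--recursive",
#                     "--exclude", "*", "--include", pattern], check=True)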
|
[
"sgupta@cloudera.com"
] |
sgupta@cloudera.com
|
f9d3d435d159b85e00a15eeaf592b94f1e0fdaff
|
52585c8d95cef15199c18ba1a76899d2c31329f0
|
/04Python workbook/ch4function/81Hypotenuse.py
|
30aa5a5f5b5d9bd65c241e0f5d2b3b5d8267a382
|
[] |
no_license
|
greatabel/PythonRepository
|
c7a952257303a21083ed7d535274c339362bd126
|
836fcdd3f5c1b150122302685104fe51b5ebe1a3
|
refs/heads/master
| 2023-08-30T15:56:05.376391
| 2023-08-26T03:34:14
| 2023-08-26T03:34:14
| 29,392,599
| 33
| 6
| null | 2023-02-14T13:33:21
| 2015-01-17T13:54:58
|
Python
|
UTF-8
|
Python
| false
| false
| 299
|
py
|
import math
def computeHypo(a,b):
return math.sqrt(a**2 + b**2)
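
# Quick sanity check (a sketch): the classic 3-4-5 right triangle.
assert computeHypo(3, 4) == 5.0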
line = input("Enter a,b of 2 sides of right triangle:")
while line != "":
ilist = line.split(',')
a = int(ilist[0])
b = int(ilist[1])
print(a,b)
print(computeHypo(a,b))
line = input("Enter a,b of 2 sides of right triangle:")
|
[
"greatabel1@126.com"
] |
greatabel1@126.com
|
766211d117d5391f3cc041b74b2457b5e4cf7b20
|
112df2b3b842d497b2b4bd2c79524020e08ae55c
|
/Test/checkbracket.py
|
86b5b687511701dfa993ece503726f61bb6b7835
|
[] |
no_license
|
adaick/DSA
|
3c8b4e0b4951e3339afaa1e33d530f9fc7708c55
|
70462d4e4aa41d7b8eac9cc519132bd78ca9fd21
|
refs/heads/master
| 2022-12-05T22:55:36.487548
| 2020-08-23T14:11:55
| 2020-08-23T14:11:55
| 288,778,906
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
string = input()
def stringvalid(string):
    stack = []
    invalid = 0
    mismatched = False
    for i in range(len(string)):
        if string[i] == "{" or string[i] == "(" or string[i] == "[":
            stack.append(string[i])
        # the `stack and` guards prevent an IndexError when a closer
        # arrives while the stack is empty
        elif string[i] == "}" and stack and stack[-1] == "{":
            stack.pop()
        elif string[i] == ")" and stack and stack[-1] == "(":
            stack.pop()
        elif string[i] == "]" and stack and stack[-1] == "[":
            stack.pop()
        elif string[i] == "}" or string[i] == "]" or string[i] == ")":
            invalid = i
            mismatched = True
    if stack or mismatched:
        print(invalid + 1)
    else:
        print("Success")
output = stringvalid(string)
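# Behaviour with the fixed version (a sketch):
#   "()[]{}" -> Success
#   "([)]"   -> 3  (1-based position of the mismatched closer)
#   ")"      -> 1  (a closer on an empty stack no longer raises IndexError)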
|
[
"38970527+adaick@users.noreply.github.com"
] |
38970527+adaick@users.noreply.github.com
|
b267e3bad29697150647c5f5b762f6749c2cfe51
|
05f8a84d914e67677a71b571aae330a75f01aba7
|
/femas_backend/ready_interiors/migrations/0003_interior_main_photo.py
|
67e595fba6baf312d2563f216185f474777296d5
|
[] |
no_license
|
e-kondr01/femas
|
d2ff4fc154327ec3838f5ceb7729309207455b53
|
9bb16adf719036cf2d15c71ced362faeca0143d1
|
refs/heads/main
| 2023-04-04T07:52:14.875830
| 2021-04-03T14:26:51
| 2021-04-03T14:26:51
| 345,085,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
# Generated by Django 3.1.7 on 2021-04-01 15:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ready_interiors', '0002_auto_20210331_2107'),
]
operations = [
migrations.AddField(
model_name='interior',
name='main_photo',
field=models.ImageField(default='kek', upload_to='interior_photos', verbose_name='основное изображение'),
preserve_default=False,
),
]
|
[
"e.kondr01@gmail.com"
] |
e.kondr01@gmail.com
|
49b82ac63b1054b70755bb40c09c27c7528980fd
|
73693c590c20d75103c94e281e295d90553a9a4b
|
/item_functions.py
|
651ea731f144833647f1dc091e509c16eb1bfde2
|
[] |
no_license
|
gyrobone/Untitled-Roguelike-Game
|
c73ea159b13a8dcd7953ecf34a76915f6bf545da
|
a9b703a2c0630bfcd94ce876b4e766ba6aa54649
|
refs/heads/master
| 2020-12-24T00:49:48.706284
| 2020-01-31T00:18:18
| 2020-01-31T00:18:18
| 237,328,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,557
|
py
|
import tcod as libtcod
from game_messages import Message
from components.ai import ConfusedMonster
def heal(*args, **kwargs):
entity = args[0]
amount = kwargs.get('amount')
results = []
if entity.fighter.hp == entity.fighter.max_hp:
results.append({'consumed': False, 'message': Message('You are already at full health', libtcod.yellow)})
else:
entity.fighter.heal(amount)
results.append({'consumed': True, 'message': Message('Your wounds start to feel better!', libtcod.green)})
return results
def cast_lightning(*args, **kwargs):
caster = args[0]
entities = kwargs.get('entities')
fov_map = kwargs.get('fov_map')
damage = kwargs.get('damage')
maximum_range = kwargs.get('maximum_range')
results = []
target = None
closest_distance = maximum_range + 1
for entity in entities:
if entity.fighter and entity != caster and libtcod.map_is_in_fov(fov_map, entity.x, entity.y):
distance = caster.distance_to(entity)
if distance < closest_distance:
target = entity
closest_distance = distance
if target:
        results.append({'consumed': True, 'target': target, 'message': Message('A lightning bolt strikes the {0} with a loud thunder! The damage is {1}'.format(target.name, damage))})
results.extend(target.fighter.take_damage(damage))
else:
results.append({'consumed': False, 'target': None, 'message': Message('No enemy is close enough to strike.', libtcod.red)})
return results
def cast_fireball(*args, **kwargs):
entities = kwargs.get('entities')
fov_map = kwargs.get('fov_map')
damage = kwargs.get('damage')
radius = kwargs.get('radius')
target_x = kwargs.get('target_x')
target_y = kwargs.get('target_y')
results = []
if not libtcod.map_is_in_fov(fov_map, target_x, target_y):
results.append({'consumed': False, 'message': Message('You cannot target a tile outside your field of view.', libtcod.yellow)})
return results
results.append({'consumed': True, 'message': Message('The fireball explodes, burning everything within {0} tiles!'.format(radius), libtcod.orange)})
for entity in entities:
if entity.distance(target_x, target_y) <= radius and entity.fighter:
results.append({'message': Message('The {0} gets burned for {1} hit points.'.format(entity.name, damage), libtcod.orange)})
results.extend(entity.fighter.take_damage(damage))
return results
def cast_confuse(*args, **kwargs):
entities = kwargs.get('entities')
fov_map = kwargs.get('fov_map')
target_x = kwargs.get('target_x')
target_y = kwargs.get('target_y')
results = []
if not libtcod.map_is_in_fov(fov_map, target_x, target_y):
results.append({'consumed': False, 'message': Message('You cannot target a tile outside your field of view.', libtcod.yellow)})
return results
for entity in entities:
if entity.x == target_x and entity.y == target_y and entity.ai:
confused_ai = ConfusedMonster(entity.ai, 10)
confused_ai.owner = entity
entity.ai = confused_ai
results.append({'consumed': True, 'message': Message('The eyes of the {0} look vacant, as he starts to stumble around!'.format(entity.name), libtcod.light_green)})
break
else:
results.append({'consumed': False, 'message': Message('There is no targetable enemy at that location.', libtcod.yellow)})
return results
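# How a caller might consume these result dicts (a sketch; message_log,
# inventory and item are hypothetical stand-ins for the engine objects
# that normally handle this):
# for result in heal(player, amount=4):
#     if result.get('message'):
#         message_log.add_message(result['message'])
#     if result.get('consumed'):
#         inventory.remove_item(item)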
|
[
"waylonmtroyer@gmail.com"
] |
waylonmtroyer@gmail.com
|
d966c46dedc07657fff4ee3b8a088338fd5a7f54
|
d2eeed8cd13c53571385a70861183b722c75c33c
|
/website/api/urls.py
|
2d85a306c5c708a5f19e4b8177113994577ea54b
|
[] |
no_license
|
4rt3mi5/net-device-manager
|
5765ab16bd32a38d458893fb401816f3c670330e
|
62227897cae26f46ec21fa179aabb06075fde6e9
|
refs/heads/master
| 2020-05-15T11:27:27.507159
| 2019-04-19T03:28:36
| 2019-04-19T03:28:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,686
|
py
|
from django.conf.urls import url
from api.views import device, ap, config, monitor, users, node, alert
urlpatterns = [
url(r'^device/export$', device.DevicedExportView.as_view()),
url(r'^device/import$', device.DeviceImportView.as_view()),
url(r'^ap/search-client$', ap.SearchClientAddress.as_view()),
url(r'^ap/reboot$', ap.RebootAP.as_view()),
url(r'^ap/list$', ap.APlist.as_view()),
url(r'^config/running/(?P<pk>.+)$', config.GetRunningConfig.as_view()),
url(r'^config/get-detail/(?P<pk>.+)$', config.GetConfigDetail.as_view()),
url(r'^config/diff$', config.DiffContext.as_view()),
url(r'^monitor/get-port/(?P<pk>.+)$', monitor.GetPort.as_view()),
url(r'^monitor/bound-port/(?P<pk>.+)$', monitor.BoundPort.as_view()),
url(r'^monitor/new-data/(?P<pk>.+)$', monitor.GetNewMonitorData.as_view()),
url(r'^monitor/get-range/(?P<pk>.+)$', monitor.GetDataRange.as_view()),
url(r'^monitor/get-port-data/(?P<pk>.+)$', monitor.GetPortData.as_view()),
url(r'^monitor/search-time/(?P<pk>.+)$', monitor.SearchTimeRange.as_view()),
url(r'^monitor/tereshold/(?P<pk>.+)$', monitor.ChangeThreshold.as_view()),
url(r'^monitor/set-clean-data$', monitor.CleanData.as_view()),
url(r'^d-user/get/(?P<pk>.+)$', users.GetUsers.as_view()),
url(r'^d-user/delete/(?P<pk>.+)$', users.DeleteUser.as_view()),
url(r'^d-user/create$', users.CreateUsers.as_view()),
url(r'^d-user/change-password/(?P<pk>.+)$', users.ChangePassword.as_view()),
url(r'^node/getall$', node.GetNodeStatus.as_view()),
url(r'^alert-user/getall$', alert.AlertUsers.as_view()),
url(r'^alert-user/delete$', alert.DeleteAlertUser.as_view()),
]
|
[
"912530326@qq.com"
] |
912530326@qq.com
|
85ecbe867861817296bd4a9f30dde99b4e79e9b3
|
5e3a39a7c26559ffb196405168b225845b3d5549
|
/API/create-shipment/create-shipment.py
|
f752642ff6e02f931d8e9f5e56911ec7fe2cd93f
|
[] |
no_license
|
OwenShuaiLiu/Boomerang-dev
|
8771181cbf5ab8b4042660557af1cde2dafc4a37
|
061abb890ede7586e4ac54e68a89f9c1787042a4
|
refs/heads/master
| 2023-06-02T23:27:52.151022
| 2021-06-29T07:19:43
| 2021-06-29T07:19:43
| 381,274,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,206
|
py
|
from flask import Flask
from flask import request
from flask import Response
from flask import abort
from flask import jsonify
#import jsonpickle
import json
import requests
import easypost
easypost.api_key = 'EZAK35b0cd3c664944bcac801897af82af4c2TadDMm8yKVzpONQOa9WEA'
app = Flask(__name__)
# create a shipment
@app.route('/create_shipment', methods=["POST"])
def create_shipment():
if request.headers['Content-Type'] == 'application/json':
arguments = request.get_json()
user_email_addr = arguments.get("email_addr")
user_addr_1 = arguments.get("addr_1")
user_addr_2 = arguments.get("addr_2")
user_city = arguments.get("city")
user_state = arguments.get("state")
user_zip_code = arguments.get("zip_code")
user_phone = arguments.get("phone")
product_weight = float(arguments.get("weight"))
to_address = easypost.Address.create(
verify=["delivery"],
street1="2110 HASTE ST APT 425",
street2="",
city="Berkeley",
state="CA",
zip="94704-2084",
country="US",
company="Boomerang Recommerce",
phone= '6692819516',
email= 'support@boomerang.fashion'
)
from_address = easypost.Address.create(
verify=["delivery"],
street1=user_addr_1,
street2=user_addr_2,
city=user_city,
state=user_state,
zip=user_zip_code,
country="US",
phone= user_phone,
email= user_email_addr
)
shipment = easypost.Shipment.create(
to_address = to_address,
from_address = from_address,
parcel={
"length": 20.2,
"width": 10.9,
"height": 5,
"weight": product_weight * 16
}
)
shipment.buy(rate = shipment.lowest_rate())
#data = jsonpickle.encode(shipment)
#return data
label_tracker = {}
label_tracker["label"]=shipment["postage_label"]["label_url"]
label_tracker["tracker"]=shipment["tracker"]["public_url"]
label_tracker["tracking_code"]=shipment["tracker"]["tracking_code"]
data = json.dumps(label_tracker)
return data
if __name__ == '__main__':
app.run(host='0.0.0.0')
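# Example client request (a sketch; assumes the server above is running on
# Flask's default port 5000 and field names match the arguments.get calls):
# import requests
# payload = {"email_addr": "user@example.com", "addr_1": "1 Main St",
#            "addr_2": "", "city": "Berkeley", "state": "CA",
#            "zip_code": "94704", "phone": "5551234567", "weight": "1.5"}
# print(requests.post("http://localhost:5000/create_shipment", json=payload).text)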
|
[
"owen.shuai.liu@berkeley.edu"
] |
owen.shuai.liu@berkeley.edu
|
ed8058178baa3405ccdf616eec84c3f8039d646c
|
68cf7c25bb614883c50d21e5051fbea8dbf18ccb
|
/ecommercejockey/shopify/migrations/0009_shopifyproduct_scope.py
|
0395e9379e526eec3588d09d881985920d936520
|
[
"MIT"
] |
permissive
|
anniethiessen/ecommerce-jockey
|
63bf5af6212a46742dee98d816d0bc2cdb411708
|
9268b72553845a4650cdfe7c88b398db3cf92258
|
refs/heads/master
| 2022-12-14T02:29:25.140796
| 2021-05-15T01:20:30
| 2021-05-15T01:20:30
| 211,400,595
| 1
| 1
|
MIT
| 2022-12-08T06:45:40
| 2019-09-27T20:57:19
|
Python
|
UTF-8
|
Python
| false
| false
| 458
|
py
|
# Generated by Django 2.2.5 on 2019-11-07 00:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shopify', '0008_shopify'),
]
operations = [
migrations.AlterField(
model_name='shopifyproduct',
name='published_scope',
field=models.CharField(choices=[('web', 'web'), ('global', 'global')], default='global', max_length=10),
),
]
|
[
"anniethiessen79@gmail.com"
] |
anniethiessen79@gmail.com
|
39994212c7b49b42596056952e9ff3bcc8f4a395
|
175f6790746f0216b7616ce32eca2e4772a138e8
|
/Forritun/quiz5.py
|
1ba5d2bc97491898ffda6c116f25e076671a5b7f
|
[] |
no_license
|
Astthorarnorsson/M
|
14a1b3602fc3b03e02c25d4d5006465fd7027528
|
cf4685f538a94028fb592384d6b5aa6a3aa6f55f
|
refs/heads/master
| 2022-12-10T08:00:03.819454
| 2020-09-03T15:21:02
| 2020-09-03T15:21:02
| 292,607,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
#max_int = 0
#while True:
# num_int = int(input("Input a number: "))
# if num_int < 0:
# break
# max_int = max(num_int,max_int)
#
#print("The maximum is", max_int)
n = int(input("Enter the length of the sequence: "))
i = 0
summa = 0
tala1 = 0
tala2 = 0
tala3 = 0
for i in range(1 , n+1):
if i == 1:
tala1 = i
print(tala1)
elif i == 2:
tala2 = i
print(tala2)
elif i == 3:
tala3 = i
print(tala3)
else:
summa = tala1 + tala2 + tala3
tala1 = tala2
tala2 = tala3
tala3 = summa
print(summa)
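
# An equivalent compact form (a sketch): keep a sliding window of the last
# three terms instead of printing inside the branches.
def tribonacci_like(n):
    seq = []
    a, b, c = 1, 2, 3
    for i in range(n):
        if i < 3:
            seq.append((1, 2, 3)[i])
        else:
            a, b, c = b, c, a + b + c
            seq.append(c)
    return seq

assert tribonacci_like(5) == [1, 2, 3, 6, 11]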
|
[
"Astiarnars@Astors-MacBook-Pro-3.local"
] |
Astiarnars@Astors-MacBook-Pro-3.local
|
9841160271972f0812ddbfef10e39cb7fa73dc89
|
eebecfd8a1a01602a6ee7de380fb6d6e17cc63ad
|
/SimpleIRCClient.py
|
855fb6086769a188c7e74c717ceb9d86efc15e3d
|
[] |
no_license
|
LiorMoshe/TeamSoB
|
f4bdadf78a0cc8602fc64b0800dac3315d392028
|
5ff9520920773f6ab9cc68617c4ef7d0a781ef57
|
refs/heads/master
| 2021-01-21T18:50:11.053412
| 2017-06-07T14:24:41
| 2017-06-07T14:24:41
| 92,080,572
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 963
|
py
|
import socket
import time
import random
server_ip = 'localhost'
server_port = 6667
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
irc.connect((server_ip, server_port))
botnick = 'bot1'
irc.send("USER " + botnick + " 127.0.0.1 " + botnick + " :This is a fun bot!\n") # user authentication
irc.send("NICK " + botnick + "\n")
channel = '#testit'
irc.send("JOIN " + channel + "\n") # join the chan
while True:
    time.sleep(4)
    irc.send("PRIVMSG " + channel + " Hiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii" + "\n")  # send a message to the channel
    #time.sleep(3)
    irc.send("PRIVMSG " + channel + " BLABLALBLALBLALBLALBLALBLALBLALABLLALBLA" + "\n")  # send a message to the channel
    #time.sleep(3)
    irc.send("PRIVMSG " + channel + " ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "\n")  # send a message to the channel
    #time.sleep(3)
    irc.send("PRIVMSG " + channel + " ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ" + "\n")  # send a message to the channel
    time.sleep(random.random() * 4)
|
[
"noreply@github.com"
] |
LiorMoshe.noreply@github.com
|
297dce2ea9a7e15cfe578ef0ef517716996bd895
|
e51a78cda92aeeba240daecc3808932a6dc1de92
|
/lab3/chunker.py
|
52a2612b9f94bc8feed7ca818877f8a99907da9e
|
[] |
no_license
|
razofz/EDAN20_mnx10jpa_muh11rol
|
db445589a12a15bae47d3c3b7c2a17c63ea09de9
|
bc8142dbc0db55d03cfe8297e0375edd8a7ee228
|
refs/heads/master
| 2021-07-21T20:10:58.625445
| 2017-10-31T15:27:08
| 2017-10-31T15:27:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,468
|
py
|
"""
Baseline chunker for CoNLL 2000
"""
__author__ = "Pierre Nugues"
import conll_reader
def count_pos(corpus):
"""
Computes the part-of-speech distribution
in a CoNLL 2000 file
:param corpus:
:return:
"""
pos_cnt = {}
for sentence in corpus:
for row in sentence:
if row['pos'] in pos_cnt:
pos_cnt[row['pos']] += 1
else:
pos_cnt[row['pos']] = 1
return pos_cnt
def train(corpus):
"""
Computes the chunk distribution by pos
The result is stored in a dictionary
:param corpus:
:return:
"""
pos_cnt = count_pos(corpus)
# We compute the chunk distribution by POS
chunk_dist = {key: {} for key in pos_cnt.keys()}
"""
chunk_dist = {
'VBG':
{},
'TO':
{},
'NNP':
{},
...
}
Fill in code to compute the chunk distribution for each part of speech
corpus =
('form', 'pos', 'chunk')
"Confidence NN B-NP
in IN B-PP
the DT B-NP
pound NN I-NP
is VBZ B-VP
widely RB I-VP
expected VBN I-VP
to TO I-VP
take VB I-VP
another DT B-NP
sharp JJ I-NP
dive NN I-NP
if IN B-SBAR
trade NN B-NP
figures NNS I-NP
for IN B-PP
September NNP B-NP
, , O
due JJ B-ADJP
..."
"""
for sentence in corpus:
for row in sentence:
if row['chunk'] in chunk_dist[row['pos']]:
chunk_dist[row['pos']][row ['chunk']] += 1
else:
chunk_dist[row['pos']][row ['chunk']] = 1
"""
chunk_dist = {
'VBG':
{ 'I-ADJP': 53, 'B-PP': 16, ..},
'TO':
{ 'O': 7403, 'B-SBAR': 64, ..},
'NNP':
{ 'B-VP': 81, ..},
...
}
Fill in code so that for each part of speech, you select the most frequent chunk.
You will build a dictionary with key values:
pos_chunk[pos] = most frequent chunk for pos
"""
# We determine the best association
pos_chunk = {}
counts = []
for pos in chunk_dist:
chunktags_occ = dict(chunk_dist[pos].items())
# for i in range(len(chunktags_occ)):
# if ()
# counts.append(val)
# max(counts)
#print(chunktags_occ)
print(pos, max(chunktags_occ, key = chunktags_occ.get), chunktags_occ[max(chunktags_occ, key = chunktags_occ.get)])
pos_chunk[pos] = max(chunktags_occ, key = chunktags_occ.get)
return pos_chunk
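    # Equivalent one-liner for the selection loop above (a sketch):
    # pos_chunk = {pos: max(chunk_dist[pos], key=chunk_dist[pos].get)
    #              for pos in chunk_dist}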
def predict(model, corpus):
"""
Predicts the chunk from the part of speech
Adds a pchunk column
:param model:
:param corpus:
:return:
"""
"""
We add a predicted chunk column: pchunk
"""
for sentence in corpus:
for row in sentence:
row['pchunk'] = model[row['pos']]
return corpus
def eval(predicted):
"""
Evaluates the predicted chunk accuracy
:param predicted:
:return:
"""
word_cnt = 0
correct = 0
for sentence in predicted:
for row in sentence:
word_cnt += 1
if row['chunk'] == row['pchunk']:
correct += 1
return correct / word_cnt
if __name__ == '__main__':
column_names = ['form', 'pos', 'chunk']
train_file = './train.txt'
test_file = './test.txt'
train_corpus = conll_reader.read_sentences(train_file)
train_corpus = conll_reader.split_rows(train_corpus, column_names)
test_corpus = conll_reader.read_sentences(test_file)
test_corpus = conll_reader.split_rows(test_corpus, column_names)
#print(test_corpus)
model = train(train_corpus)
print('after train')
predicted = predict(model, test_corpus)
accuracy = eval(predicted)
print("Accuracy", accuracy)
f_out = open('out_baseline', 'w')
# We write the word (form), part of speech (pos),
# gold-standard chunk (chunk), and predicted chunk (pchunk)
for sentence in predicted:
for row in sentence:
"""print(row['form'] + ' ' + row['pos'] + ' ' +
row['chunk'] + ' ' + row['pchunk'] + '\n')"""
f_out.write(row['form'] + ' ' + row['pos'] + ' ' +
row['chunk'] + ' ' + row['pchunk'] + '\n')
f_out.write('\n')
f_out.close()
"""
bygg modell > träna modellen med train-setet > förutsäg med modellen på test-setet
a b c d e
a b c d E
a b c D e
a b C d e
..
"""
|
[
"rasmus.olofzon@gmail.com"
] |
rasmus.olofzon@gmail.com
|
ac64c09a32db5214ffbca716aca0b3f0fcff5f9d
|
e4eecce9ce98c4248802bd137d6f99312853e5f9
|
/风变编程/第12关/chengjidan.py
|
a7781c5d66da2168a19b98bdc717a4e9c487281c
|
[] |
no_license
|
huqiang252/learngit
|
d74d11b3da7cf9c683dcd449761e201770fa68ce
|
71bb541aad9f85d48d86e20bc1198c901fff856f
|
refs/heads/master
| 2023-08-11T20:44:29.918533
| 2021-10-07T08:07:04
| 2021-10-07T08:07:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,125
|
py
|
# -*- coding: utf-8 -*-
class 成绩单():
@classmethod
def 录入成绩单(cls):
cls.学生姓名 = input('请输入学生姓名:')
cls.语文_成绩 = int(input('请输入语文成绩:'))
cls.数学_成绩 =int(input('请输入数学成绩:'))
@classmethod
def 打印成绩单(cls):
print(cls.学生姓名 + '的成绩单如下:')
print('语文成绩:' + str(cls.语文_成绩))
print('数学成绩:' + str(cls.数学_成绩))
@classmethod
def 打印平均分(cls):
cls.平均成绩=float((cls.语文_成绩+cls.数学_成绩)/2)
return cls.平均成绩
@classmethod
def 评级(cls):
        # call the class's other methods
cls.打印平均分()
print('两科的平均成绩:' + str(cls.平均成绩))
if cls.平均成绩>=90:
print('优')
elif cls.平均成绩>=80:
print('良好')
elif cls.平均成绩>=70:
print('一般')
else:
print('差')
成绩单.录入成绩单()
成绩单.打印成绩单()
成绩单.评级()
|
[
"huqiang252@gmail.com"
] |
huqiang252@gmail.com
|
d2ef11cfe3f03be927efeef9d1d8f3fac615bbde
|
25c82203d00b28330fabd41e0c9d11b1dc8dcdeb
|
/tasks/task_5/models_1/svm.py
|
8e8e4eb76f3918b5cd67012979514c329161cd36
|
[] |
no_license
|
roma-patel/ambiguity
|
2433982514410f398d888b93abb2c30f0f8be5ea
|
2ffc7ed059b59ca56ea6504a5a29707ebf0f8d36
|
refs/heads/master
| 2018-09-20T07:18:20.367439
| 2018-06-06T12:24:56
| 2018-06-06T12:24:56
| 120,149,774
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,566
|
py
|
from collections import defaultdict, namedtuple, Counter
import sys
import logging
import json
import numpy as np
import re
from sklearn import svm
from sklearn.svm import LinearSVC
import pickle
import os
path = '/nlp/data/romap/law/task_4/models_1/shell/'
def createUnigramFeat(word_vecs):
all_feat = []
for word_vec in word_vecs:
all_feat.append(word_vec)
return all_feat
def prepare_features(X_train, y_train, X_test, y_test, fold, test_words):
feat_path = path + set_name + '/features/feat/' + str(fold) + '/'
if os.path.isdir(feat_path) is False: os.mkdir(feat_path)
#unigram features
try:
fileObject = open(feat_path + "/train_unigram_feat.p", "r")
train_unigram_feat = pickle.load(fileObject)
fileObject.close()
except IOError as e:
train_unigram_feat = createUnigramFeat(X_train)
fileObject = open(feat_path + "/train_unigram_feat.p", "wb")
pickle.dump(train_unigram_feat, fileObject)
fileObject.close()
try:
fileObject = open(feat_path + "/test_unigram_feat.p", "r")
test_unigram_feat = pickle.load(fileObject)
fileObject.close()
except IOError as e:
test_unigram_feat = createUnigramFeat(X_test)
fileObject = open(feat_path + "/test_unigram_feat.p", "wb")
pickle.dump(test_unigram_feat, fileObject)
fileObject.close()
print 'Unigram done!'
print 'Stacking features!'
rows = len(y_train)
all_train_feat = np.reshape(train_unigram_feat, (rows, -1));
rows = len(y_test)
all_test_feat = np.reshape(test_unigram_feat, (rows, -1))
print 'Running classifier!'
dirpath = path + set_name + '/' + 'results/' + model_name + '/'
if os.path.isdir(dirpath) is False: os.mkdir(dirpath)
run_classifier(test_words, all_train_feat, y_train, all_test_feat, y_test,
dirpath + '/results-' + str(fold) + '.txt')
print 'Done!'
def run_classifier(test_words, X_train, y_train, X_test, y_test, predicted_labels_file):
svc = svm.SVC(decision_function_shape = 'ovo')
svc.fit(X_train, y_train)
y_pred = svc.predict(X_test)
confusion = []
with open(predicted_labels_file, "w") as f_out:
for i in range(len(y_pred.tolist())):
label, true_label = y_pred.tolist()[i], y_test[i]
f_out.write(str(label) + ' ' + str(true_label) + ' ' + test_words[i] +"\n")
def get_embeddings(vocab, wvec_path):
word_vecs = {}
if wvec_path[-3:] == 'bin':
with open(wvec_path, "rb") as f:
header = f.readline()
vocab_size, layer1_size = map(int, header.split())
binary_len = np.dtype('float32').itemsize * layer1_size
for line in xrange(vocab_size):
word = []
while True:
ch = f.read(1)
if ch == ' ':
word = ''.join(word)
break
if ch != '\n':
word.append(ch)
if word in vocab:
word_vecs[word] = np.fromstring(f.read(binary_len), dtype='float32')
else:
f.read(binary_len)
elif wvec_path[-3:] == 'txt':
f = open(wvec_path, 'r'); lines = f.readlines()[1:]
for line in lines:
items = line.strip().split(' ')
word, vec = items[0], [float(item) for item in items[1:]]
if word in vocab: word_vecs[word] = vec
return word_vecs
def get_datasets():
f = open('/nlp/data/romap/law/word_labels_' + set_name + '_3.tsv', 'r')
data, tokens, tags = [], [], []
prev, cur = 1, 1
lines = f.readlines()
for line in lines:
items = line.strip().split('\t')
cur = int(items[4])
if cur == 1: prev = 1
if cur > prev:
data.append((tokens, tags))
tokens, tags, prev = [], [], cur
else: tokens.append(items[0]); tags.append(items[1])
return data
def prepare_data(fold, data, word_vecs):
bin_val = len(data)/1000 + 1
folds = [data[i: i+bin_val] for i in range(0, len(data), bin_val)]
testing_data = folds[fold]; training_data = [folds[i] for i in range(0, 10) if i != fold]
training_data = [item for sublist in training_data for item in sublist]
X_train = [item[0] for item in training_data]; y_train = [item[1] for item in training_data]
X_test = [item[0] for item in testing_data]; y_test = [item[1] for item in testing_data]
X_train = [item for sublist in X_train for item in sublist]; y_train = [item for sublist in y_train for item in sublist]
X_test = [item for sublist in X_test for item in sublist]; y_test = [item for sublist in y_test for item in sublist]
train_words = X_train; test_words = [item for item in X_test]
for i in range(len(X_train)):
if X_train[i] in word_vecs.keys():
X_train[i] = word_vecs[X_train[i]]
else:
X_train[i] = np.array([np.random.rand(1)[0] for j in range(0, 300)], dtype='float32')
for i in range(len(X_test)):
if X_test[i] in word_vecs.keys():
X_test[i] = word_vecs[X_test[i]]
else:
X_test[i] = np.array([np.random.rand(1)[0] for j in range(0, 300)], dtype='float32')
    tag_to_idx = {'D': 0, 'G': 1, 'O': 2}
    tag_to_idx = {'D': 0, 'G': 1, 'O': 1}  # binary setup: overrides the 3-class map above
y_train = [tag_to_idx[tag] for tag in y_train]
y_test = [tag_to_idx[tag] for tag in y_test]
return X_train, y_train, X_test, y_test, train_words, test_words
if __name__ == '__main__':
global set_name, wvec_path, model_name
wvec_paths = {'google': '/nlp/data/corpora/GoogleNews-vectors-negative300.bin',
'legal': '/nlp/data/romap/ambig/w2v/w2v100-300.txt',
'concept': '/nlp/data/romap/conceptnet/numberbatch-en-17.06.txt'
}
set_name = sys.argv[1]; wvec_path = wvec_paths[set_name]; model_name = sys.argv[2]
data = get_datasets()
X = [item[0] for item in data]; y = [item[1] for item in data]
X = [item for sublist in X for item in sublist]; y = [item for sublist in y for item in sublist]
vocab = list(set(X));
word_vecs = get_embeddings(vocab, wvec_path)
for fold in range(0, 10):
print fold
X_train, y_train, X_test, y_test, train_words, test_words = prepare_data(fold, data, word_vecs)
prepare_features(X_train, y_train, X_test, y_test, fold, test_words)
|
[
"romapatel996@gmail.com"
] |
romapatel996@gmail.com
|
e58d3c303fd246494e2371c64614975ebfadffaf
|
9c5bcd615c7ec343034ca745fce71105a0e81f50
|
/Target_App/firstFrame.py
|
0c967141ed490f86bc1d79943ccea91b9ce3a46c
|
[] |
no_license
|
aryanguptaSG/python_projects
|
05fb0e685d3333a88234f22c6517fd77e48dc63b
|
20748908e0f6a57e73b580051ee7a246a5c47f60
|
refs/heads/master
| 2023-06-20T11:14:55.165294
| 2021-07-14T03:21:50
| 2021-07-14T03:21:50
| 298,515,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,011
|
py
|
from secondFrame import *
global Name
global root
def addtarget(target, root, name):
    if str(target).strip():  # only add non-empty targets
        insertTarget(target)
        root.destroy()
        firstFrame(name=name)
def taketargetFrame(root,name,frame=None):
if frame:
frame.destroy()
root.config(menu='none')
new = Frame(root)
new.pack(fill=BOTH,expand=True)
Label(new,text="Please add your target ..",font="cursive 25 bold").pack(pady=30)
taketarget = Frame(new)
taketarget.pack(pady=100)
Label(taketarget,text="Enter Your target ..",font="cursive 15 bold").grid(row=0,column=0)
target_box = Entry(taketarget,font="cursive 15 bold",bg="lightgray")
target_box.grid(row=0,column=1)
Button(taketarget,text="Add",font="cursive 15 bold",command= lambda:addtarget(target_box.get(),root,name)).grid(row=1,column=2,pady=10)
image = Image.open("back1.png")
resize_image = image.resize((20, 20))
img = ImageTk.PhotoImage(resize_image)
Button(new,image=img,command= lambda:firstFrame(frem=root,name=name)).place(x=20,y=20)
mainloop()
def opensubject(event):
var = event.widget.curselection()[0]
target = event.widget.get(var).strip()
secondFrame(root,Name,target)
print(target,Name)
def firstFrame(frem=None,name=None):
global Name
global root
Name=name
if frem:
frem.destroy()
targets = list()
for i in getAlltarget():
targets.append(i[0])
root = Tk()
root.title("Target App")
root.geometry("800x500+300-100")
root.minsize(800,500)
root.maxsize(800,500)
if len(targets)>0:
frame = Frame(root)
frame.pack(fill=BOTH,expand=True)
main_menu = Menu(root)
root.config(menu=main_menu)
target_menu = Menu(main_menu)
main_menu.add_cascade(label="Target",menu=target_menu)
target_menu.add_command(label="Add New Target +",command= lambda:taketargetFrame(root = root,frame=frame,name=name))
Label(frame,text="Hey "+name+" Thses Are Your Targets ",font="cursive 25 bold").pack(side=TOP,fill=X,pady=20)
box = Listbox(frame,height=18,font="cursive 20 ",activestyle='none')
box.pack(fill=X,padx=10,pady=10)
space = " "
for i in targets:
box.insert(END,"")
box.insert(END,space+i)
box.bind("<Double-1>",opensubject)
box.bind("<<ListboxSelect>>",clear)
else:
Label(root,text="Please add your target ..",font="cursive 25 bold").pack(pady=30)
taketarget = Frame(root)
taketarget.pack(pady=100)
Label(taketarget,text="Enter Your target ..",font="cursive 15 bold").grid(row=0,column=0)
target_box = Entry(taketarget,font="cursive 15 bold",bg="lightgray")
target_box.grid(row=0,column=1)
Button(taketarget,text="Add",font="cursive 15 bold",command= lambda:addtarget(target_box.get(),root,name)).grid(row=1,column=2,pady=10)
mainloop()
|
[
"aryan31102001@gmail.com"
] |
aryan31102001@gmail.com
|
766bfa670c18ac551af9a2429bd8b38581866e60
|
2b4955fe21e4c4b1c4689ac7100ddf9ca9034fde
|
/contrib/pyminer/pyminer.py
|
b1c80ef460e70a310c112a2c88424bac2e5927a2
|
[
"MIT"
] |
permissive
|
charitypub/Sourcecode
|
977c83a8a4e7e3af3ce2fc0d892fb2fbb75ee369
|
95583c6755ee337ca68a053b0a23a381ec843a9f
|
refs/heads/master
| 2020-03-10T08:19:05.429568
| 2018-04-13T08:38:05
| 2018-04-13T08:38:05
| 129,282,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,435
|
py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 36304
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
[
"81852521@qq.com"
] |
81852521@qq.com
|
4b7dc7e61c46c44cb0de31a192bc3482c88607d3
|
1786a416a81ef7046314949efda2af0532dfe9d5
|
/tests/mask_test.py
|
6e4692eace346094bc1911cb089f93f0c856c366
|
[] |
no_license
|
KiyoshiMu/cpp_bag
|
aa06d199e1e6409bb52af25dbed0e854f72840af
|
55c037c757662a1bf7fc400415a3b1c88f0c6ea3
|
refs/heads/master
| 2023-04-18T21:24:17.875783
| 2022-11-22T20:58:39
| 2022-11-22T20:58:39
| 462,886,870
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
from __future__ import annotations
import numpy as np
import torch
def test_mask_clone():
tensor = torch.rand(5, 10)
mask_tensor = torch.zeros(10)
mask = torch.tensor([True, False, True, False, True])
masked = tensor.clone()
masked[mask] = mask_tensor
print(masked)
print(tensor)
assert tensor.sum() != masked.sum()
def test_mask_flag():
mask_flag = np.zeros(5468, dtype=bool)
mask_flag[np.random.choice(5468, size=1000, replace=False)] = True
assert mask_flag.sum() == 1000
|
[
"mooyewtsing@gmail.com"
] |
mooyewtsing@gmail.com
|
52d71a69fed483d5ad50824b86faf5ee2490319f
|
d959becea64f0960110ae8ef8cf8c765a1509857
|
/xxSite/settings.py
|
81be9a347aaf8e3e2ec3caca8d92c95349195b62
|
[] |
no_license
|
yanhaocn2000/xxSite
|
80c423565506b72d6b6b165c500b5a741f190bbf
|
aa7e5e2e9a5eca64af7874afdc031b5098eaffe0
|
refs/heads/master
| 2020-05-28T08:20:08.611485
| 2013-12-29T14:53:51
| 2013-12-29T14:53:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,984
|
py
|
"""
Django settings for xxSite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@5v#vlcbt$1+@q#tya3d-7v+f5%xygz^5(!z#9rjnx7%(mu$ap'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'News',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'xxSite.urls'
WSGI_APPLICATION = 'xxSite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
[
"yanhaocn2000@163.com"
] |
yanhaocn2000@163.com
|
f6432f9196a0e19c150d1c90fa184eefa11fe378
|
04603f8b493fad6fee33c54b082b52075070f81e
|
/if_and_or_test.py
|
7fca125ac44a21cb6aad083c07831aa6d16315f2
|
[] |
no_license
|
KIMSEUNGHYUN-eng/python_lecture
|
565b110b2f7000d42e31f9c6b12550b5d2918b59
|
b8ff4f0c1e8ec0530a92f999b6d8b064ec0bf347
|
refs/heads/main
| 2023-04-25T17:59:55.231426
| 2021-05-25T08:37:03
| 2021-05-25T08:37:03
| 370,264,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
a = 10
b = 13
if (a % 2 == 0) and (b % 2 == 0):
    print("Both numbers are even.")
if (a % 2 == 0) or (b % 2 == 0):
    print("At least one of the two numbers is even.")
|
[
"you@example.com"
] |
you@example.com
|
a122d96f4d6c40d8b69a2766711d05de77787cdf
|
d9f0a280abcfe0efa65d4f9e681db6f909c587af
|
/mae_envs/wrappers/line_of_sight.py
|
2f63d9da176ae04d569387376904c3bd0339f96c
|
[
"MIT"
] |
permissive
|
bowenbaker/multi-agent-emergence-environments
|
5222972b89ff2afd4f6ef7c39f75b44daffcdbdc
|
85d2167999110edc6c975d1f81ab7ec8ff39a809
|
refs/heads/master
| 2023-01-05T14:33:10.698645
| 2020-11-04T17:19:58
| 2020-11-04T17:19:58
| 308,440,938
| 1
| 0
|
MIT
| 2020-11-04T17:19:59
| 2020-10-29T20:22:11
| null |
UTF-8
|
Python
| false
| false
| 5,112
|
py
|
import gym
import numpy as np
from mae_envs.util.vision import insight, in_cone2d
from mae_envs.wrappers.util import update_obs_space
class AgentAgentObsMask2D(gym.ObservationWrapper):
""" Adds an mask observation that states which agents are visible to which agents.
Args:
cone_angle: (float) the angle in radians btw the axis and edge of the observation cone
"""
def __init__(self, env, cone_angle=3/8 * np.pi):
super().__init__(env)
self.cone_angle = cone_angle
self.n_agents = self.unwrapped.n_agents
self.observation_space = update_obs_space(env, {'mask_aa_obs': (self.n_agents, self.n_agents)})
def observation(self, obs):
# Agent to agent obs mask
agent_pos2d = obs['agent_pos'][:, :-1]
agent_angle = obs['agent_angle']
cone_mask = in_cone2d(agent_pos2d, np.squeeze(agent_angle, -1), self.cone_angle, agent_pos2d)
# Make sure they are in line of sight
for i, j in np.argwhere(cone_mask):
if i != j:
cone_mask[i, j] = insight(self.unwrapped.sim,
self.metadata['agent_geom_idxs'][i],
self.metadata['agent_geom_idxs'][j])
obs['mask_aa_obs'] = cone_mask
return obs
class AgentSiteObsMask2D(gym.ObservationWrapper):
""" Adds an mask observation that states which sites are visible to which agents.
Args:
pos_obs_key: (string) the name of the site position observation of shape (n_sites, 3)
mask_obs_key: (string) the name of the mask observation to output
cone_angle: (float) the angle in radians btw the axis and edge of the observation cone
"""
def __init__(self, env, pos_obs_key, mask_obs_key, cone_angle=3/8 * np.pi):
super().__init__(env)
self.cone_angle = cone_angle
self.n_agents = self.unwrapped.n_agents
assert(self.n_agents == self.observation_space.spaces['agent_pos'].shape[0])
self.n_objects = self.observation_space.spaces[pos_obs_key].shape[0]
self.observation_space = update_obs_space(env, {mask_obs_key: (self.n_agents, self.n_objects)})
self.pos_obs_key = pos_obs_key
self.mask_obs_key = mask_obs_key
def observation(self, obs):
agent_pos2d = obs['agent_pos'][:, :-1]
agent_angle = obs['agent_angle']
pos2d = obs[self.pos_obs_key][:, :2]
cone_mask = in_cone2d(agent_pos2d, np.squeeze(agent_angle, -1), self.cone_angle, pos2d)
# Make sure they are in line of sight
for i, j in np.argwhere(cone_mask):
agent_geom_id = self.metadata['agent_geom_idxs'][i]
pt2 = obs[self.pos_obs_key][j]
cone_mask[i, j] = insight(self.unwrapped.sim, agent_geom_id, pt2=pt2)
obs[self.mask_obs_key] = cone_mask
return obs
class AgentGeomObsMask2D(gym.ObservationWrapper):
""" Adds an mask observation that states which geoms are visible to which agents.
Args:
pos_obs_key: (string) the name of the site position observation of shape (n_geoms, 3)
geom_idxs_obs_key: (string) the name of an observation that, for each object to be
masked, gives the Mujoco index of the geom (e.g. in sim.geom_names)
as an array of shape (n_geoms, 1)
mask_obs_key: (string) the name of the mask observation to output
cone_angle: (float) the angle in radians btw the axis and edge of the observation cone
"""
def __init__(self, env, pos_obs_key, geom_idxs_obs_key, mask_obs_key, cone_angle=3/8 * np.pi):
super().__init__(env)
self.cone_angle = cone_angle
self.n_agents = self.unwrapped.n_agents
assert(self.n_agents == self.observation_space.spaces['agent_pos'].shape[0])
self.n_objects = self.observation_space.spaces[pos_obs_key].shape[0]
self.observation_space = update_obs_space(env, {mask_obs_key: (self.n_agents, self.n_objects)})
self.pos_obs_key = pos_obs_key
self.mask_obs_key = mask_obs_key
self.geom_idxs_obs_key = geom_idxs_obs_key
def observation(self, obs):
agent_pos2d = obs['agent_pos'][:, :-1]
agent_angle = obs['agent_angle']
pos2d = obs[self.pos_obs_key][:, :2]
cone_mask = in_cone2d(agent_pos2d, np.squeeze(agent_angle, -1), self.cone_angle, pos2d)
# Make sure they are in line of sight
for i, j in np.argwhere(cone_mask):
agent_geom_id = self.metadata['agent_geom_idxs'][i]
geom_id = obs[self.geom_idxs_obs_key][j, 0]
if geom_id == -1:
                # This option is helpful if the number of geoms varies between episodes.
                # If a geom doesn't exist, this wrapper expects its geom idx to be
                # set to -1
cone_mask[i, j] = 0
else:
cone_mask[i, j] = insight(self.unwrapped.sim, agent_geom_id, geom2_id=geom_id)
obs[self.mask_obs_key] = cone_mask
return obs
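# A minimal usage sketch (hypothetical, not part of the original file):
# make_base_env is assumed to return an environment exposing unwrapped.n_agents,
# metadata['agent_geom_idxs'], and the 'agent_pos'/'agent_angle' observations
# these wrappers read; 'site_pos'/'mask_as_obs' are placeholder key names.
def _example_wrap(make_base_env):
    env = make_base_env()
    env = AgentAgentObsMask2D(env)
    env = AgentSiteObsMask2D(env, pos_obs_key='site_pos', mask_obs_key='mask_as_obs')
    return env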
|
[
"todor.m.markov@gmail.com"
] |
todor.m.markov@gmail.com
|
198a2b5342443d4d2c5667fbb25b0fdb97553de8
|
19a1bad0b1467a7d4b35798a2ec042b193ea845d
|
/fenye_app/models.py
|
4baeaef848101215191a7a3c464cb59530e36409
|
[] |
no_license
|
lzj3278/django_paging
|
0ebf4a615dd8dbad9265c63b277307eea8a0a33f
|
cc3bbee18788b8587664216262193577f4fad29d
|
refs/heads/master
| 2021-05-31T07:51:13.391111
| 2016-04-28T02:30:38
| 2016-04-28T02:30:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class host(models.Model):
hostname = models.CharField(max_length=20)
ip = models.GenericIPAddressField()
|
[
"lzj3278@126.com"
] |
lzj3278@126.com
|
927f1557347ca959e680b1ba8da5fd9543d4ba5f
|
ec3fa0c10a9168b3f11bc5f26f880b71d5ce0abe
|
/06_04/valid_anagram.py
|
c6fa585f714be9d40d4bc3bd0ae1c62422c5410b
|
[] |
no_license
|
dongheelee1/leetcode_chapters
|
b4a8e986525bfc93999dcd9244f07285202cd078
|
d47c9501e266194aafebdb701068ba88e813fdc5
|
refs/heads/master
| 2020-05-31T13:19:59.581056
| 2019-06-26T01:14:09
| 2019-06-26T01:14:09
| 190,300,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
'''
Given two strings s and t , write a function to determine if t is an anagram of s.
Example 1:
Input: s = "anagram", t = "nagaram"
Output: true
Example 2:
Input: s = "rat", t = "car"
Output: false
Note:
You may assume the string contains only lowercase alphabets.
Follow up:
What if the inputs contain unicode characters? How would you adapt your solution to such case?
'''
class Solution(object):
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
        return sorted(s) == sorted(t)
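# A Counter-based alternative (a sketch, not the original solution): character
# counting runs in O(n) versus O(n log n) for the sorted() comparison above.
from collections import Counter

def is_anagram_counter(s, t):
    # two strings are anagrams iff their character multisets are equal
    return Counter(s) == Counter(t)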
|
[
"noreply@github.com"
] |
dongheelee1.noreply@github.com
|
ff724ac851c4f75432657c28af0607296409fb36
|
4ee7f7a15594e3b47c196333c96e0e0a0d6b60a7
|
/product/migrations/0008_auto_20200131_1114.py
|
e8459d0b80d9c4705cbf3d12c5886590205cc2e1
|
[] |
no_license
|
itjuba/Instagram_like_system_django
|
899d4a8bb08152c5941a97d9c09ee61dd3dcc00c
|
b8036eed0bb3238e9e83fecb09b428290fb5d7be
|
refs/heads/master
| 2023-01-04T16:21:46.288913
| 2020-10-31T19:03:08
| 2020-10-31T19:03:08
| 308,952,314
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
# Generated by Django 3.0.2 on 2020-01-31 11:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product', '0007_auto_20200131_1041'),
]
operations = [
migrations.AlterField(
model_name='voted_product',
name='product_id',
field=models.CharField(max_length=20),
),
migrations.AlterField(
model_name='voted_product',
name='user_id',
field=models.CharField(max_length=20),
),
]
|
[
"attignadjib@gmail.com"
] |
attignadjib@gmail.com
|
2393b2366101d347e88229a76744d392c72135ba
|
5681006b96d7dfd32722f635e59423490a50c6ff
|
/optimizee/mnist.py
|
6f943c24b87663a51642d5742ebb5531316ab0bf
|
[
"MIT"
] |
permissive
|
raintung/RNNOPTIMIZE
|
2e963994c7c28c21e482b0c00dc652aa69625828
|
c43c264b277b4a5b45c5dedda945c77955f40002
|
refs/heads/master
| 2021-05-14T05:31:05.606651
| 2018-01-04T06:28:29
| 2018-01-04T06:28:29
| 116,223,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,866
|
py
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import optimizee
class MnistLinearModel(optimizee.Optimizee):
'''A MLP on dataset MNIST.'''
mnist = None
def __init__(self, activation='sigmoid', n_batches=128, n_h=20, n_l=1, initial_param_scale=0.1):
optimizee.Optimizee.__init__(self)
self.activation = activation
self.n_batches = n_batches
self.n_l = n_l
self.n_h = n_h
self.initial_param_scale = initial_param_scale
if n_l == 0:
self.x_dim = 784 * 10 + 10
else:
self.x_dim = 784 * n_h + n_h + (n_h * n_h + n_h) * (n_l - 1) + n_h * 10 + 10
def build(self):
        if MnistLinearModel.mnist is None:
MnistLinearModel.mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
self.mnist = MnistLinearModel.mnist
self.x = tf.placeholder(tf.float32, [None, None, 784])
self.y_ = tf.placeholder(tf.float32, [None, None, 10])
def get_x_dim(self):
return self.x_dim
def get_initial_x(self):
return np.random.normal(size=[self.x_dim], scale=self.initial_param_scale)
def next_internal_feed_dict(self):
return {}
def next_feed_dict(self, n_iterations):
x_data = np.zeros([n_iterations, self.n_batches, 784])
y_data = np.zeros([n_iterations, self.n_batches, 10])
for i in range(n_iterations):
x_data[i], y_data[i] = self.mnist.train.next_batch(self.n_batches)
return { self.x: x_data, self.y_: y_data }
def loss(self, i, x):
self.start_get_weights(x)
if self.n_l > 0:
w1 = self.get_weights([784, self.n_h])
b1 = self.get_weights([self.n_h])
w2 = self.get_weights([self.n_h, 10])
b2 = self.get_weights([10])
wl = [self.get_weights([self.n_h, self.n_h]) for k in range(self.n_l - 1)]
bl = [self.get_weights([self.n_h]) for k in range(self.n_l - 1)]
def act(x):
if self.activation == 'sigmoid':
return tf.sigmoid(x)
elif self.activation == 'relu':
return tf.nn.relu(x)
elif self.activation == 'elu':
return tf.nn.elu(x)
elif self.activation == 'tanh':
return tf.tanh(x)
last = act(tf.matmul(self.x[i], w1) + b1)
for k in range(self.n_l - 1):
last = act(tf.matmul(last, wl[k]) + bl[k])
last = tf.matmul(last, w2) + b2
else:
w = self.get_weights([784, 10])
b = self.get_weights([10])
last = tf.matmul(self.x[i], w) + b
return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=last,labels=self.y_[i]))
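# A usage sketch (assumes TF1 graph mode and a local MNIST_data/ directory,
# exactly as the class itself requires; not part of the original file):
def _example_build_and_feed():
    model = MnistLinearModel(activation='relu', n_batches=128, n_h=20, n_l=1)
    model.build()                       # creates placeholders and loads MNIST
    feed = model.next_feed_dict(n_iterations=10)
    x0 = model.get_initial_x()          # flat parameter vector, length model.get_x_dim()
    return model, feed, x0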
|
[
"raintung.li@gmail.com"
] |
raintung.li@gmail.com
|
04184e3d08273a771d2ef481b4866228c3a52782
|
1e8524d11c13863dec62299eaaf271a32352d52a
|
/DZHBL/Data_loan.py
|
8e34ab96a65f20607ba7d398d01645929344a964
|
[] |
no_license
|
20Youk/ChooseStock
|
e461a00f163936c5d110ff92590d856527156a9f
|
0cdd8259df1b6c1464fa6649b0ab3cac1b005456
|
refs/heads/master
| 2021-06-16T17:56:41.618610
| 2019-09-10T13:12:36
| 2019-09-10T13:12:36
| 97,783,717
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,505
|
py
|
# -*- coding:utf-8 -*-
# Author:Youk.Lin
# Purpose: query and summarize last month's loan disbursement data
import xlsxwriter
import pymssql
import datetime
from dateutil.relativedelta import relativedelta
def conn_sql(hostname, database, username, password, sql):
    conn = pymssql.connect(server=hostname, database=database, user=username, password=password, charset='utf8')
cursor = conn.cursor()
cursor.execute(sql)
sqldata = cursor.fetchall()
field = cursor.description
return sqldata, field
if __name__ == '__main__':
host = '***'
db = '***'
user = '***'
pw = '***'
sql1 = '''with t2 as (select t1.DocEntry, convert(decimal(18,2), sum(t1.LineTotal)) as SumLineTotle, convert(decimal(18,2), sum(t1.ALineTotal)) as SumALineTotle
from U_RZQR1 t1 group by t1.DocEntry)
select t1.RZHCode as '融资申请编号',
t.CardName as '融资方名称',
t.BCardName as '买方酒店名称',
case when t1.RzType = 1 then '票前融资' else '票后融资' end as 融资方式,
year(t1.RMoth)+MONTH(t1.RMoth)*0.01 as 融资月份,
convert(date, t.SDate, 120) as '账期开始日期',
convert(date, t.EDate, 120) as '帐期结束日期',
t2.SumLineTotle as 应收款总额,
t2.SumALineTotle as 应放款总额,
convert(date, t.DocDate, 120) as '放款日期'
from U_OPFK1 t, U_RZQR t1 , t2, U_OPFK t3
where t.DocEntry = t3.DocEntry
and t3.DocType = 'F'
and t.QCode = t1.HCode
and t1.DocType = 'Q'
and t1.DocEntry = t2.DocEntry
and convert(date, t.DocDate, 120) >= '%s'
and convert(date, t.DocDate, 120) < '%s'
order by convert(date, t.DocDate, 120) '''
firstDay = datetime.datetime.now()
lastDay = datetime.datetime.now() + relativedelta(months=+1)
changedSql = sql1 % (firstDay.strftime('%Y-%m-%d'), lastDay.strftime('%Y-%m-%d'))
sqlData1, field1 = conn_sql(host, db, user, pw, changedSql)
excelPath = '../file/loan_%s.xlsx' % firstDay.strftime('%Y-%m-%d')
wb = xlsxwriter.Workbook(excelPath)
ws = wb.add_worksheet(u'放款明细')
for j in range(0, len(field1)):
        ws.write(0, j, field1[j][0])
dataList = list(sqlData1)
for i in range(0, len(sqlData1)):
ws.write_row(i + 1, 0, list(sqlData1[i]))
wb.close()
|
[
"30305851+20Youk@users.noreply.github.com"
] |
30305851+20Youk@users.noreply.github.com
|
10bdef883e48465fd5208bd5dfd5abf64aa5d8b9
|
24db6985a016c3e4767c95ca51190e659d0847cd
|
/faustctf2020/cartography/exploit.py
|
5355a1de2a44c1c11717ed054418a5d7d8e3e157
|
[
"MIT"
] |
permissive
|
datajerk/ctf-write-ups
|
463f53db224410a51df481b9e41b7777a09f3e2c
|
c33815911de3f4a66cbafbf5f12d7b57239250d9
|
refs/heads/master
| 2022-09-30T02:29:44.097435
| 2022-09-05T02:16:19
| 2022-09-05T02:16:19
| 204,361,251
| 136
| 36
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
#!/usr/bin/python3
from pwn import *
binary = ELF('./cartography')
#libc = ELF('/lib/x86_64-linux-gnu/libc.so.6')
libc = ELF('libc-database/db/libc6_2.28-10_amd64.so')
#p = process(['stdbuf','-i0','-o0','-e0',binary.path])
p = remote('vulnbox', 6666)
# null pointer
p.sendlineafter('>','0')
p.sendlineafter('Enter the sector\'s size:\n',str(2**48 - 1))
# leak libc
p.sendlineafter('> ','2')
p.sendlineafter('Where do you want to read?\n',str(binary.got['puts']))
p.sendlineafter('How much do you want to read?\n','8')
puts = u64(p.recv(8))
log.info('puts: ' + hex(puts))
baselibc = puts - libc.symbols['puts']
log.info('baselibc: ' + hex(baselibc))
libc.address = baselibc
# strtoll -> system
p.sendlineafter('> ','1')
p.sendlineafter('Where do you want to write?',str(binary.got['strtoll']))
p.sendlineafter('How much do you want to write?\n','8')
p.sendlineafter('Enter your sensor data:',p64(libc.symbols['system']))
# get a shell
p.sendlineafter('>','0')
p.sendlineafter('Enter the sector\'s size:\n','/bin/sh')
p.interactive()
|
[
"datajerk@gmail.com"
] |
datajerk@gmail.com
|
b068d7fad32a526c57a8fe0a3220984e33b7bad3
|
896c14380828cdb43df6e1e2905e6fbd948099ad
|
/enhanced_dnc.py
|
c0381fc759bdc12b0d54dc635b5e9388a68a3969
|
[] |
no_license
|
jemisonf/closest_pair_of_points
|
ddecc19f35aee10d9ab4c6fd58012f634dd46d44
|
a2f731cf96a5dd3d24c2674bf9806e75e36aff23
|
refs/heads/master
| 2020-04-19T20:09:18.766821
| 2019-02-04T05:47:01
| 2019-02-04T05:47:01
| 168,407,645
| 2
| 1
| null | 2019-02-03T03:12:58
| 2019-01-30T20:08:05
|
Python
|
UTF-8
|
Python
| false
| false
| 4,006
|
py
|
from math import pow, sqrt
from statistics import median
from sys import argv
from lib import read_inputs, write_outputs
# Computes the closest pairs and the distance between these two points
# Returns a tuple of a float and a list of tuples of tuples representing pairs
# of points
def closest_pair(points_by_x, points_by_y):
# Manually compute closest pair if length is 2 or 3
if len(points_by_x) <= 3:
return simple_closest_pair(points_by_x)
# Compute the median x value
median_x = median(map(lambda it: it[0], points_by_x))
# Split into left and right portions
left_half_by_x = list(filter(lambda it: it[0] <= median_x, points_by_x))
left_half_by_y = list(filter(lambda it: it[0] <= median_x, points_by_y))
right_half_by_x = list(filter(lambda it: it[0] > median_x, points_by_x))
right_half_by_y = list(filter(lambda it: it[0] > median_x, points_by_y))
# Recursively call closest_pair on both halves
closest_left = closest_pair(left_half_by_x, left_half_by_y)
closest_right = closest_pair(right_half_by_x, right_half_by_y)
# Find the minimum distance delta between both sides and use to make middle
# band
delta = min([closest_left[0], closest_right[0]])
middle_band = list(filter(
lambda it: median_x - delta <= it[0] <= median_x + delta,
points_by_y
))
# Sort by y coord and get closest cross pair
#middle_band.sort(key=lambda it: it[1])
closest_cross = closest_cross_pair(middle_band, delta)
# Update minimum distance if smaller distance was found in cross pair
new_min = min(delta, closest_cross[0])
result = (new_min, [])
for section in [closest_left, closest_right, closest_cross]:
if section[0] == new_min:
for pair in section[1]:
if not any(x in result[1] for x in [pair, (pair[1], pair[0])]):
result[1].append(pair)
return result
# Compute closest pair for length 2 or 3
def simple_closest_pair(points):
if len(points) == 2:
result = list()
result.append(tuple(points))
return (compute_distance(points[0], points[1]), result)
if len(points) == 3:
distances = {
(points[0], points[1]): compute_distance(points[0], points[1]),
(points[1], points[2]): compute_distance(points[1], points[2]),
(points[0], points[2]): compute_distance(points[0], points[2])
}
min_distance = min(distances.values())
result = list(filter(
lambda it: distances[it] == min_distance,
distances.keys()
))
return (min_distance, result)
# Compute the closest cross-pair
# points should be sorted by y-coordinate in ascending order
# Returns a tuple with first item being the shortest distance and the second
# item being the list of tuples with distance equal to the shortest distance
def closest_cross_pair(points, delta):
dm = delta
for i in range(len(points) - 1):
j = i + 1
        while j < len(points) and points[j][1] - points[i][1] <= delta:
            d = compute_distance(points[i], points[j])
dm = min(d, dm)
j = j + 1
possible_pairs = list((x, y) for x in points for y in points)
closest_pairs = list(filter(
lambda it: compute_distance(it[0], it[1]) <= dm and it[0] != it[1],
possible_pairs
))
final_result = (dm, closest_pairs)
return final_result
# Compute the distance between two points
def compute_distance(point_a, point_b):
return sqrt(
pow(point_b[0] - point_a[0], 2) + pow(point_b[1] - point_a[1], 2)
)
if __name__ == "__main__":
try:
inputs = read_inputs(argv[1])
except IndexError:
exit("No input file was provided")
inputs.sort(key=lambda it: it[0])
inputs_by_y = inputs.copy()
inputs_by_y.sort(key=lambda it: it[1])
result = closest_pair(inputs, inputs_by_y)
write_outputs('enhanceddnc-output.txt', result[0], result[1])
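# A small self-check sketch (hypothetical points, not from the assignment inputs):
def _example_small_run():
    pts = [(0, 0), (3, 4), (1, 1), (10, 10)]
    by_x = sorted(pts, key=lambda it: it[0])
    by_y = sorted(pts, key=lambda it: it[1])
    # expected: distance sqrt(2), pair (0, 0) and (1, 1)
    return closest_pair(by_x, by_y)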
|
[
"gillens@oregonstate.edu"
] |
gillens@oregonstate.edu
|
95a37a55cf991c65aaf322dc714aec9338d55eed
|
d68bfaf211e749e41d4be3abdb8f22bd4d2bec2e
|
/backend/pycoffee/setup.py
|
f84e28d95f67efac867bb4599c8f224f0dd5d540
|
[] |
no_license
|
jbarratt/leancoffee
|
0eab41befb609c3b484baae6fd3201ab0218766e
|
432505f7b9c636ebf5a063ccda764e4e7162adbb
|
refs/heads/master
| 2021-04-30T22:47:18.480823
| 2017-01-01T20:15:03
| 2017-01-01T20:15:03
| 66,486,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
from setuptools import setup
setup(name='pycoffee',
      version='0.3',
description="Python backend for a lean coffee service",
url="http://serialized.net",
author="Joshua Barratt",
author_email="jbarratt@serialized.net",
license="MIT",
packages=["pycoffee"],
install_requires=[
'boto3',
'pynamodb',
],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
zip_safe=False)
|
[
"jbarratt@serialized.net"
] |
jbarratt@serialized.net
|
5a6fe73ab539641de2aea5ec0a3f4c9c19a529db
|
a71906d44e8874b8cebb11077ec4bd98128dc6bb
|
/tunga_profiles/filters.py
|
f7da79dcdcf4f00543c952cd098574b496be196d
|
[] |
no_license
|
mossplix/tunga-api
|
f44a640e4fdf0b9a27baa755ac732cea13887818
|
57bd90fe63d1d328056b068e4edab1bac23dd790
|
refs/heads/master
| 2021-01-21T18:43:05.158363
| 2016-06-14T09:11:56
| 2016-06-14T09:11:56
| 59,886,519
| 0
| 0
| null | 2016-05-28T09:13:23
| 2016-05-28T09:13:22
| null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
from tunga_profiles.models import Education, Work, Connection, SocialLink
from tunga_utils.filters import GenericDateFilterSet
class SocialLinkFilter(GenericDateFilterSet):
class Meta:
model = SocialLink
fields = ('user', 'platform')
class EducationFilter(GenericDateFilterSet):
class Meta:
model = Education
fields = ('institution', 'award')
class WorkFilter(GenericDateFilterSet):
class Meta:
model = Work
fields = ('company', 'position')
class ConnectionFilter(GenericDateFilterSet):
class Meta:
model = Connection
fields = ('from_user', 'to_user', 'accepted', 'responded')
|
[
"tdsemakula@gmail.com"
] |
tdsemakula@gmail.com
|
e707afba2c1b9a8da5d8a9b2a46fce5f52e1cf71
|
757687b6e4aba130cc58db87571aacf833e19f59
|
/road_classify/train.py
|
49596521905584c7b7871b0303e433cd1904a122
|
[] |
no_license
|
jeff012345/ECE5831-Term-Project
|
77cd55d857b0d9ed0c9ef6e58e5465e194d06bd5
|
59c6674caa527f8c20974c97dc66edfc671cef56
|
refs/heads/master
| 2020-08-05T20:26:43.180300
| 2020-02-01T15:25:31
| 2020-02-01T15:25:31
| 212,695,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,758
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import matplotlib.pyplot as plt
import os
from sklearn.model_selection import KFold
import cv2
import seaborn as sns
import numpy as np
import pandas as pd
from datasets import loadData
from model import create_model
import util
##
## setup values
##
## where are the images
data_root_folder = 'F:/ece5831/windows/Ricky/groupings/separated_2class'
#data_root_folder = 'F:/ece5831/windows/Ricky/groupings/full_urban_rural'
## within the folder, what extensions are we looking for
data_images_extension = '*.*'
##
## END setup values
##
## find the class names
class_names = [name for name in os.listdir(data_root_folder) if os.path.isdir(os.path.join(data_root_folder,name))]
print("classes = ", class_names)
## load the data
(train_images, train_labels), (test_images, test_labels) = loadData(data_root_folder, class_names, data_images_extension, even = True)
## create test images output folders
for class_name in class_names:
util.create_dir_or_is_empty(os.path.join("training", "test_" + class_name))
## save the test images
cnt = 0
for img in test_images:
class_name = class_names[test_labels[cnt][0]]
img_file_path = os.path.join(os.path.join("training", "test_" + class_name), str(cnt) + ".bmp")
cv2.imwrite(img_file_path, img)
cnt += 1
## normalize the RGB values
train_images, test_images = train_images / 255.0, test_images / 255.0
## preview the images
#plt.figure(figsize=(10,10))
#for i in range(25):
# plt.subplot(5,5,i+1)
# plt.xticks([])
# plt.yticks([])
# plt.grid(False)
# plt.imshow(train_images[i], cmap=plt.cm.binary)
# plt.xlabel(class_names[train_labels[i][0]])
#plt.show()
## create the model
model = create_model(len(class_names))
#weights = tf.train.latest_checkpoint('F:/ece5831/ECE5831-Term-Project/road_classify/training')
#model.load_weights(weights)
checkpoint_path = "training/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# Create a callback that saves the model's weights
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
save_weights_only=True,
verbose=1)
history = model.fit(train_images, train_labels, epochs=50,
validation_data=(test_images, test_labels),
callbacks=[cp_callback])
model.save('training/trained_model.h5')
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
plt.show()
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
classes = list(range(0, len(class_names)))
classesDict = {}
i = 0
for c in class_names:
classesDict[c] = i
i+=1
predictions = model.predict(test_images)
## use the max value from array for each prediction
predictions = np.argmax(predictions, axis = 1)
## convert the label to the class
#train_classes = list(map(lambda l: classesDict[l], train_labels))
confusion_matrix = tf.math.confusion_matrix(
test_labels,
predictions
).numpy()
print("confusion_matrix")
print(confusion_matrix)
con_mat_norm = np.around(confusion_matrix.astype('float') / confusion_matrix.sum(axis=1)[:, np.newaxis], decimals=2)
con_mat_df = pd.DataFrame(con_mat_norm,
index = classes,
columns = classes)
figure = plt.figure(figsize=(8, 8))
sns.heatmap(con_mat_df, annot = True, cmap = plt.cm.Blues, square = True)
#plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
|
[
"jtmontic@gmail.com"
] |
jtmontic@gmail.com
|
06ad8b103f41a0397828529c9dc732e632d57175
|
b8e65cdd875bd81c02b2ac6a0dee6fc4366352f1
|
/crawler/ref.py
|
5ec895bc72503cafc3f022292d584f2856c09c66
|
[] |
no_license
|
sonacsn/Bare-Metal-Agentless-Introspection
|
56008b0b502bb6f35c427feb38aa8c21acc2db7d
|
bfdad42081d02eb3f5b57958b25f50ebaaf19b33
|
refs/heads/master
| 2020-03-31T14:15:28.434920
| 2018-10-12T00:56:56
| 2018-10-12T00:56:56
| 152,286,849
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
import os
import json
def getVals(data):
if isinstance(data,(list,)):
for x in data:
print(x)
else:
print(data)
obj_initial = {}
obj = {}
not_list = ["x.py", "y.py", "op.json", ".DS_Store", "v.xml"]
for filename in os.listdir(os.getcwd()):
if filename not in not_list:
print("---------------------",filename)
with open(filename) as json_data:
d = json.load(json_data)
obj_initial[filename] = d
if "id" in d.keys():
print("id => ", d["id"])
if "category" in d.keys():
print("category => ", d["category"])
if "summary" in d.keys():
print("summary => ", d["summary"])
if "cveid" in d.keys():
print("cveid => ")
getVals(d["cveid"])
if "fixes" in d.keys():
fixes = d["fixes"]
for f in fixes:
os = f["os"]
packages = f["packages"]
print("packages => ")
getVals(packages)
print("os => ")
getVals(os)
with open('op.json', 'w') as outfile:
json.dump(obj_initial, outfile)
|
[
"noreply@github.com"
] |
sonacsn.noreply@github.com
|
7c20ada96589c63baea34691006f73ae2c608e5c
|
8d62c65b2d07adde3b03e97fd28512bfe7d89188
|
/config/local_setting.py
|
daf5943c4d2f50fcbc66d69c8fba29c3b77a771b
|
[] |
no_license
|
Joedidi/flask_demo
|
00b64ce0fbabcbdfaa39221336140842a53d046a
|
2e859c12c141137235b8e3d820b992a36f8564a8
|
refs/heads/master
| 2022-12-16T16:55:15.902698
| 2019-11-08T09:57:49
| 2019-11-08T09:57:49
| 216,952,832
| 1
| 1
| null | 2022-12-08T06:15:33
| 2019-10-23T02:41:55
|
Python
|
UTF-8
|
Python
| false
| false
| 302
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: Jamin Chen
@date: 2019/8/16 16:03
@explain:
@file: local_setting.py
"""
DEBUG = True
SQLALCHEMY_ECHO = True
SQLALCHEMY_DATABASE_URI = 'mysql://root:123456@localhost/mysql'
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ENCODING = 'utf-8'
|
[
"352514951@qq.com"
] |
352514951@qq.com
|
afa2107e98505984228c85111c0c2c800fb6a39c
|
121ab18bfbdb690a3365e84b7da6d1b143fb8768
|
/expression/call_cufflinks_preDE.py
|
590247be81ed98bc549e49eebf9184ca35032740
|
[] |
no_license
|
zy041225/python_bin
|
1661ef120ce22d63f7f8929dfe637bf12e7b31b4
|
4b0219b4a99394b48c3dad19727cbfb2038d2e3e
|
refs/heads/master
| 2020-09-13T15:34:02.145545
| 2019-11-20T02:07:34
| 2019-11-20T02:07:34
| 222,830,326
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,274
|
py
|
#!/usr/bin/python3
def main():
import sys
import os
from index import read_info
if len(sys.argv) != 6:
sys.exit('python3 %s <reads.info> <gff> <hisat2.dir> <outdir> <fa>' % (sys.argv[0]))
info = os.path.abspath(sys.argv[1])
gffFile = os.path.abspath(sys.argv[2])
indir = os.path.abspath(sys.argv[3])
outdir = os.path.abspath(sys.argv[4])
faFile = os.path.abspath(sys.argv[5])
infoDic = read_info(info)
cufflinks = '/share/app/cufflinks-2.2.1/cufflinks'
preDE = '/hwfssz1/ST_DIVERSITY/PUB/USER/zhouyang/software/stringtie-1.3.3b/prepDE.py'
for id in sorted(infoDic):
l = infoDic[id][2]
bamFile = os.path.join(indir, id, id+'.sort.bam')
if not os.path.isfile(bamFile):
sys.stderr.write('%s not found\n' % (bamFile))
else:
outdir1 = os.path.join(outdir, id)
if not os.path.exists(outdir1): os.mkdir(outdir1)
outprefix = os.path.join(outdir, id, id)
print('{0} -p 10 -G {2} -b {3} -u {1} -o {4}; sed \'s/gene_id ""; //\' {4}/transcripts.gtf > {4}/transcripts.mod.gtf; echo -e {6}\'\\t\'{4}/transcripts.mod.gtf > {5}.lst; python {7} -i {5}.lst -l {8} -g {5}.gene_count_matrix.csv -t {5}.transcript_count_matrix.csv'.format(cufflinks, bamFile, gffFile, faFile, outdir1, outprefix, id, preDE, l))
if __name__ == '__main__':
main()
|
[
"zy041225@gmail.com"
] |
zy041225@gmail.com
|
48d3066bfd9e0b0a4c59a4ba94048806daff7e51
|
340085e8a4c7ef228baf77ca43c2b20cb616e4f6
|
/leetcode/tree/main-572.py
|
a1df6a039e30268224fb7a9309cfb8ae4611ec85
|
[] |
no_license
|
saint-yellow/python-pl
|
f9c3e9dcfd9a2fe163ee5e1d14b56cac6a855648
|
e59bd9fcb09968be4f16c1f525c74f68fc23957f
|
refs/heads/master
| 2023-07-14T20:15:54.958632
| 2021-08-29T00:39:53
| 2021-08-29T00:39:53
| 209,847,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 978
|
py
|
# LeetCode Problem Nr. 572: 另一棵树的子树
from ..ds import BinaryNode
from typing_extensions import TypeAlias
TreeNode: TypeAlias = BinaryNode
class Solution:
def isSubtree(self, root: TreeNode, subRoot: TreeNode) -> bool:
return self.__method1(root, subRoot)
def __method1(self, root: TreeNode, subRoot: TreeNode) -> bool:
if not root:
return False
if root.val == subRoot.val and self.__isSameTree(root, subRoot):
return True
return self.__method1(root.left, subRoot) or self.__method1(root.right, subRoot)
def __isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
if not (p or q):
return True
if not (p and q):
return False
if p.val != q.val:
return False
        compareLeftTree = self.__isSameTree(p.left, q.left)
        compareRightTree = self.__isSameTree(p.right, q.right)
        return compareLeftTree and compareRightTree
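# A usage sketch (assumes BinaryNode(val) with .left/.right attributes, which is
# an assumption about the ..ds helper; not part of the original solution):
def _example_is_subtree():
    root = BinaryNode(3)
    root.left, root.right = BinaryNode(4), BinaryNode(5)
    root.left.left, root.left.right = BinaryNode(1), BinaryNode(2)
    sub = BinaryNode(4)
    sub.left, sub.right = BinaryNode(1), BinaryNode(2)
    return Solution().isSubtree(root, sub)  # expected: True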
|
[
"saint-yellow@hotmail.com"
] |
saint-yellow@hotmail.com
|
a85c1f1e8f38446e50224c914b8f06324e7ce873
|
23b7663519eaafc79799bf11cb83111dc4a5d50b
|
/tests/initialisations/test_kkz1994.py
|
8bdc2f8e60a077c1cae9a4e025aedb01e81f110f
|
[
"MIT"
] |
permissive
|
yun-lai-yun-qu/pykmeans
|
9bed671d1827907c4d85418b10ebb85159585893
|
256e0c6c7284182aae9c10783cf50778af120514
|
refs/heads/master
| 2023-07-16T10:25:26.398727
| 2021-08-30T09:11:04
| 2021-08-30T09:11:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,104
|
py
|
"""
Tests for KKZ in1994 initialisation algorithm
"""
import unittest
import numpy as np
from initialisations import kkz
class KKZTestSuite(unittest.TestCase):
"""Test for KKZ 194 initialisation algorithm"""
def setUp(self):
self._load_data()
def test_with_1(self):
"""Sanity check it gets the most extreme point"""
centroids = kkz.generate(self._data, 1)
np.testing.assert_array_equal(centroids, np.array([self._data[1]]))
def test_with_3(self):
"""Then find the furthest nearest point"""
centroids = kkz.generate(self._data, 3)
np.testing.assert_array_equal(centroids,
np.array([self._data[1],
self._data[4],
self._data[0]])
)
def _load_data(self):
self._data = np.array([
[1, 2, 3, 4],
[-100, -200, -100, -124],
[5, 3, 4, 5],
[1, 3, 4, 5],
[10, 20, 10, 30],
])
|
[
"github@pointbeing.net"
] |
github@pointbeing.net
|
8c0650ff8c2b39077921c90cc3a77c47d849f2d0
|
1eba4c314af15cac607fc21e322d2cb1a107bd9f
|
/btbot/feeders/core.py
|
b3533da721406759f79b41248a702d3852d46018
|
[
"MIT"
] |
permissive
|
ultra1971/btbot
|
a97c485f10b92a73104f766abf637059ab7021e1
|
c7ed77beecdae59d2bf72c62e93afa847f60493a
|
refs/heads/master
| 2021-09-23T20:04:51.323022
| 2018-09-27T06:21:02
| 2018-09-27T06:21:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
from abc import ABC, abstractmethod
class BaseFeeder(ABC):
@abstractmethod
def get_feed(self, idx=0):
"""Implement the method to get feed at idx"""
@property
def current_feed(self):
return self.get_feed(0)
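# A minimal concrete feeder (an illustrative sketch, not part of the package):
class ListFeeder(BaseFeeder):
    """Feeder backed by an in-memory list; index 0 is treated as the current feed."""
    def __init__(self, feeds):
        self._feeds = list(feeds)
    def get_feed(self, idx=0):
        return self._feeds[idx]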
|
[
"f.j.akimoto@gmail.com"
] |
f.j.akimoto@gmail.com
|
4e145908273fede975cc06b2e337b630d310e3ae
|
9db35644ff1603eef458558fd6a40aaf06356fc1
|
/Feature_extraction/Approximate_entropy.py
|
c873b2137c52481b46049df465cdadf41b27e794
|
[] |
no_license
|
ldcrchao/PycharmProjects
|
0bf83e3d44d96be7bad5dbcc47dfcd8fa729883a
|
a6e8bb4ec211f241aecd4be344d04c14e14f59b3
|
refs/heads/main
| 2023-04-19T15:12:50.070297
| 2021-05-09T08:07:42
| 2021-05-09T08:07:42
| 374,944,409
| 1
| 0
| null | 2021-06-08T08:50:56
| 2021-06-08T08:50:56
| null |
UTF-8
|
Python
| false
| false
| 3,972
|
py
|
#%%
# -*- coding: utf-8 -*-
'''
@Project : python learning project folder
@File : Approximate_entropy.py
@Author : chenbei
@Date : 2020/12/23 13:39
'''
import numpy as np
class BaseApEn(object):
    def __init__(self, m, r):
        '''
        :param m: length of the compared subsequences, int
        :param r: threshold coefficient, typically 0.1--0.2
        '''
        self.m = m
        self.r = r
    @staticmethod
    def _maxdist(x_i, x_j):
        return np.max([np.abs(np.array(x_i) - np.array(x_j))])
    @staticmethod
    def _biaozhuncha(U):
        # sample standard deviation of U
        if not isinstance(U, np.ndarray):
            U = np.array(U)
        return np.std(U, ddof=1)
class NewBaseApen(object):
    """Base class for the fast algorithm"""
    @staticmethod
    def _get_array_zeros(x):
        """
        Create an N*N zero matrix
        :param x:
        :return:
        """
        N = np.size(x, 0)
        return np.zeros((N, N), dtype=int)
    @staticmethod
    def _get_c(z, m):
        """
        Entropy computation kernel
        :param z:
        :param m:
        :return:
        """
        N = len(z[0])
        # compute the probability matrix C
        c = np.zeros((1, N - m + 1))
        if m == 2:
            for j in range(N - m + 1):
                for i in range(N - m + 1):
                    c[0, j] += z[j, i] & z[j + 1, i + 1]
        if m == 3:
            for j in range(N - m + 1):
                for i in range(N - m + 1):
                    c[0, j] += z[j, i] & z[j + 1, i + 1] & z[j + 2, i + 2]
        if m != 2 and m != 3:
            raise AttributeError('Invalid value of m!')
        data = list(filter(lambda x: x, c[0] / (N - m + 1.0)))
        if not all(data):
            return 0
        return np.sum(np.log(data)) / (N - m + 1.0)
class ApEn(BaseApEn):
    def biaozhunhua(self, U):
        # standardize U to zero mean and unit sample standard deviation
        self.me = np.mean(U)
        self.biao = self._biaozhuncha(U)
        return np.array([(x - self.me) / self.biao for x in U])
    def _dazhi(self, U):
        # tolerance threshold: r times the sample standard deviation
        if not hasattr(self, "f"):
            self.f = self._biaozhuncha(U) * self.r
        return self.f
    def _phi(self, m, U):
        # build the list of m-length vectors
        x = [U[i:i + m] for i in range(len(U) - m + 1)]
        # for each vector, the fraction of vectors within the tolerance
        C = [len([1 for x_j in x if self._maxdist(x_i, x_j) <= self._dazhi(U)]) / (len(U) - m + 1.0) for x_i in x]
        # compute the entropy
        return np.sum(np.log(list(filter(lambda a: a, C)))) / (len(U) - m + 1.0)
    def _phi_b(self, m, U):
        # build the list of m-length vectors
        x = [U[i:i + m] for i in range(len(U) - m + 1)]
        # for each vector, the fraction of vectors within the tolerance
        C = [len([1 for x_j in x if self._maxdist(x_i, x_j) <= self.r]) / (len(U) - m + 1.0) for x_i in x]
        # compute the entropy
        return np.sum(np.log(list(filter(lambda x: x, C)))) / (len(U) - m + 1.0)
    def jinshishang(self, U):
        # approximate entropy of the raw signal
        return np.abs(self._phi(self.m + 1, U) - self._phi(self.m, U))
    def jinshishangbiao(self, U):
        # approximate entropy of the standardized signal
        # (fixed: the method is defined as biaozhunhua, not _biaozhunhua)
        eeg = self.biaozhunhua(U)
        return np.abs(self._phi_b(self.m + 1, eeg) - self._phi_b(self.m, eeg))
class NewApEn(ApEn, NewBaseApen):
    """
    Fast practical approximate-entropy algorithm proposed by Hong Bo et al.
    """
    def _get_distance_array(self, U):
        """
        Build the within-tolerance (distance) matrix
        :param U:
        :return:
        """
        z = self._get_array_zeros(U)
        fa = self._dazhi(U)
        for i in range(len(z[0])):
            z[i, :] = (np.abs(U - U[i]) <= fa) + 0
        return z
    def _get_shang(self, m, U):
        """
        Compute the entropy value
        :param U:
        :return:
        """
        # build the within-tolerance matrix
        Z = self._get_distance_array(U)
        return self._get_c(Z, m)
    def hongbo_jinshishang(self, U):
        """
        Compute the approximate entropy
        :param U:
        :return:
        """
        return np.abs(self._get_shang(self.m + 1, U) - self._get_shang(self.m, U))
if __name__ == "__main__":
    U = np.array([2, 4, 6, 8, 10] * 17)
    ap = ApEn(2, 0.2)
    AEntropy = ap.jinshishang(U)  # compute the approximate entropy (Approximate entropy)
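    # The fast variant can be exercised the same way (a sketch mirroring the
    # original usage above; not part of the original file):
    nap = NewApEn(2, 0.2)
    FastAEntropy = nap.hongbo_jinshishang(U)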
|
[
"noreply@github.com"
] |
ldcrchao.noreply@github.com
|
4918019576d1bb2c764d597fb23d1b9b1b0770d4
|
f02d13defa83b17585563d12699713f4f83a1a3f
|
/farm.py
|
825dc597dc5df7af8fa6540b2f8bdc66acde7fe1
|
[] |
no_license
|
Priya-Pathak/Demo_Project
|
c5089107b9d2fc60650560e3c51f605680d7ba87
|
f9076bcbf378ce88f442a8e5338febfaa8a26e66
|
refs/heads/master
| 2023-02-20T15:54:21.079395
| 2021-01-25T17:30:30
| 2021-01-25T17:30:30
| 332,827,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 865
|
py
|
import requests
#import urllib2
import re
from bs4 import BeautifulSoup
html_page = requests.get("http://imgur.com")
# #page = requests.get("https://www.amazon.in/events/greatindiansale?ext_vrnc=hi&tag=googhydrabk-21&ascsubtag=_k_CjwKCAiAxp-ABhALEiwAXm6IyVxEEsg6-nyCVzhHOjbhCXbJ99UiPVe3cR2fqubVZtibSpNnat57zRoCyVUQAvD_BwE_k_&ext_vrnc=hi&gclid=CjwKCAiAxp-ABhALEiwAXm6IyVxEEsg6-nyCVzhHOjbhCXbJ99UiPVe3cR2fqubVZtibSpNnat57zRoCyVUQAvD_BwE")
# print(page.status_code)
# contents = page.content
# soup = BeautifulSoup(contents, 'html.parser')
# results = soup.find_all('img')
# for result in results :
# if len(result.attrs) == 1 :
# print(result)
# print('this is running')
# html_page = urllib2.urlopen("http://imgur.com")
soup = BeautifulSoup(html_page.content, 'html.parser')
images = []
for img in soup.findAll('img'):
images.append(img.get('src'))
print(images)
|
[
"priyapathak24.07@gmail.com"
] |
priyapathak24.07@gmail.com
|
0fc13596f5a832149d9950e11fa583228c461fea
|
0d44d7bab621065fca2547eb384cc80aba8e828e
|
/src/unitest/sparse_matrix_test.py
|
9d24822efa91b5dc3fd6b3b1d4665303bc54db31
|
[] |
no_license
|
tttthomasssss/dissect-py3
|
dc823029ca971d8ff57e98f253e1be902a1be244
|
7b5eae7bfd7e7914ba0ecffcf5e36088c5cf66a5
|
refs/heads/master
| 2020-04-01T09:32:14.219686
| 2018-10-15T08:33:51
| 2018-10-15T08:33:51
| 153,079,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,176
|
py
|
'''
Created on Sep 18, 2012
@author: Georgiana Dinu, Pham The Nghia
'''
import unittest
import numpy as np
import numpy.testing
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse.sputils import isintlike
from composes.matrix.sparse_matrix import SparseMatrix
from composes.matrix.dense_matrix import DenseMatrix
class TestSparseMatrix(unittest.TestCase):
def setUp(self):
self.a = np.array([[1,2,3],[4,0,5]])
self.b = np.array([[0,0,0],[0,0,0]])
self.c = np.array([[0,0],[0,0],[0,0]])
self.d = np.array([[1,0],[0,1]])
self.e = np.array([1,10])
self.f = np.array([1,10,100])
self.matrix_a = SparseMatrix(self.a)
self.matrix_b = SparseMatrix(self.b)
self.matrix_c = SparseMatrix(self.c)
self.matrix_d = SparseMatrix(self.d)
def tearDown(self):
pass
def test_reshape(self):
test_cases = [(self.matrix_a, (1,6), self.a.reshape((1,6))),
(self.matrix_a, (3,2), self.a.reshape((3,2))),
(self.matrix_b, (1,6), self.b.reshape((1,6))),
(self.matrix_b, (6,1), self.b.reshape((6,1))),
(self.matrix_b, (2,3), self.b.reshape((2,3))),
]
for mat, shape, expected in test_cases:
mat.reshape(shape)
np.testing.assert_array_equal(mat.mat.todense(), expected)
self.assertTupleEqual(shape, mat.shape)
def test_reshape_raises(self):
test_cases = [(3,0), (3,3), 3, (3,3,3), ("3","5"), (2,"4")]
for shape in test_cases:
self.assertRaises(ValueError, self.matrix_a.reshape, shape)
def test_init(self):
nparr = self.a
test_cases = [nparr,
np.mat(nparr),
csr_matrix(nparr),
csc_matrix(nparr),
DenseMatrix(nparr)]
for inmat in test_cases:
outmat = SparseMatrix(inmat)
self.assertIsInstance(outmat.mat, csr_matrix)
numpy.testing.assert_array_equal(nparr,
np.array(outmat.mat.todense()))
def test_add(self):
test_cases = [(self.matrix_a, self.matrix_a, np.mat([[2,4,6],[8,0,10]])),
(self.matrix_a, self.matrix_b, np.mat(self.a))
]
for (term1, term2, expected) in test_cases:
sum_ = term1 + term2
numpy.testing.assert_array_equal(sum_.mat.todense(), expected)
self.assertIsInstance(sum_, type(term1))
def test_add_raises(self):
test_cases = [(self.matrix_a, self.a),
(self.matrix_a, DenseMatrix(self.a))]
for (term1, term2) in test_cases:
self.assertRaises(TypeError, term1.__add__, term2)
def test_mul(self):
test_cases = [(self.matrix_a, self.matrix_c, np.mat([[0,0],[0,0]])),
(self.matrix_d, self.matrix_a, self.matrix_a.mat.todense()),
(self.matrix_a, 2, np.mat([[2,4,6],[8,0,10]])),
(self.matrix_a, np.int64(2), np.mat([[2,4,6],[8,0,10]]))
]
for (term1, term2, expected) in test_cases:
sum_ = term1 * term2
numpy.testing.assert_array_equal(sum_.mat.todense(), expected)
self.assertIsInstance(sum_, type(term1))
def test_mul_raises(self):
test_cases = [(self.matrix_a, self.a),
(self.matrix_a, DenseMatrix(self.a)),
(self.matrix_a, "3")]
for (term1, term2) in test_cases:
self.assertRaises(TypeError, term1.__mul__, term2)
def test_get_item(self):
out_mat = SparseMatrix(self.a)[0,:]
np.testing.assert_array_equal(out_mat.mat.todense(),np.mat(self.a[0,:]))
out_int = SparseMatrix(self.a)[0,1]
self.assertEqual(out_int, 2)
out_mat = SparseMatrix(self.a)[0,1:2]
np.testing.assert_array_equal(out_mat.mat.todense(),np.mat(self.a[0,1:2]))
out_mat = SparseMatrix(self.a)[0]
np.testing.assert_array_equal(out_mat.mat.todense(),np.mat(self.a[0,:]))
def test_scale_rows(self):
outcome = np.mat([[1,2,3],[40,0,50]])
test_cases = [(self.matrix_a.copy(), self.e, outcome),
(self.matrix_a.copy(), np.mat(self.e).T, outcome),
]
for (term1, term2, expected) in test_cases:
term1 = term1.scale_rows(term2)
numpy.testing.assert_array_equal(term1.mat.todense(), expected)
def test_scale_columns(self):
test_cases = [(self.matrix_a.copy(), self.f, np.mat([[1,20,300],[4,0,500]]))]
for (term1, term2, expected) in test_cases:
term1 = term1.scale_columns(term2)
numpy.testing.assert_array_equal(term1.mat.todense(), expected)
self.assertIsInstance(term1.mat, csr_matrix)
def test_scale_raises(self):
test_cases = [(self.matrix_a, self.f, ValueError, self.matrix_a.scale_rows),
(self.matrix_a, self.e, ValueError, self.matrix_a.scale_columns),
(self.matrix_a, self.b, ValueError, self.matrix_a.scale_rows),
(self.matrix_a, self.b, ValueError, self.matrix_a.scale_columns),
(self.matrix_a, "3", TypeError, self.matrix_a.scale_rows),
]
for (term1, term2, error_type, function) in test_cases:
self.assertRaises(error_type, function, term2)
def test_plog(self):
m = SparseMatrix(np.mat([[0.5,1.0,1.5],[2.0,0.0,2.5]]))
m_expected = np.mat([[0.,0.,0.4054],[ 0.6931,0.,0.9162]])
a_expected = np.mat([[0.,0.6931,1.0986],[1.3862,0.,1.6094]])
test_cases = [(self.matrix_a.copy(), a_expected),
(m, m_expected)
]
for (term, expected) in test_cases:
term.plog()
numpy.testing.assert_array_almost_equal(term.mat.todense(), expected, 3)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
[
"th0mas.ko6er@gmail.com"
] |
th0mas.ko6er@gmail.com
|
cb481845e9c1ef0a92553d65ed4dce425109b17e
|
232e1fdcf311744f9d186be4bbf874aeea290e24
|
/Assignment1/Solution/q2.py
|
036a2045e612a7145afc1053349e9a05e726812c
|
[] |
no_license
|
LulwahAlkwai/cs595-f13
|
43ab0e1539643dedb48ebae9758d797be96474ab
|
8840d8b7da7747eb944c826c7bf28372e1dce3ba
|
refs/heads/master
| 2020-12-24T10:14:46.768979
| 2013-12-13T15:04:21
| 2013-12-13T15:04:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,813
|
py
|
#!/usr/bin/python
#cs595
#Assignment1-q2
#Lulwah Alkwai
import sys
import time
import urllib2
import BeautifulSoup
if len(sys.argv) == 4:
InputTeamName=sys.argv[1]
TimeInSeconds=int(sys.argv[2])
URI=sys.argv[3]
#Handling exception
#Arguments not correctly inputed
#using default input
else:
print('***********************************************************************************************')
print('Please try again by entering: q2.py "TeamName" TimeInSeconds "URI"')
print(' Default:')
print(' Team Name: Old Dominion')
print(' Time In Seconds: 60 ')
print(' URI: http://scores.espn.go.com/ncf/scoreboard?confId=80&seasonYear=2013&seasonType=2&weekNumber=2')
print('***********************************************************************************************')
InputTeamName="Old Dominion"
TimeInSeconds=60
URI="http://scores.espn.go.com/ncf/scoreboard?confId=80&seasonYear=2013&seasonType=2&weekNumber=2"
condition = True
while condition:
redditFile = urllib2.urlopen(URI)
redditHtml = redditFile.read()
    redditFile.close()
soup = BeautifulSoup.BeautifulSoup(redditHtml)
for allteams in soup.findAll('a',{'title':InputTeamName}):
game = allteams.findParent().findParent().findParent().findParent().findParent()
Team1 = game.find('div', {"class":"team home"})
Team1Name = Team1.find('p', {"class":"team-name"}).find('a').string
Team1Points = Team1.find('li', {"class":"final"}).string
Team2 = game.find('div', {"class":"team visitor"})
Team2Name = Team2.find('p', {"class":"team-name"}).find('a').string
Team2Points = Team2.find('li', {"class":"final"}).string
print(Team1Name + ' ' + Team1Points)
print(Team2Name + ' ' + Team2Points)
print('. . .')
time.sleep(TimeInSeconds)
|
[
"lalkwai@cs.odu.edu"
] |
lalkwai@cs.odu.edu
|
e00d307da9da567025a35c28735648a6e27b20c8
|
fcc88521f63a3c22c81a9242ae3b203f2ea888fd
|
/Python3/0706-Design-HashMap/soln.py
|
e21d3dfd6923092dc9aed5123cf0f59efdc39a4a
|
[
"MIT"
] |
permissive
|
wyaadarsh/LeetCode-Solutions
|
b5963e3427aa547d485d3a2cb24e6cedc72804fd
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
refs/heads/master
| 2022-12-06T15:50:37.930987
| 2020-08-30T15:49:27
| 2020-08-30T15:49:27
| 291,811,790
| 0
| 1
|
MIT
| 2020-08-31T19:57:35
| 2020-08-31T19:57:34
| null |
UTF-8
|
Python
| false
| false
| 1,451
|
py
|
class MyHashMap:
def __init__(self):
"""
Initialize your data structure here.
"""
self.size = 1337
self.lst = [[] for _ in range(self.size)]
def put(self, key, value):
"""
value will always be non-negative.
:type key: int
:type value: int
:rtype: void
"""
pos = key % self.size
for pair in self.lst[pos]:
if pair[0] == key:
pair[1] = value
break
else:
self.lst[pos].append([key, value])
def get(self, key):
"""
Returns the value to which the specified key is mapped, or -1 if this map contains no mapping for the key
:type key: int
:rtype: int
"""
pos = key % self.size
for k, v in self.lst[pos]:
if k == key:
return v
return -1
def remove(self, key):
"""
Removes the mapping of the specified value key if this map contains a mapping for the key
:type key: int
:rtype: void
"""
pos = key % self.size
for i, (k, v) in enumerate(self.lst[pos]):
if k == key:
del self.lst[pos][i]
break
# Your MyHashMap object will be instantiated and called as such:
# obj = MyHashMap()
# obj.put(key,value)
# param_2 = obj.get(key)
# obj.remove(key)
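# Concrete example (hypothetical values):
# obj = MyHashMap(); obj.put(1, 10); obj.get(1)  # -> 10
# obj.remove(1); obj.get(1)  # -> -1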
|
[
"zhang623@wisc.edu"
] |
zhang623@wisc.edu
|
a101187eb7a6666ef2d639900235fd12398cddc2
|
7478faa51668132406e313e2cdfdebbc4c698dbe
|
/data_utils.py
|
e707c753a750f5a3554bd0f0a31958829faeb330
|
[] |
no_license
|
lxk00/KD-NAS-EMD
|
d339c037d13702f33c37e7a020e5ccba754f3a28
|
5426d237a71590db398a31663758cd277adb45d1
|
refs/heads/main
| 2023-04-10T09:19:07.232451
| 2021-04-26T13:07:54
| 2021-04-26T13:07:54
| 361,752,661
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,646
|
py
|
from torch.utils.data import TensorDataset
import torch
import numpy as np
import random
from torch.utils.data.sampler import RandomSampler, SubsetRandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
import os
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, seq_length=None, guid=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.seq_length = seq_length
self.label_id = label_id
self.guid = guid
def get_tensor_data(output_mode, features, ):
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
else:
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)
all_seq_lengths = torch.tensor([f.seq_length for f in features], dtype=torch.long)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
s_ids = [f.guid for f in features]
tensor_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids,
all_seq_lengths)
return tensor_data, s_ids
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode, is_master=True):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0 and is_master:
print("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
seq_length = len(input_ids)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
if ex_index == 0 and is_master:
print("*** Example ***")
print("guid: %s" % (example.guid))
print("tokens: %s" % " ".join([str(x) for x in tokens]))
print("input_ids: %s" % " ".join([str(x) for x in input_ids]))
print("input_mask: %s" % " ".join([str(x) for x in input_mask]))
print("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
print("label: {}".format(example.label))
print("label_id: {}".format(label_id))
features.append(
InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
seq_length=seq_length,
guid=example.guid))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def load_glue_dataset(config):
from bert_fineturn.data_processor.glue import glue_processors as processors
from bert_fineturn.data_processor.glue import glue_output_modes as output_modes
from transformers import BertConfig, BertTokenizer
task_name = config.datasets
config.is_master = True
config.multi_gpu = False
processor = processors[task_name.lower()]()
output_mode = output_modes[task_name.lower()]
label_list = processor.get_labels()
if output_mode == 'classification':
n_classes = len(label_list)
else:
n_classes = 1
sids = dict()
tokenizer = BertTokenizer.from_pretrained('teacher_utils/bert_base_uncased', do_lower_case=True)
train_examples = processor.get_train_examples(config.data_src_path)
train_features = convert_examples_to_features(train_examples, label_list,
config.max_seq_length, tokenizer,
output_mode, config.is_master)
train_data, train_sids = get_tensor_data(output_mode, train_features)
eval_examples = processor.get_dev_examples(config.data_src_path)
eval_features = convert_examples_to_features(eval_examples, label_list,
config.max_seq_length, tokenizer,
output_mode, config.is_master)
eval_data, eval_sids = get_tensor_data(output_mode, eval_features)
test_examples = processor.get_test_examples(config.data_src_path)
test_features = convert_examples_to_features(test_examples, label_list,
config.max_seq_length, tokenizer,
output_mode, config.is_master)
test_data, test_sids = get_tensor_data(output_mode, test_features)
train_eval_data, _ = get_tensor_data(output_mode, eval_features)
if not config.multi_gpu:
train_sampler = RandomSampler(train_data)
train_eval_sampler = RandomSampler(train_eval_data)
else:
train_sampler = DistributedSampler(train_data)
train_eval_sampler = DistributedSampler(train_eval_data)
eval_sampler = SequentialSampler(eval_data)
test_sampler = SequentialSampler(test_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=config.batch_size)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=config.batch_size)
train_eval_dataloader = DataLoader(train_eval_data, sampler=train_eval_sampler, batch_size=config.batch_size)
test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=config.batch_size)
bert_config = BertConfig.from_pretrained("teacher_utils/bert_base_uncased/config.json")
config.bert_config = bert_config
sids = {"train": train_sids, "test": test_sids, "dev": eval_sids}
return train_dataloader, train_eval_dataloader, eval_dataloader, test_dataloader, output_mode, n_classes, config, sids
from torch.utils.data.sampler import Sampler
class OrderdedSampler(Sampler):
def __init__(self, dataset, order):
self._dataset = dataset
self._train_data_list = order
self._train_data_list
def __len__(self):
return len(self._dataset)
def __iter__(self):
random.shuffle(self._train_data_list)
for index in self._train_data_list:
yield self._dataset[index]
def check_data_vaild(data1, data2):
# data1, data2 = next(iter(data1)), next(iter(data2))
def pad_replace(x):
x = np.array(x)
pad_mask = np.array([not(i == '[PAD]' or i == "<pad>") for i in x])
new_x = x[pad_mask].tolist() + [f'[PAD] * { - sum(pad_mask - 1)}']
return new_x
def mask_replace(x):
t = sum(x)
new_x = f"1 * {t}, 0 * {len(x) - t}"
return new_x
with open('/data/lxk/NLP/github/darts-KD/data/MRPC-nas/embedding/vocab.txt') as f:
vocab1 = {i:x.strip() for i, x in enumerate(f.readlines())}
with open('/data/lxk/NLP/github/darts-KD/teacher_utils/teacher_model/MRPC/vocab.txt') as f:
vocab2 = {i:x.strip() for i, x in enumerate(f.readlines())}
sent_words = torch.split(data1[0], 1, dim=1)
sent_words = [torch.squeeze(x, dim=1) for x in sent_words]
mask = [x.ne(0) for x in sent_words]
if len(mask) > 1:
mask = torch.logical_or(mask[0], mask[1])
else:
mask = mask[0]
print("SENT1:", pad_replace([vocab1[x.item()] for x in data1[0][0][0]]))
if data1[0].shape[1] == 2:
print("SENT2:", pad_replace([vocab1[x.item()] for x in data1[0][0][1]]))
print("MASK:", mask_replace(mask[0]))
print("LABEL:", data1[2][0].item())
input_ids, input_mask, segment_ids, label_ids, seq_lengths = data2
print("TEACHER SENT:", pad_replace([vocab2[x.item()] for x in input_ids[0]]))
print("TEACHER MASK", mask_replace(input_mask[0]))
print("TEACHER LABEL", label_ids[0].item())
class RandomSamplerByOrder(Sampler):
r"""Samples elements randomly. If without replacement, then sample from a shuffled dataset.
If with replacement, then user can specify :attr:`num_samples` to draw.
Arguments:
data_source (Dataset): dataset to sample from
replacement (bool): samples are drawn with replacement if ``True``, default=``False``
num_samples (int): number of samples to draw, default=`len(dataset)`. This argument
is supposed to be specified only when `replacement` is ``True``.
"""
def __init__(self, data_source, replacement=False, num_samples=None):
self.data_source = data_source
self.replacement = replacement
self._num_samples = num_samples
if not isinstance(self.replacement, bool):
raise ValueError("replacement should be a boolean value, but got "
"replacement={}".format(self.replacement))
if self._num_samples is not None and not replacement:
raise ValueError("With replacement=False, num_samples should not be specified, "
"since a random permute will be performed.")
if not isinstance(self.num_samples, int) or self.num_samples <= 0:
raise ValueError("num_samples should be a positive integer "
"value, but got num_samples={}".format(self.num_samples))
@property
def num_samples(self):
# dataset size might change at runtime
if self._num_samples is None:
return len(self.data_source)
return self._num_samples
def __iter__(self):
n = len(self.data_source)
if self.replacement:
return iter(torch.randint(high=n, size=(self.num_samples,), dtype=torch.int64).tolist())
return iter(torch.randperm(n).tolist())
def __len__(self):
return self.num_samples
def bert_batch_split(data, rank, device=None):
if device == torch.device('cpu'):
data = [x for x in data]
else:
data = [x.to(f"cuda:{rank}", non_blocking=True) for x in data]
input_ids, input_mask, segment_ids, label_ids, seq_lengths = data
X = [input_ids, input_mask, segment_ids, seq_lengths]
Y = label_ids
return X, Y
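# Minimal smoke test for bert_batch_split (hypothetical shapes; not part of the
# original pipeline). On the CPU branch the tensors are passed through unchanged.
if __name__ == "__main__":
    _dummy = [torch.zeros(2, 8, dtype=torch.long) for _ in range(5)]
    _X, _Y = bert_batch_split(_dummy, rank=0, device=torch.device('cpu'))
    print([t.shape for t in _X], _Y.shape)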
|
[
"noreply@github.com"
] |
lxk00.noreply@github.com
|
bc0cea1eb0c490b4a90b599b03211f8ccadbb334
|
8fd35da0ce8cf4f993a6847d115a650df408f5c9
|
/src/Analysis_1_mass1013.py
|
f823fef8d50fcbe552fc16d370eb62106b746b95
|
[] |
no_license
|
Fernandez-Trincado/Bullet_Groups-2014
|
c8e3f49b7f71cdde9509ba31cfe27bd76ca95b07
|
b414dcdcae8404e5ce663bed2c64f57c2090d5d3
|
refs/heads/master
| 2020-06-07T12:35:19.942900
| 2015-11-08T13:23:53
| 2015-11-08T13:23:53
| 17,151,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,685
|
py
|
#!/usr/bin/python
#Bullet Cluster and Bullet Groups with Jaime Forero at Universidad de Los Andes, Bogota - Colombia.
import numpy as np
import pylab as plt
data = np.genfromtxt('MassiveCatshortV.0416_mass_1013.DAT')
mask1013=data[:,6]<1E14
mask1014=data[:,6]>1E14
f=plt.figure(1)
ax1=f.add_subplot(1,2,1)
ax2=f.add_subplot(1,2,2)
N, N13 = float(len(data[mask1013,15])), float(len(data[mask1014,15]))
Y_axis = 1. - np.arange(int(N))/N
Y_axis13 = 1. - np.arange(int(N13))/N13
#figure 1
# sort the offset column directly rather than looping over argsort indices
Xoff = np.sort(data[mask1013,15])
Xoff13 = np.sort(data[mask1014,15])
ax1.plot(Xoff,Y_axis,color='gray',label=r'$10^{13} M_{\odot}<M<10^{14} M_{\odot}$')
ax1.plot(Xoff13,Y_axis13,color='red',label=r'$M>10^{14} M_{\odot}$')
ax1.legend(loc=1,numpoints=1,fontsize='large')
ax1.set_xlabel(r'X$_{off}$')
ax1.set_ylabel(r'P(>X$_{off}$)')
ax1.set_yscale('log')
#figure2
# sort the virial-radius column the same way
Rvirial = np.sort(data[mask1013,8])
Rvirial13 = np.sort(data[mask1014,8])
ax2.plot(Xoff*Rvirial/0.6777,Y_axis,color='gray',label=r'$10^{13} M_{\odot}<M<10^{14} M_{\odot}$')
ax2.plot(Xoff13*Rvirial13/0.6777,Y_axis13,color='red',label=r'$M>10^{14} M_{\odot}$')
ax2.set_xlabel(r'd [kpc]')
ax2.set_ylabel(r'P(>d)')
ax2.set_yscale('log')
plt.legend(loc=1,numpoints=1,fontsize='large')
plt.show()
|
[
"jfernandezt87@gmail.com"
] |
jfernandezt87@gmail.com
|
1d4ca7f8a7effb45b21224c271eaa445e0746341
|
dfb67fdaaa5db5dbb62c653cb49558c53ae5289f
|
/demo2/venv/test.py
|
29323b89876d40db257606fa3865acf54eb2b19f
|
[] |
no_license
|
franklinmf16/Python
|
97f24cda19a080de34ea2c2874df584911b10fda
|
4f72a77a86097ca0df4253f811dc2a57ffbb19aa
|
refs/heads/master
| 2020-03-25T08:29:34.692373
| 2018-08-06T02:27:08
| 2018-08-06T02:27:08
| 143,615,269
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
filename = 'you_can.txt'
print('\n replace')
with open(filename) as file_object:
    lines = file_object.readlines()  # renamed from `list` to avoid shadowing the built-in
for message in lines:
    message = message.replace('Python', 'C')
    print(message.strip())
|
[
"boxianmo@Boxians-MacBook-Pro.local"
] |
boxianmo@Boxians-MacBook-Pro.local
|
c36475c03b3c82b403e44b5b09971ac66b693c2a
|
a225fed17f1a2dd5ec224651615018df3d1d9644
|
/src/crawler/pymining/test.naive_bayes_train_test.py
|
48674c888c10cd3c979d71731d4358c78af1478e
|
[] |
no_license
|
vaneeylee/0f523140-f3b3-4653-89b0-eb08c39940ad
|
533e27363ec6e7f88680761aef678e6e9ceef9b0
|
6b354617ea416a357df78097ab3a5ef72c3b0b00
|
refs/heads/master
| 2021-12-07T11:59:33.207076
| 2011-05-04T13:01:54
| 2011-05-04T13:01:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 901
|
py
|
from matrix import Matrix
from classifier_matrix import ClassifierMatrix
from segmenter import Segmenter
from py_mining import PyMining
from configuration import Configuration
from chisquare_filter import ChiSquareFilter
from naive_bayes import NaiveBayes
if __name__ == "__main__":
config = Configuration.FromFile("conf/test.xml")
PyMining.Init(config, "__global__")
matCreater = ClassifierMatrix(config, "__matrix__")
[trainx, trainy] = matCreater.CreateTrainMatrix("data/sogou.train.txt")
chiFilter = ChiSquareFilter(config, "__filter__")
chiFilter.TrainFilter(trainx, trainy)
nbModel = NaiveBayes(config, "naive_bayes")
nbModel.Train(trainx, trainy)
[testx, testy] = matCreater.CreatePredictMatrix("data/sogou.test.txt")
[testx, testy] = chiFilter.MatrixFilter(testx, testy)
[resultY, precision] = nbModel.Test(testx, testy)
print precision
|
[
"felixcat.gng@gmail.com"
] |
felixcat.gng@gmail.com
|
153eda9d5fa5013d2f878bf59e3fb6cdd746b50f
|
1f8d81fe50369f4b3d4df96d2da01a7dd404f843
|
/AppliedAI.py
|
99299f02d4db6df65f9903bcf9296b6bebbfee15
|
[] |
no_license
|
vidhyasagar13/kaggle_building_energy_consumption
|
c463e91253041f923429bd7ba5a27e0f6df325db
|
ffcc062d12ada84b78495875819e1b275d507a11
|
refs/heads/master
| 2022-11-12T09:13:59.690738
| 2020-06-26T05:30:52
| 2020-06-26T05:30:52
| 275,080,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,498
|
py
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
from time import time
import datetime
import gc
from sklearn.model_selection import train_test_split,KFold
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
pd.set_option('display.max_columns',100)
pd.set_option('display.max_rows',1500)
pd.set_option('display.float_format', lambda x: '%.5f' % x)
metadata_dtype = {'site_id':"uint8",'building_id':'uint16','square_feet':'float32','year_built':'float32','floor_count':"float16"}
metadata = pd.read_csv("building_metadata.csv",dtype=metadata_dtype)
metadata.info(memory_usage='deep')
weather_dtype = {"site_id":"uint8",'air_temperature':"float16",'cloud_coverage':"float16",'dew_temperature':"float16",'precip_depth_1_hr':"float16",
'sea_level_pressure':"float32",'wind_direction':"float16",'wind_speed':"float16"}
weather_train = pd.read_csv("weather_train.csv",parse_dates=['timestamp'],dtype=weather_dtype)
weather_test = pd.read_csv("weather_test.csv",parse_dates=['timestamp'],dtype=weather_dtype)
print (weather_train.info(memory_usage='deep'))
print (weather_test.info(memory_usage='deep'))
train_dtype = {'meter':"uint8",'building_id':'uint16','meter_reading':"float32"}
train = pd.read_csv("train.csv",parse_dates=['timestamp'],dtype=train_dtype)
test_dtype = {'meter':"uint8",'building_id':'uint16'}
test_cols_to_read = ['building_id','meter','timestamp']
test = pd.read_csv("test.csv",parse_dates=['timestamp'],usecols=test_cols_to_read,dtype=test_dtype)
Submission = pd.DataFrame(test.index,columns=['row_id'])
train.head()
test.head()
metadata.head()
weather_train.head()
weather_test.head()
missing_weather = pd.DataFrame(weather_train.isna().sum()/len(weather_train),columns=["Weather_Train_Missing_Pct"])
missing_weather["Weather_Test_Missing_Pct"] = weather_test.isna().sum()/len(weather_test)
metadata.isna().sum()/len(metadata)
metadata['floor_count_isNa'] = metadata['floor_count'].isna().astype('uint8')
metadata['year_built_isNa'] = metadata['year_built'].isna().astype('uint8')
# Dropping floor_count variable as it has 75% missing values
metadata.drop('floor_count',axis=1,inplace=True)
missing_train_test = pd.DataFrame(train.isna().sum()/len(train),columns=["Missing_Pct_Train"])
missing_train_test["Missing_Pct_Test"] = test.isna().sum()/len(test)
missing_train_test
# The original line referenced an undefined `dataset`; filling the numeric
# gaps in the weather and metadata frames is the likely intent.
for df in [weather_train, weather_test, metadata]:
    df.fillna(df.mean(numeric_only=True), inplace=True)
train.describe(include='all')
train['meter'].replace({0:"Electricity",1:"ChilledWater",2:"Steam",3:"HotWater"},inplace=True)
test['meter'].replace({0:"Electricity",1:"ChilledWater",2:"Steam",3:"HotWater"},inplace=True)
sns.countplot(train['meter'],order=train['meter'].value_counts().sort_values().index)
plt.title("Distribution of Meter Id Code")
plt.xlabel("Meter Id Code")
plt.ylabel("Frequency")
print ("There are {} unique Buildings in the training data".format(train['building_id'].nunique()))
train['building_id'].value_counts(dropna=False).head(20)
train[train['building_id'] == 1094]['meter'].unique()
for df in [train, test]:
df['Month'] = df['timestamp'].dt.month.astype("uint8")
df['DayOfMonth'] = df['timestamp'].dt.day.astype("uint8")
df['DayOfWeek'] = df['timestamp'].dt.dayofweek.astype("uint8")
df['Hour'] = df['timestamp'].dt.hour.astype("uint8")
# In[28]:
train[['timestamp','meter_reading']].set_index('timestamp').resample("H")['meter_reading'].mean().plot(kind='line',figsize=(10,6),label='Avg_Meter_by_Hour')
train[['timestamp','meter_reading']].set_index('timestamp').resample("D")['meter_reading'].mean().plot(kind='line',figsize=(10,6),label='Avg_Meter_by_Day')
plt.legend()
plt.xlabel("Timestamp")
plt.ylabel("Average Meter Reading")
plt.title("Graph of Average Meter Reading")
meter_Electricity = train[train['meter'] == "Electricity"]
meter_Electricity[['timestamp','meter_reading']].set_index('timestamp').resample("H")['meter_reading'].mean().plot(kind='line',figsize=(10,6),label='Avg_Meter_by_Hour')
meter_Electricity[['timestamp','meter_reading']].set_index('timestamp').resample("D")['meter_reading'].mean().plot(kind='line',figsize=(10,6),label='Avg_Meter_by_Day')
plt.legend()
plt.xlabel("Timestamp")
plt.ylabel("Average Meter Reading")
plt.title("Graph of Average Meter Readingfor Electricity Meter")
meter_ChilledWater = train[train['meter'] == "ChilledWater"]
meter_ChilledWater[['timestamp','meter_reading']].set_index('timestamp').resample("H")['meter_reading'].mean().plot(kind='line',figsize=(10,6),label='Avg_Meter_by_Hour')
meter_ChilledWater[['timestamp','meter_reading']].set_index('timestamp').resample("D")['meter_reading'].mean().plot(kind='line',figsize=(10,6),label='Avg_Meter_by_Day')
plt.legend()
plt.xlabel("Timestamp")
plt.ylabel("Average Meter Reading")
plt.title("Graph of Average Meter Readingfor ChilledWater Meter")
meter_Steam = train[train['meter'] == "Steam"]
meter_Steam[['timestamp','meter_reading']].set_index('timestamp').resample("H")['meter_reading'].mean().plot(kind='line',figsize=(10,6),label='Avg_Meter_by_Hour')
meter_Steam[['timestamp','meter_reading']].set_index('timestamp').resample("D")['meter_reading'].mean().plot(kind='line',figsize=(10,6),label='Avg_Meter_by_Day')
plt.legend()
plt.xlabel("Timestamp")
plt.ylabel("Average Meter Reading")
plt.title("Graph of Average Meter Readingfor Steam Meter")
meter_HotWater = train[train['meter'] == "HotWater"]
meter_HotWater[['timestamp','meter_reading']].set_index('timestamp').resample("H")['meter_reading'].mean().plot(kind='line',figsize=(10,6),label='Avg_Meter_by_Hour')
meter_HotWater[['timestamp','meter_reading']].set_index('timestamp').resample("D")['meter_reading'].mean().plot(kind='line',figsize=(10,6),label='Avg_Meter_by_Day')
plt.legend()
plt.xlabel("Timestamp")
plt.ylabel("Average Meter Reading")
plt.title("Graph of Average Meter Readingfor HotWater Meter")
holidays = ["2016-01-01", "2016-01-18", "2016-02-15", "2016-05-30", "2016-07-04",
"2016-09-05", "2016-10-10", "2016-11-11", "2016-11-24", "2016-12-26",
"2017-01-02", "2017-01-16", "2017-02-20", "2017-05-29", "2017-07-04",
"2017-09-04", "2017-10-09", "2017-11-10", "2017-11-23", "2017-12-25",
"2018-01-01", "2018-01-15", "2018-02-19", "2018-05-28", "2018-07-04",
"2018-09-03", "2018-10-08", "2018-11-12", "2018-11-22", "2018-12-25",
"2019-01-01"]
for df in [train,test]:
    # float16 overflows above ~65504, which real square footage exceeds
    df['square_feet'] = df['square_feet'].astype('float32')
    # compare calendar dates so the string holidays match reliably
    df["Holiday"] = df['timestamp'].dt.date.astype(str).isin(holidays).astype(int)
idx_to_drop = list((train[(train['site_id'] == 0) & (train['timestamp'] < "2016-05-21 00:00:00")]).index)
print (len(idx_to_drop))
train.drop(idx_to_drop,axis='rows',inplace=True)
idx_to_drop = list(train[(train['meter'] == "Electricity") & (train['meter_reading'] == 0)].index)
print(len(idx_to_drop))
train.drop(idx_to_drop,axis='rows',inplace=True)
idx_to_drop = list((train[(train['building_id']==1099)&(train['meter_reading'] > 30000)&(train['meter'] == "Steam")]).index)
print (len(idx_to_drop))
train.drop(idx_to_drop,axis='rows',inplace=True)
train['meter_reading'] = np.log1p(train['meter_reading'])
# `corr_matrix` and `threshold` were never defined in this export; a standard
# choice is the feature correlation matrix with a 0.95 cut-off.
corr_matrix = train.corr()
threshold = 0.95
upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
upper.head()
to_drop = [column for column in upper.columns if any(upper[column] > threshold)]
print('There are %d columns to remove.' % (len(to_drop)))
print ("Following columns can be dropped {}".format(to_drop))
train.drop(to_drop,axis=1,inplace=True)
test.drop(to_drop,axis=1,inplace=True)
y = train['meter_reading']
train.drop('meter_reading',axis=1,inplace=True)
categorical_cols = ['building_id','Month','meter','Hour','primary_use','DayOfWeek','DayOfMonth','floor_count_isNa','Holiday']
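# Assumed missing step: LightGBM needs numeric codes for categoricals, and
# `meter`/`primary_use` are strings at this point; the raw timestamp column
# also has to go before training.
for col in ['meter', 'primary_use']:
    le = LabelEncoder()
    train[col] = le.fit_transform(train[col].astype(str))
    test[col] = le.transform(test[col].astype(str))
train.drop('timestamp', axis=1, inplace=True)
test.drop('timestamp', axis=1, inplace=True)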
get_ipython().run_cell_magic('time', '', 'x_train,x_test,y_train,y_test = train_test_split(train,y,test_size=0.25,random_state=42)\nprint (x_train.shape)\nprint (y_train.shape)\nprint (x_test.shape)\nprint (y_test.shape)\n\nlgb_train = lgb.Dataset(x_train, y_train,categorical_feature=categorical_cols)\nlgb_test = lgb.Dataset(x_test, y_test,categorical_feature=categorical_cols)\ndel x_train, x_test , y_train, y_test\n\nparams = {\'feature_fraction\': 0.75,\n \'bagging_fraction\': 0.75,\n \'objective\': \'regression\',\n \'max_depth\': -1,\n \'learning_rate\': 0.15,\n "boosting_type": "gbdt",\n "bagging_seed": 11,\n "metric": \'rmse\',\n "verbosity": -1,\n \'reg_alpha\': 0.5,\n \'reg_lambda\': 0.5,\n \'random_state\': 47,\n "num_leaves": 41}\n\nreg = lgb.train(params, lgb_train, num_boost_round=3000, valid_sets=[lgb_train, lgb_test], early_stopping_rounds=100, verbose_eval = 100)')
del lgb_train,lgb_test
# In[100]:
ser = pd.DataFrame(reg.feature_importance(),train.columns,columns=['Importance']).sort_values(by='Importance')
ser['Importance'].plot(kind='bar',figsize=(10,6))
get_ipython().run_cell_magic('time', '', 'prediction = []\nstep = 100000\nfor i in range(0, len(test), step):\n prediction.extend(np.expm1(reg.predict(test.iloc[i: min(i+step, len(test)), :], num_iteration=reg.best_iteration)))')
get_ipython().run_cell_magic('time', '', 'Submission[\'meter_reading\'] = prediction\nSubmission[\'meter_reading\'].clip(lower=0,upper=None,inplace=True)\nSubmission.to_csv("output.csv",index=None)')
|
[
"noreply@github.com"
] |
vidhyasagar13.noreply@github.com
|
0cb69f4f57b47376edf32dc2eac99e8fa48ae1ee
|
d605841d2367f9ca71e589a716f875d7fbb65792
|
/get_hot.py
|
568fbb97bbd9850e01af24fc3b54fec8ecb7202c
|
[] |
no_license
|
insmoin/AnalyseBilibili
|
9edb37dba4b14842ee9e47937bdd3b3ba07f99ad
|
ab2f21ea7260b21f99961135994c75cf79965375
|
refs/heads/master
| 2020-04-29T12:25:47.141945
| 2018-06-08T03:37:15
| 2018-06-08T03:37:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,875
|
py
|
# -*-coding:utf8-*-
import requests
import sqlite3
import json
import datetime
import time
import random
from login import Login
#import sys
#from importlib import reload
#reload(sys)
#sys.setdefaultencoding('utf-8')
header = {
'Referer': 'https://t.bilibili.com/?tab=1000',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
# 'Accept': 'application/json, text/plain, */*',
# 'Accept-Encoding': 'gzip, deflate, br',
# 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,ja;q=0.7,zh-TW;q=0.6,ca;q=0.5',
# 'Connection': 'keep-alive',
# #'Cookie': 'l=v; finger=edc6ecda; LIVE_BUVID=AUTO6415282736010851; fts=1528273622; sid=8wlapccd; DedeUserID=17486373; DedeUserID__ckMd5=ec15607bb9522c5c; SESSDATA=2aea00a0%2C1530865609%2C97f99c36; bili_jct=b870ce10ebf12d5717bb8e57ab40c40d; _dfcaptcha=2005c4acf2085ba5c7141ae1a8fe956f; bp_t_offset_17486373=125729012904691804; UM_distinctid=163d46b7658500-04380da18b4fc3-3c3c520d-1fa400-163d46b7659461; buvid3=38B95E3A-7426-405F-ADB3-326985B7BFFD20808infoc; rpdid=owiwopkxpidosiqiqwmxw; CURRENT_QUALITY=80; im_local_unread_17486373=0; im_notify_type_17486373=0',
# 'Host': 'api.vc.bilibili.com',
# 'Origin': 'https://t.bilibili.com',
# 'Referer': 'https://t.bilibili.com/?tab=1000',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
}
format_data = {
'uid': '17486373',
'page': '1'
}
before_data_0 = {
'0000171528376188234https://t.bilibili.com/?tab=1000|444.41.selfDef.page_hotshow||1528376188233|0|0|573x974|1|{"event":"page_hotshow","value":{"tab_type":"hot"}}' : ''
}
before_data_1 = {
'0005021528376188234https://t.bilibili.com/?tab=1000|444.41.selfDef.page_hotshow||1528376188233|0|0|573x974|1|{"event":"page_hotshow","value":{"tab_type":"hot"}}' : ''
}
before_get_hot_url_0 = '''https://data.bilibili.com/log/web?000017{TimeStamp}https%3A%2F%2Ft.bilibili.com%2F%3Ftab%3D1000|
444.41.selfDef.page_hotshow||{TimeStamp}|0|0|573x974|1|{%22event%22:%22page_hotshow%22,%22value%22:{%22tab_type%22:%22hot%22}}'''
before_get_hot_url_1 = '''https://data.bilibili.com/log/web?000502{TimeStamp}https%3A%2F%2Ft.bilibili.com%2F%3Ftab%3D1000|
444.41.selfDef.page_hotshow||{TimeStamp}|0|0|573x974|1|{%22event%22:%22page_hotshow%22,%22value%22:{%22tab_type%22:%22hot%22}}'''
get_hot_url_base = 'https://api.vc.bilibili.com/dynamic_svr/v1/dynamic_svr/recommend'
def request_before():
curr_time = time.time()*1000
print('curr_time: {}'.format(curr_time))
url0 = 'https://data.bilibili.com/log/web'#before_get_hot_url_0.replace('{TimeStamp}', str(int(curr_time)))
url1 = 'https://data.bilibili.com/log/web'#before_get_hot_url_1.replace('{TimeStamp}', str(int(curr_time)))
print(url0)
print(url1)
response = requests.get(url0, params=before_data_0)
print(response.url)
response = requests.get(url1, params=before_data_1)
def get_hot_page_num():
response = requests.get(get_hot_url_base)
#print(response.headers)
json_hot = json.loads(response.text)
hot_data = json_hot['data']
total_page = hot_data['total_page']
print("total_page: ", total_page)
return int(total_page)
def get_hot_datas_of_page(page_idx):
print('===============================Page {}================================'.format(page_idx))
format_data['page'] = str(page_idx)
response = requests.session().get(get_hot_url_base, params=format_data)# + str(page_idx))
#print(response.headers)
json_hot = json.loads(response.text)
#print(response.text)
hot_data = json_hot['data']
total_page = hot_data['total_page']
cards = hot_data['cards']
print("cards_num: ", len(cards))
hot_datas = []
for card in cards:
hot_datas.append(format_card(card))
return hot_datas
def format_card(card_json):
card = card_json
like_cnt = card['desc']['like']
repost_cnt = card['desc']['repost']
card_info = json.loads(card['card'])
reply_cnt = 0
card_type = card['desc']['type']
    # card_type: 1: repost, 2: image+text, 4: text only, 8: uploaded video (regular), 16: short video, 64: article
if card_type & (1+2+4+16) != 0:
reply_cnt = card_info['item']['reply']
elif card_type & (8+64) != 0:
reply_cnt = card_info['stats']['reply']
elif card_type & 256 != 0:
reply_cnt = card_info['replyCnt']
local_time = time.localtime(card['desc']['timestamp'])
post_date = time.strftime("%Y-%m-%d %H:%M:%S",local_time)
user_profile = card['desc']['user_profile']
uid = user_profile['info']['uid']
uname = user_profile['info']['uname']
print('uid: {}, uname: {}, card_type: {}, like_cnt:{}, repost_cnt: {}, reply_cnt: {}, post_date: {}'
.format(uid, uname, card_type, like_cnt, repost_cnt, reply_cnt, post_date))
hot_data = {}
hot_data['uid'] = uid
hot_data['uname'] = uname
hot_data['card_type'] = card_type
hot_data['like_cnt'] = like_cnt
hot_data['repost_cnt'] = repost_cnt
hot_data['reply_cnt'] = reply_cnt
hot_data['post_date'] = post_date
return hot_data
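# Example of the dict returned by format_card (hypothetical values):
# {'uid': 123456, 'uname': 'someone', 'card_type': 8, 'like_cnt': 10,
#  'repost_cnt': 2, 'reply_cnt': 5, 'post_date': '2018-06-07 12:00:00'}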
def create_table():
conn = sqlite3.connect('bilibili.db')
cursor = conn.cursor()
cursor.execute('''create table if not exists hot_data (id integer primary key autoincrement,
uid varchar(20),
uname varchar(50),
card_type integer,
like_cnt integer,
repost_cnt integer,
reply_cnt integer,
post_date datetime,
spide_date datetime); ''')
conn.commit()
conn.close()
def save_hot_datas(hot_datas):
conn = sqlite3.connect('bilibili.db')
cursor = conn.cursor()
local_date = time.localtime(time.time())
spide_date = time.strftime("%Y-%m-%d %H:%M:%S",local_date)
for data in hot_datas:
#cursor.execute('insert into hot_data values(NULL, \'4399\', \'zqj\', 1, 520, 521, 522, \'2018-06-06 12:21:30\', {})'.format(spide_date))
cursor.execute('insert into hot_data values(NULL, ?, ?, ?, ?, ?, ?, ?, ?)',
(data['uid'], data['uname'], int(data['card_type']), int(data['like_cnt']),
int(data['repost_cnt']), int(data['reply_cnt']), data['post_date'], str(spide_date)))
conn.commit()
conn.close()
if __name__ == "__main__":
create_table()
page_num = get_hot_page_num()
hot_datas = []
#request_before()
for i in range(1, page_num+1):
page_hot_data = get_hot_datas_of_page(i)
for data in page_hot_data:
hot_datas.append(data)
time.sleep(2)
save_hot_datas(hot_datas)
|
[
"zhangqingjie@doodlemobile.com"
] |
zhangqingjie@doodlemobile.com
|
91d6b1e6e00d76a4584803195b4c0029d3c42f07
|
ea71ac18939e99c1295506d3e3808bb76daabc84
|
/.venv35/Lib/site-packages/PyInstaller/hooks/hook-langcodes.py
|
88c6322f94ec65e283dcd505443905735985d22e
|
[
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
ndaley7/BodySlide-Group-Generator
|
543ef48b4dfe58953b0694b2941c9131d5f4caef
|
3ed7b78c5f5ccec103b6bf06bc24398cfb6ad014
|
refs/heads/master
| 2020-08-26T17:44:38.768274
| 2019-12-10T19:01:23
| 2019-12-10T19:01:23
| 217,089,038
| 2
| 0
|
BSD-3-Clause
| 2019-12-10T19:02:29
| 2019-10-23T15:18:10
|
Python
|
UTF-8
|
Python
| false
| false
| 505
|
py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2017-2019, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from PyInstaller.utils.hooks import collect_data_files
datas = collect_data_files('langcodes')
|
[
"ndaley7@gatech.edu"
] |
ndaley7@gatech.edu
|
7b36e7c6f2e581fce8dc9f4e4854060e561a0cab
|
5f2b37277c9b04f85adda6da6c5a6431fb9bb4ea
|
/求级数和.py
|
bfe9cd4151bf77d02400506c973893f96ab6a027
|
[] |
no_license
|
USA-Citizen/Basic-Computer-of-Liberal-Art
|
b28f58a2b11ef8d13ba6b9443d439acc1f62b8ee
|
a192599ed08d1956bf7033f3770dceeb4110e1c3
|
refs/heads/main
| 2023-07-08T19:15:50.200311
| 2021-08-13T02:45:17
| 2021-08-13T02:45:17
| 395,498,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
def func(x,y):
s = 1
for i in range(1,y+1):
s *= x
return s
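# Sanity check (assumption: func(x, y) is just x ** y computed by a loop)
assert func(3, 2) == 3 ** 2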
def main():
while 1:
        number = int(input("Please input an even number from 2 to 8 ('0' to exit):"))
if number == 0 :
break
elif 2 <= number <= 8 and number % 2 == 0:
n = func(number+1, number) - func(number,number-1)
print(f"The result is {n}")
else:
print("Out of the range")
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
USA-Citizen.noreply@github.com
|
ae1b5c6b28cdd358c43d2406cd1a9072965b64cd
|
265e92f0e0782a07f0cb3a601097182f51d49c19
|
/sina/apifun.py
|
843131671ced6e5c3a0e51aed2b03e66cc989b88
|
[] |
no_license
|
yishenggudou/opensnsrpcserver
|
13ed7aab88fdaf4cfc358bb5580eef5d8118381f
|
516afa0cb73a112b3d76740899dfd8df429217d4
|
refs/heads/master
| 2021-01-17T11:33:46.083148
| 2011-11-21T13:17:48
| 2011-11-21T13:17:48
| 2,340,076
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,278
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Copyright 2011 timger
+Author timger
+Gtalk&Email yishenggudou@gmail.com
+Msn yishenggudou@msn.cn
+Weibo http://t.sina.com/zhanghaibo
Licensed under the MIT License, Version 2.0 (the "License");
'''
import os,sys
import conf as _conf
from weibopy import OAuthHandler
from weibopy import oauth
from sqlalchemy import and_,or_
from lib import *
DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
NAME_FILE = os.path.relpath(__file__)
__author__ = 'timger & yishenggudou@gmail.com'
__license__ = 'MIT'
__version__ = (0,0,0)
def get_authorization_url(user_name,server_user_name,server_user_pix='timger',server_name='sina',callback=None,consumer_key=_conf.consumer_key,consumer_secret=_conf.consumer_secret,):
import sys
sys.path.append(os.path.join(DIR_PATH,'../'))
from db import *
o = OAuthHandler(consumer_key, consumer_secret,callback=callback)
authorization_url = o.get_authorization_url()
request_token = o.request_token
print request_token
sdb = TWDBS(user_name=user_name,server_user_name=server_user_name,server_name=server_name,server_user_pix=server_user_pix,consumer_key=consumer_key,consumer_secret=consumer_secret,request_token=str(request_token))
print sdb
session.add(sdb)
session.commit()
session.close()
res = {}
res['request_token'] = request_token
res['authorization_url'] = authorization_url
print authorization_url
return res
def verify(user_name,server_user_name,verify,server_user_pix='timger',server_name='sina',callback='/login',consumer_key=_conf.consumer_key,consumer_secret=_conf.consumer_secret,):
import sys
sys.path.append(os.path.join(DIR_PATH,'../'))
from db import *
res = {}
#sdb = session.query(TWDBS).filter_by(TWDBS.consumer_key==consumer_key,TWDBS.consumer_secret==consumer_secret,TWDBS.user_name==user_name,TWDBS.server_user_name==server_user_name,TWDBS.server_name==server).all()[-1]
sdb = session.query(TWDBS).filter(and_(TWDBS.consumer_key==consumer_key,TWDBS.consumer_secret==consumer_secret,TWDBS.user_name==user_name,TWDBS.server_user_name==server_user_name,TWDBS.server_name==server_name)).all()
print sdb
sdb=sdb[-1]
o = OAuthHandler(consumer_key=consumer_key, consumer_secret=consumer_secret,callback=callback)
request_token_sercet,request_token_key = [ i.split('=')[1] for i in sdb.request_token.split('&')]
#o.request_token = oauth.OAuthToken(request_token_key,request_token_sercet)
print sdb.request_token
print request_token_key,'\n',request_token_sercet
o.set_request_token(request_token_key,request_token_sercet)
#try:
access_token = o.get_access_token(verify)
sdb.access_token_key = access_token.key
sdb.access_token_secret = access_token.secret
sdb.access_token = access_token.to_string()
# res['status'] = True
#except:
# res['status'] = False
session.flush()
session.commit()
session.close()
return res
def get_api(consumer_key,consumer_secret,user_name,server_user_name,server='sina'):
    # the access-token arguments of the original signature were unused (and the
    # body referenced a differently spelled parameter name); the token is read
    # back from the DB record instead, so the signature is reduced to what is needed
    import sys, os.path
    sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
    sys.path.append(os.path.join(DIR_PATH,'../'))
    from weibopy import OAuthHandler, API, WeibopError
    from db import *
    sdb = session.query(TWDBS).filter(TWDBS.consumer_key==consumer_key,TWDBS.consumer_secret==consumer_secret,TWDBS.user_name==user_name,TWDBS.server_user_name==server_user_name,TWDBS.server_name==server).all()[-1]
    try:
        o = OAuthHandler(consumer_key, consumer_secret)
        o.setToken(sdb.access_token_key,sdb.access_token_secret)
    except KeyError, e:
        sys.stderr.write("you should run get_oauthed.py first.\n")
    return API(o)
def post(user_name,server_user_name,status,server_user_pix='timger',server='sina',callback='/login',consumer_key=_conf.consumer_key,consumer_secret=_conf.consumer_secret,):
    res = {}
    try:
        # pass only the arguments get_api actually uses
        o = get_api(consumer_key,consumer_secret,user_name,server_user_name,server=server)
        o.update_status(status)
        res['status'] = True
    except:
        res['status'] = False
    return res
def get(user_name,server_user_name,status,server_user_pix='timger',server='sina',callback='/login',consumer_key=_conf.consumer_key,consumer_secret=_conf.consumer_secret,):
    res = {}
    try:
        o = get_api(consumer_key,consumer_secret,user_name,server_user_name,server=server)
        res['msgs'] = o.user_timeline(count=10, page=1)
        res['status'] = True
    except:
        res['status'] = False
    return res
if __name__ == "__main__":
print sys.argv
if sys.argv[1] == 'url':
get_authorization_url('timger','timger',server_user_pix='timger',server_name='sina',callback=None,consumer_key=_conf.consumer_key,consumer_secret=_conf.consumer_secret,)
if sys.argv[1] == 'verify':
verify('timger','timger',sys.argv[2],server_user_pix='timger',server_name='sina',callback='/login',consumer_key=_conf.consumer_key,consumer_secret=_conf.consumer_secret)
|
[
"yishenggudou@gmail.com"
] |
yishenggudou@gmail.com
|
a6889b370914909f3300d5d84eeaf6f84dd8ed5a
|
8578c3722b1922b937c6015cc97ac854ea0c9d6a
|
/etherum_scrapper/pipelines.py
|
6de4b807f4f65df435d13ab9612695496313a0ab
|
[] |
no_license
|
dzordanq/etherum_scrapper
|
76db7b10f79c2ae9a25d091ed68fce6904677edc
|
911f0a32efcc5e19eceea356b6130a6b9a3ff85d
|
refs/heads/master
| 2023-06-08T17:21:17.219826
| 2021-06-26T15:39:53
| 2021-06-26T15:39:53
| 380,537,358
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 964
|
py
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from etherum_scrapper.settings import FEED_EXPORT_FIELDS
from scrapy.exporters import CsvItemExporter
import csv
class EtherumScrapperPipeline:
def __init__(self) -> None:
self.file_handle = open('data.csv', 'wb')
self.__set_exporter()
def process_item(self, item, spider):
self.exporter.export_item(item)
return item
def close_spider(self, spider):
self.exporter.finish_exporting()
self.file_handle.close()
def __set_exporter(self):
self.exporter = CsvItemExporter(self.file_handle, include_headers_line=True, delimiter=",", quotechar=r"'", quoting=csv.QUOTE_ALL, fields_to_export=FEED_EXPORT_FIELDS)
self.exporter.start_exporting()
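# Example registration (hypothetical settings.py entry), per the header note above:
# ITEM_PIPELINES = {'etherum_scrapper.pipelines.EtherumScrapperPipeline': 300}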
|
[
"random05510@gmail.com"
] |
random05510@gmail.com
|
cfdca820a4ae3e719ec9de6ca53c96280c2eb439
|
ddc4a9248e5f3a2443da5dac3341ef6af3360eaa
|
/learning_oop/utils/utils.py
|
374379705b947cfafe255a14df6bddb964c11af0
|
[] |
no_license
|
StavisT/Playground
|
9bb04ee4819751d5487e4d51ee336172705fc4de
|
17074545e1bf88710f178924a0a17501670a6ac0
|
refs/heads/master
| 2022-12-09T14:06:10.221813
| 2020-01-11T20:06:19
| 2020-01-11T20:06:19
| 123,172,846
| 0
| 0
| null | 2022-12-08T03:30:45
| 2018-02-27T18:46:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,020
|
py
|
# descriptor for a type-checked attribute
class Typed:
def __init__(self, name, expected_type):
self.name = name
self.expected_type = expected_type
        if isinstance(self.expected_type, str):
            # resolve a type given by name; eval is acceptable only for trusted input
            self.expected_type = eval(self.expected_type)
def __get__(self, instance, cls):
if instance is None:
return self
else:
return instance.__dict__[self.name]
def __set__(self, instance, value):
if not isinstance(value, self.expected_type):
raise TypeError("Expected " + str(self.expected_type))
instance.__dict__[self.name] = value
def __delete__(self, instance):
del instance.__dict__[self.name]
# Class decorator that applies to selected attributes
def typeassert(**kwargs):
def decorate(cls):
for name, expected_type in kwargs.items():
# Attach a typed descriptor to the class
setattr(cls, name, Typed(name, expected_type))
return cls
return decorate
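# Hypothetical demo of the decorator (not part of the original module)
if __name__ == '__main__':
    @typeassert(name=str, shares=int, price=float)
    class Stock:
        def __init__(self, name, shares, price):
            self.name = name
            self.shares = shares
            self.price = price
    s = Stock('ACME', 50, 91.1)
    try:
        s.shares = 'a lot'  # rejected by Typed.__set__
    except TypeError as e:
        print(e)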
|
[
"simon.stavland@cognite.com"
] |
simon.stavland@cognite.com
|
ed3612ff104034bb6c86ff3d667a98b4eb5b479b
|
e8e06f57c8089509092930884a6c44e2641e317a
|
/AiXianFeng/mine/views.py
|
b2c637de147392abe8a8b4bc4c72dd2ffc01815f
|
[] |
no_license
|
Huang0321/AiXianFeng
|
61a5e86bafdcbbd1bca81af00ba4b49f0d636eca
|
4e5792dab9323db643875742890d97f6bcf69268
|
refs/heads/master
| 2020-03-15T23:12:29.572597
| 2018-05-08T12:51:22
| 2018-05-08T12:51:22
| 132,388,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,172
|
py
|
from django.shortcuts import render
from AXF.models import Order
def mine_home(request):
    if request.method == 'GET':
        users1 = request.user.order_set.filter(o_status=0)  # orders placed but not yet paid
        users2 = request.user.order_set.filter(o_status=1)  # orders paid but not yet delivered
        num1 = 0
        num2 = 0
        for user in users1:
            num1 += user.o_num  # total items across unpaid orders
        for user in users2:
            num2 += user.o_num  # total items across undelivered orders
        return render(request, 'mine/mine.html', {'num1': num1, 'num2': num2})
def not_pay(request):
    # look up the user's unpaid orders
    orders_not_pay = request.user.order_set.filter(o_status=0)
    goods_not_pay = []  # goods collected from unpaid orders
    if orders_not_pay:
        # follow the first unpaid order to its order-goods rows
        # (.all() is needed to iterate a Django related manager)
        for good in orders_not_pay[0].ordergoods_set.all():
            goods_not_pay.append(good.goods)
    return render(request, 'order/order_list_wait_pay.html', {'goods_not_pay': goods_not_pay})
def not_recieve(request):
    # same as above, for paid-but-undelivered orders
    orders_not_recieve = request.user.order_set.filter(o_status=1)
    goods_not_recieve = []
    if orders_not_recieve:
        for good in orders_not_recieve[0].ordergoods_set.all():
            goods_not_recieve.append(good.goods)
    return render(request, 'order/order_list_payed.html', {'goods_not_recieve': goods_not_recieve})
|
[
"312919870@qq.com"
] |
312919870@qq.com
|
03c2c047b95a649505cf80383ff3cdd4fd11f539
|
14a6067d7f6e42d0c9ed51e7c687f44792c8739d
|
/mab_cms/manage.py
|
a96d528ed8eb71c176f903b9cf3a1bd2ff66fa82
|
[] |
no_license
|
movimento-dos-atingidos-por-barragens/django-cms
|
bbd5bd3c645a69d8cbf4c0f344a769fbdd79dd0f
|
0b9de9c28cef2275ba6720f155357d139f4d809f
|
refs/heads/master
| 2020-12-02T21:00:35.397278
| 2017-07-21T23:48:37
| 2017-07-21T23:48:37
| 96,243,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mab_cms.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"akanim@gmail.com"
] |
akanim@gmail.com
|
e67ed739f248765f645381528f2f3be9767e3c06
|
2c599e08afcd7600fbc1de8c0a009230ee613aeb
|
/vgg/vgg_train_seahawks.py
|
af4fea59c97b6fab8cb95bd0e6a3b838fc031f4d
|
[] |
no_license
|
Hammania689/semantically_driven_multitask-leaning
|
1278977225c07a30472a4b489ef240985e583d4f
|
6e6a5c2fc9a4060bf1580ad945d4955a643b8b3d
|
refs/heads/master
| 2020-09-07T07:52:12.948424
| 2019-11-23T03:11:38
| 2019-11-23T03:11:38
| 220,712,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,631
|
py
|
from torch import nn
from helper import *
from tensorboardX import SummaryWriter
import torch
import torchvision.models as models
# Model + Hyper Paramaeters
vgg = models.vgg16(pretrained=False)
# Change classifier accordingly
vgg.classifier = nn.Sequential(
nn.Linear(in_features=25088, out_features=4096, bias=True),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(in_features=4096, out_features=4096, bias=True),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(in_features=4096, out_features=365, bias=True)
)
# Load the converted Places365 weights into the untrained model
# (the original assigned the weights but never applied them)
places365_weights = torch.load('vgg_places365.pth')
vgg.load_state_dict(places365_weights)
# Freeze all layers in VGG
for param in vgg.parameters():
param.requires_grad = False
# Change classifier accordingly
vgg.classifier = nn.Sequential(
nn.Linear(in_features=25088, out_features=4096, bias=True),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(in_features=4096, out_features=1028, bias=True),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(in_features=1028, out_features=512, bias=True),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(in_features=512, out_features=30, bias=True)
)
#prev_run = torch.load('checkpoint1.pth')
#vgg.load_state_dict(prev_run['state_dict'])
lr = 0.00001
optim = torch.optim.SGD(vgg.parameters(),lr=lr)
# scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=20, gamma=.01)
epochs = 100
batch_size = 64
criterion = nn.CrossEntropyLoss()
vgg.name = 'VGG16'
# Define devices and multi gpu if available
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
vgg = nn.DataParallel(vgg)
vgg.cuda(device)
# Load places data for cross validation
shrec_sub_path = 'Data/Scene_SBR_IBR_Dataset/'
data_dir = Path(Path.home(), shrec_sub_path)
dataset, dataloader, dataset_sizes = load_data(data_dir, batch_size=batch_size)
writer = SummaryWriter()
phases = ['train', 'val']
since = time.time()
best_model_wts = None
best_acc = 0.0
needed_epochs = 0
training_epoch_time = 0
for epoch in range(epochs):
# scheduler.step()
for phase in phases:
if phase == 'train':
vgg.train() # Set model to training mode
vgg.to(device)
else:
vgg.eval() # Set model to evaluate mode
vgg.to(device)
running_cls_loss = 0.0
running_corrects = 0
epoch_since = time.time()
for idx, (inputs, labels) in enumerate(dataloader[phase]):
inputs = inputs.to(device)
labels = labels.to(device)
optim.zero_grad()
with torch.set_grad_enabled(phase == 'train'):
# Forward Pass
output = vgg(inputs)
# Class prediction and number of instance
_, pred = torch.max(output.data, 1)
loss = criterion(output, labels)
# Calculate loss and Backprop
if phase == 'train':
# NOTE: this needs to be done for the current implementation. Can only calculate gradient on single value
loss.sum().backward()
optim.step()
# (Classifer) Average Loss and Accuracy for current batch
running_cls_loss += loss.sum().item() * inputs.size(0)
running_corrects += torch.sum(pred == labels.data)
if idx % 100 == 0:
print(f'{phase} Batch Loss: {running_cls_loss}| Acc: {running_corrects / batch_size}')
# (Classifer) Average Loss and Accuracy for the current epoch
classifier_loss = running_cls_loss / dataset_sizes[phase]
classifier_acc = running_corrects.double() / dataset_sizes[phase]
        epoch_time = time.time() - epoch_since
writer.add_scalar("Classification Loss", classifier_loss, global_step=epoch)
writer.add_scalar("Classification Accuracy", classifier_acc, global_step=epoch)
# writer.add_graph(models.vgg16.cuda(), (inputs, labels), global_step=epoch)
if phase == 'train':
linebrk = '='
training_epoch_time = epoch_time
train_log = (f'\nEpoch[{epoch + 1}/{epochs}]'
+ f' | Classifer Loss: {classifier_loss:.3f} Acc: {classifier_acc:.3f}')
print(f"{linebrk * 125}", train_log)
else:
epoch_time += training_epoch_time
test_log = (f'\nEpoch[{epoch + 1}/{epochs}]'
+ f' | Classifer Loss: {classifier_loss:.3f} Acc: {classifier_acc:.3f}'
+ f'\nTime:{epoch_time // 60:.0f}m {epoch_time % 60:.0f}s')
print(test_log)
# deep copy the model
        if phase == 'val' and classifier_acc > best_acc:
            print(f"Validation accuracy increased ({best_acc:.6f} --> {classifier_acc:.6f}). Saving model ...")
            best_acc = classifier_acc
            needed_epochs = epoch + 1
            # unwrap DataParallel only if it was actually applied
            best_model_wts = copy.deepcopy((vgg.module if hasattr(vgg, 'module') else vgg).state_dict())
save_model(vgg, optim, criterion, needed_epochs)
time_elapsed = time.time() - since
print(f'Training complete in {(time_elapsed // 60):.0f}m {(time_elapsed % 60):.0f}s')
print(f'Best Classification Acc: {best_acc:4f}')
# load best model weights (unwrap DataParallel if used; skip if val never improved)
if best_model_wts is not None:
    (vgg.module if hasattr(vgg, 'module') else vgg).load_state_dict(best_model_wts)
save_model(vgg, optim, criterion, needed_epochs)
writer.close()
|
[
"hameedabdulrashid@gmail.com"
] |
hameedabdulrashid@gmail.com
|
1f91d60f38bd1be8149029705cc2206405ec3261
|
9567c7e689c44f6ce6b98225509c2f6b7d426f0f
|
/models/u_net.py
|
fcf129554150f11d4a62d504b025d670227d2a24
|
[
"MIT"
] |
permissive
|
Ding1119/Foveation-Segmentation
|
62e210fabfb32dc822621f14477ebc2bed57b1bd
|
557ad1c88343c05d7dfc765b5fa4cbe6fb237d74
|
refs/heads/master
| 2023-06-23T08:20:35.598571
| 2021-07-20T15:28:37
| 2021-07-20T15:28:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,169
|
py
|
import torch
import numpy as np
import torch.nn as nn
__all__ = ['u_net']
def double_conv(in_channels, out_channels, step=1):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, stride=1,
padding=1, groups=1, bias=False),
nn.ReLU6(inplace=True),
nn.InstanceNorm2d(out_channels, affine=True),
nn.Conv2d(out_channels, out_channels, 3, stride=step,
padding=1, groups=1, bias=False),
nn.ReLU6(inplace=True),
nn.BatchNorm2d(out_channels, affine=True)
)
class UNet(nn.Module):
def __init__(self, in_ch, width, class_no, res_mode):
super().__init__()
self.identity_add = res_mode
# change class number here:
if class_no > 2:
self.final_in = class_no
else:
self.final_in = 1
self.w1 = width
self.w2 = width*2
self.w3 = width*4
self.w4 = width*8
#
self.dconv_up1 = double_conv(self.w1 + self.w2, self.w1, step=1)
self.dconv_up2 = double_conv(self.w2 + self.w3, self.w2, step=1)
self.dconv_up3 = double_conv(self.w3 + self.w4, self.w3, step=1)
#
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
# self.conv_last = nn.Conv2d(width, self.final_in, 1)
#
if self.identity_add is False:
self.dconv_down1 = double_conv(in_ch, self.w1, step=1)
self.dconv_down2 = double_conv(self.w1, self.w2, step=2)
self.dconv_down3 = double_conv(self.w2, self.w3, step=2)
self.dconv_down4 = double_conv(self.w3, self.w4, step=2)
self.bridge = double_conv(self.w4, self.w4, step=1)
self.bridge_2 = double_conv(self.w4, self.w4, step=1)
self.bridge_3 = double_conv(self.w4, self.w4, step=1)
self.bridge_4 = double_conv(self.w4, self.w4, step=1)
else:
# identity mapping in the encoder:
# when the dimensionality is the same, element-wise addition
        # when the dimensionality is not the same, concatenation
self.dconv_down1 = double_conv(in_ch, self.w1, step=1)
self.dconv_down2 = double_conv(self.w1, self.w2, step=2)
self.dconv_down3 = double_conv(self.w2+self.w1, self.w3, step=2)
self.dconv_down4 = double_conv(self.w3+self.w2, self.w4, step=2)
self.bridge = double_conv(self.w4+self.w3, self.w4, step=1)
self.bridge_2 = double_conv(self.w4, self.w4, step=1)
self.bridge_3 = double_conv(self.w4, self.w4, step=1)
self.bridge_4 = double_conv(self.w4, self.w4, step=1)
#
self.max_pool = nn.MaxPool2d(2)
def forward(self, x, return_feature_maps=False):
if self.identity_add is False:
conv1 = self.dconv_down1(x)
conv2 = self.dconv_down2(conv1)
conv3 = self.dconv_down3(conv2)
conv4 = self.dconv_down4(conv3)
conv4 = self.bridge_4(self.bridge_3(self.bridge_2(self.bridge(conv4))))
else:
conv1 = self.dconv_down1(x)
conv2 = self.dconv_down2(conv1)
conv3 = self.dconv_down3(torch.cat([conv2, self.max_pool(conv1)], dim=1))
conv4 = self.dconv_down4(torch.cat([conv3, self.max_pool(conv2)], dim=1))
conv4 = self.bridge(torch.cat([conv4, self.max_pool(conv3)], dim=1))
conv4 = self.bridge_2(conv4) + conv4
conv4 = self.bridge_3(conv4) + conv4
conv4 = self.bridge_4(conv4) + conv4
x = self.upsample(conv4)
x = torch.cat([x, conv3], dim=1)
x = self.dconv_up3(x)
x = self.upsample(x)
x = torch.cat([x, conv2], dim=1)
x = self.dconv_up2(x)
x = self.upsample(x)
x = torch.cat([x, conv1], dim=1)
out = self.dconv_up1(x)
# out = self.conv_last(x)
return [out]
def u_net(pretrained=False, **kwargs):
model = UNet(in_ch=3, width=32, class_no=7, res_mode=True)
#if pretrained:
# model.load_state_dict(load_url(model_urls['hrnetv2']), strict=False)
return model
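if __name__ == '__main__':
    # Shape smoke test (an assumption-only demo; not in the original file).
    # With width=32 the network returns a 32-channel map at input resolution.
    net = u_net()
    out = net(torch.randn(1, 3, 64, 64))[0]
    print(out.shape)  # torch.Size([1, 32, 64, 64])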
|
[
"jinchen0227@gmail.com"
] |
jinchen0227@gmail.com
|
2408767ddb5d1ed874f058eb66ea7391382cc56a
|
e58518d60c094d835557bb3f79f19b017284e23a
|
/TwitterHealth1.0/backend/delete_docs_over_5_days_old.py
|
9081a5ef21ee9583f18931bed8ca70b7421ebd48
|
[] |
no_license
|
Rsquared2016/RocData
|
d2123801f63841f4510e1669927bddb6421e94fc
|
f5016e5fad5e4c51b2a2fc7909bfe0a742069bac
|
refs/heads/master
| 2020-05-18T18:23:07.953718
| 2014-04-04T16:10:06
| 2014-04-04T16:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
"""
deletes tweets that are more than 5 days old with respect to current time on the server
"""
import couchdb
import datetime
import json
import sys
def log(s):
print '%s: %s' % (datetime.datetime.today(), s)
def deleteDocs(db):
print 'Loading old tweets ...'
oldTweets = db.view('Tweet/by_day_older_five_days')
print 'Found %d tweets' % len(oldTweets)
numTweets = 0
for row in oldTweets:
doc = db[row.id]
print 'Deleting: %s' % doc['created_at']
db.delete(doc)
numTweets += 1
print '%d tweets deleted from %s' % (numTweets, db)
if __name__ == '__main__':
dbName = sys.argv[1]
couch = couchdb.Server('http://166.78.236.179:5984/')
couch.resource.credentials = ('admin', 'admin')
db = couch[dbName]
deleteDocs(db)
|
[
"sadilekadam@gmail.com"
] |
sadilekadam@gmail.com
|
ffe573a415593fc833edaf0348fe3b6662e2f2d6
|
c45e3462bad3503a526770653dd25d9bf3b9da07
|
/pylib/mps/packager/pkg_rules_interface.py
|
92b0527d77da3122566b1208212b83fb87920192
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
room77/py77
|
f88da6fb552127723ba6518d88848c475293a374
|
70280110ec342a6f6db1c102e96756fcc3c3c01b
|
refs/heads/master
| 2021-06-29T02:34:22.760479
| 2020-07-13T16:29:46
| 2020-07-13T16:29:46
| 18,349,519
| 0
| 2
|
MIT
| 2021-06-17T12:31:06
| 2014-04-02T00:17:08
|
Python
|
UTF-8
|
Python
| false
| false
| 516
|
py
|
#!/usr/bin/env python
"""Interface for packaging"""
__author__ = 'edelman@room77.com (Nicholas Edelman)'
__copyright__ = 'Copyright 2013 Room77, Inc.'
class PkgRulesInterface(object):
"""Base class for making packages"""
@classmethod
def make_package(cls, rule):
"""Generates a package
Args:
rule: rule to generate the package
Returns:
tuple(string, string) the package name followed by the package
version name (e.g. $pkgname_$timestamp)
"""
raise NotImplementedError
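# Hypothetical minimal subclass illustrating the contract (not part of py77):
class _ExamplePkgRules(PkgRulesInterface):
    @classmethod
    def make_package(cls, rule):
        name = rule['name']  # assumed rule shape
        return (name, '%s_%s' % (name, '0'))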
|
[
"oztekin@room77.com"
] |
oztekin@room77.com
|
c5b0864915efa3ca1c5570ca8bc9688e52af6da8
|
7cb0d630965a124a04db0e8dce13b44806c02d36
|
/menu.py
|
1e80409d8c8302e867f7158d0ff856fdd495ce6a
|
[] |
no_license
|
KISHU-123/scrape-books
|
c34dea927c4ab37643723e4385c15ba7a26f18c6
|
14823f10a828ef80d9238e7d9342effb11656d3d
|
refs/heads/master
| 2022-06-24T19:49:52.517884
| 2020-05-08T08:42:07
| 2020-05-08T08:42:07
| 262,271,269
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
from app import books
USER_CHOICE = '''ENTER ONE OF THE FOLLOWING
-'b' to look at 5-star books
-'c' to look at the cheapest books
-'n' to look at the next books in the catalogue
-'q' to quit
'''
def print_cheapest_book():
best_books = sorted(books, key=lambda x: x.price)[:10]
for book in best_books:
print(book)
def print_best_books():
best_ratings = sorted(books, key=lambda x: x.rating*-1)[:10]
for ratings in best_ratings:
print(ratings)
books_generator = (x for x in books)
def get_next_book():
    try:
        print(next(books_generator))
    except StopIteration:
        # the generator is exhausted once every book has been shown
        print('No more books in the catalogue')
def menu():
user_input = input(USER_CHOICE)
while user_input != 'q':
if user_input == 'b':
print_best_books()
elif user_input == 'n':
get_next_book()
elif user_input == 'c':
print_cheapest_book()
else:
print('INVALID')
user_input = input(USER_CHOICE)
menu()
|
[
"KISHU-123"
] |
KISHU-123
|
319779da84a802496431e4503815606afb22b943
|
1348c4e9219defbc85a4c3c6019e654084c32215
|
/test/test_manifest_request.py
|
d98f7cdfdd445eab795d155c530f56eb4601183d
|
[] |
no_license
|
joantune/rmail-shipping-REST-python-api
|
77cf95a92e6b5a018f0d22f4cf8d5234a91e4cb3
|
c7293adc8a52259dfb43ae277ba9652754fe4308
|
refs/heads/master
| 2022-12-15T21:56:12.296453
| 2020-09-15T08:44:06
| 2020-09-15T08:44:06
| 180,843,828
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,523
|
py
|
# coding: utf-8
"""
Royal Mail API Shipping V2 (REST)
This API specification details the requirements for integrating with Royal Mail API Shipping V2 (REST). It specifically covers how the Royal Mail API Shipping V2 (REST) can be used by business customers to conduct shipping activity with Royal Mail and provides the technical information to build this integration. This specification must be used with the relevant accompanying specifications for customers wishing to interface their systems with Royal Mail services. Royal Mail API Shipping V2 (REST) exposes a fully RESTful service that allows account customers to create shipments, produce labels, and produce documentation for all the tasks required to ship domestic items with Royal Mail. Built on industry standards, Royal Mail API Shipping V2 (REST) provides a simple and low cost method for customers to integrate with Royal Mail, and allows them to get shipping quickly. The API offers data streaming and offline barcoding to allow customers greater flexibility when generating their labels. There are no costs to customers for using the Royal Mail API Shipping V2 (REST) services, however customers’ own development costs must be covered by the customer developing the solution. Royal Mail will not accept any responsibility for these development, implementation and testing costs. Customers should address initial enquiries regarding development of systems for these purposes to their account handler. This API can be used in conjunction with Royal Mail Pro Shipping, a GUI based shipping platform. For more details on Royal Mail Pro Shipping, including videos on and briefs on updating cancelling a shipment and manifesting click here: www.royalmail.com/pro-shipping-help # noqa: E501
OpenAPI spec version: 1.0.21
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import rmail_api
from models.manifest_request import ManifestRequest # noqa: E501
from rmail_api.rest import ApiException
class TestManifestRequest(unittest.TestCase):
"""ManifestRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testManifestRequest(self):
"""Test ManifestRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = rmail_api.models.manifest_request.ManifestRequest() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"joao.antunes@tagus.ist.utl.pt"
] |
joao.antunes@tagus.ist.utl.pt
|
1e0a3a0a55ea8ac494c984743d3a92d9d2523ac3
|
c7fe4d40fb8507b9032c2b2ade77b450cf6e3a56
|
/server.py
|
c27fc8412c20c4d5b2ee61dfb0db1dba51b011fc
|
[] |
no_license
|
nLevin13/Shipbook
|
ce204ddf43277963919ad8f333bb66f9c1485560
|
dc2266de5be1dabbde3c0c13bcef1d26b56e5671
|
refs/heads/master
| 2020-04-04T13:31:11.202196
| 2018-11-04T16:45:40
| 2018-11-04T16:45:40
| 155,964,576
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,149
|
py
|
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import parse_qs
from mapstest import mapstest
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
coords = {'lat':0,'lon':0}
def do_GET(self):
self.send_response(200)
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
pts = SimpleHTTPRequestHandler.coords
res = str(mapstest(pts['lat'], pts['lon'])).encode('utf-8')
self.wfile.write(res)
    def do_POST(self):
        content_length = int(self.headers['Content-Length'])
        body = self.rfile.read(content_length)
        self.send_response(200)
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        # decode and parse the urlencoded body instead of slicing str(bytes)
        params = parse_qs(body.decode('utf-8'))
        lat = params['lat'][0]
        lon = params['lon'][0]
        SimpleHTTPRequestHandler.coords['lat'] = float(lat)
        SimpleHTTPRequestHandler.coords['lon'] = float(lon)
        self.wfile.write((lat + ',' + lon).encode('utf-8'))
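# Example interaction (hypothetical):
#   curl -X POST -d 'lat=40.7&lon=-74.0' http://localhost:8001/
#   curl http://localhost:8001/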
httpd = HTTPServer(('localhost', 8001), SimpleHTTPRequestHandler)
httpd.serve_forever()
|
[
"nyro9999@gmail.com"
] |
nyro9999@gmail.com
|
50f786c78b5b11bf44c44f553aeb1ee4feb34219
|
3e34bab78901f17526fb629084d8dffa1cdb6b33
|
/server/api/socket/client.py
|
7cde061d4afba8469f20da85111d597cfde0c512
|
[
"MIT"
] |
permissive
|
hmqgg/PlanarAlly
|
549e73b9ad80c0fd29bb2d73f2609159b832361e
|
792664549cc1656c1e4b50b300018517c9f5ae71
|
refs/heads/master
| 2021-12-02T21:37:51.386041
| 2021-07-21T20:11:08
| 2021-07-21T20:11:08
| 200,386,014
| 0
| 0
|
MIT
| 2019-12-08T11:04:49
| 2019-08-03T14:40:05
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 3,552
|
py
|
from typing import Any, Dict
from typing_extensions import TypedDict
import auth
from api.socket.constants import GAME_NS
from app import app, sio
from models import Floor, Layer, LocationUserOption, PlayerRoom
from models.db import db
from models.role import Role
from models.user import UserOptions
from state.game import game_state
# DATA CLASSES FOR TYPE CHECKING
class LocationOptions(TypedDict):
pan_x: int
pan_y: int
zoom_display: int
zoom_factor: int
client_w: int
client_h: int
class MoveClientData(TypedDict):
player: int
data: LocationOptions
class ClientOptions(TypedDict, total=False):
grid_colour: str
fow_colour: str
ruler_colour: str
invert_alt: bool
disable_scroll_to_zoom: bool
use_high_dpi: bool
grid_size: int
use_as_physical_board: bool
mini_size: int
ppi: int
initiative_camera_lock: bool
initiative_vision_lock: bool
initiative_effect_visibility: int
@sio.on("Client.Options.Default.Set", namespace=GAME_NS)
@auth.login_required(app, sio)
async def set_client_default_options(sid: str, data: ClientOptions):
pr: PlayerRoom = game_state.get(sid)
UserOptions.update(**data).where(
UserOptions.id == pr.player.default_options
).execute()
@sio.on("Client.Options.Room.Set", namespace=GAME_NS)
@auth.login_required(app, sio)
async def set_client_room_options(sid: str, data: ClientOptions):
pr: PlayerRoom = game_state.get(sid)
with db.atomic():
if pr.user_options is None:
pr.user_options = UserOptions.create_empty()
pr.save()
UserOptions.update(**data).where(UserOptions.id == pr.user_options).execute()
async def update_client_location(
player: int, room: int, sid: str, data: LocationOptions
):
pr = PlayerRoom.get(player=player, room=room)
LocationUserOption.update(
pan_x=data["pan_x"],
pan_y=data["pan_y"],
zoom_display=data["zoom_display"],
).where(
(LocationUserOption.location == pr.active_location)
& (LocationUserOption.user == pr.player)
).execute()
if pr.role != Role.DM:
for p_sid, p_player in game_state.get_t(skip_sid=sid):
if p_player.role == Role.DM or p_player.player.id == player:
await sio.emit(
"Client.Move",
{"player": pr.player.id, **data},
room=p_sid,
namespace=GAME_NS,
)
@sio.on("Client.Options.Location.Set", namespace=GAME_NS)
@auth.login_required(app, sio)
async def set_client_location_options(sid: str, data: LocationOptions):
pr: PlayerRoom = game_state.get(sid)
await update_client_location(pr.player.id, pr.room.id, sid, data)
@sio.on("Client.Move", namespace=GAME_NS)
@auth.login_required(app, sio)
async def move_client(sid: str, data: MoveClientData):
pr: PlayerRoom = game_state.get(sid)
await update_client_location(data["player"], pr.room.id, sid, data["data"])
@sio.on("Client.ActiveLayer.Set", namespace=GAME_NS)
@auth.login_required(app, sio)
async def set_layer(sid: str, data: Dict[str, Any]):
pr: PlayerRoom = game_state.get(sid)
try:
floor = pr.active_location.floors.select().where(Floor.name == data["floor"])[0]
layer = floor.layers.select().where(Layer.name == data["layer"])[0]
except IndexError:
pass
else:
luo = LocationUserOption.get(user=pr.player, location=pr.active_location)
luo.active_layer = layer
luo.save()
|
[
"info@darragh.dev"
] |
info@darragh.dev
|
6a59c0ce22b4667f799816e05242314884347754
|
4eff2ee3697fee36b49c57f5bc0d282088e0c70a
|
/faker_integration/faker_integration/asgi.py
|
974cf9225dd8ad80e0962f112456afde72abcd59
|
[] |
no_license
|
sraktate/faker-integration
|
8651232da68c266d7e76a176222d7f0fb6568028
|
28331de37a1d1efcf9685e621d6c3fc498eb1236
|
refs/heads/main
| 2023-07-31T20:15:28.271487
| 2021-09-20T03:52:08
| 2021-09-20T03:52:08
| 408,303,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
"""
ASGI config for faker_integration project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'faker_integration.settings')
application = get_asgi_application()
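As a usage note, the `application` object above can be served by any ASGI server; a minimal launcher sketch, assuming uvicorn is installed:
# run.py -- hypothetical launcher; requires `pip install uvicorn`
import uvicorn

if __name__ == "__main__":
    # Point uvicorn at the module-level ASGI callable defined above.
    uvicorn.run("faker_integration.asgi:application", host="127.0.0.1", port=8000)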
|
[
"shubhamraktate28@gmail.com"
] |
shubhamraktate28@gmail.com
|
c23fe7ff6cb365c7caeeb3333be47d5127841a7d
|
0d86e45ba26560f6acf95632950683e428b31e34
|
/Terminal_bot/main.py
|
b1d324438f48c851f3f95d9a77a4591dbd1df592
|
[] |
no_license
|
dpk-a7/C19_chatbot
|
e2a8e4021a338a9168ca89b384f11c230d5c1688
|
67c189b7d672a512af4af00e0fe06c2169eef0bb
|
refs/heads/main
| 2023-01-27T22:56:41.433824
| 2020-12-07T10:45:30
| 2020-12-07T10:45:30
| 319,284,877
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 67
|
py
|
from bot import startbot
if __name__ == '__main__':
startbot()
|
[
"deepakavudiappan@gmail.com"
] |
deepakavudiappan@gmail.com
|
3f80bf8e6976a50cf97be01dee7d8035744100de
|
317e0e30e474dfa8ebbca4981d9b07d7ebec5f40
|
/pms5003_driver.py
|
aea226a3458e0bdba295ff5f29fa4199fde947c3
|
[] |
no_license
|
zeshi1990/pms5003
|
85f13e6ea36c320d824058fcd71648f3b661eff5
|
74fb00d6f60f587306ab17a5d57b53ec8a119006
|
refs/heads/master
| 2020-03-17T09:07:45.529165
| 2018-05-15T05:11:15
| 2018-05-15T05:11:15
| 133,462,169
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,548
|
py
|
import serial
import datetime
import struct
import time
class PMS5003(serial.Serial):
def initialize(self):
self.set_mode('passive')
def read_passive(self):
cmd = self.format_cmd("read")
self.write(cmd)
print "start parsing results"
res_data = self.parse_data()
if res_data is None:
print "Read passive failed"
return None
else:
res = self.format_data(res_data)
return res
@staticmethod
def format_data(rcv):
fields = ["apm10", "apm25", 'apm100',
'pm10', 'pm25', 'pm100',
'gt03um', 'gt05um', 'gt10um',
'gt25um', 'gt50um', 'gt100um']
res = {
'timestamp': datetime.datetime.now(),
}
for i, f in enumerate(fields):
idx_1 = i * 2 + 4
idx_2 = i * 2 + 5
            res[f] = rcv[idx_1] * 256 + rcv[idx_2]  # indexing bytes yields ints on Python 3
return res
def parse_data(self):
rv = b''
ch1 = self.read()
if ch1 == b'\x42':
ch2 = self.read()
if ch2 == b'\x4d':
rv += ch1 + ch2
rv += self.read(28)
self.read(2)
return rv
return None
def set_mode(self, mode):
assert mode in ["passive", "active", "sleep", "wakeup"]
mode_cmd = self.format_cmd(mode)
cmd_return = self.get_cmd_correct_return(mode)
if mode != "wakeup":
sum_cmd_return = sum(cmd_return)
self.write(mode_cmd)
if mode == "wakeup":
return 0
ss = self.read(8)
        res = sum(ss)  # iterating bytes yields ints on Python 3
        print(mode, sum_cmd_return, res)
        if res == sum_cmd_return:
            print("The {} mode is set!".format(mode))
        else:
            print("The {} mode set failed!".format(mode))
return 0
@staticmethod
def get_cmd_correct_return(mode='passive'):
if mode == "wakeup":
return 0
e = None
no = None
chckSum = 0x42 + 0x4D + 0x00 + 0x04
if mode == "passive":
e = 0xE1
no = 0x0
elif mode == "active":
e = 0xE1
no = 0x01
elif mode == "sleep":
e = 0xE4
no = 0x0
chckSum += e + no
res = struct.pack("!BBBBBBH", 0x42, 0x4D, 0x00, 0x04, e, no, chckSum)
res = struct.unpack("!BBBBBBBB", res)
return res
def format_cmd(self, mode='passive'):
cmd = None
ON = None
chckSum = 0x42 + 0x4D
if mode == "passive":
cmd = 0xE1
ON = 0x0
elif mode == "active":
cmd = 0xE1
ON = 0x1
elif mode == "read":
cmd = 0xE2
ON = 0x0
elif mode == "sleep":
cmd = 0xE4
ON = 0x0
elif mode == "wakeup":
cmd = 0xE4
ON = 0x01
chckSum += cmd + 0x00 + ON
data = struct.pack('!BBBBBH', 0x42, 0x4D, cmd, 0x0, ON, chckSum)
cmd = struct.unpack("!BBBBBBB", data)
return cmd
def main():
sensor = PMS5003(port="/dev/tty.wchusbserial1420", baudrate=9600)
sensor.initialize()
time.sleep(2)
print "Start reading passive"
data = sensor.read_passive()
    print(data)
time.sleep(2)
sensor.set_mode("sleep")
time.sleep(5)
sensor.set_mode("wakeup")
time.sleep(5)
data = sensor.read_passive()
    print(data)
if __name__ == "__main__":
main()
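As a side note, parse_data above reads and then discards the trailing two checksum bytes. Per the PMS5003 datasheet, the check code is the 16-bit sum of every preceding frame byte, so a verification helper (hypothetical, not part of the driver) could look like:
def frame_checksum_ok(frame: bytes, checksum_bytes: bytes) -> bool:
    # frame: the 30 bytes returned by parse_data (0x42 0x4D header + 28 data bytes)
    # checksum_bytes: the 2 bytes parse_data currently throws away
    return sum(frame) == int.from_bytes(checksum_bytes, "big")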
|
[
"zeshi.z@gmail.com"
] |
zeshi.z@gmail.com
|
ffebc46c42e6f9080f406e89e48f0e5d7a17e9be
|
d5efb88e4f1e98dd877f7469c6c916d69d197a15
|
/myvenv/lib/python3.5/rlcompleter.py
|
90cd2ba2470b0c07037a874f4fa2d7d5ef35b0f6
|
[] |
no_license
|
Andreilys/djangoblog
|
6ace871d1d1edfaa8268458a9104664ef8721ef5
|
a5317a98bb60ce7171277b8cbc1d10df6e5a17e6
|
refs/heads/master
| 2021-01-21T13:30:09.956455
| 2018-08-15T04:38:57
| 2018-08-15T04:38:57
| 102,130,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 57
|
py
|
/Users/andreilyskov/anaconda/lib/python3.5/rlcompleter.py
|
[
"andreilyskov@gmail.com"
] |
andreilyskov@gmail.com
|
9233c30251d8a8a6cef4d471c67503966ea441c6
|
3004b651cd555592ed36f72920552d4caba57dff
|
/cart/cart.py
|
ff1a760d0c8f069d5f67543631480507fdfab5d0
|
[] |
no_license
|
Zakhar-Tkachenko/django-shop
|
6291c446785c02e7906692d60011b09d1c9883fb
|
09ee4431857836f2a88337df78fd87591b394fa9
|
refs/heads/master
| 2022-08-12T06:16:11.946018
| 2020-05-21T17:05:03
| 2020-05-21T17:05:03
| 262,155,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,710
|
py
|
from decimal import Decimal
from django.conf import settings
from shop.models import Product
class Cart(object):
def __init__(self, request):
self.session = request.session
cart = self.session.get(settings.CART_SESSION_ID)
if not cart:
cart = self.session[settings.CART_SESSION_ID] = {}
self.cart = cart
def add(self, product, quantity=1, update_quantity=False):
product_id = str(product.id)
if product_id not in self.cart:
self.cart[product_id] = {'quantity': 0, 'price': str(product.price)}
if update_quantity:
self.cart[product_id]['quantity'] = quantity
else:
self.cart[product_id]['quantity'] += quantity
self.save()
def save(self):
self.session.modified = True
def remove(self, product):
product_id = str(product.id)
if product_id in self.cart:
del self.cart[product_id]
self.save()
def __iter__(self):
product_ids = self.cart.keys()
products = Product.objects.filter(id__in=product_ids)
cart = self.cart.copy()
for product in products:
cart[str(product.id)]['product'] = product
for item in cart.values():
item['price'] = Decimal(item['price'])
item['total_price'] = item['price'] * item['quantity']
yield item
def __len__(self):
return sum(item['quantity'] for item in self.cart.values())
def get_total_price(self):
return sum(Decimal(item['price']) * item['quantity'] for item in self.cart.values())
def clear(self):
del self.session[settings.CART_SESSION_ID]
self.save()
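A minimal sketch of using this Cart from a Django view; the view function and URL name are hypothetical, while `Product` comes from the shop app referenced above.
# Hypothetical view using the Cart above.
from django.shortcuts import get_object_or_404, redirect
from shop.models import Product
from cart.cart import Cart

def cart_add(request, product_id):
    cart = Cart(request)                                 # bound to this session
    product = get_object_or_404(Product, id=product_id)
    cart.add(product, quantity=1)                        # repeat adds accumulate
    return redirect('cart_detail')                       # hypothetical URL name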
|
[
"z.tkachenko@MacBook-Pro-Tkacenko.local"
] |
z.tkachenko@MacBook-Pro-Tkacenko.local
|
6f936a69972bf5a561b2d8839fd0af036ded10b3
|
910917653b3f76f842de09a0f0c14649696dcb1a
|
/standart-module/use_logging.py
|
6a9c412cc866394352bb03886cf35759b633c21b
|
[] |
no_license
|
osmanseytablaev/Learn-Python
|
e3707950d85200d07e2c06206584cf32318bbbb8
|
6be726a181cf3f6104b32975605a3e70b95dc69e
|
refs/heads/master
| 2021-05-17T23:09:40.573940
| 2020-06-18T17:45:33
| 2020-06-18T17:45:33
| 250,994,929
| 2
| 0
| null | 2020-06-18T17:45:35
| 2020-03-29T09:23:27
|
Python
|
UTF-8
|
Python
| false
| false
| 658
|
py
|
import os, platform, logging
if platform.platform().startswith('Windows'):
    logging_file = os.path.join(os.getenv('HOMEDRIVE'),
                                os.getenv('HOMEPATH'),
                                'test.log')
else:
logging_file = os.path.join(os.getenv('HOME'), 'test.log')
print("Сохраняем лог в", logging_file)
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s : %(levelname)s : %(message)s',
    filename=logging_file,
    filemode='w',
)
logging.debug("Начало программы")
logging.info("Какие-то действия")
logging.warning("Программа умирает")
|
[
"osman.seytablaev@gmail.com"
] |
osman.seytablaev@gmail.com
|
1194695507201d2a283a4a0ce5059a1b9d613bc2
|
e322d01555aebbcf9f23a68fa9160e75d4397969
|
/YouCompleteMe/third_party/ycmd/third_party/JediHTTP/vendor/bottle/test/test_environ.py
|
2b8079b27d5c3e8fca4440ccf4e403e5b03a15cf
|
[
"Apache-2.0",
"MIT",
"GPL-1.0-or-later",
"GPL-3.0-only"
] |
permissive
|
liqiang0330/i3ForDebian9
|
3b2bb5ce104f25cadab7a57cdc7096fadeb4a9ef
|
37a63bdaf18dab847e57d328cdcb678668ab6207
|
refs/heads/master
| 2022-10-25T13:30:26.723690
| 2018-03-17T05:22:55
| 2018-03-17T05:22:55
| 162,018,419
| 1
| 1
|
Apache-2.0
| 2022-10-08T20:30:23
| 2018-12-16T16:11:57
|
Python
|
UTF-8
|
Python
| false
| false
| 34,702
|
py
|
# -*- coding: utf-8 -*-
''' Tests for the BaseRequest and BaseResponse objects and their subclasses. '''
import unittest
import sys
import bottle
from bottle import request, tob, touni, tonat, json_dumps, _e, HTTPError, parse_date
import tools
import wsgiref.util
import base64
from bottle import BaseRequest, BaseResponse, LocalRequest
class TestRequest(unittest.TestCase):
def test_app_property(self):
e = {}
r = BaseRequest(e)
self.assertRaises(RuntimeError, lambda: r.app)
e.update({'bottle.app': 5})
self.assertEqual(r.app, 5)
def test_route_property(self):
e = {'bottle.route': 5}
r = BaseRequest(e)
self.assertEqual(r.route, 5)
def test_url_for_property(self):
e = {}
r = BaseRequest(e)
self.assertRaises(RuntimeError, lambda: r.url_args)
e.update({'route.url_args': {'a': 5}})
self.assertEqual(r.url_args, {'a': 5})
def test_path(self):
""" PATH_INFO normalization. """
# Legal paths
tests = [('', '/'), ('x','/x'), ('x/', '/x/'), ('/x', '/x'), ('/x/', '/x/')]
for raw, norm in tests:
self.assertEqual(norm, BaseRequest({'PATH_INFO': raw}).path)
# Strange paths
tests = [('///', '/'), ('//x','/x')]
for raw, norm in tests:
self.assertEqual(norm, BaseRequest({'PATH_INFO': raw}).path)
# No path at all
self.assertEqual('/', BaseRequest({}).path)
def test_method(self):
self.assertEqual(BaseRequest({}).method, 'GET')
self.assertEqual(BaseRequest({'REQUEST_METHOD':'GET'}).method, 'GET')
self.assertEqual(BaseRequest({'REQUEST_METHOD':'GeT'}).method, 'GET')
self.assertEqual(BaseRequest({'REQUEST_METHOD':'get'}).method, 'GET')
self.assertEqual(BaseRequest({'REQUEST_METHOD':'POst'}).method, 'POST')
self.assertEqual(BaseRequest({'REQUEST_METHOD':'FanTASY'}).method, 'FANTASY')
def test_script_name(self):
""" SCRIPT_NAME normalization. """
# Legal paths
tests = [('', '/'), ('x','/x/'), ('x/', '/x/'), ('/x', '/x/'), ('/x/', '/x/')]
for raw, norm in tests:
self.assertEqual(norm, BaseRequest({'SCRIPT_NAME': raw}).script_name)
# Strange paths
tests = [('///', '/'), ('///x///','/x/')]
for raw, norm in tests:
self.assertEqual(norm, BaseRequest({'SCRIPT_NAME': raw}).script_name)
# No path at all
self.assertEqual('/', BaseRequest({}).script_name)
def test_pathshift(self):
""" Request.path_shift() """
def test_shift(s, p, c):
request = BaseRequest({'SCRIPT_NAME': s, 'PATH_INFO': p})
request.path_shift(c)
return [request['SCRIPT_NAME'], request.path]
self.assertEqual(['/a/b', '/c/d'], test_shift('/a/b', '/c/d', 0))
self.assertEqual(['/a/b', '/c/d/'], test_shift('/a/b', '/c/d/', 0))
self.assertEqual(['/a/b/c', '/d'], test_shift('/a/b', '/c/d', 1))
self.assertEqual(['/a', '/b/c/d'], test_shift('/a/b', '/c/d', -1))
self.assertEqual(['/a/b/c', '/d/'], test_shift('/a/b', '/c/d/', 1))
self.assertEqual(['/a', '/b/c/d/'], test_shift('/a/b', '/c/d/', -1))
self.assertEqual(['/a/b/c', '/d/'], test_shift('/a/b/', '/c/d/', 1))
self.assertEqual(['/a', '/b/c/d/'], test_shift('/a/b/', '/c/d/', -1))
self.assertEqual(['/a/b/c/d', '/'], test_shift('/', '/a/b/c/d', 4))
self.assertEqual(['/', '/a/b/c/d/'], test_shift('/a/b/c/d', '/', -4))
self.assertRaises(AssertionError, test_shift, '/a/b', '/c/d', 3)
self.assertRaises(AssertionError, test_shift, '/a/b', '/c/d', -3)
def test_url(self):
""" Environ: URL building """
request = BaseRequest({'HTTP_HOST':'example.com'})
self.assertEqual('http://example.com/', request.url)
request = BaseRequest({'SERVER_NAME':'example.com'})
self.assertEqual('http://example.com/', request.url)
request = BaseRequest({'SERVER_NAME':'example.com', 'SERVER_PORT':'81'})
self.assertEqual('http://example.com:81/', request.url)
request = BaseRequest({'wsgi.url_scheme':'https', 'SERVER_NAME':'example.com'})
self.assertEqual('https://example.com/', request.url)
request = BaseRequest({'HTTP_HOST':'example.com', 'PATH_INFO':'/path',
'QUERY_STRING':'1=b&c=d', 'SCRIPT_NAME':'/sp'})
self.assertEqual('http://example.com/sp/path?1=b&c=d', request.url)
request = BaseRequest({'HTTP_HOST':'example.com', 'PATH_INFO':'/pa th',
'SCRIPT_NAME':'/s p'})
self.assertEqual('http://example.com/s%20p/pa%20th', request.url)
def test_dict_access(self):
""" Environ: request objects are environment dicts """
e = {}
wsgiref.util.setup_testing_defaults(e)
request = BaseRequest(e)
self.assertEqual(list(request), list(e.keys()))
self.assertEqual(len(request), len(e))
for k, v in e.items():
self.assertTrue(k in request)
self.assertEqual(request[k], v)
request[k] = 'test'
self.assertEqual(request[k], 'test')
del request['PATH_INFO']
self.assertTrue('PATH_INFO' not in request)
def test_readonly_environ(self):
request = BaseRequest({'bottle.request.readonly':True})
def test(): request['x']='y'
self.assertRaises(KeyError, test)
def test_header_access(self):
""" Environ: Request objects decode headers """
e = {}
wsgiref.util.setup_testing_defaults(e)
e['HTTP_SOME_HEADER'] = 'some value'
request = BaseRequest(e)
request['HTTP_SOME_OTHER_HEADER'] = 'some other value'
self.assertTrue('Some-Header' in request.headers)
self.assertTrue(request.headers['Some-Header'] == 'some value')
self.assertTrue(request.headers['Some-Other-Header'] == 'some other value')
def test_header_access_special(self):
e = {}
wsgiref.util.setup_testing_defaults(e)
request = BaseRequest(e)
request['CONTENT_TYPE'] = 'test'
request['CONTENT_LENGTH'] = '123'
self.assertEqual(request.headers['Content-Type'], 'test')
self.assertEqual(request.headers['Content-Length'], '123')
def test_cookie_dict(self):
""" Environ: Cookie dict """
t = dict()
t['a=a'] = {'a': 'a'}
t['a=a; b=b'] = {'a': 'a', 'b':'b'}
t['a=a; a=b'] = {'a': 'b'}
for k, v in t.items():
request = BaseRequest({'HTTP_COOKIE': k})
for n in v:
self.assertEqual(v[n], request.cookies[n])
self.assertEqual(v[n], request.get_cookie(n))
def test_get(self):
""" Environ: GET data """
qs = tonat(tob('a=a&a=1&b=b&c=c&cn=%e7%93%b6'), 'latin1')
request = BaseRequest({'QUERY_STRING':qs})
self.assertTrue('a' in request.query)
self.assertTrue('b' in request.query)
self.assertEqual(['a','1'], request.query.getall('a'))
self.assertEqual(['b'], request.query.getall('b'))
self.assertEqual('1', request.query['a'])
self.assertEqual('b', request.query['b'])
self.assertEqual(tonat(tob('瓶'), 'latin1'), request.query['cn'])
self.assertEqual(touni('瓶'), request.query.cn)
def test_post(self):
""" Environ: POST data """
sq = tob('a=a&a=1&b=b&c=&d&cn=%e7%93%b6')
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(sq)
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = str(len(sq))
e['REQUEST_METHOD'] = "POST"
request = BaseRequest(e)
self.assertTrue('a' in request.POST)
self.assertTrue('b' in request.POST)
self.assertEqual(['a','1'], request.POST.getall('a'))
self.assertEqual(['b'], request.POST.getall('b'))
self.assertEqual('1', request.POST['a'])
self.assertEqual('b', request.POST['b'])
self.assertEqual('', request.POST['c'])
self.assertEqual('', request.POST['d'])
self.assertEqual(tonat(tob('瓶'), 'latin1'), request.POST['cn'])
self.assertEqual(touni('瓶'), request.POST.cn)
def test_bodypost(self):
sq = tob('foobar')
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(sq)
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = str(len(sq))
e['REQUEST_METHOD'] = "POST"
request = BaseRequest(e)
self.assertEqual('', request.POST['foobar'])
def test_body_noclose(self):
""" Test that the body file handler is not closed after request.POST """
sq = tob('a=a&a=1&b=b&c=&d')
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(sq)
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = str(len(sq))
e['REQUEST_METHOD'] = "POST"
request = BaseRequest(e)
self.assertEqual(sq, request.body.read())
request.POST # This caused a body.close() with Python 3.x
self.assertEqual(sq, request.body.read())
def test_params(self):
""" Environ: GET and POST are combined in request.param """
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob('b=b&c=p'))
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = '7'
e['QUERY_STRING'] = 'a=a&c=g'
e['REQUEST_METHOD'] = "POST"
request = BaseRequest(e)
self.assertEqual(['a','b','c'], sorted(request.params.keys()))
self.assertEqual('p', request.params['c'])
def test_getpostleak(self):
""" Environ: GET and POST should not leak into each other """
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob('b=b'))
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = '3'
e['QUERY_STRING'] = 'a=a'
e['REQUEST_METHOD'] = "POST"
request = BaseRequest(e)
self.assertEqual(['a'], list(request.GET.keys()))
self.assertEqual(['b'], list(request.POST.keys()))
def test_body(self):
""" Environ: Request.body should behave like a file object factory """
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob('abc'))
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = str(3)
request = BaseRequest(e)
self.assertEqual(tob('abc'), request.body.read())
self.assertEqual(tob('abc'), request.body.read(3))
self.assertEqual(tob('abc'), request.body.readline())
self.assertEqual(tob('abc'), request.body.readline(3))
def test_bigbody(self):
""" Environ: Request.body should handle big uploads using files """
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob('x')*1024*1000)
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = str(1024*1000)
request = BaseRequest(e)
self.assertTrue(hasattr(request.body, 'fileno'))
self.assertEqual(1024*1000, len(request.body.read()))
self.assertEqual(1024, len(request.body.read(1024)))
self.assertEqual(1024*1000, len(request.body.readline()))
self.assertEqual(1024, len(request.body.readline(1024)))
def test_tobigbody(self):
""" Environ: Request.body should truncate to Content-Length bytes """
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob('x')*1024)
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = '42'
request = BaseRequest(e)
self.assertEqual(42, len(request.body.read()))
self.assertEqual(42, len(request.body.read(1024)))
self.assertEqual(42, len(request.body.readline()))
self.assertEqual(42, len(request.body.readline(1024)))
def _test_chunked(self, body, expect):
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob(body))
e['wsgi.input'].seek(0)
e['HTTP_TRANSFER_ENCODING'] = 'chunked'
if isinstance(expect, str):
            self.assertEqual(tob(expect), BaseRequest(e).body.read())
else:
self.assertRaises(expect, lambda: BaseRequest(e).body)
def test_chunked(self):
self._test_chunked('1\r\nx\r\nff\r\n' + 'y'*255 + '\r\n0\r\n',
'x' + 'y'*255)
self._test_chunked('8\r\nxxxxxxxx\r\n0\r\n','xxxxxxxx')
self._test_chunked('0\r\n', '')
def test_chunked_meta_fields(self):
self._test_chunked('8 ; foo\r\nxxxxxxxx\r\n0\r\n','xxxxxxxx')
self._test_chunked('8;foo\r\nxxxxxxxx\r\n0\r\n','xxxxxxxx')
self._test_chunked('8;foo=bar\r\nxxxxxxxx\r\n0\r\n','xxxxxxxx')
def test_chunked_not_terminated(self):
self._test_chunked('1\r\nx\r\n', HTTPError)
def test_chunked_wrong_size(self):
self._test_chunked('2\r\nx\r\n', HTTPError)
def test_chunked_illegal_size(self):
self._test_chunked('x\r\nx\r\n', HTTPError)
def test_chunked_not_chunked_at_all(self):
self._test_chunked('abcdef', HTTPError)
def test_multipart(self):
""" Environ: POST (multipart files and multible values per key) """
fields = [('field1','value1'), ('field2','value2'), ('field2','value3')]
files = [('file1','filename1.txt','content1'), ('万难','万难foo.py', 'ä\nö\rü')]
e = tools.multipart_environ(fields=fields, files=files)
request = BaseRequest(e)
# File content
self.assertTrue('file1' in request.POST)
self.assertTrue('file1' in request.files)
self.assertTrue('file1' not in request.forms)
cmp = tob('content1') if sys.version_info >= (3,2,0) else 'content1'
self.assertEqual(cmp, request.POST['file1'].file.read())
# File name and meta data
self.assertTrue('万难' in request.POST)
self.assertTrue('万难' in request.files)
self.assertTrue('万难' not in request.forms)
self.assertEqual('foo.py', request.POST['万难'].filename)
self.assertTrue(request.files['万难'])
self.assertFalse(request.files.file77)
# UTF-8 files
x = request.POST['万难'].file.read()
if (3,2,0) > sys.version_info >= (3,0,0):
x = x.encode('utf8')
self.assertEqual(tob('ä\nö\rü'), x)
# No file
self.assertTrue('file3' not in request.POST)
self.assertTrue('file3' not in request.files)
self.assertTrue('file3' not in request.forms)
# Field (single)
self.assertEqual('value1', request.POST['field1'])
self.assertTrue('field1' not in request.files)
self.assertEqual('value1', request.forms['field1'])
# Field (multi)
self.assertEqual(2, len(request.POST.getall('field2')))
self.assertEqual(['value2', 'value3'], request.POST.getall('field2'))
self.assertEqual(['value2', 'value3'], request.forms.getall('field2'))
self.assertTrue('field2' not in request.files)
def test_json_empty(self):
""" Environ: Request.json property with empty body. """
self.assertEqual(BaseRequest({}).json, None)
def test_json_noheader(self):
""" Environ: Request.json property with missing content-type header. """
test = dict(a=5, b='test', c=[1,2,3])
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob(json_dumps(test)))
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = str(len(json_dumps(test)))
self.assertEqual(BaseRequest(e).json, None)
def test_json_tobig(self):
""" Environ: Request.json property with huge body. """
test = dict(a=5, tobig='x' * bottle.BaseRequest.MEMFILE_MAX)
e = {'CONTENT_TYPE': 'application/json'}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob(json_dumps(test)))
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = str(len(json_dumps(test)))
self.assertRaises(HTTPError, lambda: BaseRequest(e).json)
def test_json_valid(self):
""" Environ: Request.json property. """
test = dict(a=5, b='test', c=[1,2,3])
e = {'CONTENT_TYPE': 'application/json; charset=UTF-8'}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob(json_dumps(test)))
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = str(len(json_dumps(test)))
self.assertEqual(BaseRequest(e).json, test)
def test_json_forged_header_issue616(self):
test = dict(a=5, b='test', c=[1,2,3])
e = {'CONTENT_TYPE': 'text/plain;application/json'}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob(json_dumps(test)))
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = str(len(json_dumps(test)))
self.assertEqual(BaseRequest(e).json, None)
def test_json_header_empty_body(self):
"""Request Content-Type is application/json but body is empty"""
e = {'CONTENT_TYPE': 'application/json'}
wsgiref.util.setup_testing_defaults(e)
e['CONTENT_LENGTH'] = "0"
self.assertEqual(BaseRequest(e).json, None)
def test_isajax(self):
e = {}
wsgiref.util.setup_testing_defaults(e)
self.assertFalse(BaseRequest(e.copy()).is_ajax)
e['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
self.assertTrue(BaseRequest(e.copy()).is_ajax)
def test_auth(self):
user, pwd = 'marc', 'secret'
basic = touni(base64.b64encode(tob('%s:%s' % (user, pwd))))
r = BaseRequest({})
self.assertEqual(r.auth, None)
r.environ['HTTP_AUTHORIZATION'] = 'basic %s' % basic
self.assertEqual(r.auth, (user, pwd))
r.environ['REMOTE_USER'] = user
self.assertEqual(r.auth, (user, pwd))
del r.environ['HTTP_AUTHORIZATION']
self.assertEqual(r.auth, (user, None))
def test_remote_route(self):
ips = ['1.2.3.4', '2.3.4.5', '3.4.5.6']
r = BaseRequest({})
self.assertEqual(r.remote_route, [])
r.environ['HTTP_X_FORWARDED_FOR'] = ', '.join(ips)
self.assertEqual(r.remote_route, ips)
r.environ['REMOTE_ADDR'] = ips[1]
self.assertEqual(r.remote_route, ips)
del r.environ['HTTP_X_FORWARDED_FOR']
self.assertEqual(r.remote_route, [ips[1]])
def test_remote_addr(self):
ips = ['1.2.3.4', '2.3.4.5', '3.4.5.6']
r = BaseRequest({})
self.assertEqual(r.remote_addr, None)
r.environ['HTTP_X_FORWARDED_FOR'] = ', '.join(ips)
self.assertEqual(r.remote_addr, ips[0])
r.environ['REMOTE_ADDR'] = ips[1]
self.assertEqual(r.remote_addr, ips[0])
del r.environ['HTTP_X_FORWARDED_FOR']
self.assertEqual(r.remote_addr, ips[1])
def test_user_defined_attributes(self):
for cls in (BaseRequest, LocalRequest):
r = cls()
# New attributes go to the environ dict.
r.foo = 'somevalue'
self.assertEqual(r.foo, 'somevalue')
self.assertTrue('somevalue' in r.environ.values())
# Unknown attributes raise AttributeError.
self.assertRaises(AttributeError, getattr, r, 'somevalue')
class TestResponse(unittest.TestCase):
def test_constructor_body(self):
self.assertEqual('',
BaseResponse('').body)
self.assertEqual('YAY',
BaseResponse('YAY').body)
def test_constructor_status(self):
self.assertEqual(200,
BaseResponse('YAY', 200).status_code)
self.assertEqual('200 OK',
BaseResponse('YAY', 200).status_line)
self.assertEqual('200 YAY',
BaseResponse('YAY', '200 YAY').status_line)
self.assertEqual('200 YAY',
BaseResponse('YAY', '200 YAY').status_line)
    def test_constructor_headerlist(self):
        from functools import partial
        make_res = partial(BaseResponse, '', 200)
        self.assertEqual('yay', make_res([('x-test', 'yay')])['x-test'])
    def test_constructor_header_kwargs(self):
        from functools import partial
        make_res = partial(BaseResponse, '', 200)
        self.assertEqual('yay', make_res(x_test='yay')['x-test'])
def test_set_status(self):
rs = BaseResponse()
rs.status = 200
self.assertEqual(rs.status, rs.status_line)
self.assertEqual(rs.status_code, 200)
self.assertEqual(rs.status_line, '200 OK')
rs.status = 999
self.assertEqual(rs.status, rs.status_line)
self.assertEqual(rs.status_code, 999)
self.assertEqual(rs.status_line, '999 Unknown')
rs.status = 404
self.assertEqual(rs.status, rs.status_line)
self.assertEqual(rs.status_code, 404)
self.assertEqual(rs.status_line, '404 Not Found')
def test(): rs.status = -200
self.assertRaises(ValueError, test)
self.assertEqual(rs.status, rs.status_line) # last value
self.assertEqual(rs.status_code, 404) # last value
self.assertEqual(rs.status_line, '404 Not Found') # last value
def test(): rs.status = 5
self.assertRaises(ValueError, test)
self.assertEqual(rs.status, rs.status_line) # last value
self.assertEqual(rs.status_code, 404) # last value
self.assertEqual(rs.status_line, '404 Not Found') # last value
rs.status = '999 Who knows?' # Illegal, but acceptable three digit code
self.assertEqual(rs.status, rs.status_line)
self.assertEqual(rs.status_code, 999)
self.assertEqual(rs.status_line, '999 Who knows?')
rs.status = 555 # Strange code
self.assertEqual(rs.status, rs.status_line)
self.assertEqual(rs.status_code, 555)
self.assertEqual(rs.status_line, '555 Unknown')
rs.status = '404 Brain not Found' # Custom reason
self.assertEqual(rs.status, rs.status_line)
self.assertEqual(rs.status_code, 404)
self.assertEqual(rs.status_line, '404 Brain not Found')
def test(): rs.status = '5 Illegal Code'
self.assertRaises(ValueError, test)
self.assertEqual(rs.status, rs.status_line) # last value
self.assertEqual(rs.status_code, 404) # last value
self.assertEqual(rs.status_line, '404 Brain not Found') # last value
def test(): rs.status = '-99 Illegal Code'
self.assertRaises(ValueError, test)
self.assertEqual(rs.status, rs.status_line) # last value
self.assertEqual(rs.status_code, 404) # last value
self.assertEqual(rs.status_line, '404 Brain not Found') # last value
def test(): rs.status = '1000 Illegal Code'
self.assertRaises(ValueError, test)
self.assertEqual(rs.status, rs.status_line) # last value
self.assertEqual(rs.status_code, 404) # last value
self.assertEqual(rs.status_line, '404 Brain not Found') # last value
def test(): rs.status = '555' # No reason
self.assertRaises(ValueError, test)
self.assertEqual(rs.status, rs.status_line) # last value
self.assertEqual(rs.status_code, 404) # last value
self.assertEqual(rs.status_line, '404 Brain not Found') # last value
def test_content_type(self):
rs = BaseResponse()
rs.content_type = 'test/some'
        self.assertEqual('test/some', rs.headers.get('Content-Type'))
def test_charset(self):
rs = BaseResponse()
self.assertEqual(rs.charset, 'UTF-8')
rs.content_type = 'text/html; charset=latin9'
self.assertEqual(rs.charset, 'latin9')
rs.content_type = 'text/html'
self.assertEqual(rs.charset, 'UTF-8')
def test_set_cookie(self):
r = BaseResponse()
r.set_cookie('name1', 'value', max_age=5)
r.set_cookie('name2', 'value 2', path='/foo')
cookies = [value for name, value in r.headerlist
if name.title() == 'Set-Cookie']
cookies.sort()
self.assertEqual(cookies[0], 'name1=value; Max-Age=5')
self.assertEqual(cookies[1], 'name2="value 2"; Path=/foo')
def test_set_cookie_maxage(self):
import datetime
r = BaseResponse()
r.set_cookie('name1', 'value', max_age=5)
r.set_cookie('name2', 'value', max_age=datetime.timedelta(days=1))
cookies = sorted([value for name, value in r.headerlist
if name.title() == 'Set-Cookie'])
self.assertEqual(cookies[0], 'name1=value; Max-Age=5')
self.assertEqual(cookies[1], 'name2=value; Max-Age=86400')
def test_set_cookie_expires(self):
import datetime
r = BaseResponse()
r.set_cookie('name1', 'value', expires=42)
r.set_cookie('name2', 'value', expires=datetime.datetime(1970,1,1,0,0,43))
cookies = sorted([value for name, value in r.headerlist
if name.title() == 'Set-Cookie'])
self.assertEqual(cookies[0], 'name1=value; expires=Thu, 01 Jan 1970 00:00:42 GMT')
self.assertEqual(cookies[1], 'name2=value; expires=Thu, 01 Jan 1970 00:00:43 GMT')
def test_delete_cookie(self):
response = BaseResponse()
response.set_cookie('name', 'value')
response.delete_cookie('name')
cookies = [value for name, value in response.headerlist
if name.title() == 'Set-Cookie']
self.assertTrue('name=;' in cookies[0])
def test_set_header(self):
response = BaseResponse()
response['x-test'] = 'foo'
headers = [value for name, value in response.headerlist
if name.title() == 'X-Test']
self.assertEqual(['foo'], headers)
self.assertEqual('foo', response['x-test'])
response['X-Test'] = 'bar'
headers = [value for name, value in response.headerlist
if name.title() == 'X-Test']
self.assertEqual(['bar'], headers)
self.assertEqual('bar', response['x-test'])
def test_append_header(self):
response = BaseResponse()
response.set_header('x-test', 'foo')
headers = [value for name, value in response.headerlist
if name.title() == 'X-Test']
self.assertEqual(['foo'], headers)
self.assertEqual('foo', response['x-test'])
response.add_header('X-Test', 'bar')
headers = [value for name, value in response.headerlist
if name.title() == 'X-Test']
self.assertEqual(['foo', 'bar'], headers)
self.assertEqual('bar', response['x-test'])
def test_delete_header(self):
response = BaseResponse()
response['x-test'] = 'foo'
self.assertEqual('foo', response['x-test'])
del response['X-tESt']
self.assertRaises(KeyError, lambda: response['x-test'])
def test_non_string_header(self):
response = BaseResponse()
response['x-test'] = 5
self.assertEqual('5', response['x-test'])
response['x-test'] = None
self.assertEqual('None', response['x-test'])
def test_expires_header(self):
import datetime
response = BaseResponse()
now = datetime.datetime.now()
response.expires = now
def seconds(a, b):
td = max(a,b) - min(a,b)
            return td.days * 24 * 3600 + td.seconds
self.assertEqual(0, seconds(response.expires, now))
now2 = datetime.datetime.utcfromtimestamp(
parse_date(response.headers['Expires']))
self.assertEqual(0, seconds(now, now2))
class TestRedirect(unittest.TestCase):
def assertRedirect(self, target, result, query=None, status=303, **args):
env = {'SERVER_PROTOCOL':'HTTP/1.1'}
for key in list(args):
if key.startswith('wsgi'):
args[key.replace('_', '.', 1)] = args[key]
del args[key]
env.update(args)
request.bind(env)
bottle.response.bind()
try:
bottle.redirect(target, **(query or {}))
except bottle.HTTPResponse:
r = _e()
self.assertEqual(status, r.status_code)
self.assertTrue(r.headers)
self.assertEqual(result, r.headers['Location'])
def test_absolute_path(self):
self.assertRedirect('/', 'http://127.0.0.1/')
self.assertRedirect('/test.html', 'http://127.0.0.1/test.html')
self.assertRedirect('/test.html', 'http://127.0.0.1/test.html',
PATH_INFO='/some/sub/path/')
self.assertRedirect('/test.html', 'http://127.0.0.1/test.html',
PATH_INFO='/some/sub/file.html')
self.assertRedirect('/test.html', 'http://127.0.0.1/test.html',
SCRIPT_NAME='/some/sub/path/')
self.assertRedirect('/foo/test.html', 'http://127.0.0.1/foo/test.html')
self.assertRedirect('/foo/test.html', 'http://127.0.0.1/foo/test.html',
PATH_INFO='/some/sub/file.html')
def test_relative_path(self):
self.assertRedirect('./', 'http://127.0.0.1/')
self.assertRedirect('./test.html', 'http://127.0.0.1/test.html')
self.assertRedirect('./test.html', 'http://127.0.0.1/foo/test.html',
PATH_INFO='/foo/')
self.assertRedirect('./test.html', 'http://127.0.0.1/foo/test.html',
PATH_INFO='/foo/bar.html')
self.assertRedirect('./test.html', 'http://127.0.0.1/foo/test.html',
SCRIPT_NAME='/foo/')
self.assertRedirect('./test.html', 'http://127.0.0.1/foo/bar/test.html',
SCRIPT_NAME='/foo/', PATH_INFO='/bar/baz.html')
self.assertRedirect('./foo/test.html', 'http://127.0.0.1/foo/test.html')
self.assertRedirect('./foo/test.html', 'http://127.0.0.1/bar/foo/test.html',
PATH_INFO='/bar/file.html')
self.assertRedirect('../test.html', 'http://127.0.0.1/test.html',
PATH_INFO='/foo/')
self.assertRedirect('../test.html', 'http://127.0.0.1/foo/test.html',
PATH_INFO='/foo/bar/')
self.assertRedirect('../test.html', 'http://127.0.0.1/test.html',
PATH_INFO='/foo/bar.html')
self.assertRedirect('../test.html', 'http://127.0.0.1/test.html',
SCRIPT_NAME='/foo/')
self.assertRedirect('../test.html', 'http://127.0.0.1/foo/test.html',
SCRIPT_NAME='/foo/', PATH_INFO='/bar/baz.html')
self.assertRedirect('../baz/../test.html', 'http://127.0.0.1/foo/test.html',
PATH_INFO='/foo/bar/')
    def test_scheme(self):
self.assertRedirect('./test.html', 'https://127.0.0.1/test.html',
wsgi_url_scheme='https')
self.assertRedirect('./test.html', 'https://127.0.0.1:80/test.html',
wsgi_url_scheme='https', SERVER_PORT='80')
def test_host_http_1_0(self):
# No HTTP_HOST, just SERVER_NAME and SERVER_PORT.
self.assertRedirect('./test.html', 'http://example.com/test.html',
SERVER_NAME='example.com',
SERVER_PROTOCOL='HTTP/1.0', status=302)
self.assertRedirect('./test.html', 'http://127.0.0.1:81/test.html',
SERVER_PORT='81',
SERVER_PROTOCOL='HTTP/1.0', status=302)
def test_host_http_1_1(self):
self.assertRedirect('./test.html', 'http://example.com/test.html',
HTTP_HOST='example.com')
self.assertRedirect('./test.html', 'http://example.com:81/test.html',
HTTP_HOST='example.com:81')
# Trust HTTP_HOST over SERVER_NAME and PORT.
self.assertRedirect('./test.html', 'http://example.com:81/test.html',
HTTP_HOST='example.com:81', SERVER_NAME='foobar')
self.assertRedirect('./test.html', 'http://example.com:81/test.html',
HTTP_HOST='example.com:81', SERVER_PORT='80')
def test_host_http_proxy(self):
# Trust proxy headers over original header.
self.assertRedirect('./test.html', 'http://example.com/test.html',
HTTP_X_FORWARDED_HOST='example.com',
HTTP_HOST='127.0.0.1')
def test_specialchars(self):
''' The target URL is not quoted automatically. '''
self.assertRedirect('./te st.html',
'http://example.com/a%20a/b%20b/te st.html',
HTTP_HOST='example.com', SCRIPT_NAME='/a a/', PATH_INFO='/b b/')
def test_redirect_preserve_cookies(self):
env = {'SERVER_PROTOCOL':'HTTP/1.1'}
request.bind(env)
bottle.response.bind()
try:
bottle.response.set_cookie('xxx', 'yyy')
bottle.redirect('...')
except bottle.HTTPResponse:
h = [v for (k, v) in _e().headerlist if k == 'Set-Cookie']
self.assertEqual(h, ['xxx=yyy'])
class TestWSGIHeaderDict(unittest.TestCase):
def setUp(self):
self.env = {}
self.headers = bottle.WSGIHeaderDict(self.env)
def test_empty(self):
self.assertEqual(0, len(bottle.WSGIHeaderDict({})))
def test_native(self):
self.env['HTTP_TEST_HEADER'] = 'foobar'
self.assertEqual(self.headers['Test-header'], 'foobar')
def test_bytes(self):
self.env['HTTP_TEST_HEADER'] = tob('foobar')
self.assertEqual(self.headers['Test-Header'], 'foobar')
def test_unicode(self):
self.env['HTTP_TEST_HEADER'] = touni('foobar')
self.assertEqual(self.headers['Test-Header'], 'foobar')
def test_dict(self):
for key in 'foo-bar Foo-Bar foo-Bar FOO-BAR'.split():
self.assertTrue(key not in self.headers)
self.assertEqual(self.headers.get(key), None)
self.assertEqual(self.headers.get(key, 5), 5)
self.assertRaises(KeyError, lambda x: self.headers[x], key)
self.env['HTTP_FOO_BAR'] = 'test'
for key in 'foo-bar Foo-Bar foo-Bar FOO-BAR'.split():
self.assertTrue(key in self.headers)
self.assertEqual(self.headers.get(key), 'test')
self.assertEqual(self.headers.get(key, 5), 'test')
if __name__ == '__main__': #pragma: no cover
unittest.main()
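To make the `_test_chunked` fixtures above easier to read: each chunk is `<hex size>[;extensions]\r\n<data>\r\n`, and the body ends with a zero-size chunk. A minimal standalone decoder sketch of that wire format (illustrative only, not bottle's implementation):
def decode_chunked(raw: bytes) -> bytes:
    # Minimal decoder for the chunked bodies fed to _test_chunked above.
    out, pos = b'', 0
    while True:
        eol = raw.index(b'\r\n', pos)
        size = int(raw[pos:eol].split(b';')[0].strip(), 16)  # size may carry ';ext=val'
        if size == 0:
            return out
        out += raw[eol + 2:eol + 2 + size]
        pos = eol + 2 + size + 2  # skip the data and its trailing CRLF

assert decode_chunked(b'8\r\nxxxxxxxx\r\n0\r\n') == b'xxxxxxxx'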
|
[
"yuan705791627@gmail.com"
] |
yuan705791627@gmail.com
|
c522e96d77e1516ca4f0d34d276e1fdc8960939d
|
a8494ac812d41567ff8427b7314410390195d7f1
|
/manager/urls.py
|
f92cf9bc75933f3504cd0435cc7dc0e0c03a5466
|
[] |
no_license
|
iikyara/rktabot
|
527a05585e505d9c31398003ad87a78795a7d922
|
baf44732abb9aa1ac3da1256c129c4fd02ec83fc
|
refs/heads/master
| 2023-02-03T02:35:51.113283
| 2021-06-17T09:30:23
| 2021-06-17T09:30:23
| 87,445,624
| 0
| 0
| null | 2023-02-02T02:28:40
| 2017-04-06T15:31:37
|
Python
|
UTF-8
|
Python
| false
| false
| 304
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.post_list),
url(r'^post/new/$', views.post_new, name='post_new'),
url(r'^stop', views.post_stop, name='stop'),
url(r'^post/new/edit/$', views.post_edit_detail, name='edit_detail'),
]
|
[
"noreply@github.com"
] |
iikyara.noreply@github.com
|
5d11cdebf01ea72aefd1cfbefcfed022931860e5
|
d9bdbfc83eaf5b1f002df50493aacd1ed122b3d7
|
/absen.py
|
d3c437eb9296843a620851306951692d8613bfee
|
[
"MIT"
] |
permissive
|
SultanKs4/cheabsi
|
894749c1803204f4374bcd98e17799ec1ad27a4e
|
bfa233586e5a028bd4e021bcd55aa631eec8efe9
|
refs/heads/master
| 2023-05-18T22:03:33.073868
| 2020-06-03T12:35:28
| 2020-06-03T12:35:28
| 266,580,919
| 3
| 1
|
MIT
| 2021-06-02T01:54:27
| 2020-05-24T16:30:02
|
Python
|
UTF-8
|
Python
| false
| false
| 4,421
|
py
|
import sys
import os
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
class absen(object):
def __init__(self, username, password):
self.options = Options()
self.options.headless = True
self.options.add_argument("--window-size=1366x768")
self.driver = webdriver.Firefox(options=self.options)
self.username = username
self.password = password
def wait(self, xpath, message, timeout=15):
try:
element_present = EC.presence_of_element_located(
(By.XPATH, xpath))
WebDriverWait(self.driver, timeout).until(element_present)
except TimeoutException:
print("Timed out waiting for {} loaded".format(message))
self.driver.quit()
sys.exit(1)
def login(self):
print("Login Start")
self.driver.get('http://siakad.polinema.ac.id/')
username_input = '//*[@id="username"]'
password_input = '//*[@id="password"]'
login_submit = '//*[@id="form_login"]/div[9]/button'
self.wait(xpath=username_input, message="username input")
self.driver.find_element_by_xpath(
username_input).send_keys(self.username)
self.driver.find_element_by_xpath(
password_input).send_keys(self.password)
self.driver.find_element_by_xpath(login_submit).click()
home_presensi = '//*[@id="gm_akademik"]/ul/li[2]/a'
self.wait(xpath=home_presensi, message="home page")
print("Login Completed")
def presensi(self):
print("Presensi Start")
p = []
for i in range(14):
p.append(
'//*[@id="form-action-wrapper-tr_absensi"]/div[1]/div[1]/div/div/div[2]/table/tbody/tr[{}]/td[3]/div/span/input'.format(i+1))
presensi_submit = '/html/body/div[3]/div[2]/div/div[2]/div/div/div/div/div[2]/div[3]/form/div[1]/div[2]/div/div/button'
self.driver.get(
'http://siakad.polinema.ac.id/mahasiswa/tr_absensi/add')
self.wait(xpath=p[0], message="question")
for i in range(14):
self.driver.find_element_by_xpath(p[i]).click()
self.driver.find_element_by_xpath(presensi_submit).click()
address_warning = '//*[@id="block_alamat"]/div'
self.wait(xpath=address_warning, message="address")
print("Presensi Completed")
def create_log(self):
print("Create Log Start")
self.driver.get(
'http://siakad.polinema.ac.id/mahasiswa/tr_absensi/index')
latest_absen = '/html/body/div[3]/table/tbody/tr[1]/td[1]'
view_detail = '/html/body/div[3]/table/tbody/tr[1]/td[2]/a'
self.wait(xpath=latest_absen, message="latest absen")
value_latest = self.driver.find_element_by_xpath(latest_absen).text
self.driver.find_element_by_xpath(view_detail).click()
p_last = '/html/body/div[3]/div[2]/div/div[2]/div/div/div/div/div[2]/div[3]/form/div[1]/div/div/div/div[2]/table/tbody/tr[14]'
self.wait(xpath=p_last, message="last question")
element = self.driver.find_element_by_xpath(p_last)
self.driver.execute_script("window.scrollBy(0, 210)")
action = ActionChains(self.driver)
action.move_to_element(element).perform()
now = datetime.now()
path_ss = os.path.join(os.getcwd(), 'log', 'screenshot')
filename = 'Capture_{0}_{1}.png'.format(
now.strftime('%d-%m-%Y_%H-%M-%S'), now.astimezone().tzinfo)
fullname = os.path.join(path_ss, filename)
self.driver.get_screenshot_as_file(fullname)
self.driver.quit()
file = open(os.path.join(os.getcwd(), 'log', 'log_absen.txt'), "a")
file.write(
"Job for date : {0} complete at {1}_{2}, please double check at log screenshot, filename : {3} - {4} \n".format(value_latest, now.strftime('%d-%m-%Y_%H-%M-%S'), now.astimezone().tzinfo, filename, self.username))
file.close()
print("Job done! see log for further information")
|
[
"sultangendut@gmail.com"
] |
sultangendut@gmail.com
|
d21abe3dd96fd14b8f82dcc31abaaedb885b16cf
|
375bb378bc995ff4acb99604c52c723e49a0e327
|
/DBTools/python/osusub_cfg.py
|
0cae36413f93e050a858a1edb3e83cdabd8f3623
|
[] |
no_license
|
qpythonn/OSUT3Analysis
|
a9f1b77b8a0247bc27901217e8abfa1d266de56a
|
b68c2acf8191b9a27d4ad5135f53cbe9baf3ea68
|
refs/heads/master
| 2020-06-02T07:25:21.600800
| 2014-07-10T11:36:57
| 2014-07-10T11:36:57
| 21,691,511
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 483
|
py
|
import sys
import math
if len (sys.argv) == 7 and sys.argv[2] == "True":
nJobs = float (sys.argv[3])
jobNumber = int (sys.argv[4])
if int (sys.argv[3]) != 0 and sys.argv[5] != "NULL":
import runList as runList
filesPerJob = int (math.ceil (len (runList.runList) / nJobs))
runList = runList.runList[(jobNumber * filesPerJob):(jobNumber * filesPerJob + filesPerJob)]
dataset = sys.argv[5]
datasetLabel = sys.argv[6]
batchMode = True
else:
batchMode = False
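For clarity, a worked example of the batching arithmetic above, with illustrative values standing in for the command-line arguments:
import math

runs = list(range(10))                           # stand-in for runList.runList
nJobs = 4.0
filesPerJob = int(math.ceil(len(runs) / nJobs))  # ceil(10 / 4) = 3
chunks = [runs[j * filesPerJob:j * filesPerJob + filesPerJob] for j in range(int(nJobs))]
# chunks == [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] -- the last job absorbs the remainder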
|
[
"lantonel@interactive-0-1.local"
] |
lantonel@interactive-0-1.local
|
75bea3c90138d862e8c85eb6544cbe5f50354937
|
a1c20ec292350a4c8f2164ba21715414f8a77d19
|
/Udemy/Python3Bootcamp/Testing/assertions.py
|
bbb4373d32b6996791166c6477190eec7b94384f
|
[] |
no_license
|
nowacki69/Python
|
2ae621b098241a614836a50cb1102094bf8e689f
|
e5325562801624e43b3975d9f246af25517be55b
|
refs/heads/master
| 2021-08-22T21:30:39.534186
| 2019-09-03T01:21:11
| 2019-09-03T01:21:11
| 168,528,687
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
def add_positive_numbers(x, y):
assert x > 0 and y > 0, "Both numbers must be positive"
return x + y
print(add_positive_numbers(1, 1))
print(add_positive_numbers(1, -1))  # raises AssertionError: Both numbers must be positive
|
[
"walter.nowacki@aa.com"
] |
walter.nowacki@aa.com
|
9fe9bafdb628168eab42a34734a1421951e279fe
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/app/merge/listing/ListingMergeManager.pyi
|
6ad157e75b8b94f99d491f15dc82c81be30f5e06
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,865
|
pyi
|
from typing import List, overload
import ghidra.app.merge
import ghidra.app.merge.listing
import ghidra.program.model.address
import ghidra.program.model.listing
import ghidra.util.task
import java.lang
class ListingMergeManager(object, ghidra.app.merge.MergeResolver, ghidra.app.merge.listing.ListingMergeConstants):
"""
Manages program listing changes and conflicts between the latest versioned
program (LATEST) and the modified program (MY) being checked into version control.
Listing changes include:
bytes
code units [instructions and data]
equates
functions
symbols
references [memory, stack, and external]
comments [plate, pre, end-of-line, repeatable, and post]
properties
bookmarks
"""
ASK_USER: int = 0
CANCELED: int = -1
CHECKED_OUT_BUTTON_NAME: unicode = u'CheckedOutVersionRB'
CHECKED_OUT_CHECK_BOX_NAME: unicode = u'CheckedOutVersionCheckBox'
CHECKED_OUT_LABEL_NAME: unicode = u'CheckedOutVersionLabel'
CHECKED_OUT_LIST_BUTTON_NAME: unicode = u'CheckedOutListRB'
INFO_ROW: int = 0
KEEP_ALL: int = 7
KEEP_BOTH: int = 6
KEEP_LATEST: int = 2
KEEP_MY: int = 4
KEEP_ORIGINAL: int = 1
KEEP_RESULT: int = 8
LATEST_BUTTON_NAME: unicode = u'LatestVersionRB'
LATEST_CHECK_BOX_NAME: unicode = u'LatestVersionCheckBox'
LATEST_LABEL_NAME: unicode = u'LatestVersionLabel'
LATEST_LIST_BUTTON_NAME: unicode = u'LatestListRB'
LATEST_TITLE: unicode = u'Latest'
MY_TITLE: unicode = u'Checked Out'
ORIGINAL_BUTTON_NAME: unicode = u'OriginalVersionRB'
ORIGINAL_CHECK_BOX_NAME: unicode = u'OriginalVersionCheckBox'
ORIGINAL_LABEL_NAME: unicode = u'OriginalVersionLabel'
ORIGINAL_TITLE: unicode = u'Original'
REMOVE_CHECKED_OUT_BUTTON_NAME: unicode = u'RemoveCheckedOutRB'
REMOVE_LATEST: int = 8
REMOVE_LATEST_BUTTON_NAME: unicode = u'RemoveLatestRB'
REMOVE_MY: int = 32
RENAME_CHECKED_OUT_BUTTON_NAME: unicode = u'RenameCheckedOutRB'
RENAME_LATEST: int = 16
RENAME_LATEST_BUTTON_NAME: unicode = u'RenameLatestRB'
RENAME_MY: int = 64
RESULT_BUTTON_NAME: unicode = u'ResultVersionRB'
RESULT_TITLE: unicode = u'Result'
TRUNCATE_LENGTH: int = 160
def __init__(self, mergeManager: ghidra.app.merge.ProgramMultiUserMergeManager, resultPgm: ghidra.program.model.listing.Program, originalPgm: ghidra.program.model.listing.Program, latestPgm: ghidra.program.model.listing.Program, myPgm: ghidra.program.model.listing.Program, latestChanges: ghidra.program.model.listing.ProgramChangeSet, myChanges: ghidra.program.model.listing.ProgramChangeSet):
"""
Manages listing changes and conflicts between the latest versioned
program and the modified program being checked into version control.
@param mergeManager the top level merge manager for merging a program version.
@param resultPgm the program to be updated with the result of the merge.
This is the program that will actually get checked in.
@param originalPgm the program that was checked out.
@param latestPgm the latest checked-in version of the program.
@param myPgm the program requesting to be checked in.
@param latestChanges the address set of changes between original and latest versioned program.
@param myChanges the address set of changes between original and my modified program.
"""
...
def apply(self) -> None: ...
def cancel(self) -> None: ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getDescription(self) -> unicode: ...
def getFunctionTagListingMerger(self) -> ghidra.app.merge.listing.FunctionTagListingMerger: ...
def getMergedCodeUnits(self) -> ghidra.program.model.address.AddressSet:
"""
Gets the address set for the code units that were changed in the result
by the merge.
@return the address set indicating the code units that changed in the
result program due to the merge
"""
...
def getName(self) -> unicode: ...
def getPhases(self) -> List[unicode]:
"""
This method returns all of the phases of the Listing Merge Manager that will be
displayed in the Program Merge Manager.
The first item is a phase indicator for the Listing Phase as a whole and
the others are for each sub-phase of the Listing.
"""
...
def hashCode(self) -> int: ...
def initMergeInfo(self) -> None:
"""
Sets up the change address sets, Diffs between the various program versions,
and Merges from various versions to the resulting program.
"""
...
def merge(self, monitor: ghidra.util.task.TaskMonitor) -> None: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def setShowListingPanel(self, showListingPanel: bool) -> None:
"""
True signals to show the listing panel (default); false signals to show an empty listing (faster)
@param showListingPanel
"""
...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def description(self) -> unicode: ...
@property
def functionTagListingMerger(self) -> ghidra.app.merge.listing.FunctionTagListingMerger: ...
@property
def mergedCodeUnits(self) -> ghidra.program.model.address.AddressSet: ...
@property
def name(self) -> unicode: ...
@property
def phases(self) -> List[unicode]: ...
@property
def showListingPanel(self) -> None: ... # No getter available.
@showListingPanel.setter
def showListingPanel(self, value: bool) -> None: ...
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
479580ebbcb16174a1b8bd5e5bef1202a3258bbf
|
4635d49ec28d80e1f5324a458cb2c32aa87d7eaf
|
/calaccess_processed/special_elections.py
|
3ea38f0c10f2757333a6f869d7bd0de8913d3f18
|
[
"MIT"
] |
permissive
|
chrisamurao/django-calaccess-processed-data
|
0355f57e7a1fae87870417e80b10355ccf9afbdf
|
9fe7f89ba79ae590d565a19724034ae44021f2a3
|
refs/heads/master
| 2021-01-21T14:17:18.650619
| 2017-06-23T23:31:51
| 2017-06-23T23:31:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,362
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Includes a mapping of special election names to dates.
"""
# This list was compiled from CAL-ACCESS:
# http://cal-access.sos.ca.gov/Campaign/Candidates/
# And also the documents available from the Secretary of State site:
# * http://www.sos.ca.gov/elections/prior-elections/special-elections/
# * http://elections.cdn.sos.ca.gov/special-elections/pdf/special-elections-history.pdf
names_to_dates = (
('2016 SPECIAL ELECTION (ASSEMBLY 31)', '2016-4-5'),
('2015 SPECIAL RUNOFF (STATE SENATE 07)', '2015-5-19'),
('2015 SPECIAL ELECTION (STATE SENATE 07)', '2015-3-17'),
('2015 SPECIAL ELECTION (STATE SENATE 21)', '2015-3-17'),
('2015 SPECIAL ELECTION (STATE SENATE 37)', '2015-3-17'),
('2014 SPECIAL ELECTION (STATE SENATE 35)', '2014-12-9'),
('2014 SPECIAL ELECTION (STATE SENATE 23)', '2014-3-25'),
('2013 SPECIAL ELECTION (ASSEMBLY 54)', '2013-12-3'),
('2013 SPECIAL RUNOFF (ASSEMBLY 45)', '2013-11-19'),
('2013 SPECIAL ELECTION (ASSEMBLY 45)', '2013-9-17'),
('2013 SPECIAL RUNOFF (ASSEMBLY 52)', '2013-9-24'),
('2013 SPECIAL ELECTION (ASSEMBLY 52)', '2013-7-23'),
('2013 SPECIAL ELECTION (STATE SENATE 26)', '2013-9-17'),
('2013 SPECIAL RUNOFF (STATE SENATE 16)', '2013-7-23'),
('2013 SPECIAL ELECTION (STATE SENATE 16)', '2013-5-21'),
('2013 SPECIAL ELECTION (ASSEMBLY 80)', '2013-5-21'),
('2013 SPECIAL RUNOFF (STATE SENATE 32)', '2013-5-14'),
('2013 SPECIAL ELECTION (STATE SENATE 32)', '2013-3-12'),
('2013 SPECIAL ELECTION (STATE SENATE 40)', '2013-3-12'),
('2013 SPECIAL ELECTION (STATE SENATE 04)', '2013-1-8'),
('2012 SPECIAL ELECTION (STATE SENATE 04)', '2012-11-6'),
('2011 SPECIAL RUNOFF (ASSEMBLY 04)', '2011-5-3'),
('2011 SPECIAL ELECTION (ASSEMBLY 04)', '2011-3-8'),
('2011 SPECIAL ELECTION (STATE SENATE 28)', '2011-2-15'),
('2011 SPECIAL ELECTION (STATE SENATE 17)', '2011-2-15'),
('2011 SPECIAL RUNOFF (STATE SENATE 01)', '2011-1-4'),
('2010 SPECIAL ELECTION (STATE SENATE 01)', '2010-11-2'),
('2010 SPECIAL RUNOFF (STATE SENATE 15)', '2010-8-17'),
('2010 SPECIAL ELECTION (STATE SENATE 15)', '2010-6-22'),
('2010 SPECIAL RUNOFF (STATE SENATE 37)', '2010-6-8'),
('2010 SPECIAL ELECTION (STATE SENATE 37)', '2010-4-13'),
('2010 SPECIAL RUNOFF (ASSEMBLY 43)', '2010-6-8'),
('2010 SPECIAL ELECTION (ASSEMBLY 43)', '2010-4-13'),
('2010 SPECIAL RUNOFF (ASSEMBLY 72)', '2010-1-12'),
('2009 SPECIAL ELECTION (ASSEMBLY 72)', '2009-11-17'),
('2009 SPECIAL ELECTION (ASSEMBLY 51)', '2009-9-1'),
('2009 SPECIAL RUNOFF (STATE SENATE 26)', '2009-5-19'),
('2009 SPECIAL ELECTION (STATE SENATE 26)', '2009-3-24'),
('2008 SPECIAL RUNOFF (ASSEMBLY 55)', '2008-2-5'),
('2007 SPECIAL ELECTION (ASSEMBLY 55)', '2007-12-11'),
('2007 SPECIAL ELECTION (ASSEMBLY 39)', '2007-5-15'),
('2006 SPECIAL RUNOFF (STATE SENATE 35)', '2006-6-6'),
('2006 SPECIAL ELECTION (STATE SENATE 35)', '2006-4-11'),
('2005 SPECIAL ELECTION (ASSEMBLY 53)', '2005-9-13'),
('2003 SPECIAL ELECTION (GOVERNOR)', '2003-10-7'),
('2001 SPECIAL ELECTION (ASSEMBLY 49)', '2001-5-15'),
('2001 SPECIAL RUNOFF (ASSEMBLY 65)', '2001-4-3'),
('2001 SPECIAL ELECTION (ASSEMBLY 65)', '2001-2-6'),
('2001 SPECIAL ELECTION (STATE SENATE 24)', '2001-3-26'),
)
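A small sketch of consuming this mapping; note the date strings are not zero-padded, which `datetime.strptime` with `%Y-%m-%d` accepts:
from datetime import datetime

# Hypothetical lookup built from names_to_dates above.
lookup = dict(names_to_dates)
d = datetime.strptime(lookup['2003 SPECIAL ELECTION (GOVERNOR)'], '%Y-%m-%d').date()
# d == datetime.date(2003, 10, 7)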
|
[
"gordon.je@gmail.com"
] |
gordon.je@gmail.com
|